mindspore 2.1.0-cp38-cp38-manylinux1_x86_64.whl → 2.2.0-cp38-cp38-manylinux1_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +4 -1
- mindspore/_akg/akg/build_module.py +5 -6
- mindspore/_akg/akg/composite/build_module.py +49 -16
- mindspore/_akg/akg/composite/split_stitch.py +10 -11
- mindspore/_akg/akg/ms/info_version_adapt.py +67 -1
- mindspore/_akg/akg/tvm/api.py +4 -3
- mindspore/_akg/akg/tvm/autotvm/__init__.py +1 -2
- mindspore/_akg/akg/tvm/autotvm/graph_tuner/base_graph_tuner.py +1 -5
- mindspore/_akg/akg/tvm/autotvm/measure/__init__.py +1 -1
- mindspore/_akg/akg/tvm/autotvm/measure/measure.py +1 -10
- mindspore/_akg/akg/tvm/autotvm/measure/measure_methods.py +1 -372
- mindspore/_akg/akg/tvm/build_module.py +16 -1
- mindspore/_akg/akg/tvm/contrib/graph_runtime.py +0 -53
- mindspore/_akg/akg/tvm/hybrid/parser.py +7 -6
- mindspore/_akg/akg/tvm/ir_builder.py +1 -1
- mindspore/_akg/akg/tvm/module.py +1 -2
- mindspore/_akg/akg/tvm/stmt.py +2 -2
- mindspore/_akg/akg/utils/composite_op_helper.py +9 -10
- mindspore/_akg/akg/utils/kernel_exec.py +58 -260
- mindspore/_akg/akg/utils/result_analysis.py +4 -24
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +198 -0
- mindspore/_c_dataengine.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -1
- mindspore/_checkparam.py +26 -32
- mindspore/_extends/graph_kernel/__init__.py +0 -1
- mindspore/_extends/graph_kernel/model/model_builder.py +9 -50
- mindspore/_extends/graph_kernel/splitter.py +1 -9
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +122 -15
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +2 -2
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +4 -2
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +2 -2
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +4 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +1 -1
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -1
- mindspore/_extends/parse/__init__.py +12 -15
- mindspore/_extends/parse/namespace.py +7 -33
- mindspore/_extends/parse/parser.py +61 -71
- mindspore/_extends/parse/resources.py +1 -1
- mindspore/_extends/parse/standard_method.py +72 -95
- mindspore/_extends/parse/trope.py +1 -1
- mindspore/_extends/remote/kernel_build_server.py +24 -7
- mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
- mindspore/_install_custom.py +43 -0
- mindspore/_mindspore_offline_debug.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/amp.py +47 -11
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/boost.py +1 -8
- mindspore/boost/boost_cell_wrapper.py +3 -2
- mindspore/boost/grad_accumulation.py +1 -1
- mindspore/boost/group_loss_scale_manager.py +8 -7
- mindspore/common/__init__.py +5 -3
- mindspore/common/_jit_fallback_utils.py +6 -0
- mindspore/common/_register_for_adapter.py +2 -0
- mindspore/common/_register_for_tensor.py +2 -2
- mindspore/common/_stub_tensor.py +13 -0
- mindspore/common/_utils.py +13 -0
- mindspore/common/api.py +173 -258
- mindspore/common/auto_dynamic_shape.py +498 -0
- mindspore/common/dtype.py +18 -11
- mindspore/common/dump.py +6 -4
- mindspore/common/initializer.py +14 -14
- mindspore/common/jit_config.py +33 -15
- mindspore/common/lazy_inline.py +126 -7
- mindspore/common/mindir_util.py +101 -0
- mindspore/common/parameter.py +51 -41
- mindspore/common/seed.py +4 -4
- mindspore/common/sparse_tensor.py +13 -14
- mindspore/common/tensor.py +240 -145
- mindspore/communication/__init__.py +7 -4
- mindspore/communication/_comm_helper.py +83 -4
- mindspore/communication/management.py +152 -84
- mindspore/config/op_info.config +13 -2
- mindspore/config/super_bar_config.json +4 -2
- mindspore/context.py +143 -59
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +2 -2
- mindspore/dataset/audio/transforms.py +52 -52
- mindspore/dataset/callback/ds_callback.py +16 -2
- mindspore/dataset/core/config.py +68 -51
- mindspore/dataset/engine/cache_client.py +28 -5
- mindspore/dataset/engine/datasets.py +250 -112
- mindspore/dataset/engine/datasets_audio.py +43 -211
- mindspore/dataset/engine/datasets_standard_format.py +11 -35
- mindspore/dataset/engine/datasets_text.py +43 -67
- mindspore/dataset/engine/datasets_user_defined.py +86 -100
- mindspore/dataset/engine/datasets_vision.py +219 -1029
- mindspore/dataset/engine/iterators.py +11 -4
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +4 -0
- mindspore/dataset/engine/obs/util.py +3 -0
- mindspore/dataset/engine/samplers.py +1 -1
- mindspore/dataset/engine/validators.py +19 -5
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +101 -127
- mindspore/dataset/text/utils.py +205 -138
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/py_transforms_util.py +40 -12
- mindspore/dataset/transforms/transforms.py +95 -40
- mindspore/dataset/utils/browse_dataset.py +8 -2
- mindspore/dataset/utils/line_reader.py +17 -19
- mindspore/dataset/vision/__init__.py +3 -3
- mindspore/dataset/vision/c_transforms.py +6 -3
- mindspore/dataset/vision/transforms.py +409 -287
- mindspore/dataset/vision/utils.py +13 -14
- mindspore/dataset/vision/validators.py +11 -1
- mindspore/experimental/map_parameter.py +14 -0
- mindspore/{nn/optim_ex → experimental/optim}/__init__.py +30 -29
- mindspore/{nn/optim_ex → experimental/optim}/adam.py +59 -66
- mindspore/{nn/optim_ex → experimental/optim}/adamw.py +181 -203
- mindspore/experimental/optim/lr_scheduler.py +1427 -0
- mindspore/{nn/optim_ex → experimental/optim}/optimizer.py +252 -259
- mindspore/{nn/optim_ex → experimental/optim}/sgd.py +147 -152
- mindspore/gen_ops.py +273 -0
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/data_type.h +2 -1
- mindspore/include/api/graph.h +0 -15
- mindspore/include/api/kernel.h +2 -0
- mindspore/include/api/kernel_api.h +37 -12
- mindspore/include/api/model.h +0 -14
- mindspore/include/api/types.h +37 -4
- mindspore/include/c_api/ms/abstract.h +67 -0
- mindspore/include/c_api/ms/attribute.h +197 -0
- mindspore/include/c_api/ms/base/handle_types.h +43 -0
- mindspore/include/c_api/ms/base/macros.h +32 -0
- mindspore/include/c_api/ms/base/status.h +33 -0
- mindspore/include/c_api/ms/base/types.h +282 -0
- mindspore/include/c_api/ms/context.h +102 -0
- mindspore/include/c_api/ms/graph.h +160 -0
- mindspore/include/c_api/ms/node.h +606 -0
- mindspore/include/c_api/ms/tensor.h +161 -0
- mindspore/include/c_api/ms/value.h +84 -0
- mindspore/include/dataset/constants.h +6 -5
- mindspore/include/dataset/execute.h +23 -13
- mindspore/include/dataset/text.h +26 -26
- mindspore/include/dataset/transforms.h +13 -13
- mindspore/include/dataset/vision.h +60 -60
- mindspore/include/dataset/vision_ascend.h +5 -6
- mindspore/include/dataset/vision_lite.h +17 -17
- mindspore/include/mindapi/base/type_id.h +1 -0
- mindspore/include/mindapi/base/types.h +1 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libjemalloc.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +9000 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/cpu/libakg.so +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
- mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
- mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
- mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
- mindspore/nn/__init__.py +0 -2
- mindspore/nn/cell.py +316 -74
- mindspore/nn/dynamic_lr.py +21 -21
- mindspore/nn/layer/activation.py +21 -28
- mindspore/nn/layer/basic.py +15 -13
- mindspore/nn/layer/channel_shuffle.py +1 -1
- mindspore/nn/layer/container.py +271 -9
- mindspore/nn/layer/conv.py +310 -207
- mindspore/nn/layer/dense.py +8 -5
- mindspore/nn/layer/embedding.py +33 -27
- mindspore/nn/layer/flash_attention.py +82 -41
- mindspore/nn/layer/image.py +8 -6
- mindspore/nn/layer/math.py +13 -18
- mindspore/nn/layer/normalization.py +107 -66
- mindspore/nn/layer/padding.py +1 -1
- mindspore/nn/layer/pooling.py +131 -109
- mindspore/nn/layer/rnn_cells.py +22 -17
- mindspore/nn/layer/rnns.py +13 -16
- mindspore/nn/layer/thor_layer.py +1 -1
- mindspore/nn/layer/transformer.py +221 -154
- mindspore/nn/learning_rate_schedule.py +9 -1
- mindspore/nn/loss/loss.py +235 -174
- mindspore/nn/optim/ada_grad.py +2 -1
- mindspore/nn/optim/adadelta.py +1 -0
- mindspore/nn/optim/adafactor.py +2 -1
- mindspore/nn/optim/adam.py +7 -4
- mindspore/nn/optim/adamax.py +3 -2
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +2 -3
- mindspore/nn/optim/ftrl.py +6 -5
- mindspore/nn/optim/lamb.py +7 -4
- mindspore/nn/optim/lars.py +1 -1
- mindspore/nn/optim/lazyadam.py +5 -3
- mindspore/nn/optim/momentum.py +2 -1
- mindspore/nn/optim/optimizer.py +53 -4
- mindspore/nn/optim/proximal_ada_grad.py +3 -4
- mindspore/nn/optim/rmsprop.py +4 -3
- mindspore/nn/optim/rprop.py +23 -12
- mindspore/nn/optim/sgd.py +26 -11
- mindspore/nn/optim/thor.py +9 -7
- mindspore/nn/probability/bijector/bijector.py +5 -5
- mindspore/nn/probability/bijector/power_transform.py +27 -27
- mindspore/nn/probability/bijector/softplus.py +3 -3
- mindspore/nn/probability/distribution/_utils/custom_ops.py +3 -3
- mindspore/nn/probability/distribution/bernoulli.py +5 -5
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +7 -7
- mindspore/nn/probability/distribution/cauchy.py +0 -1
- mindspore/nn/probability/distribution/distribution.py +3 -3
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +4 -4
- mindspore/nn/probability/distribution/gumbel.py +4 -4
- mindspore/nn/probability/distribution/log_normal.py +2 -2
- mindspore/nn/probability/distribution/logistic.py +2 -2
- mindspore/nn/probability/distribution/poisson.py +4 -4
- mindspore/nn/probability/distribution/transformed_distribution.py +3 -3
- mindspore/nn/probability/distribution/uniform.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +78 -34
- mindspore/nn/wrap/grad_reducer.py +8 -5
- mindspore/nn/wrap/loss_scale.py +105 -42
- mindspore/numpy/array_creations.py +1 -2
- mindspore/numpy/array_ops.py +3 -2
- mindspore/offline_debug/convert_async.py +2 -2
- mindspore/ops/_grad_experimental/__init__.py +0 -5
- mindspore/ops/_grad_experimental/grad_array_ops.py +1 -2
- mindspore/ops/_grad_experimental/grad_comm_ops.py +15 -2
- mindspore/ops/_grad_experimental/grad_debug_ops.py +0 -37
- mindspore/ops/_grad_experimental/grad_implementations.py +10 -0
- mindspore/ops/_grad_experimental/grad_inner_ops.py +2 -216
- mindspore/ops/_grad_experimental/grad_math_ops.py +0 -181
- mindspore/ops/_grad_experimental/grad_sparse.py +15 -0
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +165 -109
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +144 -86
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +172 -187
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +51 -57
- mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +6 -17
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +14 -2
- mindspore/ops/_op_impl/aicpu/bias_add_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
- mindspore/ops/_op_impl/aicpu/eps.py +32 -0
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -2
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +6 -3
- mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +3 -3
- mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +15 -7
- mindspore/ops/_op_impl/aicpu/random_categorical.py +39 -19
- mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +5 -2
- mindspore/ops/_op_impl/aicpu/random_poisson.py +103 -52
- mindspore/ops/_op_impl/aicpu/random_shuffle.py +17 -15
- mindspore/ops/_op_impl/aicpu/{sparseaddmm.py → sparse_addmm.py} +2 -2
- mindspore/ops/_op_impl/aicpu/{sparsesparsemaximum.py → sparse_sparse_maximum.py} +4 -4
- mindspore/ops/_op_impl/aicpu/standard_laplace.py +5 -5
- mindspore/ops/_op_impl/aicpu/standard_normal.py +5 -5
- mindspore/ops/_op_impl/aicpu/truncated_normal.py +9 -7
- mindspore/ops/_op_impl/aicpu/uniform.py +5 -3
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +8 -4
- mindspore/ops/_op_impl/aicpu/uniform_int.py +5 -5
- mindspore/ops/_op_impl/aicpu/uniform_real.py +4 -4
- mindspore/ops/_op_impl/tbe/__init__.py +4 -4
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +7 -3
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +2 -0
- mindspore/ops/_primitive_cache.py +1 -1
- mindspore/ops/_tracefunc.py +45 -13
- mindspore/ops/_utils/utils.py +4 -1
- mindspore/ops/_vmap/vmap_array_ops.py +3 -3
- mindspore/ops/_vmap/vmap_base.py +3 -3
- mindspore/ops/_vmap/vmap_convolution_ops.py +1 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +6 -4
- mindspore/ops/_vmap/vmap_math_ops.py +5 -2
- mindspore/ops/_vmap/vmap_nn_ops.py +61 -7
- mindspore/ops/arg_dtype_cast.py +54 -0
- mindspore/ops/composite/base.py +37 -10
- mindspore/ops/composite/math_ops.py +5 -4
- mindspore/ops/composite/multitype_ops/_compile_utils.py +273 -72
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +16 -9
- mindspore/ops/composite/multitype_ops/add_impl.py +43 -4
- mindspore/ops/composite/multitype_ops/getitem_impl.py +40 -2
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +9 -0
- mindspore/ops/deprecated.py +304 -0
- mindspore/ops/function/__init__.py +4 -1
- mindspore/ops/function/array_func.py +167 -189
- mindspore/ops/function/clip_func.py +81 -13
- mindspore/ops/function/debug_func.py +1 -1
- mindspore/ops/function/grad/grad_func.py +18 -8
- mindspore/ops/function/image_func.py +10 -4
- mindspore/ops/function/linalg_func.py +5 -5
- mindspore/ops/function/math_func.py +575 -386
- mindspore/ops/function/nn_func.py +470 -251
- mindspore/ops/function/random_func.py +86 -56
- mindspore/ops/function/sparse_func.py +1 -1
- mindspore/ops/function/sparse_unary_func.py +14 -12
- mindspore/ops/function/vmap_func.py +6 -5
- mindspore/ops/functional.py +15 -10
- mindspore/ops/op_info_register.py +235 -19
- mindspore/ops/operations/__init__.py +25 -17
- mindspore/ops/operations/_grad_ops.py +52 -7
- mindspore/ops/operations/_inner_ops.py +213 -12
- mindspore/ops/operations/_quant_ops.py +4 -8
- mindspore/ops/operations/_sequence_ops.py +42 -0
- mindspore/ops/operations/array_ops.py +64 -280
- mindspore/ops/operations/comm_ops.py +105 -57
- mindspore/ops/operations/custom_ops.py +10 -3
- mindspore/ops/operations/debug_ops.py +8 -4
- mindspore/ops/operations/image_ops.py +18 -12
- mindspore/ops/operations/math_ops.py +185 -138
- mindspore/ops/operations/nn_ops.py +716 -492
- mindspore/ops/operations/other_ops.py +0 -22
- mindspore/ops/operations/random_ops.py +53 -111
- mindspore/ops/operations/sparse_ops.py +3 -1
- mindspore/ops/primitive.py +24 -18
- mindspore/parallel/_auto_parallel_context.py +68 -8
- mindspore/parallel/_cost_model_context.py +2 -2
- mindspore/parallel/_offload_context.py +17 -3
- mindspore/parallel/_parallel_serialization.py +2 -2
- mindspore/parallel/_ps_context.py +12 -0
- mindspore/parallel/_tensor.py +14 -12
- mindspore/parallel/_transformer/layers.py +5 -3
- mindspore/parallel/_transformer/loss.py +1 -0
- mindspore/parallel/_transformer/moe.py +2 -2
- mindspore/parallel/_transformer/op_parallel_config.py +12 -1
- mindspore/parallel/_transformer/transformer.py +23 -3
- mindspore/parallel/_utils.py +11 -7
- mindspore/parallel/algo_parameter_config.py +85 -5
- mindspore/parallel/checkpoint_transform.py +6 -10
- mindspore/parallel/shard.py +4 -4
- mindspore/profiler/common/struct_type.py +3 -3
- mindspore/profiler/common/util.py +3 -2
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/profiler/parser/aicpu_data_parser.py +5 -3
- mindspore/profiler/parser/ascend_flops_generator.py +2 -2
- mindspore/profiler/parser/ascend_fpbp_generator.py +1 -1
- mindspore/profiler/parser/ascend_hccl_generator.py +17 -12
- mindspore/profiler/parser/ascend_msprof_exporter.py +104 -252
- mindspore/profiler/parser/ascend_msprof_generator.py +8 -8
- mindspore/profiler/parser/ascend_op_generator.py +5 -5
- mindspore/profiler/parser/ascend_steptrace_generator.py +6 -4
- mindspore/profiler/parser/ascend_timeline_generator.py +9 -6
- mindspore/profiler/parser/base_timeline_generator.py +9 -7
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +14 -10
- mindspore/profiler/parser/flops_parser.py +15 -11
- mindspore/profiler/parser/framework_parser.py +37 -21
- mindspore/profiler/parser/hccl_parser.py +16 -12
- mindspore/profiler/parser/integrator.py +22 -11
- mindspore/profiler/parser/memory_usage_parser.py +2 -2
- mindspore/profiler/parser/minddata_analyzer.py +12 -14
- mindspore/profiler/parser/minddata_pipeline_parser.py +1 -1
- mindspore/profiler/parser/msadvisor_parser.py +8 -4
- mindspore/profiler/parser/op_intermediate_parser.py +5 -2
- mindspore/profiler/parser/optime_parser.py +1 -1
- mindspore/profiler/parser/profiler_info.py +2 -2
- mindspore/profiler/parser/step_trace_parser.py +11 -14
- mindspore/profiler/profiling.py +139 -71
- mindspore/rewrite/api/node.py +102 -19
- mindspore/rewrite/api/node_type.py +5 -1
- mindspore/rewrite/api/scoped_value.py +9 -17
- mindspore/rewrite/api/symbol_tree.py +131 -47
- mindspore/rewrite/ast_helpers/__init__.py +2 -1
- mindspore/rewrite/ast_helpers/ast_finder.py +129 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +116 -104
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +93 -46
- mindspore/rewrite/common/rewrite_elog.py +5 -1
- mindspore/rewrite/namer.py +33 -24
- mindspore/rewrite/namespace.py +14 -5
- mindspore/{_extends/graph_kernel/expanders/complex → rewrite/node}/__init__.py +9 -9
- mindspore/rewrite/node/call_function.py +79 -0
- mindspore/rewrite/node/cell_container.py +135 -0
- mindspore/rewrite/node/control_flow.py +88 -0
- mindspore/rewrite/{node.py → node/node.py} +273 -234
- mindspore/rewrite/node/node_manager.py +254 -0
- mindspore/rewrite/{topological_manager.py → node/node_topological_manager.py} +13 -46
- mindspore/rewrite/parsers/arguments_parser.py +22 -21
- mindspore/rewrite/parsers/assign_parser.py +216 -221
- mindspore/rewrite/parsers/attribute_parser.py +9 -7
- mindspore/rewrite/parsers/class_def_parser.py +174 -113
- mindspore/rewrite/parsers/constant_parser.py +9 -6
- mindspore/rewrite/parsers/container_parser.py +9 -7
- mindspore/rewrite/parsers/for_parser.py +36 -15
- mindspore/rewrite/parsers/function_def_parser.py +24 -16
- mindspore/rewrite/parsers/if_parser.py +28 -24
- mindspore/rewrite/parsers/module_parser.py +196 -25
- mindspore/rewrite/{parser.py → parsers/parser.py} +4 -2
- mindspore/rewrite/{parser_register.py → parsers/parser_register.py} +1 -1
- mindspore/rewrite/parsers/return_parser.py +6 -6
- mindspore/rewrite/sparsify/sparse_transformer.py +12 -3
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree.py +525 -577
- mindspore/rewrite/symbol_tree_builder.py +9 -193
- mindspore/rewrite/symbol_tree_dumper.py +2 -2
- mindspore/run_check/_check_version.py +2 -2
- mindspore/{ops/bprop_mindir → safeguard}/__init__.py +4 -3
- mindspore/safeguard/rewrite_obfuscation.py +517 -0
- mindspore/scipy/linalg.py +1 -1
- mindspore/scipy/optimize/minimize.py +7 -3
- mindspore/train/_utils.py +7 -3
- mindspore/train/amp.py +323 -123
- mindspore/train/anf_ir_pb2.py +14 -2
- mindspore/train/callback/_backup_and_restore.py +2 -12
- mindspore/train/callback/_callback.py +29 -4
- mindspore/train/callback/_checkpoint.py +23 -8
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_landscape.py +4 -4
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +3 -4
- mindspore/train/callback/_summary_collector.py +14 -7
- mindspore/train/callback/_time_monitor.py +58 -5
- mindspore/train/data_sink.py +5 -11
- mindspore/train/dataset_helper.py +83 -57
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/__init__.py +3 -3
- mindspore/train/metrics/cosine_similarity.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +3 -2
- mindspore/train/metrics/mean_surface_distance.py +3 -2
- mindspore/train/metrics/metric.py +39 -19
- mindspore/train/metrics/roc.py +2 -2
- mindspore/train/metrics/root_mean_square_surface_distance.py +4 -3
- mindspore/train/mind_ir_pb2.py +85 -36
- mindspore/train/model.py +185 -45
- mindspore/train/serialization.py +390 -150
- mindspore/train/summary/_writer_pool.py +3 -2
- mindspore/train/summary/summary_record.py +14 -10
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/train/train_thor/dataset_helper.py +1 -1
- mindspore/version.py +1 -1
- {mindspore-2.1.0.dist-info → mindspore-2.2.0.dist-info}/METADATA +6 -7
- {mindspore-2.1.0.dist-info → mindspore-2.2.0.dist-info}/RECORD +458 -518
- {mindspore-2.1.0.dist-info → mindspore-2.2.0.dist-info}/entry_points.txt +0 -1
- mindspore/_akg/akg/tvm/contrib/debugger/__init__.py +0 -16
- mindspore/_akg/akg/tvm/contrib/debugger/debug_result.py +0 -274
- mindspore/_akg/akg/tvm/contrib/debugger/debug_runtime.py +0 -259
- mindspore/_akg/akg/tvm/contrib/peak.py +0 -341
- mindspore/_akg/akg/tvm/contrib/rpc.py +0 -25
- mindspore/_akg/akg/tvm/contrib/xcode.py +0 -257
- mindspore/_akg/akg/tvm/exec/__init__.py +0 -17
- mindspore/_akg/akg/tvm/exec/autotvm_log_editor.py +0 -60
- mindspore/_akg/akg/tvm/exec/measure_peak.py +0 -48
- mindspore/_akg/akg/tvm/exec/query_rpc_tracker.py +0 -48
- mindspore/_akg/akg/tvm/exec/rpc_proxy.py +0 -98
- mindspore/_akg/akg/tvm/exec/rpc_server.py +0 -88
- mindspore/_akg/akg/tvm/exec/rpc_tracker.py +0 -62
- mindspore/_akg/akg/tvm/rpc/__init__.py +0 -29
- mindspore/_akg/akg/tvm/rpc/base.py +0 -182
- mindspore/_akg/akg/tvm/rpc/client.py +0 -436
- mindspore/_akg/akg/tvm/rpc/proxy.py +0 -595
- mindspore/_akg/akg/tvm/rpc/server.py +0 -413
- mindspore/_akg/akg/tvm/rpc/tornado_util.py +0 -121
- mindspore/_akg/akg/tvm/rpc/tracker.py +0 -431
- mindspore/_extends/graph_kernel/expander.py +0 -80
- mindspore/_extends/graph_kernel/expanders/__init__.py +0 -54
- mindspore/_extends/graph_kernel/expanders/_utils.py +0 -269
- mindspore/_extends/graph_kernel/expanders/addn.py +0 -33
- mindspore/_extends/graph_kernel/expanders/batchnorm.py +0 -152
- mindspore/_extends/graph_kernel/expanders/batchnorm_grad.py +0 -105
- mindspore/_extends/graph_kernel/expanders/clip_by_norm_no_div_sum.py +0 -33
- mindspore/_extends/graph_kernel/expanders/complex/abs.py +0 -30
- mindspore/_extends/graph_kernel/expanders/complex/add.py +0 -44
- mindspore/_extends/graph_kernel/expanders/complex/div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/mul.py +0 -52
- mindspore/_extends/graph_kernel/expanders/complex/real_div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/sub.py +0 -45
- mindspore/_extends/graph_kernel/expanders/conv2d.py +0 -200
- mindspore/_extends/graph_kernel/expanders/dropout_grad.py +0 -30
- mindspore/_extends/graph_kernel/expanders/equal_count.py +0 -50
- mindspore/_extends/graph_kernel/expanders/erfc.py +0 -35
- mindspore/_extends/graph_kernel/expanders/expand_dims.py +0 -50
- mindspore/_extends/graph_kernel/expanders/fused_adam.py +0 -44
- mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py +0 -47
- mindspore/_extends/graph_kernel/expanders/fused_mul_add.py +0 -28
- mindspore/_extends/graph_kernel/expanders/gelu_grad.py +0 -70
- mindspore/_extends/graph_kernel/expanders/gkdropout.py +0 -40
- mindspore/_extends/graph_kernel/expanders/identity.py +0 -25
- mindspore/_extends/graph_kernel/expanders/layernorm.py +0 -93
- mindspore/_extends/graph_kernel/expanders/layernorm_grad.py +0 -113
- mindspore/_extends/graph_kernel/expanders/logsoftmax.py +0 -46
- mindspore/_extends/graph_kernel/expanders/logsoftmax_grad.py +0 -36
- mindspore/_extends/graph_kernel/expanders/matmul.py +0 -80
- mindspore/_extends/graph_kernel/expanders/maximum_grad.py +0 -59
- mindspore/_extends/graph_kernel/expanders/minimum_grad.py +0 -80
- mindspore/_extends/graph_kernel/expanders/oneslike.py +0 -26
- mindspore/_extends/graph_kernel/expanders/reduce_mean.py +0 -43
- mindspore/_extends/graph_kernel/expanders/relu_grad.py +0 -32
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits_grad.py +0 -35
- mindspore/_extends/graph_kernel/expanders/sigmoid_grad.py +0 -31
- mindspore/_extends/graph_kernel/expanders/slice.py +0 -35
- mindspore/_extends/graph_kernel/expanders/softmax_cross_entropy_with_logits.py +0 -42
- mindspore/_extends/graph_kernel/expanders/softmax_grad_ext.py +0 -41
- mindspore/_extends/graph_kernel/expanders/softsign.py +0 -28
- mindspore/_extends/graph_kernel/expanders/sqrt_grad.py +0 -29
- mindspore/_extends/graph_kernel/expanders/square_sum_all.py +0 -44
- mindspore/_extends/graph_kernel/expanders/square_sum_v1.py +0 -37
- mindspore/_extends/graph_kernel/expanders/squared_difference.py +0 -43
- mindspore/_extends/graph_kernel/expanders/tanh_grad.py +0 -31
- mindspore/_extends/graph_kernel/model/op_infer.py +0 -506
- mindspore/dataset/datapreprocess/__init__.py +0 -20
- mindspore/dataset/datapreprocess/preprocess_imagenet_validate_dataset.py +0 -54
- mindspore/include/api/net.h +0 -142
- mindspore/nn/lr_scheduler.py +0 -262
- mindspore/ops/_grad_experimental/grad_image_ops.py +0 -248
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +0 -181
- mindspore/ops/_grad_experimental/grad_other_ops.py +0 -72
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +0 -112
- mindspore/ops/_grad_experimental/grad_sequence_ops.py +0 -351
- mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Depend_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +0 -138
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Load_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Switch_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TransShape_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Unique_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Unstack_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/generate_mindir.py +0 -114
- mindspore/rewrite/node_visitor.py +0 -44
- {mindspore-2.1.0.dist-info → mindspore-2.2.0.dist-info}/WHEEL +0 -0
- {mindspore-2.1.0.dist-info → mindspore-2.2.0.dist-info}/top_level.txt +0 -0
mindspore/nn/layer/conv.py
CHANGED
@@ -20,6 +20,7 @@ import numpy as np
 
 from mindspore import context
 from mindspore.ops import operations as P
+import mindspore.common.dtype as mstype
 from mindspore.ops.primitive import _primexpr
 from mindspore.common.parameter import Parameter
 from mindspore.common.initializer import initializer, HeUniform, Uniform, _calculate_fan_in_and_fan_out
@@ -50,7 +51,8 @@ class _Conv(Cell):
 weight_init,
 bias_init,
 data_format='NCHW',
-transposed=False
+transposed=False,
+dtype=mstype.float32):
 """Initialize _Conv."""
 super(_Conv, self).__init__()
 self.in_channels = Validator.check_positive_int(in_channels, 'in_channels', self.cls_name)
@@ -97,7 +99,7 @@ class _Conv(Cell):
 if weight_init is None:
 weight_init = HeUniform(math.sqrt(5))
 self.weight_init = weight_init
-self.weight = Parameter(initializer(self.weight_init, shape), name='weight')
+self.weight = Parameter(initializer(self.weight_init, shape, dtype=dtype), name='weight')
 
 self.bias_init = bias_init
 if Validator.check_bool(has_bias, "has_bias", self.cls_name):
@@ -109,7 +111,7 @@ class _Conv(Cell):
 else:
 bias_init = 'zeros'
 self.bias_init = bias_init
-self.bias = Parameter(initializer(self.bias_init, [out_channels]), name='bias')
+self.bias = Parameter(initializer(self.bias_init, [out_channels], dtype=dtype), name='bias')
 else:
 self.bias = None
 
@@ -139,31 +141,44 @@ class _Conv(Cell):
 
 class Conv2d(_Conv):
 r"""
-
-
-
-:math:`
-
+2D convolution layer.
+
+Applies a 2D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, H_{in}, W_{in})`,
+where :math:`N` is batch size, :math:`C` is channel number, :math:`H` is feature height, :math:`W` is feature width.
+
+The output is calculated based on formula:
 
 .. math::
 
 \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
 \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
 
-where :math:`
-
-
-
-
-
-
-:math:`
-
-
-:math:`
-
-
-
+where :math:`bias` is the output channel bias, :math:`ccor` is
+the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
+, :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
+
+Here are the indices' meanings:
+- :math:`i` corresponds to the batch number, ranging from 0 to N-1, where N is the batch size of the input.
+
+- :math:`j` corresponds to the output channel, ranging from 0 to C_{out}-1, where C_{out} is the number of
+output channels, which is also equal to the number of kernels.
+
+- :math:`k` corresponds to the input channel, ranging from 0 to C_{in}-1, where C_{in} is the number of
+input channels, which is also equal to the number of channels in the convolutional kernels.
+
+Therefore, in the above formula, :math:`{bias}(C_{out_j})` represents the bias of the :math:`j`-th
+output channel, :math:`{weight}(C_{out_j}, k)` represents the slice of the :math:`j`-th convolutional
+kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+channel in the :math:`i`-th batch of the input feature map.
+
+The shape of the convolutional kernel is given by :math:`(kernel\_size[0], kernel\_size[1])`,
+where :math:`kernel\_size[0]` and :math:`kernel\_size[1]` are the height and width of the kernel, respectively.
+If we consider the input and output channels as well as the `group` parameter, the complete kernel shape
+will be :math:`(C_{out}, C_{in} / \text{group}, \text{kernel_size[0]}, \text{kernel_size[1]})`,
+where `group` is the number of groups dividing `x`'s input channel when applying group convolution.
+
+For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
+<http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
 
 Note:
 On Ascend platform, only group convolution in depthwise convolution scenarios is supported.
@@ -176,48 +191,62 @@ class Conv2d(_Conv):
 The data type is an integer or a tuple of two integers. An integer represents the height
 and width of the convolution kernel. A tuple of two integers represents the height
 and width of the convolution kernel respectively.
-stride (Union[int, tuple[int]]): The movement stride of the 2D convolution kernel.
+stride (Union[int, tuple[int]], optional): The movement stride of the 2D convolution kernel.
 The data type is an integer or a tuple of two or four integers. An integer represents the movement step size
 in both height and width directions. A tuple of two integers represents the movement step size in the height
 and width directions respectively. Default: ``1`` .
-pad_mode (str): Specifies padding mode.
-``"same"`` , ``"valid"``
-
-- ``"same"``:
-
-
-
-
-
-
-If this mode is set,
-
-
+pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"same"`` .
+
+- ``"same"``: Pad the input around its edges so that the shape of input and output
+are the same when `stride` is set to ``1``.
+The amount of padding to is calculated by the operator internally, If the amount is even, it is
+uniformly distributed around the input, if it is odd, the excess amount goes to the right/bottom side.
+If this mode is set, `padding` must be 0.
+- ``"valid"``: No padding is applied to the input, and the output returns the maximum
+possible height and width. Extra pixels that could not complete a full stride will
+be discarded. If this mode is set, `padding` must be 0.
+- ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+in the height and width directions is determined by the `padding` parameter.
+If this mode is set, `padding` must be greater than or equal to 0.
+
+padding (Union[int, tuple[int]], optional): The number of padding
+on the height and width directions of the input.
 The data type is an integer or a tuple of four integers. If `padding` is an integer,
 then the top, bottom, left, and right padding are all equal to `padding`.
 If `padding` is a tuple of 4 integers, then the top, bottom, left, and right padding
 is equal to `padding[0]`, `padding[1]`, `padding[2]`, and `padding[3]` respectively.
 The value should be greater than or equal to 0. Default: ``0`` .
-dilation (Union
-
-
-and
-
+dilation (Union(int, tuple[int]), optional): Specifies the dilation rate to use for dilated convolution.
+It can be a single int or a tuple of 2 or 4 integers. A single int means the dilation size is the same
+in both the height and width directions. A tuple of two ints represents the dilation size in
+the height and width directions, respectively. For a tuple of four ints, the two ints correspond
+to (N, C) dimension are treated as 1, and the two correspond to (H, W) dimensions is the
+dilation size in the height and width directions respectively.
+Assuming :math:`dilation=(d0, d1)`, the convolutional kernel samples the input with a
+spacing of :math:`d0-1` elements in the height direction and :math:`d1-1` elements in the width direction.
+The values in the height and width dimensions are in the ranges [1, H] and [1, W], respectively.
+Default: ``1`` .
+group (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
 divisible by `group`. If the group is equal to `in_channels` and `out_channels`,
 this 2D convolution layer also can be called 2D depthwise convolution layer. Default: ``1`` .
-has_bias (bool): Whether the Conv2d layer has a bias parameter. Default: ``False`` .
-weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of
+has_bias (bool, optional): Whether the Conv2d layer has a bias parameter. Default: ``False`` .
+weight_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initialization method of
+weight parameter.
 It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
 values from ``'TruncatedNormal'`` , ``'Normal'`` , ``'Uniform'`` , ``'HeUniform'`` and ``'XavierUniform'``
 distributions as well as constant ``'One'`` and ``'Zero'`` distributions are possible. Alias
 ``'xavier_uniform'`` , ``'he_uniform'`` , ``'ones'`` and ``'zeros'`` are acceptable. Uppercase and
-lowercase are both acceptable. Refer to the values of
-
-
+lowercase are both acceptable. Refer to the values of
+`Initializer <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.common.initializer.html>`_,
+for more details. Default: ``None`` , weight will be initialized using ``'HeUniform'``.
+bias_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initialization method of bias parameter.
 Available initialization methods are the same as 'weight_init'. Refer to the values of
-Initializer
-
+`Initializer <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.common.initializer.html>`_,
+for more details. Default: ``None`` , bias will be initialized using ``'Uniform'`` .
+data_format (str, optional): The optional value for data format, is ``'NHWC'`` or ``'NCHW'`` .
 Default: ``'NCHW'`` .
+dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
 
 Inputs:
 - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` \
@@ -230,17 +259,17 @@ class Conv2d(_Conv):
 
 .. math::
 \begin{array}{ll} \\
-H_{out}
-W_{out}
+H_{out} = \left \lceil{\frac{H_{in}}{\text{stride[0]}}} \right \rceil \\
+W_{out} = \left \lceil{\frac{W_{in}}{\text{stride[1]}}} \right \rceil \\
 \end{array}
 
 pad_mode is ``'valid'``:
 
 .. math::
 \begin{array}{ll} \\
-H_{out}
+H_{out} = \left \lceil{\frac{H_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) }
 {\text{stride[0]}}} \right \rceil \\
-W_{out}
+W_{out} = \left \lceil{\frac{W_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) }
 {\text{stride[1]}}} \right \rceil \\
 \end{array}
 
@@ -248,9 +277,9 @@ class Conv2d(_Conv):
 
 .. math::
 \begin{array}{ll} \\
-H_{out}
+H_{out} = \left \lfloor{\frac{H_{in} + padding[0] + padding[1] - (\text{kernel_size[0]} - 1) \times
 \text{dilation[0]} - 1 }{\text{stride[0]}} + 1} \right \rfloor \\
-W_{out}
+W_{out} = \left \lfloor{\frac{W_{in} + padding[2] + padding[3] - (\text{kernel_size[1]} - 1) \times
 \text{dilation[1]} - 1 }{\text{stride[1]}} + 1} \right \rfloor \\
 \end{array}
 
@@ -291,7 +320,8 @@ class Conv2d(_Conv):
 has_bias=False,
 weight_init=None,
 bias_init=None,
-data_format='NCHW'
+data_format='NCHW',
+dtype=mstype.float32):
 """Initialize Conv2d."""
 kernel_size = twice(kernel_size)
 stride = twice(stride)
@@ -314,7 +344,8 @@ class Conv2d(_Conv):
 has_bias,
 weight_init,
 bias_init,
-data_format
+data_format,
+dtype=dtype)
 self.conv2d = P.Conv2D(out_channel=self.out_channels,
 kernel_size=self.kernel_size,
 mode=1,
@@ -341,26 +372,45 @@ def _check_input_3d(input_shape, op_name):
 
 class Conv1d(_Conv):
 r"""
-
-
-
+1D convolution layer.
+
+Applies a 1D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, L_{in})`,
+where :math:`N` is batch size, :math:`C` is channel number, :math:`L` is input sequence width.
+
+The output is calculated based on formula:
 
 .. math::
 
 \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
 \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
 
-where :math:`
-
-
-
-
-
-:math:`(C_{out}, C_{in} / \text{group}, \text{kernel_size})`,
-where `group` is the number of groups to split the input `x` in the channel dimension.
+where :math:`bias` is the output channel bias, :math:`ccor` is
+the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
+, :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
+
+Here are the indices' meanings:
+- :math:`i` corresponds to the batch number, ranging from 0 to N-1, where N is the batch size of the input.
 
-
-
+- :math:`j` corresponds to the output channel, ranging from 0 to C_{out}-1, where C_{out} is the number of
+output channels, which is also equal to the number of kernels.
+
+- :math:`k` corresponds to the input channel, ranging from 0 to C_{in}-1, where C_{in} is the number of
+input channels, which is also equal to the number of channels in the convolutional kernels.
+
+Therefore, in the above formula, :math:`{bias}(C_{out_j})` represents the bias of the :math:`j`-th
+output channel, :math:`{weight}(C_{out_j}, k)` represents the slice of the :math:`j`-th convolutional
+kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+channel in the :math:`i`-th batch of the input feature map.
+
+The shape of the convolutional kernel is given by :math:`(kernel\_size)`,
+where :math:`kernel\_size` is the width of the kernel.
+If we consider the input and output channels as well as the `group` parameter, the complete kernel shape
+will be :math:`(C_{out}, C_{in} / \text{group}, \text{kernel_size})`,
+where `group` is the number of groups dividing `x`'s input channel when applying group convolution.
+
+For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
+<http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_
+and `ConvNets <http://cs231n.github.io/convolutional-networks/>`_ .
 
 Note:
 On Ascend platform, only group convolution in depthwise convolution scenarios is supported.
@@ -370,39 +420,52 @@ class Conv1d(_Conv):
 in_channels (int): The channel number of the input tensor of the Conv1d layer.
 out_channels (int): The channel number of the output tensor of the Conv1d layer.
 kernel_size (int): Specifies the width of the 1D convolution kernel.
-stride (int): The movement stride of the 1D convolution kernel. Default: ``1`` .
-pad_mode (str): Specifies padding mode.
-``"same"`` , ``"valid"``
-
-- ``"same"``:
-
-
-
-
-
-
-If this mode is set,
-
-
-
-
-
-
+stride (int, optional): The movement stride of the 1D convolution kernel. Default: ``1`` .
+pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"same"`` .
+
+- ``"same"``: Pad the input at the begin and end so that the shape of input and output
+are the same when `stride` is set to ``1``.
+The amount of padding to is calculated by the operator internally. If the amount is even, it is
+uniformly distributed around the input, if it is odd, the excess padding is goes to the right side.
+If this mode is set, `padding` must be 0.
+- ``"valid"``: No padding is applied to the input, and the output returns the maximum
+possible length. Extra pixels that could not complete a full stride will
+be discarded. If this mode is set, `padding` must be 0.
+- ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+at the begin and end is determined by the `padding` parameter.
+If this mode is set, `padding` must be greater than or equal to 0.
+
+padding (Union(int, tuple[int], list[int]), optional): Specifies the amount of padding to apply on
+both side of `input` when `pad_mode` is set to ``"pad"``. The
+paddings of left and right are the same, equal to padding or padding[0] when padding is a tuple of
+1 integer. Default: ``0`` .
+dilation (Union(int, tuple[int]), optional): Specifies the dilation rate to use for dilated convolution.
+It can be a single int or a tuple of 1 integer.
+Assuming :math:`dilation=(d0,)`, the convolutional kernel samples the input with a
+spacing of :math:`d0-1` elements in the width direction.
+The value should be in the ranges [1, L].
+Default: ``1`` .
+group (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
 divisible by `group`. Default: ``1`` .
-has_bias (bool): Whether the Conv1d layer has a bias parameter. Default: ``False`` .
-weight_init (Union[Tensor, str, Initializer, numbers.Number]):
+has_bias (bool, optional): Whether the Conv1d layer has a bias parameter. Default: ``False`` .
+weight_init (Union[Tensor, str, Initializer, numbers.Number], optional):
+Initialization method of weight parameter.
 It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
 values from ``'TruncatedNormal'`` , ``'Normal'`` , ``'Uniform'`` , ``'HeUniform'`` and ``'XavierUniform'``
 distributions as well as constant 'One' and 'Zero' distributions are possible. Alias ``'xavier_uniform'`` ,
 ``'he_uniform'`` , ``'ones'`` and ``'zeros'`` are acceptable. Uppercase and lowercase are both acceptable.
-Refer to the values of
-
-
+Refer to the values of
+`Initializer <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.common.initializer.html>`_,
+for more details. Default: ``None`` , weight will be initialized using ``'HeUniform'``.
+bias_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initialization method of bias parameter.
 Available initialization methods are the same as 'weight_init'. Refer to the values of
-Initializer
+`Initializer <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.common.initializer.html>`_,
+for more details. Default: ``None`` , bias will be initialized using ``'Uniform'``.
+dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
 
 Inputs:
-- **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})
+- **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})` .
 
 Outputs:
 Tensor of shape :math:`(N, C_{out}, L_{out})`.
@@ -456,7 +519,8 @@ class Conv1d(_Conv):
 group=1,
 has_bias=False,
 weight_init=None,
-bias_init=None
+bias_init=None,
+dtype=mstype.float32):
 """Initialize Conv1d."""
 Validator.check_value_type("kernel_size", kernel_size, [int], self.cls_name)
 Validator.check_value_type("stride", stride, [int], self.cls_name)
@@ -495,7 +559,8 @@ class Conv1d(_Conv):
 group,
 has_bias,
 weight_init,
-bias_init
+bias_init,
+dtype=dtype)
 self.padding = (0, 0, padding, padding)
 Validator.check_string(pad_mode, ['valid', 'same', 'pad'], 'pad_mode', self.cls_name)
 self.conv2d = P.Conv2D(out_channel=self.out_channels,
@@ -531,31 +596,48 @@ def _check_input_5dims(input_shape, op_name):
 
 class Conv3d(_Conv):
 r"""
-
-
-
-:math:`D_{in}, H_{in}, W_{in}`
-
+3D convolution layer.
+
+Applies a 3D convolution over an input tensor which is typically of shape
+:math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`, where :math:`N` is batch size, :math:`C` is channel number,
+:math:`D` is feature depth, :math:`H` is feature height, :math:`W` is feature width.
+
+The output is calculated based on formula:
 
 .. math::
 
 \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
 \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
 
-where :math:`
-
-
-
-
-
-
-
-
-
-
-
-
-
+where :math:`bias` is the output channel bias, :math:`ccor` is
+the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
+, :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
+
+Here are the indices' meanings:
+- :math:`i` corresponds to the batch number, ranging from 0 to N-1, where N is the batch size of the input.
+
+- :math:`j` corresponds to the output channel, ranging from 0 to C_{out}-1, where C_{out} is the number of
+output channels, which is also equal to the number of kernels.
+
+- :math:`k` corresponds to the input channel, ranging from 0 to C_{in}-1, where C_{in} is the number of
+input channels, which is also equal to the number of channels in the convolutional kernels.
+
+Therefore, in the above formula, :math:`{bias}(C_{out_j})` represents the bias of the :math:`j`-th
+output channel, :math:`{weight}(C_{out_j}, k)` represents the slice of the :math:`j`-th convolutional
+kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+channel in the :math:`i`-th batch of the input feature map.
+
+The shape of the convolutional kernel is given by
+:math:`(\text{kernel_size[0]}, \text{kernel_size[1]}, \text{kernel_size[2]})`
+where :math:`kernel\_size[0]` , :math:`kernel\_size[1]` and :math:`kernel\_size[2]` are the depth,
+height and width of the kernel, respectively.
+If we consider the input and output channels as well as the `group` parameter, the complete kernel shape
+will be :math:`(C_{out}, C_{in} / \text{group}, \text{kernel_size[0]},
+\text{kernel_size[1]}, \text{kernel_size[2]})`,
+where `group` is the number of groups dividing `x`'s input channel when applying group convolution.
+
+For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
+<http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
 
 Note:
 On Ascend platform, only group convolution in depthwise convolution scenarios is supported.
@@ -565,49 +647,65 @@ class Conv3d(_Conv):
 in_channels (int): The channel number of the input tensor of the Conv3d layer.
 out_channels (int): The channel number of the output tensor of the Conv3d layer.
 kernel_size (Union[int, tuple[int]]): Specifies the depth, height and width of the 3D convolution kernel.
-
-and
-and
-stride (Union[int, tuple[int]]): The movement stride of the 3D convolution kernel.
+It can be a single int or a tuple of 3 integers. A single int means the value is for depth, height
+and the width. A tuple of 3 ints means the first value is
+for depth and the rest is for the height and width.
+stride (Union[int, tuple[int]], optional): The movement stride of the 3D convolution kernel.
 The data type is an integer or a tuple of three integers. An integer represents the movement step size
 in depth, height and width directions. A tuple of three integers represents the movement step size
 in the depth, height and width directions respectively. Default: ``1`` .
-pad_mode (str): Specifies padding mode.
-``"same"`` , ``"valid"``
-
-- ``"same"``:
-
-
-
-
-
-- ``"
-
-
-
+pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"same"`` .
+
+- ``"same"``: Pad the input around its depth/height/width dimension so that the shape of input and output
+are the same when `stride` is set to ``1``.
+The amount of padding to is calculated by the operator internally. If the amount is even,
+it isuniformly distributed around the input, if it is odd, the excess amount goes
+to the front/right/bottom side.
+If this mode is set, `padding` must be 0.
+- ``"valid"``: No padding is applied to the input, and the output returns the maximum
+possible depth, height and width. Extra pixels that could not complete a full stride will
+be discarded. If this mode is set, `padding` must be 0.
+- ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+in the depth, height and width dimension is determined by the `padding` parameter.
+If this mode is set, `padding` must be greater than or equal to 0.
+
+padding (Union(int, tuple[int]), optional): The number of padding on the depth,
+height and width directions of the input.
 The data type is an integer or a tuple of six integers. If `padding` is an integer,
 then the head, tail, top, bottom, left, and right padding are all equal to `padding`.
 If `padding` is a tuple of six integers, then the head, tail, top, bottom, left, and right padding
 is equal to `padding[0]`, `padding[1]`, `padding[2]`, `padding[3]`, `padding[4]` and `padding[5]`
 respectively. The value should be greater than or equal to 0. Default: ``0`` .
-dilation (Union[int, tuple[int]]):
-
-
-
-
+dilation (Union[int, tuple[int]], optional): Specifies the dilation rate to use for dilated convolution.
+It can be a single int or a tuple of 3 integers. A single int means the dilation size is the same
+in the depth, height and width directions. A tuple of 3 ints represents the dilation size in
+the depth, height and width directions, respectively.
+Assuming :math:`dilation=(d0, d1, d2)`, the convolutional kernel samples the input with a
+spacing of :math:`d0-1` elements in the depth direction, :math:`d1-1` elements in the height direction,
+:math:`d2-1` elements in the width direction respectively.
+The values in the depth, height and width dimensions are in
+the ranges [1, D], [1, H] and [1, W], respectively.
+Default: ``1`` .
+group (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
 divisible by `group`. Default: ``1`` .
-has_bias (bool): Whether the Conv3d layer has a bias parameter. Default: ``False`` .
-weight_init (Union[Tensor, str, Initializer, numbers.Number]):
+has_bias (bool, optional): Whether the Conv3d layer has a bias parameter. Default: ``False`` .
+weight_init (Union[Tensor, str, Initializer, numbers.Number], optional):
+Initialization method of weight parameter.
 It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
 values from ``'TruncatedNormal'`` , ``'Normal'`` , ``'Uniform'`` , ``'HeUniform'`` and ``'XavierUniform'``
values from ``'TruncatedNormal'`` , ``'Normal'`` , ``'Uniform'`` , ``'HeUniform'`` and ``'XavierUniform'``
|
|
603
697
|
distributions as well as constant ``'One'`` and ``'Zero'`` distributions are possible. Alias
|
|
604
698
|
``'xavier_uniform'`` , ``'he_uniform'`` , ``'ones'`` and ``'zeros'`` are acceptable. Uppercase and
|
|
605
|
-
lowercase are both acceptable. Refer to the values of
|
|
606
|
-
|
|
607
|
-
|
|
699
|
+
lowercase are both acceptable. Refer to the values of
|
|
700
|
+
`Initializer <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.common.initializer.html>`_,
|
|
701
|
+
for more details. Default: ``None`` , weight will be initialized using ``'HeUniform'``.
|
|
702
|
+
bias_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initialization method of bias parameter.
|
|
608
703
|
Available initialization methods are the same as 'weight_init'. Refer to the values of
|
|
609
|
-
Initializer
|
|
610
|
-
|
|
704
|
+
`Initializer <https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.common.initializer.html>`_,
|
|
705
|
+
for more details. Default: ``None`` , bias will be initialized using ``'Uniform'`` .
|
|
706
|
+
data_format (str, optional): The optional value for data format. Currently only support ``'NCDHW'`` .
|
|
707
|
+
dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
|
|
708
|
+
|
|
611
709
|
|
|
612
710
|
Inputs:
|
|
613
711
|
- **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`.
|
|
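Because the three `pad_mode` values above determine the output size differently, the following helper (an editor's sketch using standard convolution arithmetic; `conv3d_out_dim` is not a MindSpore API) may help when reading this hunk. It assumes symmetric padding per spatial dimension in ``"pad"`` mode.

```python
import math

def conv3d_out_dim(in_dim, kernel, stride=1, dilation=1, pad_mode="same", padding=0):
    # Output size of one spatial dimension (depth, height or width).
    if pad_mode == "same":
        # output matches the input when stride == 1
        return math.ceil(in_dim / stride)
    if pad_mode == "valid":
        # no padding; pixels that cannot complete a full stride are discarded
        return (in_dim - dilation * (kernel - 1) - 1) // stride + 1
    if pad_mode == "pad":
        # explicit symmetric padding on both sides of this dimension
        return (in_dim + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1
    raise ValueError("pad_mode must be 'same', 'valid' or 'pad'")

# depth 10 with kernel_size[0] = 4:
print(conv3d_out_dim(10, 4, pad_mode="same"))            # 10
print(conv3d_out_dim(10, 4, pad_mode="valid"))           # 7
print(conv3d_out_dim(10, 4, pad_mode="pad", padding=1))  # 9
```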
@@ -687,7 +785,8 @@ class Conv3d(_Conv):
  has_bias=False,
  weight_init=None,
  bias_init=None,
- data_format='NCDHW'
+ data_format='NCDHW',
+ dtype=mstype.float32):
  """Initialize Conv3d."""
  if not (in_channels % group == 0 and out_channels % group == 0):
  raise ValueError("The argument 'group' should be divisible by 'in_channels' " \
@@ -711,8 +810,9 @@ class Conv3d(_Conv):
  has_bias,
  weight_init,
  bias_init,
- data_format
-
+ data_format,
+ dtype=dtype)
+ out_channels = self.out_channels
  self.conv3d = P.Conv3D(out_channel=out_channels,
  kernel_size=self.kernel_size,
  mode=1,
@@ -720,33 +820,17 @@ class Conv3d(_Conv):
  pad=self.padding,
  stride=self.stride,
  dilation=self.dilation,
- group=
+ group=group,
  data_format=self.data_format)
  self.bias_add = P.BiasAdd(data_format=self.data_format)
  self.shape = P.Shape()
- self.concat = P.Concat(1)
- self.split_0 = P.Split(0, self.group)
- self.split_1 = P.Split(1, self.group)

  def construct(self, x):
  x_shape = self.shape(x)
  _check_input_5dims(x_shape, self.cls_name)
-
-
-
- out = self.bias_add(out, self.bias)
- else:
- features = self.split_1(x)
- weights = self.split_0(self.weight)
- outputs = ()
- for i in range(self.group):
- output = self.conv3d(features[i], weights[i])
- outputs = outputs + (output,)
- out = self.concat(outputs)
- if self.bias is not None:
- new_shape = [1 for _ in range(out.ndim)]
- new_shape[1] = self.out_channels
- out = out + self.bias.reshape(new_shape)
+ out = self.conv3d(x, self.weight)
+ if self.has_bias:
+ out = self.bias_add(out, self.bias)
  return out

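A minimal usage sketch of `Conv3d`, assuming the 2.2.0 signature shown in these hunks; the `dtype` keyword is the newly added parameter, and the input shape follows the Inputs section above.

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, nn

x = Tensor(np.ones([16, 3, 10, 32, 32]), ms.float32)   # (N, C_in, D, H, W)
conv3d = nn.Conv3d(in_channels=3, out_channels=32, kernel_size=(4, 3, 3),
                   dtype=ms.float32)                   # dtype added in 2.2.0
out = conv3d(x)
print(out.shape)   # (16, 32, 10, 32, 32) with the default pad_mode="same" and stride=1
```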
@@ -776,17 +860,21 @@ class Conv3dTranspose(_Conv):
  The data type is an integer or a tuple of three integers. An integer represents the movement step size
  in depth, height and width directions. A tuple of three integers represents the movement step size
  in the depth, height and width directions respectively. Default: ``1`` .
- pad_mode (str): Specifies padding mode.
- ``"same"`` , ``"valid"``
-
- - ``"same"``:
-
-
-
-
-
- - ``"
-
+ pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+ ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"same"`` .
+
+ - ``"same"``: Pad the input around its depth/height/width dimension so that the shape of input and output
+ are the same when `stride` is set to ``1``.
+ The amount of padding is calculated by the operator internally. If the amount is even,
+ it is uniformly distributed around the input; if it is odd, the excess amount goes
+ to the front/right/bottom side.
+ If this mode is set, `padding` must be 0.
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+ possible depth, height and width. Extra pixels that could not complete a full stride will
+ be discarded. If this mode is set, `padding` must be 0.
+ - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+ in the depth, height and width dimension is determined by the `padding` parameter.
+ If this mode is set, `padding` must be greater than or equal to 0.

  padding (Union(int, tuple[int])): The number of padding on the depth, height and width directions of the input.
  The data type is an integer or a tuple of six integers. If `padding` is an integer,
@@ -820,10 +908,11 @@ class Conv3dTranspose(_Conv):
  Initializer for more details. Default: ``None`` , bias will be initialized using Uniform.
  data_format (str): The optional value for data format. Currently only support ``'NCDHW'`` .
  Default: ``'NCDHW'`` .
+ dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`.
- Currently input data
+ Currently input data dtype only supports float16 and float32.

  Outputs:
  Tensor, the shape is :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`.
@@ -902,7 +991,8 @@ class Conv3dTranspose(_Conv):
  has_bias=False,
  weight_init=None,
  bias_init=None,
- data_format='NCDHW'
+ data_format='NCDHW',
+ dtype=mstype.float32):
  """Initialize Conv3dTranspose."""
  if not (in_channels % group == 0 and out_channels % group == 0):
  raise ValueError("The argument 'group' should be divisible by 'in_channels' " \
@@ -929,7 +1019,8 @@ class Conv3dTranspose(_Conv):
  weight_init,
  bias_init,
  data_format,
- transposed=True
+ transposed=True,
+ dtype=dtype)
  self.conv3d_transpose = P.Conv3DTranspose(in_channel=self.in_channels,
  out_channel=self.out_channels,
  kernel_size=self.kernel_size,
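Similarly, a hedged usage sketch of `Conv3dTranspose` with the new `dtype` argument; the float16/float32 input restriction follows the docstring line added above.

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, nn

x = Tensor(np.ones([32, 16, 10, 32, 32]), ms.float32)  # (N, C_in, D, H, W), float16/float32 only
net = nn.Conv3dTranspose(in_channels=16, out_channels=3, kernel_size=(4, 6, 2),
                         pad_mode="pad", dtype=ms.float32)
out = net(x)
print(out.shape)   # (N, C_out, D_out, H_out, W_out)
```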
@@ -996,17 +1087,20 @@ class Conv2dTranspose(_Conv):
  The data type is an integer or a tuple of two integers. An integer represents the movement step size
  in both height and width directions. A tuple of two integers represents the movement step size in the height
  and width directions respectively. Default: ``1`` .
- pad_mode (str): Specifies padding mode.
- ``"same"`` , ``"valid"``
-
- - ``"same"``:
-
-
-
-
-
-
- If this mode is set,
+ pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+ ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"same"`` .
+
+ - ``"same"``: Pad the input around its edges so that the shape of input and output
+ are the same when `stride` is set to ``1``.
+ The amount of padding is calculated by the operator internally. If the amount is even, it is
+ uniformly distributed around the input; if it is odd, the excess amount goes to the right/bottom side.
+ If this mode is set, `padding` must be 0.
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+ possible height and width. Extra pixels that could not complete a full stride will
+ be discarded. If this mode is set, `padding` must be 0.
+ - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+ in the height and width directions is determined by the `padding` parameter.
+ If this mode is set, `padding` must be greater than or equal to 0.

  padding (Union[int, tuple[int]]): The number of padding on the height and width directions of the input.
  The data type is an integer or a tuple of four integers. If `padding` is an integer,
@@ -1037,6 +1131,7 @@ class Conv2dTranspose(_Conv):
  bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of bias parameter.
  Available initialization methods are the same as 'weight_init'. Refer to the values of
  Initializer for more details. Default: ``None`` , bias will be initialized using Uniform.
+ dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
@@ -1109,7 +1204,8 @@ class Conv2dTranspose(_Conv):
  group=1,
  has_bias=False,
  weight_init=None,
- bias_init=None
+ bias_init=None,
+ dtype=mstype.float32):
  """Initialize Conv2dTranspose."""
  kernel_size = twice(kernel_size)
  stride = twice(stride)
@@ -1135,7 +1231,8 @@ class Conv2dTranspose(_Conv):
  has_bias,
  weight_init,
  bias_init,
- transposed=True
+ transposed=True,
+ dtype=dtype)

  self.in_channels = in_channels
  self.out_channels = out_channels
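A hedged usage sketch of `Conv2dTranspose`; `dtype` is the parameter introduced in this diff and the remaining arguments keep their documented defaults.

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, nn

x = Tensor(np.ones([1, 3, 16, 50]), ms.float32)        # (N, C_in, H, W)
net = nn.Conv2dTranspose(3, 64, 4, pad_mode="pad", dtype=ms.float32)
out = net(x)
print(out.shape)   # (N, C_out, H_out, W_out)
```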
@@ -1218,17 +1315,20 @@ class Conv1dTranspose(_Conv):
  out_channels (int): The channel number of the output tensor of the Conv1dTranspose layer.
  kernel_size (int): Specifies the width of the 1D convolution kernel.
  stride (int): The movement stride of the 1D convolution kernel. Default: ``1`` .
- pad_mode (str): Specifies padding mode.
- ``"same"`` , ``"valid"``
-
- - ``"same"``:
-
-
-
-
-
-
- If this mode is set,
+ pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+ ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"same"`` .
+
+ - ``"same"``: Pad the input at the beginning and end so that the shape of input and output
+ are the same when `stride` is set to ``1``.
+ The amount of padding is calculated by the operator internally. If the amount is even, it is
+ uniformly distributed around the input; if it is odd, the excess padding goes to the right side.
+ If this mode is set, `padding` must be 0.
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+ possible length. Extra pixels that could not complete a full stride will
+ be discarded. If this mode is set, `padding` must be 0.
+ - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+ at the beginning and end is determined by the `padding` parameter.
+ If this mode is set, `padding` must be greater than or equal to 0.

  padding (int): The number of padding on both sides of input.
  The value should be greater than or equal to 0. Default: ``0`` .
@@ -1247,6 +1347,7 @@ class Conv1dTranspose(_Conv):
  bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of bias parameter.
  Available initialization methods are the same as 'weight_init'. Refer to the values of
  Initializer for more details. Default: ``None`` , bias will be initialized using Uniform.
+ dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})`.
@@ -1294,7 +1395,8 @@ class Conv1dTranspose(_Conv):
  group=1,
  has_bias=False,
  weight_init=None,
- bias_init=None
+ bias_init=None,
+ dtype=mstype.float32):
  """Initialize Conv1dTranspose."""
  Validator.check_value_type("kernel_size", kernel_size, [int], self.cls_name)
  Validator.check_value_type("stride", stride, [int], self.cls_name)
@@ -1331,7 +1433,8 @@ class Conv1dTranspose(_Conv):
  has_bias,
  weight_init,
  bias_init,
- transposed=True
+ transposed=True,
+ dtype=dtype)
  self.padding = (0, 0, padding, padding)
  self.in_channels = in_channels
  self.out_channels = out_channels
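And a hedged usage sketch of `Conv1dTranspose`, again assuming only the 2.2.0 signature shown in the hunks above.

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, nn

x = Tensor(np.ones([1, 3, 50]), ms.float32)            # (N, C_in, L)
net = nn.Conv1dTranspose(3, 64, 4, pad_mode="pad", dtype=ms.float32)
out = net(x)
print(out.shape)   # (N, C_out, L_out)
```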