mindspore 1.10.0__cp37-cp37m-win_amd64.whl → 2.0.0rc1__cp37-cp37m-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/ConcurrencyCheck.dll +0 -0
- mindspore/CppBuildInsights.dll +0 -0
- mindspore/CppCoreCheck.dll +0 -0
- mindspore/EnumIndex.dll +0 -0
- mindspore/EspXEngine.dll +0 -0
- mindspore/HResultCheck.dll +0 -0
- mindspore/KernelTraceControl.dll +0 -0
- mindspore/LocalESPC.dll +0 -0
- mindspore/Microsoft.Diagnostics.Tracing.EventSource.dll +0 -0
- mindspore/Microsoft.VisualStudio.RemoteControl.dll +0 -0
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Microsoft.VisualStudio.Utilities.Internal.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/System.Runtime.CompilerServices.Unsafe.dll +0 -0
- mindspore/VariantClear.dll +0 -0
- mindspore/__init__.py +9 -4
- mindspore/_c_dataengine.cp37-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp37-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp37-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +102 -0
- mindspore/_checkparam.py +1066 -1001
- mindspore/_extends/builtin_operations.py +32 -4
- mindspore/_extends/graph_kernel/model/graph_split.py +66 -222
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +12 -9
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +119 -26
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -50
- mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -6
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -25
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -27
- mindspore/_extends/parse/__init__.py +5 -3
- mindspore/_extends/parse/namespace.py +17 -2
- mindspore/_extends/parse/parser.py +193 -34
- mindspore/_extends/parse/resources.py +7 -8
- mindspore/_extends/parse/standard_method.py +1780 -435
- mindspore/_extends/parse/trope.py +3 -1
- mindspore/amp.py +53 -58
- mindspore/atlprov.dll +0 -0
- mindspore/boost/adasum.py +3 -2
- mindspore/boost/boost.py +2 -2
- mindspore/boost/boost_cell_wrapper.py +46 -26
- mindspore/boost/dim_reduce.py +6 -5
- mindspore/boost/grad_accumulation.py +2 -1
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/cfgpersist.dll +0 -0
- mindspore/clang_rt.asan_dbg_dynamic-x86_64.dll +0 -0
- mindspore/clang_rt.asan_dynamic-x86_64.dll +0 -0
- mindspore/common/__init__.py +11 -10
- mindspore/common/_decorator.py +2 -0
- mindspore/common/_register_for_adapter.py +55 -0
- mindspore/common/_stub_tensor.py +201 -0
- mindspore/common/_utils.py +57 -0
- mindspore/common/api.py +582 -297
- mindspore/common/dtype.py +66 -18
- mindspore/common/dump.py +2 -2
- mindspore/common/initializer.py +38 -1
- mindspore/common/jit_config.py +25 -13
- mindspore/common/mutable.py +53 -24
- mindspore/common/parameter.py +60 -37
- mindspore/common/seed.py +8 -24
- mindspore/common/sparse_tensor.py +927 -0
- mindspore/common/tensor.py +1627 -3900
- mindspore/communication/__init__.py +10 -5
- mindspore/communication/_comm_helper.py +78 -214
- mindspore/communication/_hccl_management.py +2 -1
- mindspore/communication/management.py +136 -47
- mindspore/config/op_info.config +501 -1008
- mindspore/context.py +291 -56
- mindspore/d3dcompiler_47.dll +0 -0
- mindspore/dataset/__init__.py +12 -8
- mindspore/dataset/audio/__init__.py +9 -9
- mindspore/dataset/audio/transforms.py +1090 -228
- mindspore/dataset/audio/utils.py +87 -39
- mindspore/dataset/audio/validators.py +223 -1
- mindspore/dataset/callback/ds_callback.py +17 -15
- mindspore/dataset/core/config.py +246 -17
- mindspore/dataset/core/py_util_helpers.py +4 -3
- mindspore/dataset/core/validator_helpers.py +10 -10
- mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
- mindspore/dataset/debug/debug_hook.py +65 -0
- mindspore/dataset/debug/pre_defined_hook.py +67 -0
- mindspore/dataset/engine/__init__.py +7 -3
- mindspore/dataset/engine/cache_client.py +9 -9
- mindspore/dataset/engine/datasets.py +648 -477
- mindspore/dataset/engine/datasets_audio.py +165 -167
- mindspore/dataset/engine/datasets_standard_format.py +93 -67
- mindspore/dataset/engine/datasets_text.py +492 -342
- mindspore/dataset/engine/datasets_user_defined.py +85 -50
- mindspore/dataset/engine/datasets_vision.py +1224 -699
- mindspore/dataset/engine/graphdata.py +134 -69
- mindspore/dataset/engine/iterators.py +50 -9
- mindspore/dataset/engine/offload.py +52 -31
- mindspore/dataset/engine/samplers.py +27 -24
- mindspore/dataset/engine/serializer_deserializer.py +14 -15
- mindspore/dataset/engine/validators.py +213 -52
- mindspore/dataset/text/__init__.py +10 -8
- mindspore/dataset/text/transforms.py +152 -57
- mindspore/dataset/text/utils.py +98 -49
- mindspore/dataset/text/validators.py +25 -0
- mindspore/dataset/transforms/__init__.py +4 -2
- mindspore/dataset/transforms/c_transforms.py +11 -13
- mindspore/dataset/transforms/py_transforms.py +2 -2
- mindspore/dataset/transforms/py_transforms_util.py +10 -0
- mindspore/dataset/transforms/transforms.py +13 -15
- mindspore/dataset/transforms/validators.py +7 -7
- mindspore/dataset/utils/__init__.py +2 -1
- mindspore/dataset/utils/browse_dataset.py +13 -13
- mindspore/dataset/utils/line_reader.py +121 -0
- mindspore/dataset/vision/__init__.py +8 -7
- mindspore/dataset/vision/c_transforms.py +125 -126
- mindspore/dataset/vision/py_transforms.py +37 -37
- mindspore/dataset/vision/py_transforms_util.py +23 -20
- mindspore/dataset/vision/transforms.py +316 -315
- mindspore/dataset/vision/utils.py +313 -17
- mindspore/dataset/vision/validators.py +6 -6
- mindspore/default_config.py +0 -1
- mindspore/dpcmi.dll +0 -0
- mindspore/{compression → experimental}/__init__.py +6 -5
- mindspore/experimental/map_parameter.py +275 -0
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/callback/callback.h +9 -13
- mindspore/include/api/callback/ckpt_saver.h +2 -2
- mindspore/include/api/callback/loss_monitor.h +2 -2
- mindspore/include/api/callback/lr_scheduler.h +5 -5
- mindspore/include/api/callback/time_monitor.h +2 -2
- mindspore/include/api/callback/train_accuracy.h +4 -6
- mindspore/include/api/cfg.h +19 -6
- mindspore/include/api/context.h +70 -9
- mindspore/include/api/delegate.h +8 -1
- mindspore/include/api/dual_abi_helper.h +8 -24
- mindspore/include/api/metrics/accuracy.h +2 -2
- mindspore/include/api/metrics/metrics.h +4 -3
- mindspore/include/api/model.h +9 -4
- mindspore/include/api/model_group.h +68 -0
- mindspore/include/api/model_parallel_runner.h +17 -17
- mindspore/include/api/net.h +12 -11
- mindspore/include/api/serialization.h +20 -4
- mindspore/include/api/status.h +7 -1
- mindspore/include/api/types.h +25 -21
- mindspore/include/api/visible.h +4 -0
- mindspore/include/c_api/model_c.h +5 -0
- mindspore/include/c_api/status_c.h +1 -1
- mindspore/include/dataset/config.h +1 -1
- mindspore/include/dataset/constants.h +14 -0
- mindspore/include/dataset/text.h +59 -0
- mindspore/include/dataset/vision.h +56 -117
- mindspore/include/dataset/vision_lite.h +102 -0
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +28 -28
- mindspore/mindrecord/common/exceptions.py +2 -4
- mindspore/mindrecord/filereader.py +19 -1
- mindspore/mindrecord/filewriter.py +250 -88
- mindspore/mindrecord/mindpage.py +13 -13
- mindspore/mindrecord/shardheader.py +15 -15
- mindspore/mindrecord/shardreader.py +9 -0
- mindspore/mindrecord/shardwriter.py +29 -29
- mindspore/mindrecord/tools/cifar100_to_mr.py +9 -9
- mindspore/mindrecord/tools/cifar10_to_mr.py +9 -9
- mindspore/mindrecord/tools/csv_to_mr.py +4 -4
- mindspore/mindrecord/tools/imagenet_to_mr.py +70 -65
- mindspore/mindrecord/tools/mnist_to_mr.py +41 -41
- mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
- mindspore/{libmindspore_backend.dll → mindspore_backend.dll} +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/__init__.py +1 -5
- mindspore/nn/cell.py +297 -234
- mindspore/nn/dynamic_lr.py +1 -1
- mindspore/nn/grad/cell_grad.py +17 -42
- mindspore/nn/layer/__init__.py +7 -4
- mindspore/nn/layer/activation.py +131 -88
- mindspore/nn/layer/basic.py +313 -613
- mindspore/nn/layer/channel_shuffle.py +103 -0
- mindspore/nn/layer/combined.py +1 -1
- mindspore/nn/layer/container.py +52 -6
- mindspore/nn/layer/conv.py +112 -43
- mindspore/nn/layer/dense.py +10 -9
- mindspore/nn/layer/embedding.py +36 -34
- mindspore/nn/layer/image.py +123 -27
- mindspore/nn/layer/math.py +108 -107
- mindspore/nn/layer/normalization.py +212 -366
- mindspore/nn/layer/padding.py +370 -42
- mindspore/nn/layer/pooling.py +1443 -219
- mindspore/nn/layer/rnn_cells.py +11 -16
- mindspore/nn/layer/rnns.py +38 -39
- mindspore/nn/layer/thor_layer.py +24 -25
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +701 -0
- mindspore/nn/learning_rate_schedule.py +8 -8
- mindspore/nn/loss/__init__.py +9 -6
- mindspore/nn/loss/loss.py +678 -142
- mindspore/nn/metrics.py +53 -0
- mindspore/nn/optim/_dist_optimizer_registry.py +2 -2
- mindspore/nn/optim/ada_grad.py +8 -8
- mindspore/nn/optim/adadelta.py +2 -3
- mindspore/nn/optim/adafactor.py +18 -14
- mindspore/nn/optim/adam.py +429 -87
- mindspore/nn/optim/adamax.py +5 -6
- mindspore/nn/optim/adasum.py +10 -8
- mindspore/nn/optim/asgd.py +7 -7
- mindspore/nn/optim/ftrl.py +81 -11
- mindspore/nn/optim/lamb.py +7 -8
- mindspore/nn/optim/lars.py +4 -4
- mindspore/nn/optim/lazyadam.py +82 -7
- mindspore/nn/optim/momentum.py +8 -7
- mindspore/nn/optim/optimizer.py +19 -10
- mindspore/nn/optim/proximal_ada_grad.py +6 -5
- mindspore/nn/optim/rmsprop.py +3 -3
- mindspore/nn/optim/rprop.py +20 -16
- mindspore/nn/optim/sgd.py +21 -15
- mindspore/nn/optim/thor.py +23 -21
- mindspore/nn/probability/__init__.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +7 -6
- mindspore/nn/probability/bijector/invert.py +4 -2
- mindspore/nn/probability/bijector/softplus.py +2 -2
- mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
- mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
- mindspore/nn/probability/distribution/__init__.py +6 -0
- mindspore/nn/probability/distribution/_utils/custom_ops.py +3 -2
- mindspore/nn/probability/distribution/_utils/utils.py +11 -17
- mindspore/nn/probability/distribution/bernoulli.py +6 -6
- mindspore/nn/probability/distribution/beta.py +1 -1
- mindspore/nn/probability/distribution/categorical.py +9 -9
- mindspore/nn/probability/distribution/cauchy.py +8 -8
- mindspore/nn/probability/distribution/distribution.py +12 -6
- mindspore/nn/probability/distribution/exponential.py +5 -5
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +6 -5
- mindspore/nn/probability/distribution/gumbel.py +5 -5
- mindspore/nn/probability/distribution/half_normal.py +133 -0
- mindspore/nn/probability/distribution/laplace.py +128 -0
- mindspore/nn/probability/distribution/log_normal.py +0 -1
- mindspore/nn/probability/distribution/logistic.py +4 -5
- mindspore/nn/probability/distribution/normal.py +11 -15
- mindspore/nn/probability/distribution/poisson.py +6 -2
- mindspore/nn/probability/distribution/student_t.py +150 -0
- mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
- mindspore/nn/probability/distribution/uniform.py +5 -5
- mindspore/nn/reinforcement/_tensors_queue.py +3 -3
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +8 -1
- mindspore/nn/wrap/cell_wrapper.py +55 -27
- mindspore/nn/wrap/grad_reducer.py +20 -11
- mindspore/nn/wrap/loss_scale.py +47 -30
- mindspore/numpy/array_creations.py +33 -22
- mindspore/numpy/array_ops.py +46 -42
- mindspore/numpy/logic_ops.py +6 -27
- mindspore/numpy/math_ops.py +26 -19
- mindspore/numpy/utils.py +1 -8
- mindspore/numpy/utils_const.py +112 -62
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +6 -3
- mindspore/ops/_constants.py +0 -6
- mindspore/ops/_grad/__init__.py +2 -1
- mindspore/ops/_grad/grad_array_ops.py +209 -152
- mindspore/ops/_grad/grad_base.py +55 -17
- mindspore/ops/_grad/grad_clip_ops.py +11 -3
- mindspore/ops/_grad/grad_comm_ops.py +58 -47
- mindspore/ops/_grad/grad_implementations.py +21 -61
- mindspore/ops/_grad/grad_inner_ops.py +48 -6
- mindspore/ops/_grad/grad_math_ops.py +306 -161
- mindspore/ops/_grad/grad_nn_ops.py +192 -181
- mindspore/ops/_grad/grad_other_ops.py +1 -1
- mindspore/ops/_grad/grad_quant_ops.py +5 -5
- mindspore/ops/_grad/grad_sequence_ops.py +296 -0
- mindspore/ops/_grad/grad_sparse.py +15 -9
- mindspore/ops/_grad_experimental/__init__.py +1 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +441 -55
- mindspore/ops/_grad_experimental/grad_image_ops.py +25 -7
- mindspore/ops/_grad_experimental/grad_inner_ops.py +3 -44
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +16 -21
- mindspore/ops/_grad_experimental/grad_math_ops.py +979 -49
- mindspore/ops/_grad_experimental/grad_nn_ops.py +78 -8
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +197 -13
- mindspore/ops/_op_impl/__init__.py +3 -3
- mindspore/ops/_op_impl/_custom_op/__init__.py +0 -1
- mindspore/ops/_op_impl/_custom_op/_basic.py +0 -1
- mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +4 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +5 -5
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
- mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/correction_mul.py +3 -3
- mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +4 -8
- mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_dds_grad_impl.py +0 -1
- mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -1
- mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +238 -3
- mindspore/ops/_op_impl/aicpu/abs.py +36 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d.py +34 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d.py +39 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d.py +42 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d_grad.py +152 -0
- mindspore/ops/_op_impl/aicpu/add.py +43 -0
- mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
- mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
- mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/arg_max.py +75 -0
- mindspore/ops/_op_impl/aicpu/arg_min.py +75 -0
- mindspore/ops/_op_impl/aicpu/argmin_with_value.py +43 -0
- mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -0
- mindspore/ops/_op_impl/aicpu/batch_norm_grad_grad.py +49 -0
- mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
- mindspore/ops/_op_impl/aicpu/bessel_i0.py +31 -0
- mindspore/ops/_op_impl/aicpu/bias_add.py +44 -0
- mindspore/ops/_op_impl/aicpu/bias_add_grad.py +43 -0
- mindspore/ops/_op_impl/aicpu/bincount.py +33 -0
- mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/cauchy.py} +17 -10
- mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
- mindspore/ops/_op_impl/aicpu/cholesky.py +1 -1
- mindspore/ops/_op_impl/{cpu/bias_add.py → aicpu/choleskygrad.py} +9 -7
- mindspore/ops/_op_impl/aicpu/combined_non_max_suppression.py +42 -0
- mindspore/ops/_op_impl/aicpu/concat_offset.py +42 -0
- mindspore/ops/_op_impl/aicpu/concat_offset_v1.py +31 -0
- mindspore/ops/_op_impl/aicpu/conj.py +11 -0
- mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_image.py +38 -0
- mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +36 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +2 -2
- mindspore/ops/_op_impl/aicpu/dense_to_sparse_set_operation.py +48 -0
- mindspore/ops/_op_impl/aicpu/diag.py +36 -0
- mindspore/ops/_op_impl/aicpu/diag_part.py +36 -0
- mindspore/ops/_op_impl/aicpu/diagonal.py +35 -0
- mindspore/ops/_op_impl/{cpu/bias_add_grad.py → aicpu/digamma.py} +9 -7
- mindspore/ops/_op_impl/aicpu/eig.py +35 -0
- mindspore/ops/_op_impl/aicpu/fft_with_size.py +41 -0
- mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
- mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
- mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
- mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
- mindspore/ops/_op_impl/aicpu/glu.py +33 -0
- mindspore/ops/_op_impl/aicpu/glu_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/greater.py +41 -0
- mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
- mindspore/ops/_op_impl/{tbe/scatter_add_ds.py → aicpu/inplace_index_add.py} +17 -21
- mindspore/ops/_op_impl/aicpu/instance_norm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/instance_norm_v2_grad.py +44 -0
- mindspore/ops/_op_impl/aicpu/layer_norm_grad_grad.py +47 -0
- mindspore/ops/_op_impl/aicpu/less.py +41 -0
- mindspore/ops/_op_impl/aicpu/less_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/lgamma.py +32 -0
- mindspore/ops/_op_impl/aicpu/log_normal_reverse.py +33 -0
- mindspore/ops/_op_impl/aicpu/logit.py +33 -0
- mindspore/ops/_op_impl/aicpu/logit_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/masked_fill.py +42 -0
- mindspore/ops/_op_impl/aicpu/masked_scatter.py +39 -0
- mindspore/ops/_op_impl/aicpu/matmul.py +39 -0
- mindspore/ops/_op_impl/aicpu/matrix_logarithm.py +31 -0
- mindspore/ops/_op_impl/aicpu/matrix_power.py +32 -0
- mindspore/ops/_op_impl/aicpu/matrix_solve_ls.py +36 -0
- mindspore/ops/_op_impl/aicpu/matrix_triangular_solve.py +36 -0
- mindspore/ops/_op_impl/aicpu/mirror_pad.py +2 -0
- mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mul.py +3 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
- mindspore/ops/_op_impl/aicpu/multinomial_with_replacement.py +35 -0
- mindspore/ops/_op_impl/aicpu/nan_to_num.py +34 -0
- mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
- mindspore/ops/_op_impl/aicpu/polar.py +32 -0
- mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
- mindspore/ops/_op_impl/aicpu/qr.py +36 -0
- mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
- mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_tensor.py +74 -0
- mindspore/ops/_op_impl/aicpu/random_shuffle.py +3 -0
- mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/range.py +36 -0
- mindspore/ops/_op_impl/aicpu/reciprocal.py +34 -0
- mindspore/ops/_op_impl/aicpu/reciprocal_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/reduce_sum.py +57 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
- mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
- mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
- mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
- mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
- mindspore/ops/_op_impl/aicpu/search_sorted.py +12 -6
- mindspore/ops/_op_impl/aicpu/self_adjoint_eig.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
- mindspore/ops/_op_impl/aicpu/slice_grad.py +76 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/sort.py +39 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows.py +63 -0
- mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows_grad.py +45 -0
- mindspore/ops/_op_impl/aicpu/sparse_matrix_mat_mul.py +56 -0
- mindspore/ops/_op_impl/{tbe/slice_ds.py → aicpu/sparse_segment_sum.py} +16 -24
- mindspore/ops/_op_impl/aicpu/sparse_segment_sum_with_num_segments.py +68 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice.py +63 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +61 -0
- mindspore/ops/_op_impl/aicpu/squared_difference.py +2 -0
- mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +93 -0
- mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +66 -0
- mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
- mindspore/ops/_op_impl/{tbe/gather_v2.py → aicpu/tile.py} +24 -24
- mindspore/ops/_op_impl/aicpu/tridiagonal_solve.py +35 -0
- mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/triu_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
- mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
- mindspore/ops/_op_impl/cpu/__init__.py +1 -2
- mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
- mindspore/ops/_op_impl/cpu/maximum_grad.py +2 -0
- mindspore/{compression/common/__init__.py → ops/_op_impl/cpu/pyexecute.py} +13 -8
- mindspore/ops/_op_impl/cpu/reduce_sum.py +8 -0
- mindspore/ops/_op_impl/cpu/sparse_slice.py +62 -0
- mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +60 -0
- mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
- mindspore/ops/_op_impl/tbe/__init__.py +27 -608
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +42 -0
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +44 -0
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -1
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +41 -0
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +1 -0
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +2 -0
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +40 -0
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
- mindspore/ops/_op_impl/tbe/cast.py +0 -2
- mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -2
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -2
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +1 -0
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +1 -1
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
- mindspore/ops/_op_impl/tbe/greater.py +2 -0
- mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -1
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -6
- mindspore/ops/_op_impl/tbe/{greater_ds.py → reduce_all_ds.py} +13 -16
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +39 -0
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +44 -0
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +44 -0
- mindspore/ops/_op_impl/tbe/scatter_add.py +2 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +2 -2
- mindspore/ops/_op_impl/tbe/slice.py +26 -15
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +1 -0
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +1 -1
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +2 -0
- mindspore/ops/_primitive_cache.py +3 -2
- mindspore/ops/_register_for_op.py +11 -0
- mindspore/ops/_utils/__init__.py +1 -1
- mindspore/ops/_utils/utils.py +20 -41
- mindspore/ops/_vmap/__init__.py +2 -2
- mindspore/ops/_vmap/vmap_array_ops.py +170 -78
- mindspore/ops/_vmap/vmap_base.py +24 -10
- mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
- mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -9
- mindspore/ops/_vmap/vmap_image_ops.py +52 -0
- mindspore/ops/_vmap/vmap_math_ops.py +77 -6
- mindspore/ops/_vmap/vmap_nn_ops.py +78 -29
- mindspore/ops/_vmap/vmap_other_ops.py +3 -1
- mindspore/ops/_vmap/vmap_random_ops.py +55 -3
- mindspore/ops/_vmap/vmap_sparse_ops.py +1 -0
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ApproximateEqual_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Argmax_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/Argmin_bprop.mindir +14 -13
- mindspore/ops/bprop_mindir/AssignSub_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/Assign_bprop.mindir +16 -16
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchToSpaceND_bprop.mindir +28 -0
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +306 -0
- mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +12 -8
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Concat_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
- mindspore/ops/bprop_mindir/DType_bprop.mindir +12 -12
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/Depend_bprop.mindir +12 -13
- mindspore/ops/bprop_mindir/DepthToSpace_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
- mindspore/ops/bprop_mindir/DiagPart_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -24
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -14
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicShape_bprop.mindir +12 -12
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Equal_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +54 -0
- mindspore/ops/bprop_mindir/FloorDiv_bprop.mindir +18 -15
- mindspore/ops/bprop_mindir/GatherD_bprop.mindir +26 -0
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +57 -0
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/GreaterEqual_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/Greater_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/IOU_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/IsFinite_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/IsInf_bprop.mindir +13 -10
- mindspore/ops/bprop_mindir/IsNan_bprop.mindir +14 -11
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LessEqual_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Less_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/LinSpace_bprop.mindir +22 -19
- mindspore/ops/bprop_mindir/Load_bprop.mindir +12 -13
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/LogicalAnd_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/LogicalNot_bprop.mindir +14 -13
- mindspore/ops/bprop_mindir/MaskedSelect_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Maximum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Minimum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NonZero_bprop.mindir +14 -0
- mindspore/ops/bprop_mindir/NotEqual_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +25 -23
- mindspore/ops/bprop_mindir/OnesLike_bprop.mindir +13 -13
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
- mindspore/ops/bprop_mindir/Range_bprop.mindir +21 -19
- mindspore/ops/bprop_mindir/Rank_bprop.mindir +11 -11
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReduceAll_bprop.mindir +18 -17
- mindspore/ops/bprop_mindir/ReduceAny_bprop.mindir +18 -17
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +19 -23
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +60 -0
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +89 -0
- mindspore/ops/bprop_mindir/ReverseSequence_bprop.mindir +52 -0
- mindspore/ops/bprop_mindir/ReverseV2_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/Round_bprop.mindir +14 -13
- mindspore/ops/bprop_mindir/ScatterMax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterMin_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterNdUpdate_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/ScatterNd_bprop.mindir +24 -0
- mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/ScatterUpdate_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/Select_bprop.mindir +30 -34
- mindspore/ops/bprop_mindir/Shape_bprop.mindir +12 -12
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Sign_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/Slice_bprop.mindir +26 -0
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/Sort_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SpaceToBatchND_bprop.mindir +28 -0
- mindspore/ops/bprop_mindir/SpaceToDepth_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Split_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +54 -0
- mindspore/ops/bprop_mindir/StridedSliceGrad_bprop.mindir +95 -0
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +98 -0
- mindspore/ops/bprop_mindir/Switch_bprop.mindir +28 -32
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/TensorScatterAdd_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/TensorScatterUpdate_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/TensorShape_bprop.mindir +14 -0
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TransShape_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/TruncateDiv_bprop.mindir +18 -15
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +11 -13
- mindspore/ops/bprop_mindir/Unique_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Unstack_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
- mindspore/ops/bprop_mindir/ZerosLike_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/__init__.py +1 -4
- mindspore/ops/bprop_mindir/generate_mindir.py +32 -20
- mindspore/ops/composite/__init__.py +12 -13
- mindspore/ops/composite/base.py +261 -254
- mindspore/ops/composite/env_ops.py +41 -0
- mindspore/ops/composite/math_ops.py +197 -156
- mindspore/ops/composite/multitype_ops/_compile_utils.py +428 -176
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +188 -87
- mindspore/ops/composite/multitype_ops/add_impl.py +23 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +3 -3
- mindspore/ops/composite/multitype_ops/equal_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +52 -5
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +15 -3
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +33 -2
- mindspore/ops/composite/multitype_ops/less_impl.py +33 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +21 -7
- mindspore/ops/composite/multitype_ops/not_in_impl.py +15 -3
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
- mindspore/ops/composite/multitype_ops/pow_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +62 -70
- mindspore/ops/composite/multitype_ops/sub_impl.py +3 -3
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +41 -4
- mindspore/ops/function/__init__.py +323 -8
- mindspore/ops/function/array_func.py +3511 -780
- mindspore/ops/function/clip_func.py +329 -0
- mindspore/ops/function/debug_func.py +6 -6
- mindspore/ops/function/grad/__init__.py +5 -1
- mindspore/ops/function/grad/grad_func.py +736 -65
- mindspore/ops/function/image_func.py +270 -0
- mindspore/ops/function/linalg_func.py +268 -8
- mindspore/ops/function/math_func.py +8032 -3164
- mindspore/ops/function/nn_func.py +5619 -1855
- mindspore/ops/function/other_func.py +115 -0
- mindspore/ops/function/parameter_func.py +11 -10
- mindspore/ops/function/random_func.py +939 -77
- mindspore/ops/function/sparse_func.py +249 -84
- mindspore/ops/function/sparse_unary_func.py +2303 -0
- mindspore/ops/function/spectral_func.py +146 -0
- mindspore/ops/function/vmap_func.py +114 -0
- mindspore/ops/functional.py +182 -254
- mindspore/ops/op_info_register.py +79 -34
- mindspore/ops/operations/__init__.py +210 -118
- mindspore/ops/operations/_csr_ops.py +7 -7
- mindspore/ops/operations/_embedding_cache_ops.py +25 -15
- mindspore/ops/operations/_grad_ops.py +447 -322
- mindspore/ops/operations/_inner_ops.py +547 -176
- mindspore/ops/operations/_map_tensor_ops.py +112 -0
- mindspore/ops/operations/_ms_kernel.py +29 -27
- mindspore/ops/operations/_ocr_ops.py +11 -11
- mindspore/ops/operations/_opaque_predicate_registry.py +41 -0
- mindspore/ops/operations/_quant_ops.py +186 -101
- mindspore/ops/operations/_rl_inner_ops.py +122 -61
- mindspore/ops/operations/_scalar_ops.py +466 -0
- mindspore/ops/operations/_sequence_ops.py +1047 -0
- mindspore/ops/operations/_tensor_array.py +10 -11
- mindspore/ops/operations/_thor_ops.py +4 -4
- mindspore/ops/operations/array_ops.py +1428 -1226
- mindspore/ops/operations/comm_ops.py +180 -117
- mindspore/ops/operations/control_ops.py +4 -2
- mindspore/ops/operations/custom_ops.py +185 -98
- mindspore/ops/operations/debug_ops.py +92 -54
- mindspore/ops/operations/image_ops.py +406 -211
- mindspore/ops/operations/inner_ops.py +42 -53
- mindspore/ops/operations/linalg_ops.py +32 -29
- mindspore/ops/operations/math_ops.py +2076 -897
- mindspore/ops/operations/nn_ops.py +1282 -1252
- mindspore/ops/operations/other_ops.py +124 -278
- mindspore/ops/operations/random_ops.py +345 -178
- mindspore/ops/operations/rl_ops.py +8 -9
- mindspore/ops/operations/sparse_ops.py +502 -157
- mindspore/ops/operations/spectral_ops.py +107 -0
- mindspore/ops/primitive.py +192 -15
- mindspore/ops/vm_impl_registry.py +23 -2
- mindspore/parallel/__init__.py +6 -1
- mindspore/parallel/_auto_parallel_context.py +199 -92
- mindspore/parallel/_cell_wrapper.py +4 -2
- mindspore/parallel/_cost_model_context.py +3 -0
- mindspore/parallel/_dp_allreduce_fusion.py +2 -1
- mindspore/parallel/_offload_context.py +185 -0
- mindspore/parallel/_parallel_serialization.py +167 -28
- mindspore/parallel/_ps_context.py +9 -5
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +9 -1
- mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
- mindspore/{nn/transformer → parallel/_transformer}/layers.py +59 -37
- mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
- mindspore/{nn/transformer → parallel/_transformer}/moe.py +160 -35
- mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
- mindspore/{nn/transformer → parallel/_transformer}/transformer.py +235 -196
- mindspore/parallel/_utils.py +47 -7
- mindspore/parallel/algo_parameter_config.py +5 -1
- mindspore/parallel/checkpoint_transform.py +329 -0
- mindspore/parallel/shard.py +229 -0
- mindspore/perf_msvcbuildinsights.dll +0 -0
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/common/util.py +4 -3
- mindspore/profiler/common/validator/validate_path.py +2 -2
- mindspore/profiler/envprofiling.py +249 -0
- mindspore/profiler/parser/aicpu_data_parser.py +38 -39
- mindspore/profiler/parser/ascend_timeline_generator.py +497 -0
- mindspore/profiler/parser/base_timeline_generator.py +471 -0
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +684 -0
- mindspore/profiler/parser/framework_parser.py +42 -16
- mindspore/profiler/parser/hccl_parser.py +158 -158
- mindspore/profiler/parser/hwts_log_parser.py +7 -6
- mindspore/profiler/parser/integrator.py +18 -1579
- mindspore/profiler/parser/minddata_analyzer.py +8 -8
- mindspore/profiler/parser/msadvisor_analyzer.py +14 -27
- mindspore/profiler/parser/msadvisor_parser.py +2 -4
- mindspore/profiler/parser/optime_parser.py +17 -18
- mindspore/profiler/parser/profiler_info.py +108 -0
- mindspore/profiler/parser/step_trace_parser.py +1 -1
- mindspore/profiler/profiling.py +396 -194
- mindspore/rewrite/__init__.py +6 -2
- mindspore/rewrite/api/node.py +51 -110
- mindspore/rewrite/api/node_type.py +10 -6
- mindspore/rewrite/api/pattern_engine.py +51 -7
- mindspore/rewrite/api/scoped_value.py +64 -53
- mindspore/rewrite/api/symbol_tree.py +108 -61
- mindspore/rewrite/api/tree_node_helper.py +2 -3
- mindspore/{compression/quant/__init__.py → rewrite/ast_creator_register.py} +20 -11
- mindspore/rewrite/ast_helpers/__init__.py +6 -3
- mindspore/rewrite/ast_helpers/ast_creator.py +115 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +99 -1
- mindspore/rewrite/ast_helpers/ast_modifier.py +17 -4
- mindspore/rewrite/ast_helpers/ast_replacer.py +1 -1
- mindspore/rewrite/ast_transformers/__init__.py +0 -1
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +46 -5
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +6 -3
- mindspore/rewrite/common/__init__.py +2 -0
- mindspore/rewrite/common/event.py +1 -1
- mindspore/rewrite/common/observable.py +1 -1
- mindspore/rewrite/common/observer.py +1 -1
- mindspore/rewrite/common/rewrite_elog.py +35 -0
- mindspore/rewrite/namer.py +2 -2
- mindspore/rewrite/namespace.py +14 -4
- mindspore/rewrite/node.py +161 -13
- mindspore/rewrite/parser.py +0 -1
- mindspore/rewrite/parser_register.py +0 -1
- mindspore/rewrite/parsers/arguments_parser.py +3 -2
- mindspore/rewrite/parsers/assign_parser.py +267 -67
- mindspore/rewrite/parsers/attribute_parser.py +56 -0
- mindspore/rewrite/parsers/class_def_parser.py +191 -108
- mindspore/rewrite/parsers/constant_parser.py +101 -0
- mindspore/rewrite/parsers/container_parser.py +88 -0
- mindspore/rewrite/parsers/for_parser.py +28 -15
- mindspore/rewrite/parsers/function_def_parser.py +21 -5
- mindspore/rewrite/parsers/if_parser.py +11 -28
- mindspore/rewrite/parsers/module_parser.py +9 -6
- mindspore/rewrite/parsers/return_parser.py +3 -2
- mindspore/rewrite/sparsify/__init__.py +0 -0
- mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
- mindspore/rewrite/sparsify/sparsify.py +109 -0
- mindspore/rewrite/sparsify/utils.py +173 -0
- mindspore/rewrite/symbol_tree.py +322 -109
- mindspore/rewrite/symbol_tree_builder.py +45 -8
- mindspore/rewrite/symbol_tree_dumper.py +0 -1
- mindspore/rewrite/topological_manager.py +1 -2
- mindspore/run_check/_check_version.py +209 -112
- mindspore/run_check/run_check.py +2 -1
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +6 -4
- mindspore/train/_utils.py +28 -5
- mindspore/train/amp.py +321 -50
- mindspore/train/callback/__init__.py +3 -1
- mindspore/train/callback/_backup_and_restore.py +120 -0
- mindspore/train/callback/_callback.py +8 -8
- mindspore/train/callback/_checkpoint.py +12 -9
- mindspore/train/callback/_early_stop.py +13 -7
- mindspore/train/callback/_history.py +8 -8
- mindspore/train/callback/_lambda_callback.py +6 -6
- mindspore/train/callback/_landscape.py +36 -38
- mindspore/train/callback/_loss_monitor.py +12 -6
- mindspore/train/callback/_lr_scheduler_callback.py +2 -4
- mindspore/train/callback/_on_request_exit.py +212 -0
- mindspore/train/callback/_reduce_lr_on_plateau.py +13 -7
- mindspore/train/callback/_summary_collector.py +27 -19
- mindspore/train/callback/_time_monitor.py +13 -7
- mindspore/train/checkpoint_pb2.py +68 -8
- mindspore/train/data_sink.py +122 -33
- mindspore/train/dataset_helper.py +28 -87
- mindspore/train/loss_scale_manager.py +4 -7
- mindspore/{nn → train}/metrics/__init__.py +20 -20
- mindspore/{nn → train}/metrics/accuracy.py +12 -10
- mindspore/{nn → train}/metrics/auc.py +4 -4
- mindspore/{nn → train}/metrics/bleu_score.py +4 -4
- mindspore/{nn → train}/metrics/confusion_matrix.py +10 -8
- mindspore/{nn → train}/metrics/cosine_similarity.py +4 -4
- mindspore/{nn → train}/metrics/dice.py +6 -5
- mindspore/{nn → train}/metrics/error.py +7 -5
- mindspore/{nn → train}/metrics/fbeta.py +9 -7
- mindspore/{nn → train}/metrics/hausdorff_distance.py +8 -6
- mindspore/{nn → train}/metrics/loss.py +4 -3
- mindspore/{nn → train}/metrics/mean_surface_distance.py +6 -5
- mindspore/{nn → train}/metrics/metric.py +6 -5
- mindspore/{nn → train}/metrics/occlusion_sensitivity.py +4 -3
- mindspore/{nn → train}/metrics/perplexity.py +5 -4
- mindspore/{nn → train}/metrics/precision.py +5 -4
- mindspore/{nn → train}/metrics/recall.py +5 -4
- mindspore/{nn → train}/metrics/roc.py +7 -6
- mindspore/{nn → train}/metrics/root_mean_square_surface_distance.py +6 -5
- mindspore/{nn → train}/metrics/topk.py +7 -5
- mindspore/train/mind_ir_pb2.py +339 -32
- mindspore/train/model.py +113 -84
- mindspore/train/serialization.py +547 -167
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +43 -12
- mindspore/train/train_thor/convert_utils.py +7 -1
- mindspore/train/train_thor/dataset_helper.py +3 -3
- mindspore/train/train_thor/model_thor.py +0 -4
- mindspore/turbojpeg.dll +0 -0
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +4 -3
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +901 -660
- mindspore/compression/common/constant.py +0 -124
- mindspore/compression/export/__init__.py +0 -19
- mindspore/compression/export/quant_export.py +0 -514
- mindspore/compression/quant/qat.py +0 -636
- mindspore/compression/quant/quant_utils.py +0 -462
- mindspore/compression/quant/quantizer.py +0 -68
- mindspore/libatomic-1.dll +0 -0
- mindspore/libgcc_s_seh-1.dll +0 -0
- mindspore/libgfortran-4.dll +0 -0
- mindspore/libgomp-1.dll +0 -0
- mindspore/libjpeg-62.dll +0 -0
- mindspore/libmindspore.dll +0 -0
- mindspore/libmindspore_common.dll +0 -0
- mindspore/libmindspore_core.dll +0 -0
- mindspore/libmindspore_glog.dll +0 -0
- mindspore/libnnacl.dll +0 -0
- mindspore/libopencv_core452.dll +0 -0
- mindspore/libopencv_imgcodecs452.dll +0 -0
- mindspore/libopencv_imgproc452.dll +0 -0
- mindspore/libquadmath-0.dll +0 -0
- mindspore/libsqlite3.dll +0 -0
- mindspore/libssp-0.dll +0 -0
- mindspore/libstdc++-6.dll +0 -0
- mindspore/libtinyxml2.dll +0 -0
- mindspore/libturbojpeg.dll +0 -0
- mindspore/libwinpthread-1.dll +0 -0
- mindspore/nn/layer/quant.py +0 -1868
- mindspore/nn/layer/rnn_utils.py +0 -90
- mindspore/nn/probability/dpn/__init__.py +0 -22
- mindspore/nn/probability/dpn/vae/__init__.py +0 -25
- mindspore/nn/probability/dpn/vae/cvae.py +0 -138
- mindspore/nn/probability/dpn/vae/vae.py +0 -122
- mindspore/nn/probability/infer/__init__.py +0 -22
- mindspore/nn/probability/infer/variational/elbo.py +0 -70
- mindspore/nn/probability/infer/variational/svi.py +0 -84
- mindspore/nn/probability/toolbox/__init__.py +0 -22
- mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
- mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -363
- mindspore/nn/probability/transforms/__init__.py +0 -22
- mindspore/nn/probability/transforms/transform_bnn.py +0 -262
- mindspore/nn/probability/zhusuan/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
- mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
- mindspore/ops/_op_impl/tbe/bias_add_grad_ds.py +0 -52
- mindspore/ops/_op_impl/tbe/scatter_nd_add_ds.py +0 -43
- mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/Identity_bprop.mindir +0 -9
- mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/stop_gradient_bprop.mindir +0 -12
- mindspore/ops/composite/array_ops.py +0 -210
- mindspore/ops/composite/clip_ops.py +0 -238
- mindspore/ops/composite/random_ops.py +0 -426
- mindspore/ops/composite/vmap_ops.py +0 -38
- mindspore/ops/operations/sponge_ops.py +0 -3531
- mindspore/ops/operations/sponge_update_ops.py +0 -2546
- mindspore/parallel/nn/__init__.py +0 -42
- mindspore/parallel/nn/loss.py +0 -22
- mindspore/parallel/nn/moe.py +0 -21
- mindspore/parallel/nn/op_parallel_config.py +0 -22
- mindspore/parallel/nn/transformer.py +0 -31
- mindspore/run_check/_check_deps_version.py +0 -84
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
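Note on the listing above: the whole metrics package moves from mindspore/nn/metrics to mindspore/train/metrics (the {nn → train}/metrics/* entries). The snippet below is a hedged sketch of what that move implies for import paths; whether mindspore.nn keeps a compatibility re-export is not determined by this listing.

# Import-path change implied by the {nn -> train}/metrics/* moves listed above
# (illustrative only; the 1.10 path is shown in the comment).
import numpy as np
from mindspore import Tensor
# 1.10.0:  from mindspore.nn.metrics import Accuracy
from mindspore.train.metrics import Accuracy   # 2.0.0rc1 location per this diff

metric = Accuracy('classification')
metric.clear()
metric.update(Tensor(np.array([[0.2, 0.8], [0.9, 0.1]])), Tensor(np.array([1, 0])))
print(metric.eval())  # 1.0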
mindspore/nn/layer/channel_shuffle.py
ADDED
@@ -0,0 +1,103 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""channel shuffle"""
+from mindspore.ops import operations as P
+from mindspore.nn.cell import Cell
+from mindspore.ops.primitive import _primexpr
+
+__all__ = ['ChannelShuffle']
+
+
+class ChannelShuffle(Cell):
+    r"""
+    Divide the channels of a Tensor whose shape is :math:`(*, C, H, W)` into :math:`g` groups to obtain
+    a Tensor with shape :math:`(*, \frac{C}{g}, g, H, W)`, and transpose along the corresponding axes of
+    :math:`\frac{C}{g}` and :math:`g` to restore the Tensor to the original shape.
+
+    Args:
+        groups (int): Number of groups to divide channels in, must be greater than 0. Refer to :math:`g`.
+
+    Inputs:
+        - **x** (Tensor) - Tensor of shape :math:`(*, C_{in}, H_{in}, W_{in})`.
+
+    Outputs:
+        Tensor, with the same type and shape as the `x`.
+
+    Raises:
+        TypeError: If `groups` is not an int.
+        ValueError: If `groups` is less than 1.
+        ValueError: If dims of `x` is less than 3.
+        ValueError: If number of channels can not be divisible by groups.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> channel_shuffle = nn.ChannelShuffle(2)
+        >>> x = Tensor(np.arange(16).astype(np.int32).reshape(1, 4, 2, 2))
+        >>> print(x)
+        [[[[0 1],
+           [2 3]],
+          [[4 5],
+           [6 7]],
+          [[8 9],
+           [10 11]],
+          [[12 13],
+           [14 15]],
+        ]]
+        >>> output = channel_shuffle(x)
+        >>> print(output)
+        [[[[0 1],
+           [2 3]],
+          [[8 9],
+           [10 11]],
+          [[4 5],
+           [6 7]],
+          [[12 13],
+           [14 15]],
+        ]]
+    """
+    def __init__(self, groups):
+        """Initialize ChannelShuffle."""
+        super(ChannelShuffle, self).__init__()
+        if not isinstance(groups, int):
+            raise TypeError("For ChannelShuffle, the param `groups` must be int, but got {}.".format(type(groups)))
+        if groups < 1:
+            raise ValueError(f"For ChannelShuffle, the param `groups` must be larger than 0, but got {groups}.")
+
+        self.groups = groups
+        self.shape = P.Shape()
+        self.reshape = P.Reshape()
+        self.transpose = P.Transpose()
+
+    @staticmethod
+    @_primexpr
+    def _check_input_dim(shape, channels, groups, cls_name):
+        """check input dim"""
+        dim = len(shape)
+        if dim < 3:
+            raise ValueError(f"For {cls_name}, the in_shape must have more than 2 dims, but got {dim}.")
+
+        if channels % groups != 0:
+            raise ValueError(f"For {cls_name}, number of channels must be divisible by groups, "
+                             f"but got {channels} channels and {groups} groups.")
+
+    def construct(self, x):
+        x_shape = self.shape(x)
+        n, c = x_shape[0], x_shape[1]
+        self._check_input_dim(x_shape, c, self.groups, self.cls_name)
+        out = self.reshape(x, (n, self.groups, c // self.groups, -1))
+        out = self.transpose(out, (0, 2, 1, 3))
+        return self.reshape(out, x_shape)
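The construct above implements the shuffle as reshape → transpose → reshape over the channel axis. The following is a rough, self-contained NumPy sketch of that same trick (our own illustration, not part of the package; the helper name channel_shuffle_np is hypothetical):

# NumPy-only sketch of the reshape -> transpose -> reshape trick used by
# nn.ChannelShuffle above; the helper name channel_shuffle_np is ours.
import numpy as np

def channel_shuffle_np(x, groups):
    n, c = x.shape[0], x.shape[1]
    assert c % groups == 0, "channels must be divisible by groups"
    out = x.reshape(n, groups, c // groups, -1)   # split the channel axis into groups
    out = out.transpose(0, 2, 1, 3)               # swap the two group-related axes
    return out.reshape(x.shape)                   # restore the original shape

x = np.arange(16, dtype=np.int32).reshape(1, 4, 2, 2)
print(channel_shuffle_np(x, 2)[:, :, 0, 0])       # channels reordered 0, 2, 1, 3 -> [[ 0  8  4 12]]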
mindspore/nn/layer/combined.py
CHANGED
@@ -17,7 +17,7 @@ from __future__ import absolute_import
 
 from mindspore import nn
 from mindspore.ops.primitive import Primitive
-
+import mindspore._checkparam as Validator
 from mindspore.nn.layer.normalization import BatchNorm2d, BatchNorm1d
 from mindspore.nn.layer.activation import get_activation, LeakyReLU
 from mindspore.nn.cell import Cell
mindspore/nn/layer/container.py
CHANGED
@@ -109,7 +109,7 @@ class _CellListBase:
 class SequentialCell(Cell):
     """
     Sequential Cell container. For more details about Cell, please refer to
-    `Cell <https://www.mindspore.cn/docs/en/
+    `Cell <https://www.mindspore.cn/docs/en/r2.0/api_python/nn/mindspore.nn.Cell.html#mindspore.nn.Cell>`_.

     A list of Cells will be added to it in the order they are passed in the constructor.
     Alternatively, an ordered dict of cells can also be passed in.

@@ -142,7 +142,7 @@ class SequentialCell(Cell):
         >>> conv = nn.Conv2d(3, 2, 3, pad_mode='valid', weight_init="ones")
         >>> relu = nn.ReLU()
         >>> seq = nn.SequentialCell([conv, relu])
-        >>> x = Tensor(np.ones([1, 3, 4, 4]), dtype=mindspore.float32)
+        >>> x = Tensor(np.ones([1, 3, 4, 4]), dtype = mindspore.float32)
         >>> output = seq(x)
         >>> print(output)
         [[[[27. 27.]

@@ -168,7 +168,12 @@ class SequentialCell(Cell):
         self._is_dynamic_name = []
         if len(args) == 1:
             cells = args[0]
-            if isinstance(cells,
+            if isinstance(cells, Cell):
+                cell = cells
+                self.insert_child_to_cell(str(0), cell)
+                cell.update_parameters_name(str(0) + ".")
+                self._is_dynamic_name.append(True)
+            elif isinstance(cells, list):
                 for index, cell in enumerate(cells):
                     self.insert_child_to_cell(str(index), cell)
                     cell.update_parameters_name(str(index) + ".")

@@ -179,7 +184,7 @@ class SequentialCell(Cell):
                     cell.update_parameters_name(name + ".")
                     self._is_dynamic_name.append(False)
             else:
-                raise TypeError(f"For '{self.__class__.__name__}', the 'args[0]' must be list or orderedDict, "
+                raise TypeError(f"For '{self.__class__.__name__}', the 'args[0]' must be Cell, list or orderedDict, "
                                 f"but got {type(cells).__name__}")
         else:
             for index, cell in enumerate(args):

@@ -233,6 +238,9 @@ class SequentialCell(Cell):
         self._cells = temp_dict
         self.cell_list = list(self._cells.values())

+    def __bool__(self):
+        return len(self._cells) != 0
+
     def __len__(self):
         return len(self._cells)

@@ -279,13 +287,40 @@ class SequentialCell(Cell):
             input_data = cell(input_data)
         return input_data

+    def _insert(self, index, cell):
+        """
+        Inserts a given Cell before a given index in the list.
+
+        Args:
+            index(int): The Insert index in the CellList.
+            cell(Cell): The Cell to be inserted.
+        """
+        cls_name = self.__class__.__name__
+        idx = _valid_index(len(self), index, cls_name)
+        _valid_cell(cell, cls_name)
+        length = len(self)
+        prefix, key_index = _get_prefix_and_index(self._cells)
+        while length > idx:
+            if self._auto_prefix:
+                tmp_cell = self._cells[str(length-1)]
+                for _, param in tmp_cell.parameters_and_names():
+                    param.name = f'{prefix}{str(length)}{"."}{".".join(param.name.split(".")[key_index+1:])}'
+            self._cells[str(length)] = self._cells[str(length - 1)]
+            length -= 1
+        self._cells[str(idx)] = cell
+        if self._auto_prefix:
+            cell.update_parameters_name(prefix + str(idx) + ".")
+        self.cell_list = list(self._cells.values())
+        self._is_dynamic_name.insert(index, True)
+

 class CellList(_CellListBase, Cell):
     """
     Holds Cells in a list. For more details about Cell, please refer to
-    `Cell <https://www.mindspore.cn/docs/en/
+    `Cell <https://www.mindspore.cn/docs/en/r2.0/api_python/nn/mindspore.nn.Cell.html#mindspore.nn.Cell>`_.

-    CellList can be used like a regular Python list, the Cells it contains have been initialized.
+    CellList can be used like a regular Python list, the Cells it contains have been initialized. Unlike the
+    SequentialCell, the cells in CellList are not connected.

     Args:
         args (list, optional): List of subclass of Cell.

@@ -295,6 +330,8 @@ class CellList(_CellListBase, Cell):

     Examples:
         >>> import mindspore.nn as nn
+        >>> import mindspore as ms
+        >>> import numpy as np
         >>>
         >>> conv = nn.Conv2d(100, 20, 3)
         >>> bn = nn.BatchNorm2d(20)

@@ -303,6 +340,12 @@ class CellList(_CellListBase, Cell):
         >>> cell_ls.insert(0, conv)
         >>> cell_ls.append(relu)
         >>> cell_ls.extend([relu, relu])
+        >>> cell_ls_3 = cell_ls[3]
+        >>> input1 = ms.Tensor(np.ones([2, 3]), ms.float32)
+        >>> output = cell_ls_3(input1)
+        >>> print(output)
+        [[1. 1. 1.]
+        [1. 1. 1.]]
     """
     def __init__(self, *args, **kwargs):
         """Initialize CellList."""

@@ -355,6 +398,9 @@ class CellList(_CellListBase, Cell):
             temp_dict[str(idx)] = cell
         self._cells = temp_dict

+    def __bool__(self):
+        return len(self._cells) != 0
+
     def __len__(self):
         return len(self._cells)

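The SequentialCell changes above allow the container to be built from a single Cell (registered under the child name "0") and make empty containers falsy through the new __bool__. A minimal usage sketch against the documented mindspore.nn API (illustrative, not taken from the package):

import numpy as np
import mindspore as ms
import mindspore.nn as nn

# A single Cell is now accepted instead of a list or OrderedDict.
seq = nn.SequentialCell(nn.ReLU())
print(len(seq))    # 1
print(bool(seq))   # True, because __bool__ returns len(self._cells) != 0

x = ms.Tensor(np.array([[-1.0, 2.0]]), ms.float32)
print(seq(x))      # [[0. 2.]]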
mindspore/nn/layer/conv.py
CHANGED
@@ -20,11 +20,12 @@ import numpy as np
 from mindspore import log as logger
 from mindspore import context
 from mindspore.ops import operations as P
-from mindspore.ops.primitive import
+from mindspore.ops.primitive import _primexpr
 from mindspore.common.parameter import Parameter
 from mindspore.common.initializer import initializer
 from mindspore.common.tensor import Tensor
-from mindspore
+from mindspore import _checkparam as Validator
+from mindspore._checkparam import twice, _check_3d_int_or_tuple
 from mindspore._extends import cell_attr_register
 from mindspore.nn.cell import Cell

@@ -93,8 +94,8 @@ class _Conv(Cell):
         if transposed:
             shape = [in_channels, out_channels // group, *kernel_size]
         else:
-            shape = [out_channels, *kernel_size, in_channels // group] \
-
+            shape = [out_channels, *kernel_size, in_channels // group] if self.data_format == "NHWC" else \
+                [out_channels, in_channels // group, *kernel_size]
         self.weight = Parameter(initializer(self.weight_init, shape), name='weight')

         if Validator.check_bool(has_bias, "has_bias", self.cls_name):

@@ -142,7 +143,7 @@ class Conv2d(_Conv):
         \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})

     where :math:`ccor` is the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
-    :math:`C_{in}` is the channel number of the input, :math:`out_{j}` corresponds to the
+    :math:`C_{in}` is the channel number of the input, :math:`out_{j}` corresponds to the :math:`j`-th channel of
     the output and :math:`j` is in the range of :math:`[0, C_{out}-1]`. :math:`\text{weight}(C_{\text{out}_j}, k)`
     is a convolution kernel slice with shape :math:`(\text{kernel_size[0]}, \text{kernel_size[1]})`,
     where :math:`\text{kernel_size[0]}` and :math:`\text{kernel_size[1]}` are the height and width of the convolution

@@ -156,6 +157,10 @@ class Conv2d(_Conv):
     For more details, please refers to the paper `Gradient Based Learning Applied to Document
     Recognition <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.

+    Note:
+        On Ascend platform, only group convolution in depthwise convolution scenarios is supported.
+        That is, when `group>1`, condition `in\_channels` = `out\_channels` = `group` must be satisfied.
+
     Args:
         in_channels (int): The channel number of the input tensor of the Conv2d layer.
         out_channels (int): The channel number of the output tensor of the Conv2d layer.

@@ -216,17 +221,17 @@ class Conv2d(_Conv):

     .. math::
         \begin{array}{ll} \\
-            H_{out}
-            W_{out}
+            H_{out} = \left \lceil{\frac{H_{in}}{\text{stride[0]}}} \right \rceil \\
+            W_{out} = \left \lceil{\frac{W_{in}}{\text{stride[1]}}} \right \rceil \\
         \end{array}

     pad_mode is 'valid':

     .. math::
         \begin{array}{ll} \\
-            H_{out}
+            H_{out} = \left \lceil{\frac{H_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) }
             {\text{stride[0]}}} \right \rceil \\
-            W_{out}
+            W_{out} = \left \lceil{\frac{W_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) }
             {\text{stride[1]}}} \right \rceil \\
         \end{array}

@@ -234,9 +239,9 @@ class Conv2d(_Conv):

     .. math::
         \begin{array}{ll} \\
-            H_{out}
+            H_{out} = \left \lfloor{\frac{H_{in} + padding[0] + padding[1] - (\text{kernel_size[0]} - 1) \times
             \text{dilation[0]} - 1 }{\text{stride[0]}} + 1} \right \rfloor \\
-            W_{out}
+            W_{out} = \left \lfloor{\frac{W_{in} + padding[2] + padding[3] - (\text{kernel_size[1]} - 1) \times
             \text{dilation[1]} - 1 }{\text{stride[1]}} + 1} \right \rfloor \\
         \end{array}

@@ -311,7 +316,7 @@ class Conv2d(_Conv):
         return output


-@
+@_primexpr
 def _check_input_3d(input_shape, op_name):
     if len(input_shape) != 3:
         raise ValueError(f"For '{op_name}', the dimension of input must be 3d, but got {len(input_shape)}.")

@@ -329,7 +334,7 @@ class Conv1d(_Conv):
         \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})

     where :math:`ccor` is the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
-    :math:`C_{in}` is the channel number of the input, :math:`out_{j}` corresponds to the
+    :math:`C_{in}` is the channel number of the input, :math:`out_{j}` corresponds to the :math:`j`-th channel of
     the output and :math:`j` is in the range of :math:`[0, C_{out}-1]`. :math:`\text{weight}(C_{\text{out}_j}, k)`
     is a convolution kernel slice with shape :math:`\text{kernel_size}`, where :math:`\text{kernel_size}`
     is the width of the convolution kernel. :math:`\text{bias}` is the bias parameter,

@@ -340,6 +345,10 @@ class Conv1d(_Conv):
     For more details, please refers to the paper `Gradient Based Learning Applied to Document
     Recognition <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.

+    Note:
+        On Ascend platform, only group convolution in depthwise convolution scenarios is supported.
+        That is, when `group>1`, condition `in\_channels` = `out\_channels` = `group` must be satisfied.
+
     Args:
         in_channels (int): The channel number of the input tensor of the Conv1d layer.
         out_channels (int): The channel number of the output tensor of the Conv1d layer.

@@ -432,10 +441,10 @@ class Conv1d(_Conv):
         Validator.check_value_type("stride", stride, [int], self.cls_name)
         Validator.check_value_type("padding", padding, [int], self.cls_name)
         Validator.check_value_type("dilation", dilation, [int], self.cls_name)
-        Validator.check_int(kernel_size, 1,
-        Validator.check_int(stride, 1,
+        Validator.check_int(kernel_size, 1, Validator.GE, 'kernel_size', self.cls_name)
+        Validator.check_int(stride, 1, Validator.GE, 'stride', self.cls_name)
         Validator.check_non_negative_int(padding, 'padding', self.cls_name)
-        Validator.check_int(dilation, 1,
+        Validator.check_int(dilation, 1, Validator.GE, 'dilation', self.cls_name)
         kernel_size = (1, kernel_size)
         stride = (1, stride)
         dilation = (1, dilation)

@@ -488,7 +497,7 @@ class Conv1d(_Conv):
         return output


-@
+@_primexpr
 def _check_input_5dims(input_shape, op_name):
     if len(input_shape) != 5:
         raise ValueError(f"For '{op_name}', the dimension of input must be 5d, but got {len(input_shape)}.")

@@ -508,8 +517,8 @@ class Conv3d(_Conv):
         \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})

     where :math:`ccor` is the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
-    :math:`C_{in}` is the channel number of the input, :math:`out_{j}` corresponds to the
-    the output and :math:`j` is in the range of :math:`[0
+    :math:`C_{in}` is the channel number of the input, :math:`out_{j}` corresponds to the :math:`j`-th channel of
+    the output and :math:`j` is in the range of :math:`[0, C_{out}-1]`. :math:`\text{weight}(C_{\text{out}_j}, k)`
     is a convolution kernel slice with shape
     :math:`(\text{kernel_size[0]}, \text{kernel_size[1]}, \text{kernel_size[2]})`,
     where :math:`\text{kernel_size[0]}`, :math:`\text{kernel_size[1]}` and :math:`\text{kernel_size[2]}` are

@@ -522,6 +531,10 @@ class Conv3d(_Conv):
     For more details, please refers to the paper `Gradient Based Learning Applied to Document
     Recognition <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.

+    Note:
+        On Ascend platform, only group convolution in depthwise convolution scenarios is supported.
+        That is, when `group>1`, condition `in\_channels` = `out\_channels` = `group` must be satisfied.
+
     Args:
         in_channels (int): The channel number of the input tensor of the Conv3d layer.
         out_channels (int): The channel number of the output tensor of the Conv3d layer.

@@ -580,9 +593,9 @@ class Conv3d(_Conv):

     .. math::
         \begin{array}{ll} \\
-            D_{out} = \left \
-            H_{out} = \left \
-            W_{out} = \left \
+            D_{out} = \left \lceil{\frac{D_{in}}{\text{stride[0]}}} \right \rceil \\
+            H_{out} = \left \lceil{\frac{H_{in}}{\text{stride[1]}}} \right \rceil \\
+            W_{out} = \left \lceil{\frac{W_{in}}{\text{stride[2]}}} \right \rceil \\
         \end{array}


@@ -646,6 +659,10 @@ class Conv3d(_Conv):
                  bias_init='zeros',
                  data_format='NCDHW'):
         """Initialize Conv3d."""
+        if not in_channels % group == 0 and out_channels % group == 0:
+            raise ValueError("The argument 'group' should be divisible by 'in_channels' " \
+                             "and 'out_channels'")
+
         kernel_size = _check_3d_int_or_tuple("kernel_size", kernel_size, self.cls_name)
         stride = _check_3d_int_or_tuple("stride", stride, self.cls_name)
         dilation = _check_3d_int_or_tuple("dilation", dilation, self.cls_name)

@@ -665,25 +682,42 @@ class Conv3d(_Conv):
                          weight_init,
                          bias_init,
                          data_format)
-
+        out_channels = self.out_channels // group
+        self.conv3d = P.Conv3D(out_channel=out_channels,
                                kernel_size=self.kernel_size,
                                mode=1,
                                pad_mode=self.pad_mode,
                                pad=self.padding,
                                stride=self.stride,
                                dilation=self.dilation,
-                               group=
+                               group=1,
                                data_format=self.data_format)
         self.bias_add = P.BiasAdd(data_format=self.data_format)
         self.shape = P.Shape()
+        self.concat = P.Concat(1)
+        self.split_0 = P.Split(0, self.group)
+        self.split_1 = P.Split(1, self.group)

     def construct(self, x):
         x_shape = self.shape(x)
         _check_input_5dims(x_shape, self.cls_name)
-
-
-
-
+        if self.group == 1:
+            out = self.conv3d(x, self.weight)
+            if self.has_bias:
+                out = self.bias_add(out, self.bias)
+        else:
+            features = self.split_1(x)
+            weights = self.split_0(self.weight)
+            outputs = ()
+            for i in range(self.group):
+                output = self.conv3d(features[i], weights[i])
+                outputs = outputs + (output,)
+            out = self.concat(outputs)
+            if self.bias is not None:
+                new_shape = [1 for _ in range(out.ndim)]
+                new_shape[1] = self.out_channels
+                out = out + self.bias.reshape(new_shape)
+        return out


 class Conv3dTranspose(_Conv):

@@ -691,7 +725,8 @@ class Conv3dTranspose(_Conv):
     Calculates a 3D transposed convolution, which can be regarded as Conv3d for the gradient of the input.
     It also called deconvolution (although it is not an actual deconvolution).

-
+    he input is typically of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`,
+    where :math:`N` is batch size, :math:`C_{in}` is a number of
     channels, :math:`D_{in}, H_{in}, W_{in}` are the depth, height and width of the feature layer respectively.

     When Conv3d and Conv3dTranspose are initialized with the same parameters, and `pad_mode` is set to 'pad',

@@ -940,6 +975,12 @@ class Conv2dTranspose(_Conv):
             If `padding` is a tuple of 4 integers, then the top, bottom, left, and right padding
             is equal to `padding[0]`, `padding[1]`, `padding[2]`, and `padding[3]` respectively.
             The value should be greater than or equal to 0. Default: 0.
+        output_padding (Union[int, tuple[int]]): The number of padding on the height and width directions of the output.
+            The data type is an integer or a tuple of two integers. If `output_padding` is an integer,
+            then the bottom and right padding are all equal to `output_padding`. If `output_padding` is a tuple of
+            2 integers, then the bottom and right padding is equal to `output_padding[0]`, `output_padding[1]`
+            respectively. If `output_padding` is not equal to 0, `pad_mode` must be `pad`.
+            The value should be in range of `[0, max(stride, dilation))` . Default: 0.
         dilation (Union[int, tuple[int]]): Dilation size of 2D convolution kernel.
             The data type is an integer or a tuple of two integers. If :math:`k > 1`, the kernel is sampled
             every `k` elements. The value of `k` on the height and width directions is in range of [1, H]

@@ -985,12 +1026,12 @@ class Conv2dTranspose(_Conv):

     .. math::
         \begin{array}{ll} \\
-            H_{out} = \text H_{in}\times \text {stride[0]} - (padding[0] + padding[1])
-            \text{kernel_size[0]} + (\text{dilation[0]} - 1) \times
-            (\text{kernel_size[0]} - 1) - \text {stride[0]} \\
-            W_{out} = \text W_{in}\times \text {stride[1]} - (padding[2] + padding[3])
-            \text{kernel_size[1]} + (\text{dilation[1]} - 1) \times
-            (\text{kernel_size[1]} - 1) - \text {stride[1]} \\
+            H_{out} = \text H_{in}\times \text {stride[0]} - (padding[0] + padding[1])
+            + \text{kernel_size[0]} + (\text{dilation[0]} - 1) \times
+            (\text{kernel_size[0]} - 1) - \text {stride[0]} + \text {output_padding[0]} \\
+            W_{out} = \text W_{in}\times \text {stride[1]} - (padding[2] + padding[3])
+            + \text{kernel_size[1]} + (\text{dilation[1]} - 1) \times
+            (\text{kernel_size[1]} - 1) - \text {stride[1]} + \text {output_padding[1]} \\
         \end{array}

     Raises:

@@ -1020,6 +1061,7 @@ class Conv2dTranspose(_Conv):
                  stride=1,
                  pad_mode='same',
                  padding=0,
+                 output_padding=0,
                  dilation=1,
                  group=1,
                  has_bias=False,

@@ -1032,6 +1074,9 @@ class Conv2dTranspose(_Conv):
         Validator.check_value_type('padding', padding, (int, tuple), self.cls_name)
         if isinstance(padding, tuple):
             Validator.check_equal_int(len(padding), 4, 'padding size', self.cls_name)
+        Validator.check_value_type('output_padding', output_padding, (int, tuple), self.cls_name)
+        if isinstance(output_padding, tuple):
+            Validator.check_equal_int(len(output_padding), 2, 'output_padding size', self.cls_name)
         # out_channels and in_channels swap.
         # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel,
         # then Conv2dTranspose's out_channel refers to Conv2DBackpropInput's in_channel.

@@ -1056,6 +1101,7 @@ class Conv2dTranspose(_Conv):
         self.is_valid = self.pad_mode == 'valid'
         self.is_same = self.pad_mode == 'same'
         self.is_pad = self.pad_mode == 'pad'
+        self.output_padding = output_padding
         if Validator.check_bool(has_bias, "has_bias", self.cls_name):
             self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')

@@ -1087,7 +1133,29 @@ class Conv2dTranspose(_Conv):
         if self.has_bias:
             return self.bias_add(self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out)),
                                  self.bias)
-
+        conv2d_trans_ret = self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out))
+        if isinstance(self.output_padding, tuple):
+            if self.output_padding[0] < 0 or self.output_padding[0] >= max(self.dilation[0], self.stride[0]):
+                raise ValueError("output_padding[0] must be in range of [0, max(stride_h, dilation_h)).")
+            if self.output_padding[1] < 0 or self.output_padding[1] >= max(self.dilation[1], self.stride[1]):
+                raise ValueError("output_padding[1] must be in range of [0, max(stride_w, dilation_w)).")
+            if not self.is_pad and (self.output_padding[0] > 0 or self.output_padding[1] > 0):
+                raise ValueError("when output_padding is not zero, pad_mode must be 'pad'")
+
+            pad = P.Pad(paddings=((0, 0), (0, 0), (0, self.output_padding[0]), (0, self.output_padding[1])))
+            return pad(conv2d_trans_ret)
+
+        if self.output_padding == 0:
+            return conv2d_trans_ret
+
+        if self.output_padding < 0 or self.output_padding >= max(self.dilation[0], self.stride[0]):
+            raise ValueError("output_padding must be in range of [0, max(stride_h, dilation_h)).")
+        if self.output_padding < 0 or self.output_padding >= max(self.dilation[1], self.stride[1]):
+            raise ValueError("output_padding must be in range of [0, max(stride_w, dilation_w)).")
+        if not self.is_pad and self.output_padding > 0:
+            raise ValueError("when output_padding is not zero, pad_mode must be 'pad'")
+        pad = P.Pad(paddings=((0, 0), (0, 0), (0, self.output_padding), (0, self.output_padding)))
+        return pad(conv2d_trans_ret)


 class Conv1dTranspose(_Conv):

@@ -1095,7 +1163,8 @@ class Conv1dTranspose(_Conv):
     Calculates a 1D transposed convolution, which can be regarded as Conv1d for the gradient of the input,
     also called deconvolution (although it is not an actual deconvolution).

-    The input is typically of shape :math:`(N,
+    The input is typically of shape :math:`(N, C_{in}, L_{in})`, where :math:`N` is batch size,
+    :math:`C` is a number of channels
     and :math:`L_{in}` is a length of sequence.

     When Conv1d and ConvTranspose1d are initialized with the same parameters, and `pad_mode` is set to 'pad',

@@ -1147,18 +1216,18 @@ class Conv1dTranspose(_Conv):
     pad_mode is 'same':

     .. math::
-        L_{out}
+        L_{out} = \left \lfloor{\frac{L_{in}}{\text{stride}} + 1} \right \rfloor

     pad_mode is 'valid':

     .. math::
-        L_{out}
+        L_{out} = \left \lfloor{\frac{L_{in} - \text{dilation} \times (\text{kernel_size} - 1) }
         {\text{stride}} + 1} \right \rfloor

     pad_mode is 'pad':

     .. math::
-        L_{out}
+        L_{out} = \left \lfloor{\frac{L_{in} + 2 \times padding - (\text{dilation} - 1) \times
         \text{kernel_size} - 1 }{\text{stride}} + 1} \right \rfloor

     Raises:

@@ -1195,10 +1264,10 @@ class Conv1dTranspose(_Conv):
         Validator.check_value_type("stride", stride, [int], self.cls_name)
         Validator.check_value_type("padding", padding, [int], self.cls_name)
         Validator.check_value_type("dilation", dilation, [int], self.cls_name)
-        Validator.check_int(kernel_size, 1,
-        Validator.check_int(stride, 1,
+        Validator.check_int(kernel_size, 1, Validator.GE, 'kernel_size', self.cls_name)
+        Validator.check_int(stride, 1, Validator.GE, 'stride', self.cls_name)
         Validator.check_non_negative_int(padding, 'padding', self.cls_name)
-        Validator.check_int(dilation, 1,
+        Validator.check_int(dilation, 1, Validator.GE, 'dilation', self.cls_name)
         kernel_size = (1, kernel_size)
         stride = (1, stride)
         dilation = (1, dilation)
mindspore/nn/layer/dense.py
CHANGED
@@ -23,14 +23,14 @@ import mindspore.common.dtype as mstype
 from mindspore.common.tensor import Tensor
 from mindspore.common.initializer import initializer, Uniform
 from mindspore.common.parameter import Parameter
-from mindspore.ops.primitive import constexpr
-from mindspore
+from mindspore.ops.primitive import constexpr, _primexpr
+from mindspore import _checkparam as Validator
 from mindspore.nn.cell import Cell

 __all__ = ['BiDense']


-@
+@_primexpr
 def check_dense_inputs_same_shape(input1, input2, prim_name=None):
     msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
     if input1[:-1] != input2[:-1]:

@@ -38,7 +38,7 @@ def check_dense_inputs_same_shape(input1, input2, prim_name=None):
                          f"{input1} of 'input1' and {input2} of 'input2'")


-@constexpr
+@constexpr(check=False)
 def _check_is_tensor(param_name, input_data, cls_name):
     """Internal function, used to check whether the input data is Tensor."""
     if input_data is not None and not isinstance(P.typeof(input_data), mstype.tensor_type):

@@ -46,7 +46,7 @@ def _check_is_tensor(param_name, input_data, cls_name):
                         f"but got '{P.typeof(input_data)}'")


-@
+@_primexpr
 def check_last_dimension(input_dim, input_channels, input_name, input_channels_name, prim_name=None):
     msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
     if input_dim != input_channels:

@@ -63,7 +63,7 @@ class BiDense(Cell):
     .. math::
         y = x_1^T A x_2 + b,

-    where :math:`
+    where :math:`x_{1}` is the first input tensor, :math:`x_{2}` is the second input tensor
     , :math:`A` is a weight matrix with the same data type as the :math:`x_{*}` created by the layer
     , and :math:`b` is a bias vector with the same data type as the :math:`x_{*}` created by the layer
     (only if has_bias is True).

@@ -76,7 +76,7 @@ class BiDense(Cell):
             The values of str refer to the function `initializer`. Default: None.
         bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter.
             The values of str refer to the function `initializer`. Default: None.
-        has_bias (bool): Specifies whether the layer uses
+        has_bias (bool): Specifies whether the layer uses :math:`\text{bias}` vector. Default: True.

     Shape:
         - **input1** - :math:`(*, H_{in1})` where :math:`H_{in1}=\text{in1_channels}` and

@@ -85,8 +85,9 @@ class BiDense(Cell):
         - **input2** - :math:`(*, H_{in2})` where :math:`H_{in2}=\text{in2_channels}` and
           :math:`*` means any number of additional dimensions including none. All but the last dimension
           of the inputs should be the same.
-        - **output** - :math:`(*, H_{out})` where :math:`H_{out}=\text{out_channels}`
-
+        - **output** - :math:`(*, H_{out})` where :math:`H_{out}=\text{out_channels}` and
+          :math:`*` means any number of additional dimensions including none. All but the last dimension
+          are the same shape as the inputs.

     Dtype:
         - **input1** (Tensor) - The dtype must be float16 or float32 and be same as **input2**.