mindspore 2.0.0a0__cp37-cp37m-win_amd64.whl → 2.0.0rc1__cp37-cp37m-win_amd64.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registries, and is provided for informational purposes only.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +4 -2
- mindspore/_c_dataengine.cp37-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp37-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp37-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +102 -0
- mindspore/_checkparam.py +1066 -1001
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +4 -3
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -48
- mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
- mindspore/_extends/parse/__init__.py +5 -3
- mindspore/_extends/parse/namespace.py +16 -1
- mindspore/_extends/parse/parser.py +107 -22
- mindspore/_extends/parse/resources.py +0 -7
- mindspore/_extends/parse/standard_method.py +885 -413
- mindspore/amp.py +52 -57
- mindspore/boost/boost.py +2 -2
- mindspore/boost/boost_cell_wrapper.py +38 -20
- mindspore/boost/dim_reduce.py +3 -3
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +4 -6
- mindspore/common/_decorator.py +2 -0
- mindspore/common/_register_for_adapter.py +55 -0
- mindspore/common/_stub_tensor.py +201 -0
- mindspore/common/_utils.py +41 -7
- mindspore/common/api.py +215 -141
- mindspore/common/dtype.py +8 -1
- mindspore/common/dump.py +2 -2
- mindspore/common/initializer.py +4 -2
- mindspore/common/jit_config.py +17 -13
- mindspore/common/mutable.py +33 -13
- mindspore/common/parameter.py +23 -21
- mindspore/common/seed.py +8 -24
- mindspore/common/sparse_tensor.py +62 -41
- mindspore/common/tensor.py +852 -1154
- mindspore/communication/__init__.py +2 -2
- mindspore/communication/_comm_helper.py +11 -4
- mindspore/communication/management.py +22 -21
- mindspore/config/op_info.config +501 -1008
- mindspore/context.py +201 -23
- mindspore/dataset/__init__.py +6 -6
- mindspore/dataset/audio/__init__.py +7 -7
- mindspore/dataset/audio/transforms.py +670 -30
- mindspore/dataset/audio/utils.py +47 -4
- mindspore/dataset/audio/validators.py +223 -1
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/core/config.py +210 -14
- mindspore/dataset/core/validator_helpers.py +2 -2
- mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
- mindspore/dataset/debug/debug_hook.py +65 -0
- mindspore/dataset/debug/pre_defined_hook.py +67 -0
- mindspore/dataset/engine/__init__.py +7 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +322 -66
- mindspore/dataset/engine/datasets_audio.py +80 -76
- mindspore/dataset/engine/datasets_standard_format.py +51 -38
- mindspore/dataset/engine/datasets_text.py +232 -118
- mindspore/dataset/engine/datasets_user_defined.py +41 -17
- mindspore/dataset/engine/datasets_vision.py +746 -225
- mindspore/dataset/engine/graphdata.py +75 -10
- mindspore/dataset/engine/iterators.py +45 -5
- mindspore/dataset/engine/offload.py +48 -28
- mindspore/dataset/engine/validators.py +117 -8
- mindspore/dataset/text/__init__.py +6 -5
- mindspore/dataset/text/transforms.py +86 -3
- mindspore/dataset/text/utils.py +6 -4
- mindspore/dataset/text/validators.py +25 -0
- mindspore/dataset/transforms/__init__.py +3 -2
- mindspore/dataset/transforms/c_transforms.py +1 -1
- mindspore/dataset/transforms/transforms.py +2 -2
- mindspore/dataset/utils/__init__.py +2 -1
- mindspore/dataset/utils/line_reader.py +121 -0
- mindspore/dataset/vision/__init__.py +2 -3
- mindspore/dataset/vision/c_transforms.py +9 -9
- mindspore/dataset/vision/py_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +2 -0
- mindspore/dataset/vision/transforms.py +160 -161
- mindspore/dataset/vision/utils.py +3 -3
- mindspore/experimental/map_parameter.py +38 -26
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/callback/callback.h +9 -13
- mindspore/include/api/callback/ckpt_saver.h +2 -2
- mindspore/include/api/callback/loss_monitor.h +2 -2
- mindspore/include/api/callback/lr_scheduler.h +5 -5
- mindspore/include/api/callback/time_monitor.h +2 -2
- mindspore/include/api/callback/train_accuracy.h +4 -6
- mindspore/include/api/cfg.h +19 -6
- mindspore/include/api/context.h +44 -9
- mindspore/include/api/delegate.h +1 -1
- mindspore/include/api/metrics/accuracy.h +2 -2
- mindspore/include/api/metrics/metrics.h +4 -3
- mindspore/include/api/model.h +9 -4
- mindspore/include/api/model_parallel_runner.h +2 -2
- mindspore/include/api/net.h +12 -11
- mindspore/include/api/serialization.h +19 -3
- mindspore/include/api/types.h +3 -3
- mindspore/include/dataset/constants.h +7 -0
- mindspore/include/dataset/text.h +59 -0
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +1 -1
- mindspore/mindrecord/filereader.py +18 -0
- mindspore/mindrecord/filewriter.py +197 -34
- mindspore/mindrecord/shardreader.py +9 -0
- mindspore/mindrecord/shardwriter.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +3 -3
- mindspore/mindrecord/tools/cifar10_to_mr.py +3 -3
- mindspore/mindrecord/tools/csv_to_mr.py +3 -3
- mindspore/mindrecord/tools/imagenet_to_mr.py +16 -11
- mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
- mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/nn/__init__.py +0 -4
- mindspore/nn/cell.py +204 -132
- mindspore/nn/dynamic_lr.py +1 -1
- mindspore/nn/grad/cell_grad.py +7 -6
- mindspore/nn/layer/__init__.py +5 -4
- mindspore/nn/layer/activation.py +40 -89
- mindspore/nn/layer/basic.py +255 -624
- mindspore/nn/layer/channel_shuffle.py +7 -6
- mindspore/nn/layer/combined.py +1 -1
- mindspore/nn/layer/container.py +41 -4
- mindspore/nn/layer/conv.py +64 -28
- mindspore/nn/layer/dense.py +9 -8
- mindspore/nn/layer/embedding.py +27 -25
- mindspore/nn/layer/image.py +53 -46
- mindspore/nn/layer/math.py +97 -105
- mindspore/nn/layer/normalization.py +117 -86
- mindspore/nn/layer/padding.py +185 -95
- mindspore/nn/layer/pooling.py +817 -414
- mindspore/nn/layer/rnn_cells.py +10 -15
- mindspore/nn/layer/rnns.py +37 -38
- mindspore/nn/layer/thor_layer.py +11 -12
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +701 -0
- mindspore/nn/learning_rate_schedule.py +8 -8
- mindspore/nn/loss/__init__.py +5 -4
- mindspore/nn/loss/loss.py +334 -199
- mindspore/nn/optim/ada_grad.py +6 -6
- mindspore/nn/optim/adadelta.py +2 -3
- mindspore/nn/optim/adafactor.py +4 -5
- mindspore/nn/optim/adam.py +126 -62
- mindspore/nn/optim/adamax.py +3 -4
- mindspore/nn/optim/adasum.py +6 -6
- mindspore/nn/optim/asgd.py +2 -2
- mindspore/nn/optim/ftrl.py +67 -38
- mindspore/nn/optim/lamb.py +4 -5
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +43 -4
- mindspore/nn/optim/momentum.py +6 -5
- mindspore/nn/optim/optimizer.py +3 -1
- mindspore/nn/optim/proximal_ada_grad.py +2 -2
- mindspore/nn/optim/rmsprop.py +1 -1
- mindspore/nn/optim/rprop.py +8 -9
- mindspore/nn/optim/sgd.py +19 -13
- mindspore/nn/optim/thor.py +10 -15
- mindspore/nn/probability/__init__.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +4 -4
- mindspore/nn/probability/bijector/invert.py +1 -1
- mindspore/nn/probability/bijector/softplus.py +2 -2
- mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
- mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
- mindspore/nn/probability/distribution/_utils/utils.py +9 -15
- mindspore/nn/probability/distribution/bernoulli.py +3 -3
- mindspore/nn/probability/distribution/beta.py +1 -1
- mindspore/nn/probability/distribution/categorical.py +5 -7
- mindspore/nn/probability/distribution/cauchy.py +3 -3
- mindspore/nn/probability/distribution/distribution.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -2
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +3 -3
- mindspore/nn/probability/distribution/half_normal.py +15 -11
- mindspore/nn/probability/distribution/laplace.py +16 -13
- mindspore/nn/probability/distribution/logistic.py +2 -2
- mindspore/nn/probability/distribution/normal.py +1 -1
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/student_t.py +20 -15
- mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +3 -3
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +27 -10
- mindspore/nn/wrap/grad_reducer.py +2 -2
- mindspore/nn/wrap/loss_scale.py +40 -24
- mindspore/numpy/array_creations.py +33 -22
- mindspore/numpy/array_ops.py +35 -30
- mindspore/numpy/logic_ops.py +6 -27
- mindspore/numpy/math_ops.py +22 -19
- mindspore/numpy/utils.py +1 -1
- mindspore/numpy/utils_const.py +108 -58
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_constants.py +0 -6
- mindspore/ops/_grad/__init__.py +2 -1
- mindspore/ops/_grad/grad_array_ops.py +86 -117
- mindspore/ops/_grad/grad_base.py +23 -1
- mindspore/ops/_grad/grad_clip_ops.py +2 -3
- mindspore/ops/_grad/grad_comm_ops.py +34 -24
- mindspore/ops/_grad/grad_implementations.py +9 -45
- mindspore/ops/_grad/grad_inner_ops.py +47 -4
- mindspore/ops/_grad/grad_math_ops.py +142 -117
- mindspore/ops/_grad/grad_nn_ops.py +71 -165
- mindspore/ops/_grad/grad_sequence_ops.py +296 -0
- mindspore/ops/_grad/grad_sparse.py +7 -6
- mindspore/ops/_grad_experimental/__init__.py +1 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +150 -15
- mindspore/ops/_grad_experimental/grad_image_ops.py +16 -7
- mindspore/ops/_grad_experimental/grad_inner_ops.py +1 -22
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +4 -11
- mindspore/ops/_grad_experimental/grad_math_ops.py +210 -89
- mindspore/ops/_grad_experimental/grad_nn_ops.py +26 -22
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +49 -8
- mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +4 -4
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
- mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/correction_mul.py +2 -2
- mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -5
- mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -4
- mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +236 -4
- mindspore/ops/_op_impl/aicpu/abs.py +36 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_v1.py → adaptive_avg_pool_2d.py} +6 -5
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/add.py +43 -0
- mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
- mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
- mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -43
- mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
- mindspore/{compression/common/__init__.py → ops/_op_impl/aicpu/bessel_i0.py} +15 -8
- mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
- mindspore/ops/_op_impl/aicpu/conj.py +11 -0
- mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +0 -3
- mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_grad_v1.py → digamma.py} +7 -9
- mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
- mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
- mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
- mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
- mindspore/ops/_op_impl/aicpu/greater.py +41 -0
- mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
- mindspore/ops/_op_impl/aicpu/less.py +41 -0
- mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/lgamma.py} +16 -10
- mindspore/ops/_op_impl/aicpu/mirror_pad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mul.py +3 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
- mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
- mindspore/ops/_op_impl/aicpu/polar.py +32 -0
- mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
- mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
- mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
- mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
- mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
- mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
- mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
- mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
- mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
- mindspore/ops/_op_impl/aicpu/trans_data.py +1 -0
- mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
- mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
- mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
- mindspore/ops/_op_impl/cpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
- mindspore/ops/_op_impl/tbe/__init__.py +27 -611
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
- mindspore/ops/_op_impl/tbe/cast.py +0 -2
- mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +2 -2
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
- mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
- mindspore/ops/_op_impl/tbe/matmul_ds.py +2 -0
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/scatter_mul.py +2 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -2
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
- mindspore/ops/_register_for_op.py +1 -0
- mindspore/ops/_utils/__init__.py +1 -2
- mindspore/ops/_utils/utils.py +19 -40
- mindspore/ops/_vmap/vmap_array_ops.py +116 -38
- mindspore/ops/_vmap/vmap_base.py +16 -9
- mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
- mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +7 -5
- mindspore/ops/_vmap/vmap_image_ops.py +12 -5
- mindspore/ops/_vmap/vmap_math_ops.py +46 -5
- mindspore/ops/_vmap/vmap_nn_ops.py +15 -21
- mindspore/ops/_vmap/vmap_random_ops.py +1 -1
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +220 -106
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -23
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -17
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +39 -41
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +41 -43
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +51 -57
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +24 -25
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +53 -53
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +77 -85
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +37 -39
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +70 -72
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +17 -17
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
- mindspore/ops/bprop_mindir/generate_mindir.py +2 -0
- mindspore/ops/composite/__init__.py +7 -8
- mindspore/ops/composite/base.py +101 -47
- mindspore/ops/composite/math_ops.py +188 -158
- mindspore/ops/composite/multitype_ops/_compile_utils.py +415 -170
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +142 -87
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +2 -3
- mindspore/ops/composite/multitype_ops/getitem_impl.py +31 -3
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +21 -5
- mindspore/ops/composite/multitype_ops/not_in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
- mindspore/ops/composite/multitype_ops/setitem_impl.py +21 -3
- mindspore/ops/composite/multitype_ops/sub_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +35 -4
- mindspore/ops/function/__init__.py +152 -8
- mindspore/ops/function/array_func.py +2555 -674
- mindspore/ops/function/clip_func.py +209 -13
- mindspore/ops/function/debug_func.py +2 -2
- mindspore/ops/function/grad/__init__.py +2 -1
- mindspore/ops/function/grad/grad_func.py +147 -62
- mindspore/ops/function/image_func.py +54 -38
- mindspore/ops/function/linalg_func.py +167 -16
- mindspore/ops/function/math_func.py +4849 -1492
- mindspore/ops/function/nn_func.py +2573 -988
- mindspore/ops/function/other_func.py +115 -0
- mindspore/ops/function/parameter_func.py +3 -3
- mindspore/ops/function/random_func.py +790 -73
- mindspore/ops/function/sparse_func.py +98 -78
- mindspore/ops/function/sparse_unary_func.py +54 -53
- mindspore/ops/function/spectral_func.py +27 -24
- mindspore/ops/function/vmap_func.py +22 -2
- mindspore/ops/functional.py +97 -37
- mindspore/ops/op_info_register.py +70 -28
- mindspore/ops/operations/__init__.py +47 -14
- mindspore/ops/operations/_csr_ops.py +7 -7
- mindspore/ops/operations/_embedding_cache_ops.py +5 -5
- mindspore/ops/operations/_grad_ops.py +276 -187
- mindspore/ops/operations/_inner_ops.py +319 -113
- mindspore/ops/operations/_ms_kernel.py +10 -8
- mindspore/ops/operations/_ocr_ops.py +9 -9
- mindspore/ops/operations/_opaque_predicate_registry.py +4 -0
- mindspore/ops/operations/_quant_ops.py +137 -102
- mindspore/ops/operations/_rl_inner_ops.py +121 -60
- mindspore/ops/operations/_scalar_ops.py +466 -0
- mindspore/ops/operations/_sequence_ops.py +1004 -2
- mindspore/ops/operations/_tensor_array.py +10 -11
- mindspore/ops/operations/_thor_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +801 -466
- mindspore/ops/operations/comm_ops.py +51 -49
- mindspore/ops/operations/control_ops.py +2 -2
- mindspore/ops/operations/custom_ops.py +123 -44
- mindspore/ops/operations/debug_ops.py +24 -24
- mindspore/ops/operations/image_ops.py +240 -153
- mindspore/ops/operations/inner_ops.py +34 -50
- mindspore/ops/operations/linalg_ops.py +31 -9
- mindspore/ops/operations/math_ops.py +988 -757
- mindspore/ops/operations/nn_ops.py +965 -819
- mindspore/ops/operations/other_ops.py +51 -40
- mindspore/ops/operations/random_ops.py +204 -122
- mindspore/ops/operations/rl_ops.py +8 -9
- mindspore/ops/operations/sparse_ops.py +254 -93
- mindspore/ops/operations/spectral_ops.py +35 -3
- mindspore/ops/primitive.py +111 -9
- mindspore/parallel/_auto_parallel_context.py +189 -83
- mindspore/parallel/_offload_context.py +185 -0
- mindspore/parallel/_parallel_serialization.py +99 -7
- mindspore/parallel/_ps_context.py +9 -5
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +7 -1
- mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
- mindspore/{nn/transformer → parallel/_transformer}/layers.py +6 -37
- mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
- mindspore/{nn/transformer → parallel/_transformer}/moe.py +20 -16
- mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
- mindspore/{nn/transformer → parallel/_transformer}/transformer.py +48 -111
- mindspore/parallel/_utils.py +1 -2
- mindspore/parallel/algo_parameter_config.py +1 -1
- mindspore/parallel/checkpoint_transform.py +37 -34
- mindspore/parallel/shard.py +17 -18
- mindspore/profiler/common/validator/validate_path.py +2 -2
- mindspore/profiler/envprofiling.py +69 -47
- mindspore/profiler/parser/ascend_timeline_generator.py +49 -42
- mindspore/profiler/parser/base_timeline_generator.py +49 -56
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +98 -78
- mindspore/profiler/parser/hwts_log_parser.py +1 -1
- mindspore/profiler/parser/integrator.py +15 -14
- mindspore/profiler/parser/minddata_analyzer.py +2 -2
- mindspore/profiler/parser/msadvisor_analyzer.py +12 -25
- mindspore/profiler/parser/msadvisor_parser.py +2 -4
- mindspore/profiler/parser/optime_parser.py +17 -18
- mindspore/profiler/parser/profiler_info.py +2 -1
- mindspore/profiler/profiling.py +218 -186
- mindspore/rewrite/__init__.py +3 -1
- mindspore/rewrite/api/node.py +1 -114
- mindspore/rewrite/api/node_type.py +3 -0
- mindspore/rewrite/api/pattern_engine.py +31 -1
- mindspore/rewrite/api/scoped_value.py +4 -4
- mindspore/rewrite/api/symbol_tree.py +3 -78
- mindspore/rewrite/api/tree_node_helper.py +1 -1
- mindspore/rewrite/ast_creator_register.py +1 -0
- mindspore/rewrite/ast_helpers/__init__.py +2 -2
- mindspore/rewrite/ast_helpers/ast_creator.py +1 -2
- mindspore/rewrite/ast_helpers/ast_finder.py +65 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +11 -3
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +18 -2
- mindspore/rewrite/namespace.py +0 -2
- mindspore/rewrite/node.py +157 -11
- mindspore/rewrite/parsers/assign_parser.py +231 -53
- mindspore/rewrite/parsers/class_def_parser.py +187 -109
- mindspore/rewrite/parsers/for_parser.py +24 -14
- mindspore/rewrite/parsers/function_def_parser.py +21 -4
- mindspore/rewrite/parsers/if_parser.py +6 -2
- mindspore/rewrite/sparsify/__init__.py +0 -0
- mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
- mindspore/rewrite/sparsify/sparsify.py +109 -0
- mindspore/rewrite/sparsify/utils.py +173 -0
- mindspore/rewrite/symbol_tree.py +256 -133
- mindspore/rewrite/symbol_tree_builder.py +38 -1
- mindspore/run_check/_check_version.py +69 -63
- mindspore/run_check/run_check.py +2 -1
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +1 -1
- mindspore/train/_utils.py +28 -5
- mindspore/train/amp.py +273 -102
- mindspore/train/callback/_backup_and_restore.py +5 -5
- mindspore/train/callback/_callback.py +2 -2
- mindspore/train/callback/_checkpoint.py +3 -3
- mindspore/train/callback/_early_stop.py +3 -3
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +29 -31
- mindspore/train/callback/_loss_monitor.py +3 -3
- mindspore/train/callback/_on_request_exit.py +3 -3
- mindspore/train/callback/_reduce_lr_on_plateau.py +4 -4
- mindspore/train/callback/_summary_collector.py +23 -16
- mindspore/train/callback/_time_monitor.py +3 -3
- mindspore/train/checkpoint_pb2.py +68 -8
- mindspore/train/data_sink.py +15 -3
- mindspore/train/dataset_helper.py +10 -15
- mindspore/train/loss_scale_manager.py +8 -11
- mindspore/train/metrics/__init__.py +1 -1
- mindspore/train/metrics/bleu_score.py +1 -1
- mindspore/train/metrics/confusion_matrix.py +1 -1
- mindspore/train/metrics/cosine_similarity.py +1 -1
- mindspore/train/metrics/dice.py +2 -2
- mindspore/train/metrics/fbeta.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +4 -3
- mindspore/train/metrics/mean_surface_distance.py +2 -2
- mindspore/train/metrics/occlusion_sensitivity.py +1 -1
- mindspore/train/metrics/perplexity.py +1 -1
- mindspore/train/metrics/precision.py +1 -1
- mindspore/train/metrics/recall.py +1 -1
- mindspore/train/metrics/roc.py +2 -2
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +116 -37
- mindspore/train/model.py +45 -28
- mindspore/train/serialization.py +295 -188
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +43 -13
- mindspore/train/train_thor/convert_utils.py +2 -2
- mindspore/train/train_thor/dataset_helper.py +3 -3
- mindspore/turbojpeg.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +3 -2
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +610 -541
- mindspore/compression/__init__.py +0 -19
- mindspore/compression/common/constant.py +0 -124
- mindspore/compression/export/__init__.py +0 -19
- mindspore/compression/export/quant_export.py +0 -515
- mindspore/compression/quant/__init__.py +0 -28
- mindspore/compression/quant/qat.py +0 -634
- mindspore/compression/quant/quant_utils.py +0 -462
- mindspore/compression/quant/quantizer.py +0 -68
- mindspore/nn/layer/quant.py +0 -1868
- mindspore/nn/layer/rnn_utils.py +0 -90
- mindspore/nn/probability/dpn/__init__.py +0 -22
- mindspore/nn/probability/dpn/vae/__init__.py +0 -25
- mindspore/nn/probability/dpn/vae/cvae.py +0 -140
- mindspore/nn/probability/dpn/vae/vae.py +0 -124
- mindspore/nn/probability/infer/__init__.py +0 -22
- mindspore/nn/probability/infer/variational/elbo.py +0 -70
- mindspore/nn/probability/infer/variational/svi.py +0 -84
- mindspore/nn/probability/toolbox/__init__.py +0 -22
- mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
- mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -364
- mindspore/nn/probability/transforms/__init__.py +0 -22
- mindspore/nn/probability/transforms/transform_bnn.py +0 -262
- mindspore/nn/probability/zhusuan/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
- mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
- mindspore/ops/_op_impl/aicpu/parallel_concat.py +0 -42
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Cast_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/MatMul_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/Transpose_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -15
- mindspore/ops/composite/array_ops.py +0 -241
- mindspore/ops/composite/clip_ops.py +0 -134
- mindspore/ops/composite/random_ops.py +0 -426
- mindspore/ops/composite/vmap_ops.py +0 -38
- mindspore/parallel/nn/__init__.py +0 -42
- mindspore/parallel/nn/loss.py +0 -22
- mindspore/parallel/nn/moe.py +0 -21
- mindspore/parallel/nn/op_parallel_config.py +0 -22
- mindspore/parallel/nn/transformer.py +0 -31
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
mindspore/nn/layer/basic.py
CHANGED
@@ -28,16 +28,17 @@ from mindspore.common.initializer import initializer
 from mindspore.ops import operations as P
 from mindspore.ops import functional as F
 from mindspore.ops.operations import _inner_ops as inner
-from mindspore.ops.primitive import constexpr, Primitive
+from mindspore.ops.primitive import constexpr, Primitive, _primexpr
 from mindspore.common.parameter import Parameter
 from mindspore._extends import cell_attr_register
-from mindspore._checkparam import Validator
+from mindspore import _checkparam as Validator
 from mindspore.nn.cell import Cell
 from mindspore.nn.layer.activation import get_activation
+from mindspore.common._decorator import deprecated

 __all__ = ['Dropout', 'Flatten', 'Dense', 'ClipByNorm', 'Norm', 'OneHot', 'Pad', 'Unfold', 'Tril', 'Triu',
            'ResizeBilinear', 'MatrixDiag', 'MatrixDiagPart', 'MatrixSetDiag', 'L1Regularizer', 'Dropout1d',
-           'Dropout2d', 'Dropout3d', 'Roll', 'Identity', 'Unflatten']
+           'Dropout2d', 'Dropout3d', 'Upsample', 'Roll', 'Identity', 'Unflatten']


 class L1Regularizer(Cell):
@@ -87,15 +88,18 @@ class L1Regularizer(Cell):
         super(L1Regularizer, self).__init__()
         Validator.check_value_type("scale", scale, [int, float], self.cls_name)
         if scale <= 0:
-            raise ValueError(f"For '{self.cls_name}', the 'scale' must be greater than 0, but got {scale}.")
+            raise ValueError(
+                f"For '{self.cls_name}', the 'scale' must be greater than 0, but got {scale}.")
         if math.isinf(scale) or math.isnan(scale):
-            raise ValueError(f"For '{self.cls_name}', the 'scale' can not be INF or NAN, but got {scale}.")
+            raise ValueError(
+                f"For '{self.cls_name}', the 'scale' can not be INF or NAN, but got {scale}.")
         self.abs = P.Abs()
         self.reduce_sum = P.ReduceSum()
         self.scale = Tensor(scale, dtype=mstype.float32)

     def construct(self, weights):
-        const_utils.check_type_valid(F.dtype(weights), mstype.number_type, 'weights')
+        const_utils.check_type_valid(
+            F.dtype(weights), mstype.number_type, 'weights')
         l1_regularization = self.scale * self.reduce_sum(self.abs(weights))
         return l1_regularization
@@ -104,12 +108,9 @@ class Dropout(Cell):
     r"""
     Dropout layer for the input.

-
-
-
-    The outputs are scaled by a factor of :math:`\frac{1}{keep\_prob}` during training so
-    that the output layer remains at a similar scale. During inference, this
-    layer returns the same tensor as the `x`.
+    Dropout is a regularization method. The operator randomly sets some neurons output to 0
+    according to the probability of discarding the probability of discarding.
+    During the reasoning, this layer returns the same Tensor as the `x`.

     This technique is proposed in paper `Dropout: A Simple Way to Prevent Neural Networks from Overfitting
     <http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf>`_ and proved to be effective to reduce
@@ -118,25 +119,30 @@ class Dropout(Cell):
     <https://arxiv.org/pdf/1207.0580.pdf>`_.

     Note:
-        Each channel will be zeroed out independently on every construct call.
-        Parameter `
+        - Each channel will be zeroed out independently on every construct call.
+        - Parameter `keep_prob` will be removed in a future version, please use parameter `p` instead.
+          Parameter `p` means the probability of the element of the input tensor to be zeroed.
+        - Parameter `dtype` will be removed in a future version. It is not recommended to define this parameter.

     Args:
-        keep_prob (float): The keep rate, greater than 0 and less equal than 1.
-
-
+        keep_prob (float): Deprecated. The keep rate, greater than 0 and less equal than 1.
+            E.g. rate=0.9, dropping out 10% of input neurons. Default: 0.5.
+        p (Union[float, int, None]): The dropout rate, greater than or equal to 0 and less than 1.
+            E.g. rate=0.9, dropping out 90% of input neurons. Default: None.
+        dtype (:class:`mindspore.dtype`): Data type of `input`. Default: mindspore.float32.

     Inputs:
         - **x** (Tensor) - The input of Dropout with data type of float16 or float32.
-          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

     Outputs:
         Tensor, output tensor with the same shape as the `x`.

     Raises:
         TypeError: If `keep_prob` is not a float.
+        TypeError: If the dtype of `p` is not float or int.
         TypeError: If dtype of `x` is not neither float16 nor float32.
         ValueError: If `keep_prob` is not in range (0, 1].
+        ValueError: If `p` is not in range [0, 1).
         ValueError: If length of shape of `x` is less than 1.

     Supported Platforms:
@@ -144,48 +150,55 @@ class Dropout(Cell):

     Examples:
         >>> x = Tensor(np.ones([2, 2, 3]), mindspore.float32)
-        >>> net = nn.Dropout(keep_prob=0.8)
+        >>> net = nn.Dropout(p=0.2)
         >>> net.set_train()
-        Dropout<keep_prob=0.8>
         >>> output = net(x)
         >>> print(output.shape)
         (2, 2, 3)
     """

-    def __init__(self, keep_prob=0.5, dtype=mstype.float32):
+    def __init__(self, keep_prob=0.5, p=None, dtype=mstype.float32):
         """Initialize Dropout."""
         super(Dropout, self).__init__()
-        Validator.check_value_type('keep_prob', keep_prob, [float], self.cls_name)
-        if keep_prob <= 0 or keep_prob > 1:
-            raise ValueError(f"For '{self.cls_name}', the 'keep_prob' must be a number in range (0, 1], "
-                             f"but got {keep_prob}.")
-        Validator.check_subclass("dtype", dtype, mstype.number_type, self.cls_name)
         if dtype != mstype.float32:
-            logger.
+            logger.warning(
+                "This parameter `dtype` will be deleted or invisible in the future. Please don't use it.")
+        if p is None:
+            logger.warning("For Dropout, this parameter `keep_prob` will be deprecated, please use `p` instead.")
+            Validator.check_value_type('keep_prob', keep_prob, [float], self.cls_name)
+            if keep_prob <= 0 or keep_prob > 1:
+                raise ValueError(f"For '{self.cls_name}', the 'keep_prob' must be a number in range (0, 1], "
+                                 f"but got {keep_prob}.")
+            seed0, seed1 = _get_graph_seed(0, "dropout")
+            self.dropout = P.Dropout(keep_prob, seed0, seed1)
+        else:
+            Validator.check_value_type('p', p, [float, int], self.cls_name)
+            if p < 0 or p >= 1:
+                raise ValueError(f"For '{self.cls_name}', the 'p' must be a number in range [0, 1), "
+                                 f"but got {p}.")
+            seed0, seed1 = _get_graph_seed(0, "dropout")
+            self.dropout = P.Dropout(1.0 - p, seed0, seed1)
+        self.p = p
         self.keep_prob = keep_prob
-        seed0, seed1 = _get_graph_seed(0, "dropout")
-        self.seed0 = seed0
-        self.seed1 = seed1
-        self.dropout = P.Dropout(keep_prob, seed0, seed1)

     def construct(self, x):
-        if not self.training:
-            return x
-
-        if self.keep_prob == 1:
+        if not self.training or self.keep_prob == 1 or self.p == 0:
             return x

         out, _ = self.dropout(x)
         return out

     def extend_repr(self):
-
+        if self.p is None:
+            logger.warning("For Dropout, this parameter `keep_prob` will be deprecated, please use `p` instead.")
+            return f'keep_prob={self.keep_prob}'
+        return f'p={self.p}'


 class Dropout1d(Cell):
     r"""
     During training, randomly zeroes entire channels of the input tensor with probability `p`
-    from a Bernoulli distribution (For a 3-dimensional tensor with a shape of :math:`
+    from a Bernoulli distribution (For a 3-dimensional tensor with a shape of :math:`(N, C, L)`,
     the channel feature map refers to a 1-dimensional feature map with the shape of :math:`L`).

     For example, the :math:`j\_th` channel of the :math:`i\_th` sample in the batched input is a to-be-processed
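The Dropout hunk above is the headline API change in this file: the constructor now takes the drop probability `p` (the PyTorch convention) while routing the old `keep_prob` path through a deprecation warning, internally building `P.Dropout(1.0 - p, ...)`. A minimal migration sketch based on the constructor logic shown above; note that `p` is the probability of zeroing an element, so `p = 1 - keep_prob`:

    import numpy as np
    import mindspore as ms
    from mindspore import nn, Tensor

    x = Tensor(np.ones([2, 2, 3]), ms.float32)

    # 2.0.0a0 style: keep_prob is the probability of KEEPING an element.
    # Still accepted in 2.0.0rc1, but logs a deprecation warning.
    net_old = nn.Dropout(keep_prob=0.8)

    # 2.0.0rc1 style: p is the probability of ZEROING an element.
    net_new = nn.Dropout(p=0.2)   # equivalent to keep_prob=0.8
    net_new.set_train()
    out = net_new(x)              # same shape as x; ~20% of elements zeroed in training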
@@ -193,8 +206,8 @@ class Dropout1d(Cell):
     Each channel will be zeroed out independently on every forward call with probability `p` using samples
     from a Bernoulli distribution.

-    The
-    <http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf>`_ mentioned this technology
+    The paper `Dropout: A Simple Way to Prevent Neural Networks from Overfitting
+    <http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf>`_ mentioned this technology, And it is proved that
     it can effectively reduce over fitting and prevent neuronal coadaptation.
     For more details, refer to `Improving neural networks by preventing co-adaptation of feature detectors
     <https://arxiv.org/pdf/1207.0580.pdf>`_ .
@@ -202,8 +215,8 @@ class Dropout1d(Cell):
     `Dropout1d` can improve the independence between channel feature maps.

     Args:
-        p (float): The dropping probability of a channel, between 0 and 1, e.g. `p` = 0.8,
-            which means an 80% chance of
+        p (float, optional): The dropping probability of a channel, between 0 and 1, e.g. `p` = 0.8,
+            which means an 80% chance of being set to 0. Default: 0.5.

     Inputs:
         - **x** (Tensor) - A tensor with shape :math:`(N, C, L)` or :math:`(C, L)`, where `N` is the batch size,
@@ -215,7 +228,6 @@ class Dropout1d(Cell):

     Raises:
         TypeError: If `x` is not a Tensor.
-        TypeError: If dtype of `x` is not int8, int16, int32, int64, float16, float32 or float64.
         TypeError: If the data type of `p` is not float.
         ValueError: If `p` is out of the range `[0.0, 1.0]`.
         ValueError: If `x` shape is not `2D` or `3D`.
@@ -224,11 +236,13 @@ class Dropout1d(Cell):
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>>
-        >>>
-        >>>
-        >>>
-
+        >>> import numpy as np
+        >>> import mindspore as ms
+        >>> from mindspore import nn, Tensor
+        >>> op = nn.Dropout1d(p=0.6)
+        >>> op.training = True
+        >>> a = Tensor(np.ones((3, 3)), ms.float32)
+        >>> output = op(a)
     """

     def __init__(self, p=0.5):
@@ -241,10 +255,7 @@ class Dropout1d(Cell):
         self.prob = p

     def construct(self, x):
-        if not self.training:
-            return x
-
-        if self.prob == 0:
+        if not self.training or self.prob == 0:
             return x

         out = F.dropout1d(x, self.prob)
@@ -288,10 +299,7 @@ class Dropout2d(Cell):
         self.dropout2d = P.Dropout2D(self.keep_prob)

     def construct(self, x):
-        if not self.training:
-            return x
-
-        if self.keep_prob == 1:
+        if not self.training or self.keep_prob == 1:
             return x

         out, _ = self.dropout2d(x)
@@ -339,10 +347,7 @@ class Dropout3d(Cell):
         self.dropout3d = P.Dropout3D(self.keep_prob)

     def construct(self, x):
-        if not self.training:
-            return x
-
-        if self.keep_prob == 1:
+        if not self.training or self.keep_prob == 1:
             return x

         out, _ = self.dropout3d(x)
@@ -352,22 +357,65 @@ class Dropout3d(Cell):
         return 'p={}'.format(self.keep_prob)


+class Upsample(Cell):
+    r"""
+    For details, please refer to :func:`mindspore.ops.interpolate`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> x = Tensor([[[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]]])
+        >>> upsample = nn.Upsample(size=(5, 5))
+        >>> out = upsample(x)
+        >>> print(x.asnumpy())
+        [[[[1. 2. 3. 4.]
+           [5. 6. 7. 8.]]]]
+        >>> print(out.asnumpy())
+        [[[[1. 1. 2. 3. 4.]
+           [1. 1. 2. 3. 4.]
+           [1. 1. 2. 3. 4.]
+           [5. 5. 6. 7. 8.]
+           [5. 5. 6. 7. 8.]]]]
+        >>> print(out.shape)
+        (1, 1, 5, 5)
+    """
+
+    def __init__(self, size=None, scale_factor=None, mode="nearest", align_corners=None, recompute_scale_factor=None):
+        """Initialize Upsample."""
+        super(Upsample, self).__init__()
+        self.size = size
+        self.scale_factor = scale_factor
+        self.mode = mode
+        self.align_corners = align_corners
+        self.recompute_scale_factor = recompute_scale_factor
+
+    def construct(self, x):
+        out = F.interpolate(x, self.size, self.scale_factor, self.mode,
+                            self.align_corners, self.recompute_scale_factor)
+        return out
+
+
 class Flatten(Cell):
     r"""
-    Flatten the
+    Flatten the input Tensor along dimensions from `start_dim` to `end_dim`.
+
+    Args:
+        start_dim (int, optional): The first dimension to flatten. Default: 1.
+        end_dim (int, optional): The last dimension to flatten. Default: -1.

     Inputs:
-        - **x** (Tensor) - The input Tensor to be flattened.
-          `number <https://www.mindspore.cn/docs/en/r2.0.0-alpha/api_python/mindspore.html#mindspore.dtype>`_ .
-          The shape is :math:`(N, *)` , where :math:`*` means any number of additional dimensions
-          and the shape can't be ().
+        - **x** (Tensor) - The input Tensor to be flattened.

     Outputs:
-        Tensor
-
+        Tensor. If no dimensions are flattened, returns the original `x`, otherwise return the flattened Tensor.
+        If `x` is a 0-dimensional Tensor, a 1-dimensional Tensor will be returned.

     Raises:
-        TypeError: If `x` is not a
+        TypeError: If `x` is not a Tensor.
+        TypeError: If `start_dim` or `end_dim` is not int.
+        ValueError: If `start_dim` is greater than `end_dim` after canonicalized.
+        ValueError: If `start_dim` or `end_dim` is not in range of [-x.dim, x.dim-1].

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
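`Upsample` is new in this release and is a thin wrapper: its `construct` forwards every stored argument straight to `F.interpolate`. A sketch of that equivalence, assuming the functional form `ops.interpolate` accepts the same keyword arguments the wrapper stores (which the construct body above implies):

    import mindspore as ms
    from mindspore import nn, ops, Tensor

    x = Tensor([[[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]]])

    # Cell form (new in 2.0.0rc1) and functional form compute the same result.
    a = nn.Upsample(size=(5, 5), mode="nearest")(x)
    b = ops.interpolate(x, size=(5, 5), mode="nearest")
    assert (a.asnumpy() == b.asnumpy()).all()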
@@ -385,16 +433,25 @@ class Flatten(Cell):
         after flatten the output shape is (2, 4)
     """

-    def __init__(self):
+    def __init__(self, start_dim=1, end_dim=-1):
         """Initialize Flatten."""
         super(Flatten, self).__init__()
+        self.start_dim = start_dim
+        self.end_dim = end_dim

     def construct(self, x):
-
+        x_rank = F.rank(x)
+        ndim = x_rank if x_rank != 0 else 1
+        if self.start_dim < -ndim or self.start_dim >= ndim:
+            const_utils.raise_value_error("'start_dim' out of range.")
+        if self.end_dim < -ndim or self.end_dim >= ndim:
+            const_utils.raise_value_error("'end_dim' out of range.")
+        return F.flatten(x, start_dim=self.start_dim, end_dim=self.end_dim)


-@constexpr
+@_primexpr
 def check_dense_input_shape(x, prim_name=None):
+    """ check the shape of inputs"""
     msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
     if len(x) < 2:
         raise ValueError(f"{msg_prefix} dimension of 'x' should not be less than 2, but got {len(x)}.")
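`Flatten` now exposes `start_dim`/`end_dim` (the old defaults, batch dimension kept and everything after it collapsed, are preserved by `start_dim=1, end_dim=-1`) and delegates to `F.flatten`; the same hunk also swaps `@constexpr` for the new `@_primexpr` on `check_dense_input_shape`, matching the `_primexpr` import added at the top of the file. A usage sketch of the new signature, with hypothetical shapes:

    import numpy as np
    import mindspore as ms
    from mindspore import nn, Tensor

    x = Tensor(np.ones([2, 3, 4, 5]), ms.float32)

    print(nn.Flatten()(x).shape)                        # (2, 60)   default: start_dim=1, end_dim=-1
    print(nn.Flatten(start_dim=2)(x).shape)             # (2, 3, 20)
    print(nn.Flatten(start_dim=0, end_dim=2)(x).shape)  # (24, 5)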
@@ -408,13 +465,13 @@ class Identity(Cell):
         - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is Number.

     Outputs:
-        Tensor, the shape of tensor and the data type are the same as `
+        Tensor, the shape of tensor and the data type are the same as `x`.

     Raises:
         TypeError: If `x` is not a Tensor.

     Supported Platforms:
-        ``Ascend`` ``
+        ``Ascend`` ``GPU`` ``CPU``

     Examples:
         >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
@@ -455,7 +512,7 @@ class Dense(Cell):
             is same as `x`. The values of str refer to the function `initializer`. Default: 'normal'.
         bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter. The dtype is
             same as `x`. The values of str refer to the function `initializer`. Default: 'zeros'.
-        has_bias (bool): Specifies whether the layer uses a bias vector
+        has_bias (bool): Specifies whether the layer uses a bias vector :math:`\text{bias}`. Default: True.
         activation (Union[str, Cell, Primitive, None]): activate function applied to the output of the fully connected
             layer. Both activation name, e.g. 'relu', and mindspore activation function, e.g. mindspore.ops.ReLU(),
             are supported. Default: None.
@@ -497,9 +554,12 @@ class Dense(Cell):
                  activation=None):
         """Initialize Dense."""
         super(Dense, self).__init__()
-        self.in_channels = Validator.check_positive_int(in_channels, "in_channels", self.cls_name)
-        self.out_channels = Validator.check_positive_int(out_channels, "out_channels", self.cls_name)
-        self.has_bias = Validator.check_bool(has_bias, "has_bias", self.cls_name)
+        self.in_channels = Validator.check_positive_int(
+            in_channels, "in_channels", self.cls_name)
+        self.out_channels = Validator.check_positive_int(
+            out_channels, "out_channels", self.cls_name)
+        self.has_bias = Validator.check_bool(
+            has_bias, "has_bias", self.cls_name)
         self.reshape = P.Reshape()
         self.shape_op = P.Shape()

@@ -510,7 +570,8 @@ class Dense(Cell):
                                  f"be equal to 2, and the first dim must be equal to 'out_channels', and the "
                                  f"second dim must be equal to 'in_channels'. But got 'weight_init': {weight_init}, "
                                  f"'out_channels': {out_channels}, 'in_channels': {in_channels}.")
-        self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")
+        self.weight = Parameter(initializer(
+            weight_init, [out_channels, in_channels]), name="weight")

         self.bias = None
         if self.has_bias:
@@ -519,11 +580,13 @@ class Dense(Cell):
                 raise ValueError(f"For '{self.cls_name}', bias init shape error. The ndim of 'bias_init' must "
                                  f"be equal to 1, and the first dim must be equal to 'out_channels'. But got "
                                  f"'bias_init': {bias_init}, 'out_channels': {out_channels}.")
-            self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias")
+            self.bias = Parameter(initializer(
+                bias_init, [out_channels]), name="bias")
             self.bias_add = P.BiasAdd()

         self.matmul = P.MatMul(transpose_b=True)
-        self.activation = get_activation(activation) if isinstance(activation, str) else activation
+        self.activation = get_activation(activation) if isinstance(
+            activation, str) else activation
         if activation is not None and not isinstance(self.activation, (Cell, Primitive)):
             raise TypeError(f"For '{self.cls_name}', the 'activation' must be str or Cell or Primitive, but got "
                             f"{type(activation).__name__}.")
@@ -540,12 +603,13 @@ class Dense(Cell):
         if self.activation_flag:
             x = self.activation(x)
         if len(x_shape) != 2:
-            out_shape = x_shape[:-1] + (-1,)
+            out_shape = x_shape[:-1] + (F.shape(x)[-1],)
             x = self.reshape(x, out_shape)
         return x

     def extend_repr(self):
-        s = 'input_channels={}, output_channels={}'.format(self.in_channels, self.out_channels)
+        s = 'input_channels={}, output_channels={}'.format(
+            self.in_channels, self.out_channels)
         if self.has_bias:
             s += ', has_bias={}'.format(self.has_bias)
         if self.activation_flag:
|
|
|
557
621
|
def _is_equal_one(x):
|
|
558
622
|
if x is None:
|
|
559
623
|
return False
|
|
560
|
-
return
|
|
624
|
+
return F.equal(F.reduce_mean(x), 1.0)
|
|
561
625
|
|
|
562
626
|
|
|
563
627
|
@constexpr
|
|
564
628
|
def _dtype_check(x_dtype, prim_name=None):
|
|
565
629
|
msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
|
|
566
630
|
if x_dtype not in [mstype.float32, mstype.float16]:
|
|
567
|
-
raise TypeError(
|
|
631
|
+
raise TypeError(
|
|
632
|
+
f"{msg_prefix} x_dtype must be float32 or float16, but got {x_dtype}.")
|
|
568
633
|
|
|
569
634
|
|
|
570
635
|
@constexpr
|
|
@@ -634,66 +699,16 @@ class ClipByNorm(Cell):
 
 class Norm(Cell):
     r"""
-
-
-    .. math::
-
-        norm(x) = \sqrt{\sum_{i=1}^{n} (x_i^2)}
-
-    Args:
-        axis (Union[tuple, int]): The axis over which to compute vector norms. Default: ().
-        keep_dims (bool): If true, the axis indicated in `axis` are kept with size 1. Otherwise,
-            the dimensions in `axis` are removed from the output shape. Default: False.
-
-    Inputs:
-        - **x** (Tensor) - Tensor which is not empty. The data type should be float16 or float32.
-          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-
-    Outputs:
-        Tensor, output tensor with dimensions in 'axis' reduced to 1 will be returned if 'keep_dims' is True;
-        otherwise a Tensor with dimensions in 'axis' removed is returned. The data type is the same with `x`.
-
-    Raises:
-        TypeError: If `axis` is neither an int nor a tuple.
-        TypeError: If `keep_dims` is not a bool.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> net = nn.Norm(axis=0)
-        >>> x = Tensor(np.array([[4, 4, 9, 1], [2, 1, 3, 6]]), mindspore.float32)
-        >>> print(x.shape)
-        (2, 4)
-        >>> output = net(x)
-        >>> print(output)
-        [4.472136 4.1231055 9.486833 6.0827627]
-        >>> print(output.shape)
-        (4,)
-        >>> net = nn.Norm(axis=0, keep_dims=True)
-        >>> x = Tensor(np.array([[4, 4, 9, 1], [2, 1, 3, 6]]), mindspore.float32)
-        >>> print(x.shape)
-        (2, 4)
-        >>> output = net(x)
-        >>> print(output)
-        [4.472136 4.1231055 9.486833 6.0827627]
-        >>> print(output.shape)
-        (1, 4)
-        >>> net = nn.Norm(axis=1)
-        >>> x = Tensor(np.array([[4, 4, 9, 1], [2, 1, 3, 6]]), mindspore.float32)
-        >>> print(x.shape)
-        (2, 4)
-        >>> output = net(x)
-        >>> print(output)
-        [10.677078  7.071068]
-        >>> print(output.shape)
-        (2,)
+    'nn.Norm' is deprecated from version 2.0 and will be removed in a future version,
+    use 'ops.norm' instead.
     """
 
+    @deprecated("2.0", "ops.norm", False)
     def __init__(self, axis=(), keep_dims=False):
         """Initialize Norm."""
         super(Norm, self).__init__()
-        Validator.check_value_type("keep_dims", keep_dims, [bool], self.cls_name)
+        Validator.check_value_type(
+            "keep_dims", keep_dims, [bool], self.cls_name)
         self.axis = axis
         self.keep_dims = keep_dims
         self.reduce_sum = P.ReduceSum(True)
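
A minimal migration sketch for the deprecation above, reusing the values from the removed docstring example (assuming the 2.0 functional signature `ops.norm(A, ord=None, dim=None, keepdim=False)`):

    import numpy as np
    import mindspore as ms
    from mindspore import ops, Tensor

    x = Tensor(np.array([[4, 4, 9, 1], [2, 1, 3, 6]]), ms.float32)
    # Old: nn.Norm(axis=0)(x)
    out = ops.norm(x, dim=0)  # L2 norm over axis 0
    print(out)  # [4.472136 4.1231055 9.486833 6.0827627]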
@@ -713,119 +728,11 @@ class Norm(Cell):
 
 class OneHot(Cell):
     """
-
-
-    The locations represented by indices in argument `indices` take value on_value,
-    while all other locations take value off_value.
-
-    Note:
-        If the input indices is rank :math:`N`, the output will have rank :math:`N+1`. The new
-        axis is created at dimension `axis`.
-
-        If `indices` is a scalar, the output shape will be a vector of length `depth`.
-
-        If `indices` is a vector of length `features`, the output shape will be:
-
-        .. code-block::
-
-            features * depth if axis == -1
-
-            depth * features if axis == 0
-
-        If `indices` is a matrix with shape `[batch, features]`, the output shape will be:
-
-        .. code-block::
-
-            batch * features * depth if axis == -1
-
-            batch * depth * features if axis == 1
-
-            depth * batch * features if axis == 0
-
-    Args:
-        axis (int): Features x depth if axis is -1, depth x features
-            if axis is 0. Default: -1.
-        depth (int): A scalar defining the depth of the one hot dimension. Default: 1.
-        on_value (float): A scalar defining the value to fill in output[i][j]
-            when indices[j] = i. Default: 1.0.
-        off_value (float): A scalar defining the value to fill in output[i][j]
-            when indices[j] != i. Default: 0.0.
-        dtype (:class:`mindspore.dtype`): Data type of 'on_value' and 'off_value', not the
-            data type of indices. Default: mindspore.float32.
-
-    Inputs:
-        - **indices** (Tensor) - A tensor of indices with data type of int32 or int64.
-          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-
-    Outputs:
-        Tensor, the one-hot tensor of data type `dtype` with dimension at `axis` expanded to `depth` and filled with
-        on_value and off_value. The dimension of the `Outputs` is equal to the dimension of the `indices` plus one.
-
-    Raises:
-        TypeError: If `axis` or `depth` is not an int.
-        TypeError: If dtype of `indices` is neither int32 nor int64.
-        ValueError: If `axis` is not in range [-1, len(indices_shape)].
-        ValueError: If `depth` is less than 0.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> # 1st sample: add new coordinates at axis 1
-        >>> net = nn.OneHot(depth=4, axis=1)
-        >>> indices = Tensor([[1, 3], [0, 2]], dtype=mindspore.int32)
-        >>> output = net(indices)
-        >>> print(output)
-        [[[0. 0.]
-          [1. 0.]
-          [0. 0.]
-          [0. 1.]]
-         [[1. 0.]
-          [0. 0.]
-          [0. 1.]
-          [0. 0.]]]
-        >>> # The results are shown below:
-        >>> print(output.shape)
-        (2, 4, 2)
-        >>> # 2nd sample: add new coordinates at axis 0
-        >>> net = nn.OneHot(depth=4, axis=0)
-        >>> indices = Tensor([[1, 3], [0, 2]], dtype=mindspore.int32)
-        >>> output = net(indices)
-        >>> print(output)
-        [[[0. 0.]
-          [1. 0.]]
-         [[1. 0.]
-          [0. 0.]]
-         [[0. 0.]
-          [0. 1.]]
-         [[0. 1.]
-          [0. 0.]]]
-        >>> # The results are shown below:
-        >>> print(output.shape)
-        (4, 2, 2)
-        >>> # 3rd sample: add new coordinates at the last dimension.
-        >>> net = nn.OneHot(depth=4, axis=-1)
-        >>> indices = Tensor([[1, 3], [0, 2]], dtype=mindspore.int32)
-        >>> output = net(indices)
-        >>> # The results are shown below:
-        >>> print(output)
-        [[[0. 1. 0. 0.]
-          [0. 0. 0. 1.]]
-         [[1. 0. 0. 0.]
-          [0. 0. 1. 0.]]]
-        >>> print(output.shape)
-        (2, 2, 4)
-        >>> indices = Tensor([1, 3, 0, 2], dtype=mindspore.int32)
-        >>> output = net(indices)
-        >>> print(output)
-        [[0. 1. 0. 0.]
-         [0. 0. 0. 1.]
-         [1. 0. 0. 0.]
-         [0. 0. 1. 0.]]
-        >>> print(output.shape)
-        (4, 4)
+    'nn.OneHot' is deprecated from version 2.0 and will be removed in a future version,
+    use 'ops.one_hot' instead.
     """
 
+    @deprecated("2.0", "ops.one_hot", False)
     def __init__(self, axis=-1, depth=1, on_value=1.0, off_value=0.0, dtype=mstype.float32):
         """Initialize OneHot."""
         super(OneHot, self).__init__()
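
A minimal migration sketch for the deprecation above (assuming `ops.one_hot(indices, depth, on_value, off_value, axis=-1)` with explicit on/off tensors); values mirror the removed docstring example:

    import mindspore as ms
    from mindspore import ops, Tensor

    indices = Tensor([[1, 3], [0, 2]], dtype=ms.int32)
    on_value = Tensor(1.0, ms.float32)
    off_value = Tensor(0.0, ms.float32)
    # Old: nn.OneHot(depth=4, axis=-1)(indices)
    out = ops.one_hot(indices, 4, on_value, off_value, axis=-1)
    print(out.shape)  # (2, 2, 4)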
@@ -844,11 +751,11 @@ class Pad(Cell):
     Pads the input tensor according to the paddings and mode.
 
     Args:
-        paddings (tuple): The shape of parameter `paddings` is (N, 2). N is the rank of input data. All
-            paddings are int type. For `D` th dimension of the `x`, paddings[D, 0] indicates how many
-            extended ahead of the `D` th dimension of the input tensor, and paddings[D, 1] indicates how
-            be extended behind of the `D` th dimension of the input tensor. The padded size of each
-            output is: :math:`paddings[D, 0] + input\_x.dim\_size(D) + paddings[D, 1]`,
+        paddings (tuple): The shape of parameter `paddings` is :math:`(N, 2)` . N is the rank of input data. All
+            elements of paddings are int type. For `D` th dimension of the `x`, paddings[D, 0] indicates how many
+            sizes to be extended ahead of the `D` th dimension of the input tensor, and paddings[D, 1] indicates how
+            many sizes to be extended behind of the `D` th dimension of the input tensor. The padded size of each
+            dimension D of the output is: :math:`paddings[D, 0] + input\_x.dim\_size(D) + paddings[D, 1]`,
             e.g.:
 
             .. code-block::
@@ -884,7 +791,7 @@ class Pad(Cell):
 
     Raises:
         TypeError: If `paddings` is not a tuple.
-        ValueError: If length of `paddings` is more than 4 or its shape is not (N, 2).
+        ValueError: If length of `paddings` is more than 4 or its shape is not :math:`(N, 2)` .
         ValueError: If `mode` is not one of 'CONSTANT', 'REFLECT', 'SYMMETRIC'.
 
     Supported Platforms:
@@ -979,7 +886,8 @@ class Pad(Cell):
         super(Pad, self).__init__()
         self.mode = mode
         self.paddings = paddings
-        Validator.check_string(self.mode, ["CONSTANT", "REFLECT", "SYMMETRIC"], 'mode', self.cls_name)
+        Validator.check_string(
+            self.mode, ["CONSTANT", "REFLECT", "SYMMETRIC"], 'mode', self.cls_name)
         if not isinstance(paddings, tuple):
             raise TypeError(f"For '{self.cls_name}', the type of 'paddings' must be tuple, "
                             f"but got {type(paddings).__name__}.")
@@ -1009,66 +917,32 @@ def bilinear(shape, size, scale, align_corners, prim_name=None):
     """Check input and calculate shape"""
     msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
     if not isinstance(align_corners, bool):
-        raise TypeError(f"{msg_prefix} type of 'align_corners' must be bool, but got {type(align_corners).__name__}.")
+        raise TypeError(
+            f"{msg_prefix} type of 'align_corners' must be bool, but got {type(align_corners).__name__}.")
     if size is None and scale is None:
         raise ValueError(f"{msg_prefix} 'size' and 'scale' both none.")
     if size is not None and scale is not None:
         raise ValueError(f"{msg_prefix} 'size' and 'scale' both not none.")
     if size is not None:
         if not isinstance(size, (tuple, list)):
-            raise ValueError(f"{msg_prefix} 'size' must be tuple or list or None, but got {type(size).__name__}.")
-        Validator.check_int(len(size), 2, Rel.EQ, "size", "bilinear")
-        Validator.check_int(size[0], 1, Rel.GE, "size[0]", "bilinear")
-        Validator.check_int(size[1], 1, Rel.GE, "size[1]", "bilinear")
+            raise ValueError(
+                f"{msg_prefix} 'size' must be tuple or list or None, but got {type(size).__name__}.")
+        Validator.check_int(len(size), 2, Validator.EQ, "size", "bilinear")
+        Validator.check_int(size[0], 1, Validator.GE, "size[0]", "bilinear")
+        Validator.check_int(size[1], 1, Validator.GE, "size[1]", "bilinear")
         return size
-    Validator.check_int(scale, 1, Rel.GE, "scale factor", "bilinear")
+    Validator.check_int(scale, 1, Validator.GE, "scale factor", "bilinear")
     ret = (scale * shape[2], scale * shape[3])
     return ret
 
 
 class ResizeBilinear(Cell):
     r"""
-
-
-    Args:
-        half_pixel_centers (bool): Whether half pixel center. If set to True, `align_corners` should be False.
-            Default: False.
-
-    Inputs:
-        - **x** (Tensor) - Tensor to be resized. Input tensor must be a 4-D tensor with shape
-          :math:`(batch, channels, height, width)`, with data type of float16 or float32.
-        - **size** (Union[tuple[int], list[int], None]): A tuple or list of 2 int elements
-          :math:`(new\_height, new\_width)`,the new size of the tensor.
-          One and only one of size and scale_factor can be set to None. Default: None.
-        - **scale_factor** (int, None): The scale factor of new size of the tensor. The value should be positive
-          integer. One and only one of size and scale_factor can be set to None. Default: None.
-        - **align_corners** (bool): If true, rescale input by :math:`(new\_height - 1) / (height - 1)`, which exactly
-          aligns the 4 corners of images and resized images. If false, rescale by :math:`new\_height / height`.
-          Default: False.
-
-    Outputs:
-        Resized tensor.
-        If size is set, the result is 4-D tensor with shape :math:`(batch, channels, new\_height, new\_width)`,
-        and the data type is the same as `x`.
-        If scale is set, the result is 4-D tensor with shape
-        :math:`(batch, channels, scale\_factor * height, scale\_factor * width)` and the data type is the same as `x`.
-
-    Raises:
-        TypeError: If `size` is not one of tuple, list, None.
-        TypeError: If `scale_factor` is neither int nor None.
-        TypeError: If `align_corners` is not a bool.
-        TypeError: If `half_pixel_centers` is not a bool.
-        TypeError: If `align_corners` and `half_pixel_centers` are all True.
-        TypeError: If `half_pixel_centers` is True and device_target not Ascend.
-        TypeError: If dtype of `x` is neither float16 nor float32.
-        ValueError: If `size` and `scale_factor` are both None or not None.
-        ValueError: If length of shape of `x` is not equal to 4.
-        ValueError: If `scale_factor` is an int which is less than 0.
-        ValueError: If `size` is a list or tuple whose length is not equal to 2.
+    'nn.ResizeBilinear' is deprecated from version 2.0 and will be removed in a future version,
+    use :class:`mindspore.ops.ResizeBilinearV2` or :func:`mindspore.ops.interpolate` instead.
 
     Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
+        Deprecated
 
     Examples:
         >>> x = Tensor([[[[1, 2, 3, 4], [5, 6, 7, 8]]]], mindspore.float32)
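
The tightened checks in `bilinear` above enforce that exactly one of `size` and `scale` is given, that `size` is a 2-element tuple or list of positive ints, and that `scale` is a positive int applied to the spatial dims. A standalone sketch of the same resolution rule (`_resolve_bilinear_shape` is a hypothetical helper for illustration, not part of the package):

    def _resolve_bilinear_shape(in_shape, size=None, scale=None):
        # Exactly one of size/scale must be set, as in bilinear() above.
        if (size is None) == (scale is None):
            raise ValueError("exactly one of 'size' and 'scale' must be set")
        if size is not None:
            if not isinstance(size, (tuple, list)) or len(size) != 2:
                raise ValueError("'size' must be a 2-element tuple or list")
            return tuple(size)
        # scale multiplies the spatial dims of an NCHW shape.
        return (scale * in_shape[2], scale * in_shape[3])

    print(_resolve_bilinear_shape((1, 3, 2, 4), scale=2))  # (4, 8)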
@@ -1090,11 +964,15 @@ class ResizeBilinear(Cell):
     def __init__(self, half_pixel_centers=False):
         """Initialize ResizeBilinear."""
         super(ResizeBilinear, self).__init__()
+        logger.warning("'nn.ResizeBilinear' is deprecated from version 2.0 and will be removed in a "
+                       "future version, use 'ops.ResizeBilinearV2' or 'ops.interpolate' instead.")
         self.half_pixel_centers = half_pixel_centers
 
     def construct(self, x, size=None, scale_factor=None, align_corners=False):
-        shape = bilinear(x.shape, size, scale_factor, align_corners, self.cls_name)
-        resize_bilinear = P.ResizeBilinear(shape, align_corners, self.half_pixel_centers)
+        shape = bilinear(x.shape, size, scale_factor,
+                         align_corners, self.cls_name)
+        resize_bilinear = P.ResizeBilinear(
+            shape, align_corners, self.half_pixel_centers)
         return resize_bilinear(x)
 
 
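
A minimal migration sketch away from the deprecated cell (assuming the torch-style `ops.interpolate(x, size=..., mode=..., align_corners=...)` available in 2.0):

    import mindspore as ms
    from mindspore import ops, Tensor

    x = Tensor([[[[1, 2, 3, 4], [5, 6, 7, 8]]]], ms.float32)
    # Old: nn.ResizeBilinear()(x, size=(5, 5))
    out = ops.interpolate(x, size=(5, 5), mode="bilinear", align_corners=False)
    print(out.shape)  # (1, 1, 5, 5)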
@@ -1125,11 +1003,9 @@ class Unfold(Cell):
         Tensor, a 4-D tensor whose data type is same as `x`,
         and the shape is [out_batch, out_depth, out_row, out_col] where `out_batch` is the same as the `in_batch`.
 
-        :math:`out\_depth = ksize\_row * ksize\_col * in\_depth`
-
-        :math:`out\_row = (in\_row - (ksize\_row + (ksize\_row - 1) * (rate\_row - 1))) // stride\_row + 1`
-
-        :math:`out\_col = (in\_col - (ksize\_col + (ksize\_col - 1) * (rate\_col - 1))) // stride\_col + 1`
+        - :math:`out\_depth = ksize\_row * ksize\_col * in\_depth`
+        - :math:`out\_row = (in\_row - (ksize\_row + (ksize\_row - 1) * (rate\_row - 1))) // stride\_row + 1`
+        - :math:`out\_col = (in\_col - (ksize\_col + (ksize\_col - 1) * (rate\_col - 1))) // stride\_col + 1`
 
     Raises:
         TypeError: If `ksizes`, `strides` or `rates` is neither a tuple nor list.
@@ -1160,7 +1036,8 @@ class Unfold(Cell):
         super(Unfold, self).__init__()
 
         def _check_tuple_or_list(arg_name, arg_val, prim_name):
-            Validator.check_value_type(f"{arg_name}s", ksizes, [tuple, list], self.cls_name)
+            Validator.check_value_type(f"{arg_name}s", ksizes, [
+                                       tuple, list], self.cls_name)
             if len(arg_val) != 4 or arg_val[0] != 1 or arg_val[3] != 1:
                 raise ValueError(f"For '{prim_name}' the format of '{arg_name}s' must be [1, {arg_name}_row, "
                                  f"{arg_name}_col, 1], but got {arg_val}.")
@@ -1175,102 +1052,29 @@ class Unfold(Cell):
         ksizes = ksizes[0], ksizes[3], ksizes[1], ksizes[2]
         strides = strides[0], strides[3], strides[1], strides[2]
         rates = rates[0], rates[3], rates[1], rates[2]
-        self.extract_image_patches = inner.ExtractImagePatches(ksizes, strides, rates, padding)
+        self.extract_image_patches = inner.ExtractImagePatches(
+            ksizes, strides, rates, padding)
 
     def construct(self, input_x):
         result = self.extract_image_patches(input_x)
         return result
 
 
-@constexpr
+@_primexpr
 def tril(x_shape, x_dtype, k):
-    Validator.check_int(len(x_shape), 1, Rel.GE, "x rank", "tril")
+    Validator.check_int(len(x_shape), 1, Validator.GE, "x rank", "tril")
     Validator.check_is_int(k, "k value", "tril")
-
-    return
+    value = F.cast(P.Tril(diagonal=k)(F.ones(x_shape, x_dtype)), x_dtype)
+    return value
 
 
 class Tril(Cell):
     """
-
-
-    Divide the matrix elements into upper and lower triangles along the main diagonal (including diagonals).
-
-    The parameter `k` controls the choice of diagonal.
-    If `k` = 0, split along the main diagonal and keep all the elements of the lower triangle.
-    If `k` > 0, select the diagonal `k` along the main diagonal upwards, and keep all the elements of the lower
-    triangle.
-    If `k` < 0, select the diagonal `k` along the main diagonal down, and keep all the elements of the lower
-    triangle.
-
-    Inputs:
-        - **x** (Tensor) - The input tensor. The data type is
-          `number <https://www.mindspore.cn/docs/en/r2.0.0-alpha/api_python/mindspore.html#mindspore.dtype>`_.
-        - **k** (Int) - The index of diagonal. Default: 0. If the dimensions of the input matrix are d1 and d2,
-          the range of k should be in [-min(d1, d2)+1, min(d1, d2)-1], and the output value will be the same as the
-          input `x` when `k` is out of range.
-
-    Outputs:
-        Tensor, has the same shape and type as input `x`.
-
-    Raises:
-        TypeError: If `k` is not an int.
-        ValueError: If length of shape of `x` is less than 1.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> # case1: k = 0
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> tril = nn.Tril()
-        >>> result = tril(x)
-        >>> print(result)
-        [[ 1  0  0  0]
-         [ 5  6  0  0]
-         [10 11 12  0]
-         [14 15 16 17]]
-        >>> # case2: k = 1
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> tril = nn.Tril()
-        >>> result = tril(x, 1)
-        >>> print(result)
-        [[ 1  2  0  0]
-         [ 5  6  7  0]
-         [10 11 12 13]
-         [14 15 16 17]]
-        >>> # case3: k = 2
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> tril = nn.Tril()
-        >>> result = tril(x, 2)
-        >>> print(result)
-        [[ 1  2  3  0]
-         [ 5  6  7  8]
-         [10 11 12 13]
-         [14 15 16 17]]
-        >>> # case4: k = -1
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> tril = nn.Tril()
-        >>> result = tril(x, -1)
-        >>> print(result)
-        [[ 0  0  0  0]
-         [ 5  0  0  0]
-         [10 11  0  0]
-         [14 15 16  0]]
+    'nn.Tril' is deprecated from version 2.0 and will be removed in a future version,
+    use 'ops.tril' instead.
     """
 
+    @deprecated("2.0", "ops.tril", False)
     def __init__(self):
         """Initialize Tril."""
         super(Tril, self).__init__()
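
A minimal migration sketch for the deprecation above, reusing the `k = 1` case from the removed docstring (assuming `ops.tril(input, diagonal=0)`):

    import numpy as np
    import mindspore as ms
    from mindspore import ops, Tensor

    x = Tensor(np.array([[1, 2, 3, 4],
                         [5, 6, 7, 8],
                         [10, 11, 12, 13],
                         [14, 15, 16, 17]]))
    # Old: nn.Tril()(x, 1)
    print(ops.tril(x, diagonal=1))  # keeps the lower triangle up to diagonal 1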
@@ -1280,90 +1084,26 @@ class Tril(Cell):
 
     def construct(self, x, k=0):
         assist = tril(x.shape, self.dtype(x), k)
-        result = self.mul(self.cast(x, mstype.float32), self.cast(assist, mstype.float32))
+        result = self.mul(self.cast(x, mstype.float32),
+                          self.cast(assist, mstype.float32))
         return self.cast(result, self.dtype(x))
 
 
-@constexpr
+@_primexpr
 def triu(x_shape, x_dtype, k):
-    Validator.check_int(len(x_shape), 1, Rel.GE, "x rank", "triu")
+    Validator.check_int(len(x_shape), 1, Validator.GE, "x rank", "triu")
     Validator.check_is_int(k, "k value", "triu")
-
-    return
+    value = F.cast(P.Triu(k)(F.ones(x_shape, x_dtype)), x_dtype)
+    return value
 
 
 class Triu(Cell):
     """
-
-
-    The upper triangular part of the matrix is defined as the elements on and above the diagonal.
-
-    The parameter `k` controls the diagonal to be considered. If `k` = 0, all elements on and above the main diagonal
-    are retained. Positive values do not include as many diagonals above the main diagonal. Similarly,
-    negative values include as many diagonals below the main diagonal.
-
-    Inputs:
-        - **x** (Tensor) - The input tensor. The data type is Number.
-          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-        - **k** (Int) - The index of diagonal. Default: 0
-
-    Outputs:
-        Tensor, has the same type and shape as input `x`.
-
-    Raises:
-        TypeError: If `k` is not an int.
-        ValueError: If length of shape of `x` is less than 1.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> triu = nn.Triu()
-        >>> result = triu(x)
-        >>> print(result)
-        [[ 1  2  3  4]
-         [ 0  6  7  8]
-         [ 0  0 12 13]
-         [ 0  0  0 17]]
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> triu = nn.Triu()
-        >>> result = triu(x, 1)
-        >>> print(result)
-        [[ 0  2  3  4]
-         [ 0  0  7  8]
-         [ 0  0  0 13]
-         [ 0  0  0  0]]
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> triu = nn.Triu()
-        >>> result = triu(x, 2)
-        >>> print(result)
-        [[ 0  0  3  4]
-         [ 0  0  0  8]
-         [ 0  0  0  0]
-         [ 0  0  0  0]]
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> triu = nn.Triu()
-        >>> result = triu(x, -1)
-        >>> print(result)
-        [[ 1  2  3  4]
-         [ 5  6  7  8]
-         [ 0 11 12 13]
-         [ 0  0 16 17]]
+    'nn.Triu' is deprecated from version 2.0 and will be removed in a future version,
+    use 'ops.triu' instead.
     """
 
+    @deprecated("2.0", "ops.triu", False)
     def __init__(self):
         """Initialize Triu."""
         super(Triu, self).__init__()
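
The matching migration for the deprecation above (assuming `ops.triu(input, diagonal=0)`), again reusing the removed docstring's `k = 1` case:

    import numpy as np
    import mindspore as ms
    from mindspore import ops, Tensor

    x = Tensor(np.array([[1, 2, 3, 4],
                         [5, 6, 7, 8],
                         [10, 11, 12, 13],
                         [14, 15, 16, 17]]))
    # Old: nn.Triu()(x, 1)
    print(ops.triu(x, diagonal=1))  # zeros on and below the main diagonal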
@@ -1373,87 +1113,47 @@ class Triu(Cell):
 
     def construct(self, x, k=0):
         assist = triu(x.shape, self.dtype(x), k)
-        result = self.mul(self.cast(x, mstype.float32), self.cast(assist, mstype.float32))
+        result = self.mul(self.cast(x, mstype.float32),
+                          self.cast(assist, mstype.float32))
         return self.cast(result, self.dtype(x))
 
 
-@constexpr
+@_primexpr
 def _get_matrix_diag_assist(x_shape, x_dtype):
-
-
-
-
+    """Get matrix diag assist"""
+    Validator.check_int(len(x_shape), 1, Validator.GE, "x rank", "_get_matrix_diag_assist")
+    base_eye = F.reshape(
+        F.eye(x_shape[-1], x_shape[-1], x_dtype), (x_shape[-1] * x_shape[-1],))
+    if len(x_shape) == 1:
+        assist = F.reshape(base_eye, x_shape + (x_shape[-1],))
+    else:
+        assist = F.reshape(
+            F.tile(base_eye, x_shape[:-1]), x_shape + (x_shape[-1],))
+    value = F.cast(assist, x_dtype)
+    return value
 
 
 @constexpr
 def _get_matrix_diag_part_assist(x_shape, x_dtype):
-
-
-
-
+    """Get matrix diag part assist"""
+    Validator.check_int(len(x_shape), 2, Validator.GE, "x rank", "_get_matrix_diag_part_assist")
+    base_eye = F.reshape(
+        F.eye(x_shape[-2], x_shape[-1], x_dtype), (x_shape[-2] * x_shape[-1],))
+    if len(x_shape) <= 2:
+        assist = F.reshape(base_eye, x_shape)
+    else:
+        assist = F.reshape(F.tile(base_eye, x_shape[:-2]), x_shape)
+    value = F.cast(assist, x_dtype)
+    return value
 
 
 class MatrixDiag(Cell):
     r"""
-
-
-    Assume `x` has :math:`k` dimensions :math:`[I, J, K, ..., N]`, then the output is a tensor of rank
-    :math:`k+1` with dimensions :math:`[I, J, K, ..., N, N]` where:
-    :math:`output[i, j, k, ..., m, n] = 1\{m=n\} * x[i, j, k, ..., n]`.
-
-    Inputs:
-        - **x** (Tensor) - The diagonal values. It can be one of the following data types:
-          float32, float16, int32, int8, and uint8.
-          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-
-    Outputs:
-        Tensor, has the same type as input `x`. The shape must be x.shape + (x.shape[-1], ).
-
-    Raises:
-        TypeError: If dtype of `x` is not one of float32, float16, int32, int8 or uint8.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> x = Tensor(np.array([1, -1]), mindspore.float32)
-        >>> matrix_diag = nn.MatrixDiag()
-        >>> output = matrix_diag(x)
-        >>> print(x.shape)
-        (2,)
-        >>> print(output)
-        [[ 1.  0.]
-         [ 0. -1.]]
-        >>> print(output.shape)
-        (2, 2)
-        >>> x = Tensor(np.array([[1, -1], [1, -1]]), mindspore.float32)
-        >>> matrix_diag = nn.MatrixDiag()
-        >>> output = matrix_diag(x)
-        >>> print(x.shape)
-        (2, 2)
-        >>> print(output)
-        [[[ 1.  0.]
-          [ 0. -1.]]
-         [[ 1.  0.]
-          [ 0. -1.]]]
-        >>> print(output.shape)
-        (2, 2, 2)
-        >>> x = Tensor(np.array([[1, -1, 1], [1, -1, 1]]), mindspore.float32)
-        >>> matrix_diag = nn.MatrixDiag()
-        >>> output = matrix_diag(x)
-        >>> print(x.shape)
-        (2, 3)
-        >>> print(output)
-        [[[ 1.  0.  0.]
-          [ 0. -1.  0.]
-          [ 0.  0.  1.]]
-         [[ 1.  0.  0.]
-          [ 0. -1.  0.]
-          [ 0.  0.  1.]]]
-        >>> print(output.shape)
-        (2, 3, 3)
+    'nn.MatrixDiag' is deprecated from version 2.0 and will be removed in a future version,
+    use 'ops.diag' instead.
     """
 
+    @deprecated("2.0", "ops.diag", False)
     def __init__(self):
         """Initialize MatrixDiag."""
         super(MatrixDiag, self).__init__()
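
A minimal migration sketch for the deprecation above (note that `ops.diag` covers the 1-D vector case of the removed docstring; batched inputs need a different route):

    import numpy as np
    import mindspore as ms
    from mindspore import ops, Tensor

    x = Tensor(np.array([1, -1]), ms.float32)
    # Old: nn.MatrixDiag()(x)
    print(ops.diag(x))
    # [[ 1.  0.]
    #  [ 0. -1.]]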
@@ -1470,47 +1170,11 @@ class MatrixDiag(Cell):
 
 class MatrixDiagPart(Cell):
     r"""
-
-
-    Assume `x` has :math:`k` dimensions :math:`[I, J, K, ..., M, N]`, then the output is a tensor of rank
-    :math:`k-1` with dimensions :math:`[I, J, K, ..., min(M, N)]` where:
-    :math:`output[i, j, k, ..., n] = x[i, j, k, ..., n, n]`.
-
-    Inputs:
-        - **x** (Tensor) - The batched tensor. It can be one of the following data types:
-          float32, float16, int32, int8, and uint8.
-
-    Outputs:
-        Tensor, has the same type as input `x`. The shape must be x.shape[:-2] + [min(x.shape[-2:])].
-
-    Raises:
-        TypeError: If dtype of `x` is not one of float32, float16, int32, int8 or uint8.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, nn
-        >>> x = Tensor([[[-1, 0], [0, 1]],
-        ...             [[-1, 0], [0, 1]],
-        ...             [[-1, 0], [0, 1]]], mindspore.float32)
-        >>> matrix_diag_part = nn.MatrixDiagPart()
-        >>> output = matrix_diag_part(x)
-        >>> print(output)
-        [[-1.  1.]
-         [-1.  1.]
-         [-1.  1.]]
-        >>> x = Tensor([[-1, 0, 0, 1],
-        ...             [-1, 0, 0, 1],
-        ...             [-1, 0, 0, 1],
-        ...             [-1, 0, 0, 1]], mindspore.float32)
-        >>> matrix_diag_part = nn.MatrixDiagPart()
-        >>> output = matrix_diag_part(x)
-        >>> print(output)
-        [-1. 0. 0. 1.]
+    'nn.MatrixDiagPart' is deprecated from version 2.0 and will be removed in a future version,
+    use 'ops.diagonal' instead.
     """
 
+    @deprecated("2.0", "ops.diagonal", False)
     def __init__(self):
         """Initialize MatrixDiagPart."""
         super(MatrixDiagPart, self).__init__()
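
A minimal migration sketch for the deprecation above (assuming `ops.diagonal(input, offset=0, dim1=0, dim2=1)`; the last two dims are selected to match the removed batched example):

    import mindspore as ms
    from mindspore import ops, Tensor

    x = Tensor([[[-1, 0], [0, 1]],
                [[-1, 0], [0, 1]],
                [[-1, 0], [0, 1]]], ms.float32)
    # Old: nn.MatrixDiagPart()(x)
    print(ops.diagonal(x, dim1=-2, dim2=-1))
    # [[-1.  1.]
    #  [-1.  1.]
    #  [-1.  1.]]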
@@ -1586,59 +1250,23 @@ class MatrixSetDiag(Cell):
 
 @constexpr
 def _check_input_dim(axis, dim, cls_name):
-    Validator.check_int_range(axis, -dim, dim, Rel.INC_LEFT, 'axis', cls_name)
+    Validator.check_int_range(axis, -dim, dim, Validator.INC_LEFT, 'axis', cls_name)
 
 
 class Roll(Cell):
     """
-
-
-    The elements are shifted positively (towards larger indices) by the offset of `shift` along the dimension of `axis`.
-    Negative `shift` values will shift elements in the opposite direction. Elements that roll passed the last position
-    will wrap around to the first and vice versa. Multiple shifts along multiple axes may be specified.
-
-    Args:
-        shift (Union[list(int), tuple(int), int]): Specifies the number of places by which elements are shifted
-            positively (towards larger indices) along the specified dimension. Negative shifts will roll the elements
-            in the opposite direction.
-        axis (Union[list(int), tuple(int), int]): Specifies the dimension indexes of shape to be rolled.
-
-    Inputs:
-        - **input_x** (Tensor) - Input tensor.
-
-    Outputs:
-        Tensor, has the same shape and type as `input_x`.
-
-    Raises:
-        TypeError: If `shift` is not an int, a tuple or a list.
-        TypeError: If `axis` is not an int, a tuple or a list.
-        TypeError: If element of `shift` is not an int.
-        TypeError: If element of `axis` is not an int.
-        ValueError: If axis is out of the range [-len(input_x.shape), len(input_x.shape)).
-        ValueError: If length of shape of `shift` is not equal to length of shape of `axis`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU``
-
-    Examples:
-        >>> input_x = Tensor(np.array([0, 1, 2, 3, 4]).astype(np.float32))
-        >>> op = nn.Roll(shift=2, axis=0)
-        >>> output = op(input_x)
-        >>> print(output)
-        [3. 4. 0. 1. 2.]
-        >>> input_x = Tensor(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]).astype(np.float32))
-        >>> op = nn.Roll(shift=[1, -2], axis=[0, 1])
-        >>> output = op(input_x)
-        >>> print(output)
-        [[7. 8. 9. 5. 6.]
-         [2. 3. 4. 0. 1.]]
+    'nn.Roll' is deprecated from version 2.0 and will be removed in a future version,
+    use 'ops.roll' instead.
     """
 
+    @deprecated("2.0", "ops.roll", False)
     def __init__(self, shift, axis):
         """Initialize Roll"""
         super(Roll, self).__init__()
-        Validator.check_value_type("shift", shift, [int, tuple, list], self.cls_name)
-        Validator.check_value_type("axis", axis, [int, tuple, list], self.cls_name)
+        Validator.check_value_type(
+            "shift", shift, [int, tuple, list], self.cls_name)
+        Validator.check_value_type(
+            "axis", axis, [int, tuple, list], self.cls_name)
         self.shape_op = P.Shape()
         self.shift = shift
         self.axis = axis
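
A minimal migration sketch for the deprecation above, reusing the removed docstring example (assuming `ops.roll(input, shifts, dims)`):

    import numpy as np
    import mindspore as ms
    from mindspore import ops, Tensor

    x = Tensor(np.array([0, 1, 2, 3, 4]).astype(np.float32))
    # Old: nn.Roll(shift=2, axis=0)(x)
    print(ops.roll(x, shifts=2, dims=0))  # [3. 4. 0. 1. 2.]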
@@ -1650,8 +1278,8 @@ class Roll(Cell):
         if not isinstance(self.shift, (list, tuple)):
             self.shift = [self.shift]
         if context.get_context("device_target") == "GPU":
-            Validator.check_int(len(self.shift), 1, Rel.GE, "shift", "Roll")
-            Validator.check_int(len(self.axis), 1, Rel.GE, "axis", "Roll")
+            Validator.check_int(len(self.shift), 1, Validator.GE, "shift", "Roll")
+            Validator.check_int(len(self.axis), 1, Validator.GE, "axis", "Roll")
             for s_axis in self.axis:
                 Validator.check_is_int(s_axis, "axis", "Roll")
             for s_shift in self.shift:
@@ -1664,14 +1292,16 @@ class Roll(Cell):
                                  f"and the length of 'axis' {len(self.axis)}.")
         else:
             if not isinstance(self.axis, (list, tuple)):
-                self.op_list.append((P.Roll(shift=self.shift, axis=0), self.axis))
+                self.op_list.append(
+                    (P.Roll(shift=self.shift, axis=0), self.axis))
             else:
                 if len(self.shift) != len(self.axis):
                     raise ValueError(f"For '{self.cls_name}', the shape of 'shift' and the shape of 'axis' must be "
                                      f"the same, but got the length of 'shift' {len(self.shift)} "
                                      f"and the length of 'axis' {len(self.axis)}.")
                 for idx, _ in enumerate(self.axis):
-                    self.op_list.append((P.Roll(shift=self.shift[idx], axis=0), self.axis[idx]))
+                    self.op_list.append(
+                        (P.Roll(shift=self.shift[idx], axis=0), self.axis[idx]))
 
     def construct(self, input_x):
         dim = len(self.shape_op(input_x))
@@ -1697,12 +1327,12 @@ class Unflatten(Cell):
 class Unflatten(Cell):
     r"""
     Summary:
-        Unflattens a
+        Unflattens a Tensor dim according to `axis` and `unflattened_size`.
 
     Args:
-        axis (int): specifies the dimension of the input
-        unflattened_size (Union(tuple[int], list[int])):
-            the
+        axis (int): specifies the dimension of the input Tensor to be unflattened.
+        unflattened_size (Union(tuple[int], list[int])): the new shape of the unflattened dimension of
+            the Tensor and it can be a tuple of ints or a list of ints. The product of `unflattened_size`
             must equal to input_shape[axis].
 
     Inputs:
@@ -1714,7 +1344,7 @@ class Unflatten(Cell):
     Raises:
         TypeError: If `axis` is not int.
         TypeError: If `unflattened_size` is neither tuple of ints nor list of ints.
-        TypeError:
+        TypeError: The product of `unflattened_size` does not equal to input_shape[axis].
 
     Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
@@ -1735,7 +1365,8 @@ class Unflatten(Cell):
         self.shape = P.Shape()
         self.reshape = P.Reshape()
         Validator.check_is_int(axis, 'axis', 'Unflatten')
-        Validator.check_value_type('unflattended_size', unflattened_size, (list, tuple), 'Unflatten')
+        Validator.check_value_type(
+            'unflattended_size', unflattened_size, (list, tuple), 'Unflatten')
         self.axis = axis
         if isinstance(unflattened_size, list):
            unflattened_size = tuple(unflattened_size)
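
`nn.Unflatten` itself is not deprecated; the hunks above only complete its docstring and rewrap the validator call. A minimal usage sketch (shapes illustrative):

    import numpy as np
    import mindspore as ms
    from mindspore import nn, Tensor

    x = Tensor(np.arange(24).reshape(2, 12), ms.float32)
    net = nn.Unflatten(1, (3, 4))  # 3 * 4 must equal x.shape[1]
    print(net(x).shape)  # (2, 3, 4)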