mindspore 2.0.0a0__cp37-cp37m-win_amd64.whl → 2.0.0rc1__cp37-cp37m-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +4 -2
- mindspore/_c_dataengine.cp37-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp37-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp37-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +102 -0
- mindspore/_checkparam.py +1066 -1001
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +4 -3
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -48
- mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
- mindspore/_extends/parse/__init__.py +5 -3
- mindspore/_extends/parse/namespace.py +16 -1
- mindspore/_extends/parse/parser.py +107 -22
- mindspore/_extends/parse/resources.py +0 -7
- mindspore/_extends/parse/standard_method.py +885 -413
- mindspore/amp.py +52 -57
- mindspore/boost/boost.py +2 -2
- mindspore/boost/boost_cell_wrapper.py +38 -20
- mindspore/boost/dim_reduce.py +3 -3
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +4 -6
- mindspore/common/_decorator.py +2 -0
- mindspore/common/_register_for_adapter.py +55 -0
- mindspore/common/_stub_tensor.py +201 -0
- mindspore/common/_utils.py +41 -7
- mindspore/common/api.py +215 -141
- mindspore/common/dtype.py +8 -1
- mindspore/common/dump.py +2 -2
- mindspore/common/initializer.py +4 -2
- mindspore/common/jit_config.py +17 -13
- mindspore/common/mutable.py +33 -13
- mindspore/common/parameter.py +23 -21
- mindspore/common/seed.py +8 -24
- mindspore/common/sparse_tensor.py +62 -41
- mindspore/common/tensor.py +852 -1154
- mindspore/communication/__init__.py +2 -2
- mindspore/communication/_comm_helper.py +11 -4
- mindspore/communication/management.py +22 -21
- mindspore/config/op_info.config +501 -1008
- mindspore/context.py +201 -23
- mindspore/dataset/__init__.py +6 -6
- mindspore/dataset/audio/__init__.py +7 -7
- mindspore/dataset/audio/transforms.py +670 -30
- mindspore/dataset/audio/utils.py +47 -4
- mindspore/dataset/audio/validators.py +223 -1
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/core/config.py +210 -14
- mindspore/dataset/core/validator_helpers.py +2 -2
- mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
- mindspore/dataset/debug/debug_hook.py +65 -0
- mindspore/dataset/debug/pre_defined_hook.py +67 -0
- mindspore/dataset/engine/__init__.py +7 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +322 -66
- mindspore/dataset/engine/datasets_audio.py +80 -76
- mindspore/dataset/engine/datasets_standard_format.py +51 -38
- mindspore/dataset/engine/datasets_text.py +232 -118
- mindspore/dataset/engine/datasets_user_defined.py +41 -17
- mindspore/dataset/engine/datasets_vision.py +746 -225
- mindspore/dataset/engine/graphdata.py +75 -10
- mindspore/dataset/engine/iterators.py +45 -5
- mindspore/dataset/engine/offload.py +48 -28
- mindspore/dataset/engine/validators.py +117 -8
- mindspore/dataset/text/__init__.py +6 -5
- mindspore/dataset/text/transforms.py +86 -3
- mindspore/dataset/text/utils.py +6 -4
- mindspore/dataset/text/validators.py +25 -0
- mindspore/dataset/transforms/__init__.py +3 -2
- mindspore/dataset/transforms/c_transforms.py +1 -1
- mindspore/dataset/transforms/transforms.py +2 -2
- mindspore/dataset/utils/__init__.py +2 -1
- mindspore/dataset/utils/line_reader.py +121 -0
- mindspore/dataset/vision/__init__.py +2 -3
- mindspore/dataset/vision/c_transforms.py +9 -9
- mindspore/dataset/vision/py_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +2 -0
- mindspore/dataset/vision/transforms.py +160 -161
- mindspore/dataset/vision/utils.py +3 -3
- mindspore/experimental/map_parameter.py +38 -26
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/callback/callback.h +9 -13
- mindspore/include/api/callback/ckpt_saver.h +2 -2
- mindspore/include/api/callback/loss_monitor.h +2 -2
- mindspore/include/api/callback/lr_scheduler.h +5 -5
- mindspore/include/api/callback/time_monitor.h +2 -2
- mindspore/include/api/callback/train_accuracy.h +4 -6
- mindspore/include/api/cfg.h +19 -6
- mindspore/include/api/context.h +44 -9
- mindspore/include/api/delegate.h +1 -1
- mindspore/include/api/metrics/accuracy.h +2 -2
- mindspore/include/api/metrics/metrics.h +4 -3
- mindspore/include/api/model.h +9 -4
- mindspore/include/api/model_parallel_runner.h +2 -2
- mindspore/include/api/net.h +12 -11
- mindspore/include/api/serialization.h +19 -3
- mindspore/include/api/types.h +3 -3
- mindspore/include/dataset/constants.h +7 -0
- mindspore/include/dataset/text.h +59 -0
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +1 -1
- mindspore/mindrecord/filereader.py +18 -0
- mindspore/mindrecord/filewriter.py +197 -34
- mindspore/mindrecord/shardreader.py +9 -0
- mindspore/mindrecord/shardwriter.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +3 -3
- mindspore/mindrecord/tools/cifar10_to_mr.py +3 -3
- mindspore/mindrecord/tools/csv_to_mr.py +3 -3
- mindspore/mindrecord/tools/imagenet_to_mr.py +16 -11
- mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
- mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/nn/__init__.py +0 -4
- mindspore/nn/cell.py +204 -132
- mindspore/nn/dynamic_lr.py +1 -1
- mindspore/nn/grad/cell_grad.py +7 -6
- mindspore/nn/layer/__init__.py +5 -4
- mindspore/nn/layer/activation.py +40 -89
- mindspore/nn/layer/basic.py +255 -624
- mindspore/nn/layer/channel_shuffle.py +7 -6
- mindspore/nn/layer/combined.py +1 -1
- mindspore/nn/layer/container.py +41 -4
- mindspore/nn/layer/conv.py +64 -28
- mindspore/nn/layer/dense.py +9 -8
- mindspore/nn/layer/embedding.py +27 -25
- mindspore/nn/layer/image.py +53 -46
- mindspore/nn/layer/math.py +97 -105
- mindspore/nn/layer/normalization.py +117 -86
- mindspore/nn/layer/padding.py +185 -95
- mindspore/nn/layer/pooling.py +817 -414
- mindspore/nn/layer/rnn_cells.py +10 -15
- mindspore/nn/layer/rnns.py +37 -38
- mindspore/nn/layer/thor_layer.py +11 -12
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +701 -0
- mindspore/nn/learning_rate_schedule.py +8 -8
- mindspore/nn/loss/__init__.py +5 -4
- mindspore/nn/loss/loss.py +334 -199
- mindspore/nn/optim/ada_grad.py +6 -6
- mindspore/nn/optim/adadelta.py +2 -3
- mindspore/nn/optim/adafactor.py +4 -5
- mindspore/nn/optim/adam.py +126 -62
- mindspore/nn/optim/adamax.py +3 -4
- mindspore/nn/optim/adasum.py +6 -6
- mindspore/nn/optim/asgd.py +2 -2
- mindspore/nn/optim/ftrl.py +67 -38
- mindspore/nn/optim/lamb.py +4 -5
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +43 -4
- mindspore/nn/optim/momentum.py +6 -5
- mindspore/nn/optim/optimizer.py +3 -1
- mindspore/nn/optim/proximal_ada_grad.py +2 -2
- mindspore/nn/optim/rmsprop.py +1 -1
- mindspore/nn/optim/rprop.py +8 -9
- mindspore/nn/optim/sgd.py +19 -13
- mindspore/nn/optim/thor.py +10 -15
- mindspore/nn/probability/__init__.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +4 -4
- mindspore/nn/probability/bijector/invert.py +1 -1
- mindspore/nn/probability/bijector/softplus.py +2 -2
- mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
- mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
- mindspore/nn/probability/distribution/_utils/utils.py +9 -15
- mindspore/nn/probability/distribution/bernoulli.py +3 -3
- mindspore/nn/probability/distribution/beta.py +1 -1
- mindspore/nn/probability/distribution/categorical.py +5 -7
- mindspore/nn/probability/distribution/cauchy.py +3 -3
- mindspore/nn/probability/distribution/distribution.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -2
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +3 -3
- mindspore/nn/probability/distribution/half_normal.py +15 -11
- mindspore/nn/probability/distribution/laplace.py +16 -13
- mindspore/nn/probability/distribution/logistic.py +2 -2
- mindspore/nn/probability/distribution/normal.py +1 -1
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/student_t.py +20 -15
- mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +3 -3
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +27 -10
- mindspore/nn/wrap/grad_reducer.py +2 -2
- mindspore/nn/wrap/loss_scale.py +40 -24
- mindspore/numpy/array_creations.py +33 -22
- mindspore/numpy/array_ops.py +35 -30
- mindspore/numpy/logic_ops.py +6 -27
- mindspore/numpy/math_ops.py +22 -19
- mindspore/numpy/utils.py +1 -1
- mindspore/numpy/utils_const.py +108 -58
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_constants.py +0 -6
- mindspore/ops/_grad/__init__.py +2 -1
- mindspore/ops/_grad/grad_array_ops.py +86 -117
- mindspore/ops/_grad/grad_base.py +23 -1
- mindspore/ops/_grad/grad_clip_ops.py +2 -3
- mindspore/ops/_grad/grad_comm_ops.py +34 -24
- mindspore/ops/_grad/grad_implementations.py +9 -45
- mindspore/ops/_grad/grad_inner_ops.py +47 -4
- mindspore/ops/_grad/grad_math_ops.py +142 -117
- mindspore/ops/_grad/grad_nn_ops.py +71 -165
- mindspore/ops/_grad/grad_sequence_ops.py +296 -0
- mindspore/ops/_grad/grad_sparse.py +7 -6
- mindspore/ops/_grad_experimental/__init__.py +1 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +150 -15
- mindspore/ops/_grad_experimental/grad_image_ops.py +16 -7
- mindspore/ops/_grad_experimental/grad_inner_ops.py +1 -22
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +4 -11
- mindspore/ops/_grad_experimental/grad_math_ops.py +210 -89
- mindspore/ops/_grad_experimental/grad_nn_ops.py +26 -22
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +49 -8
- mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +4 -4
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
- mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/correction_mul.py +2 -2
- mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -5
- mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -4
- mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +236 -4
- mindspore/ops/_op_impl/aicpu/abs.py +36 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_v1.py → adaptive_avg_pool_2d.py} +6 -5
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/add.py +43 -0
- mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
- mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
- mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -43
- mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
- mindspore/{compression/common/__init__.py → ops/_op_impl/aicpu/bessel_i0.py} +15 -8
- mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
- mindspore/ops/_op_impl/aicpu/conj.py +11 -0
- mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +0 -3
- mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_grad_v1.py → digamma.py} +7 -9
- mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
- mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
- mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
- mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
- mindspore/ops/_op_impl/aicpu/greater.py +41 -0
- mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
- mindspore/ops/_op_impl/aicpu/less.py +41 -0
- mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/lgamma.py} +16 -10
- mindspore/ops/_op_impl/aicpu/mirror_pad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mul.py +3 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
- mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
- mindspore/ops/_op_impl/aicpu/polar.py +32 -0
- mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
- mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
- mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
- mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
- mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
- mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
- mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
- mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
- mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
- mindspore/ops/_op_impl/aicpu/trans_data.py +1 -0
- mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
- mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
- mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
- mindspore/ops/_op_impl/cpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
- mindspore/ops/_op_impl/tbe/__init__.py +27 -611
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
- mindspore/ops/_op_impl/tbe/cast.py +0 -2
- mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +2 -2
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
- mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
- mindspore/ops/_op_impl/tbe/matmul_ds.py +2 -0
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/scatter_mul.py +2 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -2
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
- mindspore/ops/_register_for_op.py +1 -0
- mindspore/ops/_utils/__init__.py +1 -2
- mindspore/ops/_utils/utils.py +19 -40
- mindspore/ops/_vmap/vmap_array_ops.py +116 -38
- mindspore/ops/_vmap/vmap_base.py +16 -9
- mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
- mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +7 -5
- mindspore/ops/_vmap/vmap_image_ops.py +12 -5
- mindspore/ops/_vmap/vmap_math_ops.py +46 -5
- mindspore/ops/_vmap/vmap_nn_ops.py +15 -21
- mindspore/ops/_vmap/vmap_random_ops.py +1 -1
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +220 -106
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -23
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -17
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +39 -41
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +41 -43
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +51 -57
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +24 -25
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +53 -53
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +77 -85
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +37 -39
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +70 -72
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +17 -17
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
- mindspore/ops/bprop_mindir/generate_mindir.py +2 -0
- mindspore/ops/composite/__init__.py +7 -8
- mindspore/ops/composite/base.py +101 -47
- mindspore/ops/composite/math_ops.py +188 -158
- mindspore/ops/composite/multitype_ops/_compile_utils.py +415 -170
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +142 -87
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +2 -3
- mindspore/ops/composite/multitype_ops/getitem_impl.py +31 -3
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +21 -5
- mindspore/ops/composite/multitype_ops/not_in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
- mindspore/ops/composite/multitype_ops/setitem_impl.py +21 -3
- mindspore/ops/composite/multitype_ops/sub_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +35 -4
- mindspore/ops/function/__init__.py +152 -8
- mindspore/ops/function/array_func.py +2555 -674
- mindspore/ops/function/clip_func.py +209 -13
- mindspore/ops/function/debug_func.py +2 -2
- mindspore/ops/function/grad/__init__.py +2 -1
- mindspore/ops/function/grad/grad_func.py +147 -62
- mindspore/ops/function/image_func.py +54 -38
- mindspore/ops/function/linalg_func.py +167 -16
- mindspore/ops/function/math_func.py +4849 -1492
- mindspore/ops/function/nn_func.py +2573 -988
- mindspore/ops/function/other_func.py +115 -0
- mindspore/ops/function/parameter_func.py +3 -3
- mindspore/ops/function/random_func.py +790 -73
- mindspore/ops/function/sparse_func.py +98 -78
- mindspore/ops/function/sparse_unary_func.py +54 -53
- mindspore/ops/function/spectral_func.py +27 -24
- mindspore/ops/function/vmap_func.py +22 -2
- mindspore/ops/functional.py +97 -37
- mindspore/ops/op_info_register.py +70 -28
- mindspore/ops/operations/__init__.py +47 -14
- mindspore/ops/operations/_csr_ops.py +7 -7
- mindspore/ops/operations/_embedding_cache_ops.py +5 -5
- mindspore/ops/operations/_grad_ops.py +276 -187
- mindspore/ops/operations/_inner_ops.py +319 -113
- mindspore/ops/operations/_ms_kernel.py +10 -8
- mindspore/ops/operations/_ocr_ops.py +9 -9
- mindspore/ops/operations/_opaque_predicate_registry.py +4 -0
- mindspore/ops/operations/_quant_ops.py +137 -102
- mindspore/ops/operations/_rl_inner_ops.py +121 -60
- mindspore/ops/operations/_scalar_ops.py +466 -0
- mindspore/ops/operations/_sequence_ops.py +1004 -2
- mindspore/ops/operations/_tensor_array.py +10 -11
- mindspore/ops/operations/_thor_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +801 -466
- mindspore/ops/operations/comm_ops.py +51 -49
- mindspore/ops/operations/control_ops.py +2 -2
- mindspore/ops/operations/custom_ops.py +123 -44
- mindspore/ops/operations/debug_ops.py +24 -24
- mindspore/ops/operations/image_ops.py +240 -153
- mindspore/ops/operations/inner_ops.py +34 -50
- mindspore/ops/operations/linalg_ops.py +31 -9
- mindspore/ops/operations/math_ops.py +988 -757
- mindspore/ops/operations/nn_ops.py +965 -819
- mindspore/ops/operations/other_ops.py +51 -40
- mindspore/ops/operations/random_ops.py +204 -122
- mindspore/ops/operations/rl_ops.py +8 -9
- mindspore/ops/operations/sparse_ops.py +254 -93
- mindspore/ops/operations/spectral_ops.py +35 -3
- mindspore/ops/primitive.py +111 -9
- mindspore/parallel/_auto_parallel_context.py +189 -83
- mindspore/parallel/_offload_context.py +185 -0
- mindspore/parallel/_parallel_serialization.py +99 -7
- mindspore/parallel/_ps_context.py +9 -5
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +7 -1
- mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
- mindspore/{nn/transformer → parallel/_transformer}/layers.py +6 -37
- mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
- mindspore/{nn/transformer → parallel/_transformer}/moe.py +20 -16
- mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
- mindspore/{nn/transformer → parallel/_transformer}/transformer.py +48 -111
- mindspore/parallel/_utils.py +1 -2
- mindspore/parallel/algo_parameter_config.py +1 -1
- mindspore/parallel/checkpoint_transform.py +37 -34
- mindspore/parallel/shard.py +17 -18
- mindspore/profiler/common/validator/validate_path.py +2 -2
- mindspore/profiler/envprofiling.py +69 -47
- mindspore/profiler/parser/ascend_timeline_generator.py +49 -42
- mindspore/profiler/parser/base_timeline_generator.py +49 -56
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +98 -78
- mindspore/profiler/parser/hwts_log_parser.py +1 -1
- mindspore/profiler/parser/integrator.py +15 -14
- mindspore/profiler/parser/minddata_analyzer.py +2 -2
- mindspore/profiler/parser/msadvisor_analyzer.py +12 -25
- mindspore/profiler/parser/msadvisor_parser.py +2 -4
- mindspore/profiler/parser/optime_parser.py +17 -18
- mindspore/profiler/parser/profiler_info.py +2 -1
- mindspore/profiler/profiling.py +218 -186
- mindspore/rewrite/__init__.py +3 -1
- mindspore/rewrite/api/node.py +1 -114
- mindspore/rewrite/api/node_type.py +3 -0
- mindspore/rewrite/api/pattern_engine.py +31 -1
- mindspore/rewrite/api/scoped_value.py +4 -4
- mindspore/rewrite/api/symbol_tree.py +3 -78
- mindspore/rewrite/api/tree_node_helper.py +1 -1
- mindspore/rewrite/ast_creator_register.py +1 -0
- mindspore/rewrite/ast_helpers/__init__.py +2 -2
- mindspore/rewrite/ast_helpers/ast_creator.py +1 -2
- mindspore/rewrite/ast_helpers/ast_finder.py +65 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +11 -3
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +18 -2
- mindspore/rewrite/namespace.py +0 -2
- mindspore/rewrite/node.py +157 -11
- mindspore/rewrite/parsers/assign_parser.py +231 -53
- mindspore/rewrite/parsers/class_def_parser.py +187 -109
- mindspore/rewrite/parsers/for_parser.py +24 -14
- mindspore/rewrite/parsers/function_def_parser.py +21 -4
- mindspore/rewrite/parsers/if_parser.py +6 -2
- mindspore/rewrite/sparsify/__init__.py +0 -0
- mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
- mindspore/rewrite/sparsify/sparsify.py +109 -0
- mindspore/rewrite/sparsify/utils.py +173 -0
- mindspore/rewrite/symbol_tree.py +256 -133
- mindspore/rewrite/symbol_tree_builder.py +38 -1
- mindspore/run_check/_check_version.py +69 -63
- mindspore/run_check/run_check.py +2 -1
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +1 -1
- mindspore/train/_utils.py +28 -5
- mindspore/train/amp.py +273 -102
- mindspore/train/callback/_backup_and_restore.py +5 -5
- mindspore/train/callback/_callback.py +2 -2
- mindspore/train/callback/_checkpoint.py +3 -3
- mindspore/train/callback/_early_stop.py +3 -3
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +29 -31
- mindspore/train/callback/_loss_monitor.py +3 -3
- mindspore/train/callback/_on_request_exit.py +3 -3
- mindspore/train/callback/_reduce_lr_on_plateau.py +4 -4
- mindspore/train/callback/_summary_collector.py +23 -16
- mindspore/train/callback/_time_monitor.py +3 -3
- mindspore/train/checkpoint_pb2.py +68 -8
- mindspore/train/data_sink.py +15 -3
- mindspore/train/dataset_helper.py +10 -15
- mindspore/train/loss_scale_manager.py +8 -11
- mindspore/train/metrics/__init__.py +1 -1
- mindspore/train/metrics/bleu_score.py +1 -1
- mindspore/train/metrics/confusion_matrix.py +1 -1
- mindspore/train/metrics/cosine_similarity.py +1 -1
- mindspore/train/metrics/dice.py +2 -2
- mindspore/train/metrics/fbeta.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +4 -3
- mindspore/train/metrics/mean_surface_distance.py +2 -2
- mindspore/train/metrics/occlusion_sensitivity.py +1 -1
- mindspore/train/metrics/perplexity.py +1 -1
- mindspore/train/metrics/precision.py +1 -1
- mindspore/train/metrics/recall.py +1 -1
- mindspore/train/metrics/roc.py +2 -2
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +116 -37
- mindspore/train/model.py +45 -28
- mindspore/train/serialization.py +295 -188
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +43 -13
- mindspore/train/train_thor/convert_utils.py +2 -2
- mindspore/train/train_thor/dataset_helper.py +3 -3
- mindspore/turbojpeg.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +3 -2
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +610 -541
- mindspore/compression/__init__.py +0 -19
- mindspore/compression/common/constant.py +0 -124
- mindspore/compression/export/__init__.py +0 -19
- mindspore/compression/export/quant_export.py +0 -515
- mindspore/compression/quant/__init__.py +0 -28
- mindspore/compression/quant/qat.py +0 -634
- mindspore/compression/quant/quant_utils.py +0 -462
- mindspore/compression/quant/quantizer.py +0 -68
- mindspore/nn/layer/quant.py +0 -1868
- mindspore/nn/layer/rnn_utils.py +0 -90
- mindspore/nn/probability/dpn/__init__.py +0 -22
- mindspore/nn/probability/dpn/vae/__init__.py +0 -25
- mindspore/nn/probability/dpn/vae/cvae.py +0 -140
- mindspore/nn/probability/dpn/vae/vae.py +0 -124
- mindspore/nn/probability/infer/__init__.py +0 -22
- mindspore/nn/probability/infer/variational/elbo.py +0 -70
- mindspore/nn/probability/infer/variational/svi.py +0 -84
- mindspore/nn/probability/toolbox/__init__.py +0 -22
- mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
- mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -364
- mindspore/nn/probability/transforms/__init__.py +0 -22
- mindspore/nn/probability/transforms/transform_bnn.py +0 -262
- mindspore/nn/probability/zhusuan/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
- mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
- mindspore/ops/_op_impl/aicpu/parallel_concat.py +0 -42
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Cast_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/MatMul_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/Transpose_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -15
- mindspore/ops/composite/array_ops.py +0 -241
- mindspore/ops/composite/clip_ops.py +0 -134
- mindspore/ops/composite/random_ops.py +0 -426
- mindspore/ops/composite/vmap_ops.py +0 -38
- mindspore/parallel/nn/__init__.py +0 -42
- mindspore/parallel/nn/loss.py +0 -22
- mindspore/parallel/nn/moe.py +0 -21
- mindspore/parallel/nn/op_parallel_config.py +0 -22
- mindspore/parallel/nn/transformer.py +0 -31
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
mindspore/nn/grad/cell_grad.py
CHANGED
@@ -23,6 +23,7 @@ from mindspore.ops.primitive import Primitive
 from mindspore.common import dtype as mstype
 from mindspore.common.api import jit
 from mindspore.common._decorator import deprecated
+from mindspore.common import mutable


 class _FirstGrad(Cell):
@@ -94,10 +95,10 @@ class Jvp(Cell):

         if self.issubclass_(self.typeof(output), mstype.tuple_):
             u = self.make_tuple()
-            for
-            u = u + self.make_tuple(self.oneslike(
+            for _, element in enumerate(output):
+                u = u + self.make_tuple(mutable(self.oneslike(element)))
         else:
-            u = self.oneslike(output)
+            u = mutable(self.oneslike(output))

         if len(jvp_input) == 1:
             second_gradient_net = self.second_grad_op(self.first_grad_single_value)
@@ -130,10 +131,10 @@ class _JvpInner(Cell):
         """Compute the jacobian-vector-product of the given fn, vector, inputs and outputs."""
         if self.issubclass_(self.typeof(output), mstype.tuple_):
             u = self.make_tuple()
-            for
-            u = u + self.make_tuple(self.oneslike(
+            for _, element in enumerate(output):
+                u = u + self.make_tuple(mutable(self.oneslike(element)))
         else:
-            u = self.oneslike(output)
+            u = mutable(self.oneslike(output))

         if len(jvp_input) == 1:
             second_gradient_net = self.second_grad_op(self.first_grad_single_value)
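The change above wraps the ones-like tangent vectors built inside Jvp/_JvpInner in mindspore.common.mutable, so the tangent is kept as a variable input rather than being folded into the compiled graph as a constant. A minimal sketch of the same idea through the public API (the toy function, the input values, and the use of ops.jvp here are illustrative assumptions, not part of the diff):

# Sketch only: demonstrates mutable() on a ones-like tangent, mirroring the change above.
import numpy as np
import mindspore as ms
from mindspore import Tensor, mutable, ops

def fn(x):
    # toy primal function (assumption for illustration)
    return x ** 3

x = Tensor(np.array([1.0, 2.0, 3.0], np.float32))
# Wrapping the tangent in mutable() marks it as a variable, as the new Jvp code does
# for its internally generated ones-like vector.
v = mutable(ops.ones_like(x))
primal_out, jvp_out = ops.jvp(fn, x, v)
print(primal_out)
print(jvp_out)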
mindspore/nn/layer/__init__.py
CHANGED
@@ -20,7 +20,8 @@ The high-level components(Cells) used to construct the neural network.
 from __future__ import absolute_import

 from mindspore.nn.layer import activation, normalization, container, conv, basic, embedding, pooling, \
-    image,
+    image, math, combined, timedistributed, thor_layer, rnns, rnn_cells, padding, dense, transformer, \
+    channel_shuffle
 from mindspore.nn.layer.activation import *
 from mindspore.nn.layer.normalization import *
 from mindspore.nn.layer.container import *
@@ -32,14 +33,14 @@ from mindspore.nn.layer.basic import *
 from mindspore.nn.layer.embedding import *
 from mindspore.nn.layer.pooling import *
 from mindspore.nn.layer.image import *
-from mindspore.nn.layer.quant import *
 from mindspore.nn.layer.math import *
 from mindspore.nn.layer.combined import *
 from mindspore.nn.layer.timedistributed import *
+from mindspore.nn.layer.transformer import *
 from mindspore.nn.layer.channel_shuffle import ChannelShuffle
 from mindspore.nn.layer.thor_layer import DenseThor, Conv2dThor, EmbeddingThor, EmbeddingLookupThor
 from mindspore.nn.layer.padding import ConstantPad1d, ConstantPad2d, ConstantPad3d, ReflectionPad1d, \
-    ReflectionPad2d, ZeroPad2d, ReplicationPad1d, ReplicationPad2d, ReplicationPad3d
+    ReflectionPad2d, ReflectionPad3d, ZeroPad2d, ReplicationPad1d, ReplicationPad2d, ReplicationPad3d

 __all__ = []
 __all__.extend(activation.__all__)
@@ -53,10 +54,10 @@ __all__.extend(basic.__all__)
 __all__.extend(embedding.__all__)
 __all__.extend(pooling.__all__)
 __all__.extend(image.__all__)
-__all__.extend(quant.__all__)
 __all__.extend(math.__all__)
 __all__.extend(combined.__all__)
 __all__.extend(timedistributed.__all__)
+__all__.extend(transformer.__all__)
 __all__.extend(thor_layer.__all__)
 __all__.extend(padding.__all__)
 __all__.extend(channel_shuffle.__all__)
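For reference, the import reshuffle above drops the quant layer exports (mindspore/nn/layer/quant.py is removed in this release, per the file list) and re-exports the new transformer, channel_shuffle, and ReflectionPad3d symbols. A hypothetical quick check, not part of the diff, that two of the newly exported layers resolve from the top-level mindspore.nn namespace:

# Sketch only: exercises two of the layers whose exports are added above.
import numpy as np
from mindspore import nn, Tensor

shuffle = nn.ChannelShuffle(groups=2)   # export added via channel_shuffle
pad = nn.ReflectionPad3d(1)             # newly added to the padding exports

x = Tensor(np.arange(16, dtype=np.float32).reshape(1, 4, 2, 2))
print(shuffle(x).shape)                 # same shape, channels regrouped

x3d = Tensor(np.ones((1, 1, 2, 2, 2), dtype=np.float32))
print(pad(x3d).shape)                   # last three dims grow by 2 after reflection padding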
mindspore/nn/layer/activation.py
CHANGED
@@ -17,7 +17,7 @@ from __future__ import absolute_import

 import numpy as np

-from mindspore
+from mindspore import _checkparam as validator
 from mindspore._extends import cell_attr_register
 from mindspore.common import dtype as mstype
 from mindspore.common.parameter import Parameter
@@ -25,8 +25,9 @@ from mindspore.common.tensor import Tensor
 from mindspore.ops import functional as F
 from mindspore.ops import operations as P
 from mindspore.ops.operations import nn_ops as NN_OPS
-from mindspore.ops.primitive import constexpr
 from mindspore.nn.cell import Cell
+from mindspore import ops
+from mindspore.ops.primitive import _primexpr

 __all__ = ['Softmin',
            'Softmax',
@@ -156,19 +157,17 @@ class Softmin(Cell):
     def __init__(self, axis=-1):
         """Initialize Softmin."""
         super(Softmin, self).__init__()
-        self.
+        self.axis = axis

     def construct(self, x):
-        x
-        return self.softmax(x)
+        return ops.function.softmin(x, self.axis)


 class Softmax2d(Cell):
     r"""
-
+    Softmax function applied to 2D features data.

-
-    apply `Softmax` to each location :math:`(c, h, w)`.
+    Applies `Softmax` to each location :math:`(c, h, w)` with an input Tensor of shape :math:`(C, H, W)` .

     Inputs:
         - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`.
@@ -198,7 +197,7 @@ class Softmax2d(Cell):
         self.shape = P.Shape()

     @staticmethod
-    @
+    @_primexpr
     def _check_input_dim(shape, cls_name):
         dim = len(shape)
         if dim not in (3, 4):
@@ -275,8 +274,6 @@ class LogSoftmax(Cell):

         \text{logsoftmax}(x_i) = \log \left(\frac{\exp(x_i)}{\sum_{j=0}^{n-1} \exp(x_j)}\right),

-    where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.
-
     Args:
         axis (int): The axis to apply LogSoftmax operation, -1 means the last dimension. Default: -1.

@@ -381,7 +378,7 @@ class ReLU(Cell):

     Inputs:
         - **x** (Tensor) - The input of ReLU is a Tensor of any dimension. The data type is `number <https://www.mind
-          spore.cn/docs/en/r2.0
+          spore.cn/docs/en/r2.0/api_python/mindspore.html#mindspore.dtype>`_ .

     Outputs:
         Tensor, with the same type and shape as the `x`.
@@ -425,7 +422,6 @@ class ReLU6(Cell):

     Inputs:
         - **x** (Tensor) - The input of ReLU6 with data type of float16 or float32.
-          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

     Outputs:
         Tensor, which has the same type as `x`.
@@ -517,8 +513,8 @@ class RReLU(Cell):
     The activation function is defined as:

     .. math::
-
-
+        \text{RReLU}(x_{ji}) = \begin{cases}x_{ji}, &\text{if } x_{ji} \geq 0; \cr
+        {\alpha_{ji}} * x_{ji}, &\text{otherwise.}\end{cases}

     where :math:`\alpha_{ji}` ~ :math:`U(l, u)`, :math:`l \le u`.

@@ -565,17 +561,17 @@
         if lower > upper:
             raise ValueError(f"For {self.cls_name}, the value of 'upper' must be greater than 'lower', "
                              f"but got upper: {upper}, lower: {lower}. ")
-
-        self.
-        self.upper = upper
+        self.lower = Tensor(lower, dtype=mstype.float32)
+        self.upper = Tensor(upper, dtype=mstype.float32)
         self.sign = P.Sign()

     def construct(self, x):
-
+        _size = x.shape
+        _dtype = x.dtype
         sign_matrix = self.sign(x)
         negative_filter = sign_matrix.clip(None, 0)
         positive_filter = sign_matrix.clip(0, None)
-        mask =
+        mask = ops.uniform(_size, self.lower, self.upper).astype(_dtype)
         negative_mask = negative_filter * mask * -1
         total_mask = negative_mask + positive_filter
         out = total_mask * x
@@ -629,7 +625,7 @@ class SiLU(Cell):
     `SiLU <https://en.wikipedia.org/wiki/Activation_function#/media/File:Swish.svg>`_ .

     Inputs:
-        - **x** (Tensor) - Input with the data type float16 or float32.
+        - **x** (Tensor) - Input with the data type float16 or float32.

     Outputs:
         Tensor, with the same type and shape as the `x`.
@@ -651,10 +647,9 @@
     def __init__(self):
         """Initialize SiLU."""
         super(SiLU, self).__init__()
-        self.sigmoid = P.Sigmoid()

     def construct(self, x):
-        return
+        return ops.function.silu(x)


 class Tanh(Cell):
@@ -713,14 +708,13 @@ class Tanhshrink(Cell):
     where :math:`x_i` is an element of the input Tensor.

     Inputs:
-        - **x** (Tensor) - Tensor of any dimension
+        - **x** (Tensor) - Tensor of any dimension.

     Outputs:
         Tensor, with the same type and shape as the `x`.

     Raises:
         TypeError: If `x` is not a Tensor.
-        TypeError: If dtype of `x` is neither float16 nor float32.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -740,17 +734,9 @@
     def __init__(self):
         """Initialize Tanhshrink."""
         super(Tanhshrink, self).__init__()
-        self.tanh = P.Tanh()

     def construct(self, x):
-        return
-
-
-@constexpr
-def _dtype_check(x_dtype, prim_name):
-    """Check dtype."""
-    if x_dtype not in [mstype.float32, mstype.float16]:
-        raise TypeError("For {}, the x_dtype must be float32 or float16, but got {}.".format(prim_name, x_dtype))
+        return F.tanhshrink(x)


 class Hardtanh(Cell):
@@ -785,7 +771,7 @@ class Hardtanh(Cell):
         TypeError: If dtype of `x` is neither float16 nor float32.
         TypeError: If dtype of `min_val` is neither float nor int.
         TypeError: If dtype of `max_val` is neither float nor int.
-        ValueError: If `
+        ValueError: If `min_val` is not less than `max_val`.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -804,34 +790,14 @@
     def __init__(self, min_val=-1.0, max_val=1.0):
         """Initialize Hardtanh."""
         super(Hardtanh, self).__init__()
-        validator.check_value_type('min_val', min_val, [float, int], self.cls_name)
-        validator.check_value_type('max_val', max_val, [float, int], self.cls_name)
-        validator.check_number("max_val", max_val, min_val, Rel.GE, self.cls_name)
-
-        self.max = P.Maximum()
-        self.min = P.Minimum()
         self.min_val = min_val
         self.max_val = max_val
-        self.
-
-
+        if self.min_val >= self.max_val:
+            raise ValueError(f"For Hardtanh, min_val should be less than max_val,"
+                             f"but got {self.min_val} and {self.max_val}")

     def construct(self, x):
-
-            raise TypeError("'x' must be a Tensor")
-        _dtype_check(self.dtype(x), self.cls_name)
-        # min_val and max_val are scalars, if x is 0d, x is also a scalar.
-        # However, ops.Maximum does not support input two scalar.
-        # To solve this problem, expand x from scalar to tensor, apply Maximum, then squeeze the output back to scalar.
-        if not x.shape:
-            x = self.expand(x, 0)
-            x = self.max(x, self.min_val)
-            x = self.min(x, self.max_val)
-            x = self.squeeze(x)
-        else:
-            x = self.max(x, self.min_val)
-            x = self.min(x, self.max_val)
-        return x
+        return F.hardtanh(x, self.min_val, self.max_val)


 class GELU(Cell):
@@ -1054,14 +1020,14 @@ class PReLU(Cell):
     Activation_function#/media/File:Activation_prelu.svg>`_.

     Args:
-        channel (int): The elements number of parameter
+        channel (int): The elements number of parameter `w`.
             It could be an int, and the value is 1 or the channels number of input tensor `x`. Default: 1.
         w (Union[float, list, Tensor]): The initial value of parameter. It could be a float, a float list or
             a tensor has the same dtype as the input tensor `x`. Default: 0.25.

     Inputs:
         - **x** (Tensor) - The input of PReLU with data type of float16 or float32.
-          The shape is :math:`(N
+          The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.

     Outputs:
         Tensor, with the same dtype and shape as the `x`.
@@ -1118,15 +1084,9 @@
                             f"but got {type(w).__name__}.")
         self.w = Parameter(w, name='a')
         self.prelu = P.PReLU()
-        self.relu = P.ReLU()
-        self.assign = P.Assign()

     def construct(self, x):
-
-        v = self.prelu(x, F.cast(u, x.dtype))
-        if self.training:
-            self.assign(self.w, u)
-        return v
+        return self.prelu(x, F.cast(self.w, x.dtype))


 class HSwish(Cell):
@@ -1138,8 +1098,6 @@ class HSwish(Cell):
     .. math::
         \text{hswish}(x_{i}) = x_{i} * \frac{ReLU6(x_{i} + 3)}{6},

-    where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.
-
     Inputs:
         - **x** (Tensor) - The input of HSwish, data type must be float16 or float32.
           The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
@@ -1179,8 +1137,6 @@ class HSigmoid(Cell):
     .. math::
         \text{hsigmoid}(x_{i}) = max(0, min(1, \frac{x_{i} + 3}{6})),

-    where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.
-
     Inputs:
         - **input_x** (Tensor) - The input of HSigmoid. Tensor of any dimension.

@@ -1302,7 +1258,8 @@ class SoftShrink(Cell):
         \end{cases}

     Args:
-        lambd: the :math:`\lambda` must be no less than zero for the SoftShrink formulation.
+        lambd (float): the :math:`\lambda` must be no less than zero for the SoftShrink formulation.
+            Default: 0.5.

     Inputs:
         - **input_x** (Tensor) - The input of SoftShrink with data type of float16 or float32.
@@ -1318,7 +1275,7 @@ class SoftShrink(Cell):
         ValueError: If lambd is less than 0.

     Supported Platforms:
-        ``Ascend`` ``
+        ``Ascend`` ``GPU`` ``CPU``

     Examples:
         >>> input_x = Tensor(np.array([[ 0.5297, 0.7871, 1.1754], [ 0.7836, 0.6218, -1.1542]]), mstype.float16)
@@ -1366,7 +1323,7 @@ class HShrink(Cell):
         TypeError: If dtype of `input_x` is neither float16 nor float32.

     Supported Platforms:
-        ``Ascend`` ``
+        ``Ascend`` ``GPU`` ``CPU``

     Examples:
         >>> import mindspore
@@ -1415,7 +1372,7 @@ class Threshold(Cell):
         TypeError: If `value` is not a float or an int.

     Supported Platforms:
-        ``Ascend`` ``
+        ``Ascend`` ``GPU`` ``CPU``

     Examples:
         >>> import mindspore
@@ -1429,19 +1386,12 @@ class Threshold(Cell):

     def __init__(self, threshold, value):
         """Initialize Threshold."""
-        super().__init__()
-        validator.check_value_type('threshold', threshold, [float, int], self.cls_name)
-        validator.check_value_type('value', value, [float, int], self.cls_name)
+        super(Threshold, self).__init__()
         self.threshold = threshold
         self.value = value
-        self.greater = P.Greater()
-        self.fill = P.Fill()
-        self.select = P.Select()

     def construct(self, input_x):
-
-        value = self.fill(input_x.dtype, input_x.shape, self.value)
-        return self.select(cond, input_x, value)
+        return F.threshold(input_x, self.threshold, self.value)


 class Mish(Cell):
@@ -1473,17 +1423,17 @@

 class GLU(Cell):
     r"""
-
+    The gated linear unit function.

     .. math::
         {GLU}(a, b)= a \otimes \sigma(b)

     where :math:`a` is the first half of the input matrices and :math:`b` is the second half.

-    Here :math:`\sigma` is the sigmoid function, and :math
+    Here :math:`\sigma` is the sigmoid function, and :math:`\otimes` is the Hadamard product.

     Args:
-        axis (int): the
+        axis (int): the axis to split the input. Default: -1, the last axis in `x`.

     Inputs:
         - **x** (Tensor) - :math:`(\ast_1, N, \ast_2)` where `*` means, any number of additional dimensions.
@@ -1499,7 +1449,7 @@ class GLU(Cell):
         >>> input = Tensor([[0.1,0.2,0.3,0.4],[0.5,0.6,0.7,0.8]])
         >>> output = m(input)
         >>> print(output)
-        [[0.05744425 0.11973753
+        [[0.05744425 0.11973753]
          [0.33409387 0.41398472]]
     """

@@ -1515,6 +1465,7 @@ class GLU(Cell):
         x2 = self.sigmoid(x2)
         return x1 * x2

+
 _activation = {
     'softmin': Softmin,
     'softmax': Softmax,