mindspore 2.0.0a0-cp39-cp39-win_amd64.whl → 2.0.0rc1-cp39-cp39-win_amd64.whl
This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +4 -2
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +102 -0
- mindspore/_checkparam.py +1066 -1001
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +4 -3
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -48
- mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
- mindspore/_extends/parse/__init__.py +5 -3
- mindspore/_extends/parse/namespace.py +16 -1
- mindspore/_extends/parse/parser.py +107 -22
- mindspore/_extends/parse/resources.py +0 -7
- mindspore/_extends/parse/standard_method.py +885 -413
- mindspore/amp.py +52 -57
- mindspore/boost/boost.py +2 -2
- mindspore/boost/boost_cell_wrapper.py +38 -20
- mindspore/boost/dim_reduce.py +3 -3
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +4 -6
- mindspore/common/_decorator.py +2 -0
- mindspore/common/_register_for_adapter.py +55 -0
- mindspore/common/_stub_tensor.py +201 -0
- mindspore/common/_utils.py +41 -7
- mindspore/common/api.py +215 -141
- mindspore/common/dtype.py +8 -1
- mindspore/common/dump.py +2 -2
- mindspore/common/initializer.py +4 -2
- mindspore/common/jit_config.py +17 -13
- mindspore/common/mutable.py +33 -13
- mindspore/common/parameter.py +23 -21
- mindspore/common/seed.py +8 -24
- mindspore/common/sparse_tensor.py +62 -41
- mindspore/common/tensor.py +852 -1154
- mindspore/communication/__init__.py +2 -2
- mindspore/communication/_comm_helper.py +11 -4
- mindspore/communication/management.py +22 -21
- mindspore/config/op_info.config +501 -1008
- mindspore/context.py +201 -23
- mindspore/dataset/__init__.py +6 -6
- mindspore/dataset/audio/__init__.py +7 -7
- mindspore/dataset/audio/transforms.py +670 -30
- mindspore/dataset/audio/utils.py +47 -4
- mindspore/dataset/audio/validators.py +223 -1
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/core/config.py +210 -14
- mindspore/dataset/core/validator_helpers.py +2 -2
- mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
- mindspore/dataset/debug/debug_hook.py +65 -0
- mindspore/dataset/debug/pre_defined_hook.py +67 -0
- mindspore/dataset/engine/__init__.py +7 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +322 -66
- mindspore/dataset/engine/datasets_audio.py +80 -76
- mindspore/dataset/engine/datasets_standard_format.py +51 -38
- mindspore/dataset/engine/datasets_text.py +232 -118
- mindspore/dataset/engine/datasets_user_defined.py +41 -17
- mindspore/dataset/engine/datasets_vision.py +746 -225
- mindspore/dataset/engine/graphdata.py +75 -10
- mindspore/dataset/engine/iterators.py +45 -5
- mindspore/dataset/engine/offload.py +48 -28
- mindspore/dataset/engine/validators.py +117 -8
- mindspore/dataset/text/__init__.py +6 -5
- mindspore/dataset/text/transforms.py +86 -3
- mindspore/dataset/text/utils.py +6 -4
- mindspore/dataset/text/validators.py +25 -0
- mindspore/dataset/transforms/__init__.py +3 -2
- mindspore/dataset/transforms/c_transforms.py +1 -1
- mindspore/dataset/transforms/transforms.py +2 -2
- mindspore/dataset/utils/__init__.py +2 -1
- mindspore/dataset/utils/line_reader.py +121 -0
- mindspore/dataset/vision/__init__.py +2 -3
- mindspore/dataset/vision/c_transforms.py +9 -9
- mindspore/dataset/vision/py_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +2 -0
- mindspore/dataset/vision/transforms.py +160 -161
- mindspore/dataset/vision/utils.py +3 -3
- mindspore/experimental/map_parameter.py +38 -26
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/callback/callback.h +9 -13
- mindspore/include/api/callback/ckpt_saver.h +2 -2
- mindspore/include/api/callback/loss_monitor.h +2 -2
- mindspore/include/api/callback/lr_scheduler.h +5 -5
- mindspore/include/api/callback/time_monitor.h +2 -2
- mindspore/include/api/callback/train_accuracy.h +4 -6
- mindspore/include/api/cfg.h +19 -6
- mindspore/include/api/context.h +44 -9
- mindspore/include/api/delegate.h +1 -1
- mindspore/include/api/metrics/accuracy.h +2 -2
- mindspore/include/api/metrics/metrics.h +4 -3
- mindspore/include/api/model.h +9 -4
- mindspore/include/api/model_parallel_runner.h +2 -2
- mindspore/include/api/net.h +12 -11
- mindspore/include/api/serialization.h +19 -3
- mindspore/include/api/types.h +3 -3
- mindspore/include/dataset/constants.h +7 -0
- mindspore/include/dataset/text.h +59 -0
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +1 -1
- mindspore/mindrecord/filereader.py +18 -0
- mindspore/mindrecord/filewriter.py +197 -34
- mindspore/mindrecord/shardreader.py +9 -0
- mindspore/mindrecord/shardwriter.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +3 -3
- mindspore/mindrecord/tools/cifar10_to_mr.py +3 -3
- mindspore/mindrecord/tools/csv_to_mr.py +3 -3
- mindspore/mindrecord/tools/imagenet_to_mr.py +16 -11
- mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
- mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/nn/__init__.py +0 -4
- mindspore/nn/cell.py +204 -132
- mindspore/nn/dynamic_lr.py +1 -1
- mindspore/nn/grad/cell_grad.py +7 -6
- mindspore/nn/layer/__init__.py +5 -4
- mindspore/nn/layer/activation.py +40 -89
- mindspore/nn/layer/basic.py +255 -624
- mindspore/nn/layer/channel_shuffle.py +7 -6
- mindspore/nn/layer/combined.py +1 -1
- mindspore/nn/layer/container.py +41 -4
- mindspore/nn/layer/conv.py +64 -28
- mindspore/nn/layer/dense.py +9 -8
- mindspore/nn/layer/embedding.py +27 -25
- mindspore/nn/layer/image.py +53 -46
- mindspore/nn/layer/math.py +97 -105
- mindspore/nn/layer/normalization.py +117 -86
- mindspore/nn/layer/padding.py +185 -95
- mindspore/nn/layer/pooling.py +817 -414
- mindspore/nn/layer/rnn_cells.py +10 -15
- mindspore/nn/layer/rnns.py +37 -38
- mindspore/nn/layer/thor_layer.py +11 -12
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +701 -0
- mindspore/nn/learning_rate_schedule.py +8 -8
- mindspore/nn/loss/__init__.py +5 -4
- mindspore/nn/loss/loss.py +334 -199
- mindspore/nn/optim/ada_grad.py +6 -6
- mindspore/nn/optim/adadelta.py +2 -3
- mindspore/nn/optim/adafactor.py +4 -5
- mindspore/nn/optim/adam.py +126 -62
- mindspore/nn/optim/adamax.py +3 -4
- mindspore/nn/optim/adasum.py +6 -6
- mindspore/nn/optim/asgd.py +2 -2
- mindspore/nn/optim/ftrl.py +67 -38
- mindspore/nn/optim/lamb.py +4 -5
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +43 -4
- mindspore/nn/optim/momentum.py +6 -5
- mindspore/nn/optim/optimizer.py +3 -1
- mindspore/nn/optim/proximal_ada_grad.py +2 -2
- mindspore/nn/optim/rmsprop.py +1 -1
- mindspore/nn/optim/rprop.py +8 -9
- mindspore/nn/optim/sgd.py +19 -13
- mindspore/nn/optim/thor.py +10 -15
- mindspore/nn/probability/__init__.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +4 -4
- mindspore/nn/probability/bijector/invert.py +1 -1
- mindspore/nn/probability/bijector/softplus.py +2 -2
- mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
- mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
- mindspore/nn/probability/distribution/_utils/utils.py +9 -15
- mindspore/nn/probability/distribution/bernoulli.py +3 -3
- mindspore/nn/probability/distribution/beta.py +1 -1
- mindspore/nn/probability/distribution/categorical.py +5 -7
- mindspore/nn/probability/distribution/cauchy.py +3 -3
- mindspore/nn/probability/distribution/distribution.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -2
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +3 -3
- mindspore/nn/probability/distribution/half_normal.py +15 -11
- mindspore/nn/probability/distribution/laplace.py +16 -13
- mindspore/nn/probability/distribution/logistic.py +2 -2
- mindspore/nn/probability/distribution/normal.py +1 -1
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/student_t.py +20 -15
- mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +3 -3
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +27 -10
- mindspore/nn/wrap/grad_reducer.py +2 -2
- mindspore/nn/wrap/loss_scale.py +40 -24
- mindspore/numpy/array_creations.py +33 -22
- mindspore/numpy/array_ops.py +35 -30
- mindspore/numpy/logic_ops.py +6 -27
- mindspore/numpy/math_ops.py +22 -19
- mindspore/numpy/utils.py +1 -1
- mindspore/numpy/utils_const.py +108 -58
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_constants.py +0 -6
- mindspore/ops/_grad/__init__.py +2 -1
- mindspore/ops/_grad/grad_array_ops.py +86 -117
- mindspore/ops/_grad/grad_base.py +23 -1
- mindspore/ops/_grad/grad_clip_ops.py +2 -3
- mindspore/ops/_grad/grad_comm_ops.py +34 -24
- mindspore/ops/_grad/grad_implementations.py +9 -45
- mindspore/ops/_grad/grad_inner_ops.py +47 -4
- mindspore/ops/_grad/grad_math_ops.py +142 -117
- mindspore/ops/_grad/grad_nn_ops.py +71 -165
- mindspore/ops/_grad/grad_sequence_ops.py +296 -0
- mindspore/ops/_grad/grad_sparse.py +7 -6
- mindspore/ops/_grad_experimental/__init__.py +1 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +150 -15
- mindspore/ops/_grad_experimental/grad_image_ops.py +16 -7
- mindspore/ops/_grad_experimental/grad_inner_ops.py +1 -22
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +4 -11
- mindspore/ops/_grad_experimental/grad_math_ops.py +210 -89
- mindspore/ops/_grad_experimental/grad_nn_ops.py +26 -22
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +49 -8
- mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +4 -4
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
- mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/correction_mul.py +2 -2
- mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -5
- mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -4
- mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +236 -4
- mindspore/ops/_op_impl/aicpu/abs.py +36 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_v1.py → adaptive_avg_pool_2d.py} +6 -5
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/add.py +43 -0
- mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
- mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
- mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -43
- mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
- mindspore/{compression/common/__init__.py → ops/_op_impl/aicpu/bessel_i0.py} +15 -8
- mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
- mindspore/ops/_op_impl/aicpu/conj.py +11 -0
- mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +0 -3
- mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_grad_v1.py → digamma.py} +7 -9
- mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
- mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
- mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
- mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
- mindspore/ops/_op_impl/aicpu/greater.py +41 -0
- mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
- mindspore/ops/_op_impl/aicpu/less.py +41 -0
- mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/lgamma.py} +16 -10
- mindspore/ops/_op_impl/aicpu/mirror_pad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mul.py +3 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
- mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
- mindspore/ops/_op_impl/aicpu/polar.py +32 -0
- mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
- mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
- mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
- mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
- mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
- mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
- mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
- mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
- mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
- mindspore/ops/_op_impl/aicpu/trans_data.py +1 -0
- mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
- mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
- mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
- mindspore/ops/_op_impl/cpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
- mindspore/ops/_op_impl/tbe/__init__.py +27 -611
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
- mindspore/ops/_op_impl/tbe/cast.py +0 -2
- mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +2 -2
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
- mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
- mindspore/ops/_op_impl/tbe/matmul_ds.py +2 -0
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/scatter_mul.py +2 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -2
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
- mindspore/ops/_register_for_op.py +1 -0
- mindspore/ops/_utils/__init__.py +1 -2
- mindspore/ops/_utils/utils.py +19 -40
- mindspore/ops/_vmap/vmap_array_ops.py +116 -38
- mindspore/ops/_vmap/vmap_base.py +16 -9
- mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
- mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +7 -5
- mindspore/ops/_vmap/vmap_image_ops.py +12 -5
- mindspore/ops/_vmap/vmap_math_ops.py +46 -5
- mindspore/ops/_vmap/vmap_nn_ops.py +15 -21
- mindspore/ops/_vmap/vmap_random_ops.py +1 -1
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +220 -106
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -23
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -17
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +39 -41
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +41 -43
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +51 -57
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +24 -25
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +53 -53
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +77 -85
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +37 -39
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +70 -72
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +17 -17
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
- mindspore/ops/bprop_mindir/generate_mindir.py +2 -0
- mindspore/ops/composite/__init__.py +7 -8
- mindspore/ops/composite/base.py +101 -47
- mindspore/ops/composite/math_ops.py +188 -158
- mindspore/ops/composite/multitype_ops/_compile_utils.py +415 -170
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +142 -87
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +2 -3
- mindspore/ops/composite/multitype_ops/getitem_impl.py +31 -3
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +21 -5
- mindspore/ops/composite/multitype_ops/not_in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
- mindspore/ops/composite/multitype_ops/setitem_impl.py +21 -3
- mindspore/ops/composite/multitype_ops/sub_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +35 -4
- mindspore/ops/function/__init__.py +152 -8
- mindspore/ops/function/array_func.py +2555 -674
- mindspore/ops/function/clip_func.py +209 -13
- mindspore/ops/function/debug_func.py +2 -2
- mindspore/ops/function/grad/__init__.py +2 -1
- mindspore/ops/function/grad/grad_func.py +147 -62
- mindspore/ops/function/image_func.py +54 -38
- mindspore/ops/function/linalg_func.py +167 -16
- mindspore/ops/function/math_func.py +4849 -1492
- mindspore/ops/function/nn_func.py +2573 -988
- mindspore/ops/function/other_func.py +115 -0
- mindspore/ops/function/parameter_func.py +3 -3
- mindspore/ops/function/random_func.py +790 -73
- mindspore/ops/function/sparse_func.py +98 -78
- mindspore/ops/function/sparse_unary_func.py +54 -53
- mindspore/ops/function/spectral_func.py +27 -24
- mindspore/ops/function/vmap_func.py +22 -2
- mindspore/ops/functional.py +97 -37
- mindspore/ops/op_info_register.py +70 -28
- mindspore/ops/operations/__init__.py +47 -14
- mindspore/ops/operations/_csr_ops.py +7 -7
- mindspore/ops/operations/_embedding_cache_ops.py +5 -5
- mindspore/ops/operations/_grad_ops.py +276 -187
- mindspore/ops/operations/_inner_ops.py +319 -113
- mindspore/ops/operations/_ms_kernel.py +10 -8
- mindspore/ops/operations/_ocr_ops.py +9 -9
- mindspore/ops/operations/_opaque_predicate_registry.py +4 -0
- mindspore/ops/operations/_quant_ops.py +137 -102
- mindspore/ops/operations/_rl_inner_ops.py +121 -60
- mindspore/ops/operations/_scalar_ops.py +466 -0
- mindspore/ops/operations/_sequence_ops.py +1004 -2
- mindspore/ops/operations/_tensor_array.py +10 -11
- mindspore/ops/operations/_thor_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +801 -466
- mindspore/ops/operations/comm_ops.py +51 -49
- mindspore/ops/operations/control_ops.py +2 -2
- mindspore/ops/operations/custom_ops.py +123 -44
- mindspore/ops/operations/debug_ops.py +24 -24
- mindspore/ops/operations/image_ops.py +240 -153
- mindspore/ops/operations/inner_ops.py +34 -50
- mindspore/ops/operations/linalg_ops.py +31 -9
- mindspore/ops/operations/math_ops.py +988 -757
- mindspore/ops/operations/nn_ops.py +965 -819
- mindspore/ops/operations/other_ops.py +51 -40
- mindspore/ops/operations/random_ops.py +204 -122
- mindspore/ops/operations/rl_ops.py +8 -9
- mindspore/ops/operations/sparse_ops.py +254 -93
- mindspore/ops/operations/spectral_ops.py +35 -3
- mindspore/ops/primitive.py +111 -9
- mindspore/parallel/_auto_parallel_context.py +189 -83
- mindspore/parallel/_offload_context.py +185 -0
- mindspore/parallel/_parallel_serialization.py +99 -7
- mindspore/parallel/_ps_context.py +9 -5
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +7 -1
- mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
- mindspore/{nn/transformer → parallel/_transformer}/layers.py +6 -37
- mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
- mindspore/{nn/transformer → parallel/_transformer}/moe.py +20 -16
- mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
- mindspore/{nn/transformer → parallel/_transformer}/transformer.py +48 -111
- mindspore/parallel/_utils.py +1 -2
- mindspore/parallel/algo_parameter_config.py +1 -1
- mindspore/parallel/checkpoint_transform.py +37 -34
- mindspore/parallel/shard.py +17 -18
- mindspore/profiler/common/validator/validate_path.py +2 -2
- mindspore/profiler/envprofiling.py +69 -47
- mindspore/profiler/parser/ascend_timeline_generator.py +49 -42
- mindspore/profiler/parser/base_timeline_generator.py +49 -56
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +98 -78
- mindspore/profiler/parser/hwts_log_parser.py +1 -1
- mindspore/profiler/parser/integrator.py +15 -14
- mindspore/profiler/parser/minddata_analyzer.py +2 -2
- mindspore/profiler/parser/msadvisor_analyzer.py +12 -25
- mindspore/profiler/parser/msadvisor_parser.py +2 -4
- mindspore/profiler/parser/optime_parser.py +17 -18
- mindspore/profiler/parser/profiler_info.py +2 -1
- mindspore/profiler/profiling.py +218 -186
- mindspore/rewrite/__init__.py +3 -1
- mindspore/rewrite/api/node.py +1 -114
- mindspore/rewrite/api/node_type.py +3 -0
- mindspore/rewrite/api/pattern_engine.py +31 -1
- mindspore/rewrite/api/scoped_value.py +4 -4
- mindspore/rewrite/api/symbol_tree.py +3 -78
- mindspore/rewrite/api/tree_node_helper.py +1 -1
- mindspore/rewrite/ast_creator_register.py +1 -0
- mindspore/rewrite/ast_helpers/__init__.py +2 -2
- mindspore/rewrite/ast_helpers/ast_creator.py +1 -2
- mindspore/rewrite/ast_helpers/ast_finder.py +65 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +11 -3
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +18 -2
- mindspore/rewrite/namespace.py +0 -2
- mindspore/rewrite/node.py +157 -11
- mindspore/rewrite/parsers/assign_parser.py +231 -53
- mindspore/rewrite/parsers/class_def_parser.py +187 -109
- mindspore/rewrite/parsers/for_parser.py +24 -14
- mindspore/rewrite/parsers/function_def_parser.py +21 -4
- mindspore/rewrite/parsers/if_parser.py +6 -2
- mindspore/rewrite/sparsify/__init__.py +0 -0
- mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
- mindspore/rewrite/sparsify/sparsify.py +109 -0
- mindspore/rewrite/sparsify/utils.py +173 -0
- mindspore/rewrite/symbol_tree.py +256 -133
- mindspore/rewrite/symbol_tree_builder.py +38 -1
- mindspore/run_check/_check_version.py +69 -63
- mindspore/run_check/run_check.py +2 -1
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +1 -1
- mindspore/train/_utils.py +28 -5
- mindspore/train/amp.py +273 -102
- mindspore/train/callback/_backup_and_restore.py +5 -5
- mindspore/train/callback/_callback.py +2 -2
- mindspore/train/callback/_checkpoint.py +3 -3
- mindspore/train/callback/_early_stop.py +3 -3
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +29 -31
- mindspore/train/callback/_loss_monitor.py +3 -3
- mindspore/train/callback/_on_request_exit.py +3 -3
- mindspore/train/callback/_reduce_lr_on_plateau.py +4 -4
- mindspore/train/callback/_summary_collector.py +23 -16
- mindspore/train/callback/_time_monitor.py +3 -3
- mindspore/train/checkpoint_pb2.py +68 -8
- mindspore/train/data_sink.py +15 -3
- mindspore/train/dataset_helper.py +10 -15
- mindspore/train/loss_scale_manager.py +8 -11
- mindspore/train/metrics/__init__.py +1 -1
- mindspore/train/metrics/bleu_score.py +1 -1
- mindspore/train/metrics/confusion_matrix.py +1 -1
- mindspore/train/metrics/cosine_similarity.py +1 -1
- mindspore/train/metrics/dice.py +2 -2
- mindspore/train/metrics/fbeta.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +4 -3
- mindspore/train/metrics/mean_surface_distance.py +2 -2
- mindspore/train/metrics/occlusion_sensitivity.py +1 -1
- mindspore/train/metrics/perplexity.py +1 -1
- mindspore/train/metrics/precision.py +1 -1
- mindspore/train/metrics/recall.py +1 -1
- mindspore/train/metrics/roc.py +2 -2
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +116 -37
- mindspore/train/model.py +45 -28
- mindspore/train/serialization.py +295 -188
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +43 -13
- mindspore/train/train_thor/convert_utils.py +2 -2
- mindspore/train/train_thor/dataset_helper.py +3 -3
- mindspore/turbojpeg.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +3 -2
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +610 -541
- mindspore/compression/__init__.py +0 -19
- mindspore/compression/common/constant.py +0 -124
- mindspore/compression/export/__init__.py +0 -19
- mindspore/compression/export/quant_export.py +0 -515
- mindspore/compression/quant/__init__.py +0 -28
- mindspore/compression/quant/qat.py +0 -634
- mindspore/compression/quant/quant_utils.py +0 -462
- mindspore/compression/quant/quantizer.py +0 -68
- mindspore/nn/layer/quant.py +0 -1868
- mindspore/nn/layer/rnn_utils.py +0 -90
- mindspore/nn/probability/dpn/__init__.py +0 -22
- mindspore/nn/probability/dpn/vae/__init__.py +0 -25
- mindspore/nn/probability/dpn/vae/cvae.py +0 -140
- mindspore/nn/probability/dpn/vae/vae.py +0 -124
- mindspore/nn/probability/infer/__init__.py +0 -22
- mindspore/nn/probability/infer/variational/elbo.py +0 -70
- mindspore/nn/probability/infer/variational/svi.py +0 -84
- mindspore/nn/probability/toolbox/__init__.py +0 -22
- mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
- mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -364
- mindspore/nn/probability/transforms/__init__.py +0 -22
- mindspore/nn/probability/transforms/transform_bnn.py +0 -262
- mindspore/nn/probability/zhusuan/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
- mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
- mindspore/ops/_op_impl/aicpu/parallel_concat.py +0 -42
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Cast_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/MatMul_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/Transpose_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -15
- mindspore/ops/composite/array_ops.py +0 -241
- mindspore/ops/composite/clip_ops.py +0 -134
- mindspore/ops/composite/random_ops.py +0 -426
- mindspore/ops/composite/vmap_ops.py +0 -38
- mindspore/parallel/nn/__init__.py +0 -42
- mindspore/parallel/nn/loss.py +0 -22
- mindspore/parallel/nn/moe.py +0 -21
- mindspore/parallel/nn/op_parallel_config.py +0 -22
- mindspore/parallel/nn/transformer.py +0 -31
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
--- a/mindspore/compression/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""
-MindSpore compression module.
-
-Note: This is an experimental interface that is subject to change and/or deletion.
-"""
--- a/mindspore/compression/common/constant.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""
-Note:
-    Constant module for compression. This is interface that is subject to change or deletion.
-"""
-from __future__ import absolute_import
-
-import enum
-import re
-from types import DynamicClassAttribute
-
-
-__all__ = ["QuantDtype"]
-
-
-@enum.unique
-class QuantDtype(enum.Enum):
-    """
-    An enum for quant datatype, contains `INT2` ~ `INT8`, `UINT2` ~ `UINT8`.
-    """
-    INT2 = "INT2"
-    INT3 = "INT3"
-    INT4 = "INT4"
-    INT5 = "INT5"
-    INT6 = "INT6"
-    INT7 = "INT7"
-    INT8 = "INT8"
-
-    UINT2 = "UINT2"
-    UINT3 = "UINT3"
-    UINT4 = "UINT4"
-    UINT5 = "UINT5"
-    UINT6 = "UINT6"
-    UINT7 = "UINT7"
-    UINT8 = "UINT8"
-
-    def __str__(self):
-        return f"{self.name}"
-
-    @staticmethod
-    def is_signed(dtype):
-        """
-        Get whether the quant datatype is signed.
-
-        Args:
-            dtype (QuantDtype): quant datatype.
-
-        Returns:
-            bool, whether the input quant datatype is signed.
-
-        Examples:
-            >>> quant_dtype = QuantDtype.INT8
-            >>> is_signed = QuantDtype.is_signed(quant_dtype)
-        """
-        return dtype in [QuantDtype.INT2, QuantDtype.INT3, QuantDtype.INT4, QuantDtype.INT5,
-                         QuantDtype.INT6, QuantDtype.INT7, QuantDtype.INT8]
-
-    @staticmethod
-    def switch_signed(dtype):
-        """
-        Switch the signed state of the input quant datatype.
-
-        Args:
-            dtype (QuantDtype): quant datatype.
-
-        Returns:
-            QuantDtype, quant datatype with opposite signed state as the input.
-
-        Examples:
-            >>> quant_dtype = QuantDtype.INT8
-            >>> quant_dtype = QuantDtype.switch_signed(quant_dtype)
-        """
-        type_map = {
-            QuantDtype.INT2: QuantDtype.UINT2,
-            QuantDtype.INT3: QuantDtype.UINT3,
-            QuantDtype.INT4: QuantDtype.UINT4,
-            QuantDtype.INT5: QuantDtype.UINT5,
-            QuantDtype.INT6: QuantDtype.UINT6,
-            QuantDtype.INT7: QuantDtype.UINT7,
-            QuantDtype.INT8: QuantDtype.UINT8,
-            QuantDtype.UINT2: QuantDtype.INT2,
-            QuantDtype.UINT3: QuantDtype.INT3,
-            QuantDtype.UINT4: QuantDtype.INT4,
-            QuantDtype.UINT5: QuantDtype.INT5,
-            QuantDtype.UINT6: QuantDtype.INT6,
-            QuantDtype.UINT7: QuantDtype.INT7,
-            QuantDtype.UINT8: QuantDtype.INT8
-        }
-        return type_map.get(dtype)
-
-    @DynamicClassAttribute
-    def _value(self):
-        """The value of the Enum member."""
-        return int(re.search(r"(\d+)", self._value_).group(1))
-
-    @DynamicClassAttribute
-    def num_bits(self):
-        """
-        Get the num bits of the QuantDtype member.
-
-        Returns:
-            int, the num bits of the QuantDtype member.
-
-        Examples:
-            >>> from mindspore.compression.common import QuantDtype
-            >>> quant_dtype = QuantDtype.INT8
-            >>> num_bits = quant_dtype.num_bits
-            >>> print(num_bits)
-            8
-        """
-        return self._value
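The deleted QuantDtype enum resolves each member's bit width lazily: `num_bits` is exposed through `types.DynamicClassAttribute`, so it behaves like Enum's built-in `name` and `value` properties, and the width is parsed out of the member's string value with a regex. A minimal standalone sketch of that pattern (the `BitDtype` name is a hypothetical stand-in, not MindSpore code):

import enum
import re
from types import DynamicClassAttribute


@enum.unique
class BitDtype(enum.Enum):
    # hypothetical stand-in for the removed QuantDtype
    INT8 = "INT8"
    UINT4 = "UINT4"

    @DynamicClassAttribute
    def num_bits(self):
        # parse the trailing digits of the string value: "INT8" -> 8
        return int(re.search(r"(\d+)", self._value_).group(1))


print(BitDtype.INT8.num_bits)   # 8
print(BitDtype.UINT4.num_bits)  # 4

DynamicClassAttribute keeps the attribute off the class itself (where Enum member lookup happens) while still making it available on each member, which is why the removed code used it instead of a plain property.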
--- a/mindspore/compression/export/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""
-Compression export module.
-
-Note: This is an experimental interface that is subject to change and/or deletion.
-"""
@@ -1,515 +0,0 @@
|
|
|
1
|
-
# Copyright 2020 Huawei Technologies Co., Ltd
|
|
2
|
-
#
|
|
3
|
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
-
# you may not use this file except in compliance with the License.
|
|
5
|
-
# You may obtain a copy of the License at
|
|
6
|
-
#
|
|
7
|
-
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
-
#
|
|
9
|
-
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
-
# See the License for the specific language governing permissions and
|
|
13
|
-
# limitations under the License.
|
|
14
|
-
# ============================================================================
|
|
15
|
-
"""
|
|
16
|
-
Note:
|
|
17
|
-
Export for quantization. This is interface that is subject to change or deletion.
|
|
18
|
-
"""
|
|
19
|
-
|
|
20
|
-
from __future__ import absolute_import
|
|
21
|
-
|
|
22
|
-
import copy
|
|
23
|
-
|
|
24
|
-
import numpy as np
|
|
25
|
-
|
|
26
|
-
from mindspore import log as logger
|
|
27
|
-
from mindspore import nn, ops
|
|
28
|
-
from mindspore._checkparam import Validator
|
|
29
|
-
from mindspore.common import Tensor
|
|
30
|
-
from mindspore.common import dtype as mstype
|
|
31
|
-
from mindspore.common.api import _cell_graph_executor as _executor
|
|
32
|
-
from mindspore.common.parameter import Parameter
|
|
33
|
-
from mindspore.nn import Cell
|
|
34
|
-
from mindspore.nn.layer import quant
|
|
35
|
-
from mindspore.ops import operations as P
|
|
36
|
-
from mindspore.ops import functional as F
|
|
37
|
-
from mindspore.ops.operations import _inner_ops as inner
|
|
38
|
-
from mindspore.compression.quant import quant_utils
|
|
39
|
-
from mindspore.compression.quant.qat import _AddFakeQuantInput, _AddFakeQuantAfterSubCell
|
|
40
|
-
|
|
41
|
-
__all__ = ["ExportToQuantInferNetwork"]
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
class QuantBlock(Cell):
|
|
45
|
-
r"""
|
|
46
|
-
A quant block of Conv/Dense, activation layer for Ascend deploy.
|
|
47
|
-
|
|
48
|
-
Calculate Conv or Dense in Int8, with Quant and DeQuant.
|
|
49
|
-
|
|
50
|
-
Notes:
|
|
51
|
-
This block is only for deploy, and not trainable.
|
|
52
|
-
|
|
53
|
-
Args:
|
|
54
|
-
in_channels (int): The number of channels in the input space.
|
|
55
|
-
out_channels (int): The number of channels in the output space.
|
|
56
|
-
weight_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable weight_init parameter. The dtype
|
|
57
|
-
is same as input x. The values of str refer to the function `initializer`. Default: 'normal'.
|
|
58
|
-
bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter. The dtype is
|
|
59
|
-
same as input x. The values of str refer to the function `initializer`. Default: 'zeros'.
|
|
60
|
-
has_bias (bool): Specifies whether the layer uses a bias vector. Default: True.
|
|
61
|
-
activation (str): The regularization function applied to the output of the layer, eg. 'relu'. Default: None.
|
|
62
|
-
batchnorm (bool): Specifies to used batchnorm or not. Default: None.
|
|
63
|
-
activation (string): Specifies activation type. The optional values are as following:
|
|
64
|
-
'softmax', 'logsoftmax', 'relu', 'relu6', 'tanh', 'gelu', 'sigmoid',
|
|
65
|
-
'prelu', 'leakyrelu', 'hswish', 'hsigmoid'. Default: None.
|
|
66
|
-
|
|
67
|
-
Inputs:
|
|
68
|
-
- **input** (Tensor) - Tensor of shape :math:`(N, in\_channels)`.
|
|
69
|
-
|
|
70
|
-
Outputs:
|
|
71
|
-
Tensor of shape :math:`(N, out\_channels)`.
|
|
72
|
-
"""
|
|
73
|
-
|
|
74
|
-
def __init__(self,
|
|
75
|
-
core_op,
|
|
76
|
-
weight,
|
|
77
|
-
quant_op,
|
|
78
|
-
dequant_op,
|
|
79
|
-
dequant_scale,
|
|
80
|
-
bias=None,
|
|
81
|
-
activation=None):
|
|
82
|
-
super(QuantBlock, self).__init__()
|
|
83
|
-
self.core_op = core_op
|
|
84
|
-
self.weight = weight
|
|
85
|
-
self.quant = quant_op
|
|
86
|
-
self.dequant = dequant_op
|
|
87
|
-
self.dequant_scale = dequant_scale
|
|
88
|
-
self.bias = bias
|
|
89
|
-
self.has_bias = bias is not None
|
|
90
|
-
self.activation = activation
|
|
91
|
-
self.has_act = activation is not None
|
|
92
|
-
self.bias_add = P.BiasAdd()
|
|
93
|
-
self.sub = P.Sub()
|
|
94
|
-
self.weight_offset = Parameter(np.zeros(1, dtype=np.int8), name='weight_offset')
|
|
95
|
-
|
|
96
|
-
def construct(self, x):
|
|
97
|
-
x = self.quant(x)
|
|
98
|
-
if self.has_bias:
|
|
99
|
-
weight = self.sub(self.weight, self.weight_offset)
|
|
100
|
-
x = self.core_op(x, weight)
|
|
101
|
-
x = self.bias_add(x, self.bias)
|
|
102
|
-
else:
|
|
103
|
-
x = self.core_op(x, self.weight)
|
|
104
|
-
x = self.dequant(x, self.dequant_scale)
|
|
105
|
-
x = F.cast(x, mstype.float32)
|
|
106
|
-
if self.has_act:
|
|
107
|
-
x = self.activation(x)
|
|
108
|
-
return x
|
|
109
|
-
|
|
110
|
-
def extend_repr(self):
|
|
111
|
-
s = f'quant={self.quant}, core_op={type(self.core_op)}, weight=shape[{self.weight.shape}]'
|
|
112
|
-
if self.has_bias:
|
|
113
|
-
s += f', bias=shape[{self.bias.shape}]'
|
|
114
|
-
if self.has_act:
|
|
115
|
-
s += f', activation={self.activation}'
|
|
116
|
-
s += f', dequant={self.dequant}'
|
|
117
|
-
return s
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
class QuantMindirBlock(Cell):
|
|
121
|
-
"""A quant binary block of Conv/Dense, activation layer for export MINDIR model.
|
|
122
|
-
|
|
123
|
-
Args:
|
|
124
|
-
core_op (Cell): The operation cell.
|
|
125
|
-
weight (Tensor): The weight of the cell.
|
|
126
|
-
bias (Tensor): The bias of the cell. Default: None.
|
|
127
|
-
activation (str): The regularization function applied to the output of the layer, eg. 'relu'. Default: None.
|
|
128
|
-
param_dict (dict): The information of the cell.
|
|
129
|
-
"""
|
|
130
|
-
|
|
131
|
-
def __init__(self,
|
|
132
|
-
core_op,
|
|
133
|
-
weight,
|
|
134
|
-
bias=None,
|
|
135
|
-
activation=None,
|
|
136
|
-
param_dict=None):
|
|
137
|
-
|
|
138
|
-
super(QuantMindirBlock, self).__init__()
|
|
139
|
-
self.core_op = core_op
|
|
140
|
-
if activation is not None:
|
|
141
|
-
self.core_op.add_prim_attr("activation_name", activation.__class__.__name__)
|
|
142
|
-
self.core_op.add_prim_attr("filter_maxq", Tensor(param_dict["filter_maxq"]))
|
|
143
|
-
self.core_op.add_prim_attr("filter_minq", Tensor(param_dict["filter_minq"]))
|
|
144
|
-
if param_dict["output_maxq"] is not None:
|
|
145
|
-
self.core_op.add_prim_attr("output_maxq", Tensor(param_dict["output_maxq"]))
|
|
146
|
-
self.core_op.add_prim_attr("output_minq", Tensor(param_dict["output_minq"]))
|
|
147
|
-
self.core_op.add_prim_attr("symmetric", Tensor(param_dict["symmetric"]))
|
|
148
|
-
if hasattr(core_op, 'pad_mode'):
|
|
149
|
-
self.core_op.add_prim_attr("pad_mode", core_op.pad_mode)
|
|
150
|
-
self.core_op.add_prim_attr("act_num_bits", Tensor(8))
|
|
151
|
-
self.core_op.add_prim_attr("weight_num_bits", Tensor(param_dict["weight_num_bits"]))
|
|
152
|
-
self.core_op.add_prim_attr("weight_narrow_range", Tensor(param_dict["weight_narrow_range"]))
|
|
153
|
-
if param_dict["input_narrow_range"] is not None:
|
|
154
|
-
self.core_op.add_prim_attr("input_narrow_range", Tensor(param_dict["input_narrow_range"]))
|
|
155
|
-
if param_dict["output_narrow_range"] is not None:
|
|
156
|
-
self.core_op.add_prim_attr("output_narrow_range", Tensor(param_dict["output_narrow_range"]))
|
|
157
|
-
if param_dict["input_maxq"] == 'None':
|
|
158
|
-
self.core_op.add_prim_attr("mean", Tensor(param_dict["mean"]))
|
|
159
|
-
self.core_op.add_prim_attr("std_dev", Tensor(param_dict["std_dev"]))
|
|
160
|
-
elif param_dict["input_maxq"] is not None:
|
|
161
|
-
self.core_op.add_prim_attr("input_maxq", Tensor(param_dict["input_maxq"]))
|
|
162
|
-
self.core_op.add_prim_attr("input_minq", Tensor(param_dict["input_minq"]))
|
|
163
|
-
|
|
164
|
-
self.weight = weight
|
|
165
|
-
self.bias = bias
|
|
166
|
-
self.has_bias = bias is not None
|
|
167
|
-
self.activation = activation
|
|
168
|
-
self.has_act = activation is not None
|
|
169
|
-
self.bias_add = P.BiasAdd()
|
|
170
|
-
|
|
171
|
-
def construct(self, x):
|
|
172
|
-
if self.has_bias:
|
|
173
|
-
x = self.core_op(x, self.weight)
|
|
174
|
-
x = self.bias_add(x, self.bias)
|
|
175
|
-
else:
|
|
176
|
-
x = self.core_op(x, self.weight)
|
|
177
|
-
if self.has_act:
|
|
178
|
-
x = self.activation(x)
|
|
179
|
-
return x
|
|
180
|
-
|
|
181
|
-
def extend_repr(self):
|
|
182
|
-
s = f'core_op={type(self.core_op)}, weight=shape[{self.weight.shape}]'
|
|
183
|
-
if self.has_bias:
|
|
184
|
-
s += f', bias=shape[{self.bias.shape}]'
|
|
185
|
-
if self.has_act:
|
|
186
|
-
s += f', activation={self.activation}'
|
|
187
|
-
return s
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
class ExportToQuantInferNetwork:
|
|
191
|
-
"""
|
|
192
|
-
Convert quantization aware network to infer network.
|
|
193
|
-
|
|
194
|
-
Args:
|
|
195
|
-
network (Cell): MindSpore quantization aware training network.
|
|
196
|
-
inputs (Tensor): Input tensors of the `quantization aware training network`.
|
|
197
|
-
mean (int, float): The mean of input data after preprocessing, used for quantizing the first layer of network.
|
|
198
|
-
Default: 127.5.
|
|
199
|
-
std_dev (int, float): The variance of input data after preprocessing, used for quantizing the first layer
|
|
200
|
-
of network. Default: 127.5.
|
|
201
|
-
is_mindir (bool): Whether export MINDIR format. Default: False.
|
|
202
|
-
|
|
203
|
-
Returns:
|
|
204
|
-
Cell, Infer network.
|
|
205
|
-
"""
|
|
206
|
-
|
|
207
|
-
def __init__(self, network, mean, std_dev, *inputs, is_mindir=False):
|
|
208
|
-
network = Validator.check_isinstance('network', network, (nn.Cell,))
|
|
209
|
-
self.data_type = mstype.int8
|
|
210
|
-
self.network = copy.deepcopy(network)
|
|
211
|
-
self.network_bk = copy.deepcopy(network)
|
|
212
|
-
self.get_inputs_table(inputs)
|
|
213
|
-
self.mean = mean
|
|
214
|
-
self.std_dev = std_dev
|
|
215
|
-
self.is_mindir = is_mindir
|
|
216
|
-
self.upcell = None
|
|
217
|
-
|
|
218
|
-
@staticmethod
|
|
219
|
-
def __get_dequant_scale(scale_a_in, scale_w):
|
|
220
|
-
"""Get dequant scale"""
|
|
221
|
-
scale_deq = scale_a_in * scale_w
|
|
222
|
-
|
|
223
|
-
# fuse parameter
|
|
224
|
-
# |--------|47:40|--------|39:32|--------|31:0|
|
|
225
|
-
# offset_w [8] shift_N [8] deq_scale [32]
|
|
226
|
-
float32_deq_scale = scale_deq.astype(np.float32)
|
|
227
|
-
uint32_deq_scale = np.frombuffer(float32_deq_scale, np.uint32)
|
|
228
|
-
scale_length = scale_deq.size # channel
|
|
229
|
-
dequant_param = np.zeros(scale_length, dtype=np.uint64)
|
|
230
|
-
for index in range(scale_length):
|
|
231
|
-
dequant_param[index] += uint32_deq_scale[index]
|
|
232
|
-
scale_deq = Tensor(dequant_param, mstype.uint64)
|
|
233
|
-
return scale_deq
|
|
234
|
-
|
|
235
|
-
def get_inputs_table(self, inputs):
|
|
236
|
-
"""Get the input quantization parameters of quantization cell for quant export."""
|
|
237
|
-
phase_name = 'export_quant'
|
|
238
|
-
graph_id, _ = _executor.compile(self.network, *inputs, phase=phase_name, do_convert=False)
|
|
239
|
-
self.quant_info_table = _executor.fetch_info_for_quant_export(graph_id)
|
|
240
|
-
|
|
241
|
-
def run(self):
|
|
242
|
-
"""Start to convert."""
|
|
243
|
-
logger.warning("The compression module is deprecated and may not be supported in later version, please use "
|
|
244
|
-
"MindSpore Golden Stick(https://gitee.com/mindspore/golden-stick) instead.")
|
|
245
|
-
self.network.update_cell_prefix()
|
|
246
|
-
network = self.network
|
|
247
|
-
if isinstance(network, _AddFakeQuantInput):
|
|
248
|
-
network = network.network
|
|
249
|
-
network = self._convert_quant2deploy(network)
|
|
250
|
-
return network
|
|
251
|
-
|
|
252
|
-
def _get_quant_block(self, cell_core, activation, fake_quant_a_out):
|
|
253
|
-
"""convert network's quant subcell to deploy subcell"""
|
|
254
|
-
scale_a_in, zp_a_in, scale_w, zp_w, param_dict = self.__get_quant_param(cell_core, fake_quant_a_out)
|
|
255
|
-
|
|
256
|
-
# Build the `Quant` `Dequant` op.
|
|
257
|
-
# Quant only support perlayer version. Need check here.
|
|
258
|
-
if float(scale_a_in) == 0:
|
|
259
|
-
raise ValueError("If `scale_a_in` is zero, will lead to zero error.")
|
|
260
|
-
quant_op = inner.Quant(1 / float(scale_a_in), float(zp_a_in))
|
|
261
|
-
scale_deq = self.__get_dequant_scale(scale_a_in, scale_w)
|
|
262
|
-
dequant_op = inner.Dequant()
|
|
263
|
-
|
|
264
|
-
if isinstance(activation, _AddFakeQuantAfterSubCell):
|
|
265
|
-
activation = activation.subcell
|
|
266
|
-
elif hasattr(activation, "get_origin"):
|
|
267
|
-
activation = activation.get_origin()
|
|
268
|
-
|
|
269
|
-
# get op
|
|
270
|
-
if isinstance(cell_core, quant.DenseQuant):
|
|
271
|
-
op_core = P.MatMul()
|
|
272
|
-
else:
|
|
273
|
-
op_core = cell_core.conv
|
|
274
|
-
|
|
275
|
-
# get the `weight` and `bias`
|
|
276
|
-
weight, bias, weight_b, bias_b = self.__get_weight_bias(cell_core, scale_a_in, scale_w, zp_w)
|
|
277
|
-
|
|
278
|
-
if self.is_mindir:
|
|
279
|
-
block = QuantMindirBlock(op_core, weight_b, bias_b, activation, param_dict)
|
|
280
|
-
else:
|
|
281
|
-
block = QuantBlock(op_core, weight, quant_op, dequant_op, scale_deq, bias, activation)
|
|
282
|
-
return block
|
|
283
|
-
|
|
284
|
-
def _get_input_quant_param(self, minq_name, np_type, param_dict):
|
|
285
|
-
"""get input quant parameter for quant block"""
|
|
286
|
-
fake_quant_a_in_prefix = minq_name[:-5]
|
|
287
|
-
cells = self.network_bk.cells_and_names()
|
|
288
|
-
for cell in cells:
|
|
289
|
-
if cell[0].endswith(fake_quant_a_in_prefix):
|
|
290
|
-
fake_quant_a_in = cell[1]
|
|
291
|
-
break
|
|
292
|
-
scale_a_in, zp_a_in, param_dict["input_maxq"], param_dict["input_minq"] = \
|
|
293
|
-
quant_utils.scale_zp_max_min_from_fake_quant_cell(fake_quant_a_in, np_type)
|
|
294
|
-
param_dict["input_narrow_range"] = fake_quant_a_in.narrow_range
|
|
295
|
-
return scale_a_in, zp_a_in
|
|
296
|
-
|
|
297
|
-
    def __get_quant_param(self, cell_core, fake_quant_a_out):
        """get parameter for quant block"""
        w_minq_name = cell_core.fake_quant_weight.minq.name
        w_maxq_name = cell_core.fake_quant_weight.maxq.name
        np_type = mstype.dtype_to_nptype(self.data_type)
        param_dict = dict()
        param_dict["filter_maxq"] = None
        param_dict["filter_minq"] = None
        param_dict["output_maxq"] = None
        param_dict["output_minq"] = None
        param_dict["input_maxq"] = None
        param_dict["input_minq"] = None
        param_dict["input_narrow_range"] = None
        param_dict["output_narrow_range"] = None
        param_dict["weight_narrow_range"] = cell_core.fake_quant_weight.narrow_range
        param_dict["mean"] = self.mean
        param_dict["std_dev"] = self.std_dev
        param_dict["symmetric"] = cell_core.fake_quant_weight.symmetric
        param_dict["weight_num_bits"] = cell_core.fake_quant_weight.num_bits

        scale_w, zp_w, param_dict["filter_maxq"], param_dict["filter_minq"] = \
            quant_utils.scale_zp_max_min_from_fake_quant_cell(cell_core.fake_quant_weight, np_type)
        if fake_quant_a_out is not None:
            _, _, param_dict["output_maxq"], param_dict["output_minq"] = \
                quant_utils.scale_zp_max_min_from_fake_quant_cell(fake_quant_a_out, np_type)
            param_dict["output_narrow_range"] = fake_quant_a_out.narrow_range

        info = self.quant_info_table.get(w_minq_name, None)
        if not info:
            info = self.quant_info_table.get(w_maxq_name, None)
        if info:
            _, minq_name = info
            if minq_name == 'input':
                scale_a_in, zp_a_in, param_dict["input_maxq"], param_dict["input_minq"] = \
                    (1 / self.std_dev), round(self.mean), 'None', 'None'
            else:
                scale_a_in, zp_a_in = self._get_input_quant_param(minq_name, np_type, param_dict)
        else:
            # skip quant layer
            scale_a_in, zp_a_in = 1.0, 0.0
        return scale_a_in, zp_a_in, scale_w, zp_w, param_dict

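`quant_utils.scale_zp_max_min_from_fake_quant_cell` is taken as given above. The conventional way a scale and zero point are derived from an observed `(min, max)` range, with `num_bits` and `narrow_range` matching the attributes read from the fake-quant cell, looks roughly like this (a sketch of the standard formula, not MindSpore's implementation):

```python
import numpy as np

def scale_zp_from_min_max(min_val, max_val, num_bits=8, narrow_range=False):
    """Affine mapping of [min_val, max_val] onto the unsigned integer grid."""
    qmin = 1 if narrow_range else 0      # narrow_range drops the lowest code
    qmax = 2 ** num_bits - 1
    min_val = min(min_val, 0.0)          # the representable range must contain 0
    max_val = max(max_val, 0.0)
    scale = (max_val - min_val) / (qmax - qmin) or 1.0  # guard a degenerate range
    zp = int(np.round(qmin - min_val / scale))
    return scale, zp

scale, zp = scale_zp_from_min_max(0.0, 6.0)  # e.g. a ReLU6-like activation range
```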
    def __get_weight_bias(self, cell_core, scale_a_in, scale_w, zp_w):
        """Get weight and bias for quantization"""
        np_type = mstype.dtype_to_nptype(self.data_type)
        weight = cell_core.weight.data.asnumpy()
        bias = None
        if isinstance(cell_core, (quant.DenseQuant, quant.Conv2dQuant)):
            if cell_core.has_bias:
                bias = cell_core.bias.data.asnumpy()
        elif isinstance(cell_core, (quant.Conv2dBnFoldQuant, quant.Conv2dBnFoldQuantOneConv)):
            weight, bias = quant_utils.fold_batchnorm(weight, cell_core)
        elif isinstance(cell_core, quant.Conv2dBnWithoutFoldQuant):
            weight, bias = quant_utils.without_fold_batchnorm(weight, cell_core)
        weight_b = weight
        bias_b = bias
        # apply the quant
        quant_min, quant_max = quant_utils.get_quant_min_max(np_type,
                                                             cell_core.fake_quant_weight.num_bits,
                                                             cell_core.fake_quant_weight.narrow_range)
        weight = quant_utils.weight2int(weight, scale_w, zp_w, quant_min, quant_max)
        if bias is not None:
            if 0 in scale_a_in:
                raise ValueError("`scale_a_in` contains zero, which will lead to a divide-by-zero error.")
            if 0 in scale_w:
                raise ValueError("`scale_w` contains zero, which will lead to a divide-by-zero error.")
            bias = Tensor(bias / scale_a_in / scale_w, mstype.int32)

        if isinstance(cell_core, quant.DenseQuant):
            weight = np.transpose(weight)
            weight_b = np.transpose(weight_b)

        weight_tensor = Tensor(weight, self.data_type)
        weight_b_tensor = Tensor(weight_b)
        if bias_b is not None:
            bias_b_tensor = Tensor(bias_b, mstype.float32)
            return weight_tensor, bias, weight_b_tensor, bias_b_tensor
        return weight_tensor, bias, weight_b_tensor, None

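Two numerical steps above are worth spelling out: the float weight is mapped to integers with the weight scale and zero point, and the float bias is re-expressed on the product grid `scale_a_in * scale_w` so that `int32` accumulation stays consistent. A minimal sketch of both (standard quantization arithmetic, not the `quant_utils` implementation):

```python
import numpy as np

def weight_to_int(w, scale_w, zp_w, qmin, qmax):
    """Per-layer (or broadcastable per-channel) weight quantization."""
    return np.clip(np.round(w / scale_w + zp_w), qmin, qmax).astype(np.int32)

w = np.array([[0.12, -0.40], [0.33, 0.05]], dtype=np.float32)
scale_w, zp_w = 0.01, 0.0
w_int = weight_to_int(w, scale_w, zp_w, -127, 127)

# Bias lives on the accumulator grid: bias_int = bias / (scale_a_in * scale_w),
# which is exactly the `bias / scale_a_in / scale_w` rescaling performed above.
bias = np.array([0.02, -0.01], dtype=np.float32)
scale_a_in = 0.05
bias_int = np.round(bias / (scale_a_in * scale_w)).astype(np.int32)
```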
    def _add_output_min_max_for_op(self, origin_op, fake_quant_cell):
        """add output quant info for quant op for export mindir."""
        if self.is_mindir:
            if isinstance(origin_op, ops.Primitive) and not hasattr(origin_op, 'output_minq'):
                np_type = mstype.dtype_to_nptype(self.data_type)
                _, _, maxq, minq = quant_utils.scale_zp_max_min_from_fake_quant_cell(fake_quant_cell, np_type)
                origin_op.add_prim_attr('output_maxq', Tensor(maxq))
                origin_op.add_prim_attr('output_minq', Tensor(minq))

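`add_prim_attr` is the standard way to attach a named attribute to a primitive instance, which is how the calibrated output range travels into the exported MindIR graph. A small standalone illustration (the values are made up; assumes a working MindSpore install):

```python
import mindspore as ms
from mindspore import ops

relu = ops.ReLU()
relu.add_prim_attr('output_maxq', ms.Tensor(6.0))  # calibrated output max
relu.add_prim_attr('output_minq', ms.Tensor(0.0))  # calibrated output min
print(relu.attrs['output_maxq'])                   # the attribute is now carried by the op
```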
    def _convert_subcell(self, network, change, name, subcell):
        """Convert a subcell to a deploy subcell."""
        if subcell is not None and hasattr(subcell, "fake_quant_weight"):
            new_subcell = self._get_quant_block(subcell, None, None)
            prefix = subcell.param_prefix
            new_subcell.update_parameters_name(prefix + '.')
            self.upcell = new_subcell
            network.insert_child_to_cell(name, new_subcell)
            change = True
        return network, change

    def _convert_conv(self, network, change, name, subcell):
        """Convert a subcell to a deploy subcell for conv."""
        cell_core = subcell.conv
        activation = subcell.activation
        fake_quant_act = None
        if hasattr(activation, 'fake_quant_act_before'):
            fake_quant_act = activation.fake_quant_act_before
        elif hasattr(activation, 'fake_quant_act'):
            fake_quant_act = activation.fake_quant_act
        if cell_core is not None and hasattr(cell_core, "fake_quant_weight"):
            new_subcell = self._get_quant_block(cell_core, activation, fake_quant_act)
            self.upcell = None
            prefix = subcell.param_prefix
            new_subcell.update_parameters_name(prefix + '.')
            network.insert_child_to_cell(name, new_subcell)
            change = True
        return network, change

    def _convert_dense(self, network, change, name, subcell):
        """Convert a subcell to a deploy subcell for dense."""
        cell_core = subcell.dense
        activation = subcell.activation
        fake_quant_act = None
        if hasattr(activation, 'fake_quant_act_before'):
            fake_quant_act = activation.fake_quant_act_before
        elif hasattr(activation, 'fake_quant_act'):
            fake_quant_act = activation.fake_quant_act
        if cell_core is not None and hasattr(cell_core, "fake_quant_weight"):
            new_subcell = self._get_quant_block(cell_core, activation, fake_quant_act)
            prefix = subcell.param_prefix
            new_subcell.update_parameters_name(prefix + '.')
            network.insert_child_to_cell(name, new_subcell)
            self.upcell = None
            change = True
        return network, change

    def _convert_act(self, subcell):
        """Convert a subcell to a deploy subcell for activation."""
        activation = subcell.get_origin()
        if isinstance(activation, nn.ReLU):
            self._add_output_min_max_for_op(activation.relu, subcell.fake_quant_act)
        elif isinstance(activation, nn.ReLU6):
            self._add_output_min_max_for_op(activation.relu6, subcell.fake_quant_act)
        if self.upcell:
            self._add_output_min_max_for_op(self.upcell.core_op, subcell.fake_quant_act)
        return activation

    def _convert_add(self, subcell):
        """Convert a subcell to a deploy subcell for add."""
        if isinstance(subcell.add, _AddFakeQuantAfterSubCell):
            add_op = subcell.add.subcell
            subcell.__delattr__("add")
            subcell.__setattr__("add", add_op)
        add_op = subcell.add
        self._add_output_min_max_for_op(add_op, subcell.fake_quant_act)
        subcell.__delattr__("fake_quant_act")
        subcell.__setattr__("fake_quant_act", P.identity())

    def _convert_observer(self, network, name, subcell):
        """Convert a subcell to a deploy subcell for FakeQuantWithMinMaxObserver."""
        if self.upcell:
            self._add_output_min_max_for_op(self.upcell.core_op, subcell)
        network.__delattr__(name)
        network.__setattr__(name, P.identity())

    def _convert_fake_quant_after_cell(self, network, name, subcell):
        """Convert a subcell to a deploy subcell for _AddFakeQuantAfterSubCell."""
        op = subcell.subcell
        self._add_output_min_max_for_op(op, subcell.fake_quant_act)
        network.__delattr__(name)
        network.__setattr__(name, op)

    def _convert_core_quant_subcell(self, network, change, name, subcell):
        """Convert a subcell to a deploy subcell for conv and dense."""
        is_core_subcell = True
        if isinstance(subcell, nn.Conv2dBnAct):
            network, change = self._convert_conv(network, change, name, subcell)
        elif isinstance(subcell, nn.DenseBnAct):
            network, change = self._convert_dense(network, change, name, subcell)
        elif isinstance(subcell, (quant.Conv2dBnFoldQuant, quant.Conv2dBnFoldQuantOneConv,
                                  quant.Conv2dBnWithoutFoldQuant, quant.Conv2dQuant, quant.DenseQuant)):
            network, change = self._convert_subcell(network, change, name, subcell)
        else:
            is_core_subcell = False
        return is_core_subcell, network, change

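A note on the design: the `isinstance` ladder above (and in the next method) is a manual type dispatch. When the set of handled cell types grows, the same routing is often written as a type-to-handler table; a framework-free sketch with hypothetical names, illustration only:

```python
# A generic type-to-handler dispatch table, mirroring the isinstance chain above.
class ConvLike: ...
class DenseLike: ...

def convert_conv(cell):
    return f"deploy({cell.__class__.__name__})"

def convert_dense(cell):
    return f"deploy({cell.__class__.__name__})"

HANDLERS = {ConvLike: convert_conv, DenseLike: convert_dense}

def dispatch(cell):
    """Return (handled, result); the table replaces an isinstance ladder."""
    for cell_type, handler in HANDLERS.items():
        if isinstance(cell, cell_type):
            return True, handler(cell)
    return False, None

print(dispatch(ConvLike()))   # (True, 'deploy(ConvLike)')
print(dispatch(object()))     # (False, None)
```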
    def _convert_other_quant_subcell(self, network, change, name, subcell):
        """Convert a subcell to a deploy subcell for cells other than conv and dense."""
        is_other_subcell = True
        if isinstance(subcell, nn.ActQuant) and hasattr(subcell, "get_origin"):
            activation = self._convert_act(subcell)
            network.insert_child_to_cell(name, activation)
            change = True
        elif isinstance(subcell, nn.TensorAddQuant):
            self._convert_add(subcell)
        elif isinstance(subcell, quant.FakeQuantWithMinMaxObserver):
            self._convert_observer(network, name, subcell)
        elif isinstance(subcell, _AddFakeQuantAfterSubCell):
            self._convert_fake_quant_after_cell(network, name, subcell)
            change = True
        else:
            is_other_subcell = False
        return is_other_subcell, network, change

    def _convert_quant2deploy(self, network):
        """Convert all of the network's quant subcells to deploy subcells."""
        cells = network.name_cells()
        change = False
        for name in cells:
            subcell = cells[name]
            if subcell == network:
                continue
            is_core_quant_subcell, network, change = self._convert_core_quant_subcell(network, change, name, subcell)
            is_other_quant_subcell, network, change = self._convert_other_quant_subcell(network, change, name, subcell)
            if not is_core_quant_subcell and not is_other_quant_subcell:
                self.upcell = None
                self._convert_quant2deploy(subcell)
        if isinstance(network, nn.SequentialCell) and change:
            network.cell_list = list(network.cells())
        return network
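Stepping back, `_convert_quant2deploy` is a depth-first rewrite over the cell tree: visit each named child, swap recognized quant cells for deploy blocks, and recurse into anything unrecognized. The same traversal pattern in a framework-free sketch (hypothetical `Node`/`convert` names, illustration only):

```python
class Node:
    """Stand-in for a Cell: a dict of named children."""
    def __init__(self, name, children=None):
        self.name = name
        self.children = dict(children or {})

def rewrite(node, convert):
    """Depth-first rewrite: replace a child when `convert` recognizes it,
    otherwise recurse into it (the shape of _convert_quant2deploy)."""
    for name, child in list(node.children.items()):
        replacement = convert(child)
        if replacement is not None:
            node.children[name] = replacement
        else:
            rewrite(child, convert)
    return node

net = Node("net", {"block": Node("block", {"quant_conv": Node("quant_conv")})})
rewrite(net, lambda c: Node(c.name + "_deploy") if "quant" in c.name else None)
print(net.children["block"].children["quant_conv"].name)  # quant_conv_deploy
```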