mindspore 2.0.0a0__cp37-cp37m-win_amd64.whl → 2.0.0rc1__cp37-cp37m-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +4 -2
- mindspore/_c_dataengine.cp37-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp37-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp37-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +102 -0
- mindspore/_checkparam.py +1066 -1001
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +4 -3
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -48
- mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
- mindspore/_extends/parse/__init__.py +5 -3
- mindspore/_extends/parse/namespace.py +16 -1
- mindspore/_extends/parse/parser.py +107 -22
- mindspore/_extends/parse/resources.py +0 -7
- mindspore/_extends/parse/standard_method.py +885 -413
- mindspore/amp.py +52 -57
- mindspore/boost/boost.py +2 -2
- mindspore/boost/boost_cell_wrapper.py +38 -20
- mindspore/boost/dim_reduce.py +3 -3
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +4 -6
- mindspore/common/_decorator.py +2 -0
- mindspore/common/_register_for_adapter.py +55 -0
- mindspore/common/_stub_tensor.py +201 -0
- mindspore/common/_utils.py +41 -7
- mindspore/common/api.py +215 -141
- mindspore/common/dtype.py +8 -1
- mindspore/common/dump.py +2 -2
- mindspore/common/initializer.py +4 -2
- mindspore/common/jit_config.py +17 -13
- mindspore/common/mutable.py +33 -13
- mindspore/common/parameter.py +23 -21
- mindspore/common/seed.py +8 -24
- mindspore/common/sparse_tensor.py +62 -41
- mindspore/common/tensor.py +852 -1154
- mindspore/communication/__init__.py +2 -2
- mindspore/communication/_comm_helper.py +11 -4
- mindspore/communication/management.py +22 -21
- mindspore/config/op_info.config +501 -1008
- mindspore/context.py +201 -23
- mindspore/dataset/__init__.py +6 -6
- mindspore/dataset/audio/__init__.py +7 -7
- mindspore/dataset/audio/transforms.py +670 -30
- mindspore/dataset/audio/utils.py +47 -4
- mindspore/dataset/audio/validators.py +223 -1
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/core/config.py +210 -14
- mindspore/dataset/core/validator_helpers.py +2 -2
- mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
- mindspore/dataset/debug/debug_hook.py +65 -0
- mindspore/dataset/debug/pre_defined_hook.py +67 -0
- mindspore/dataset/engine/__init__.py +7 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +322 -66
- mindspore/dataset/engine/datasets_audio.py +80 -76
- mindspore/dataset/engine/datasets_standard_format.py +51 -38
- mindspore/dataset/engine/datasets_text.py +232 -118
- mindspore/dataset/engine/datasets_user_defined.py +41 -17
- mindspore/dataset/engine/datasets_vision.py +746 -225
- mindspore/dataset/engine/graphdata.py +75 -10
- mindspore/dataset/engine/iterators.py +45 -5
- mindspore/dataset/engine/offload.py +48 -28
- mindspore/dataset/engine/validators.py +117 -8
- mindspore/dataset/text/__init__.py +6 -5
- mindspore/dataset/text/transforms.py +86 -3
- mindspore/dataset/text/utils.py +6 -4
- mindspore/dataset/text/validators.py +25 -0
- mindspore/dataset/transforms/__init__.py +3 -2
- mindspore/dataset/transforms/c_transforms.py +1 -1
- mindspore/dataset/transforms/transforms.py +2 -2
- mindspore/dataset/utils/__init__.py +2 -1
- mindspore/dataset/utils/line_reader.py +121 -0
- mindspore/dataset/vision/__init__.py +2 -3
- mindspore/dataset/vision/c_transforms.py +9 -9
- mindspore/dataset/vision/py_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +2 -0
- mindspore/dataset/vision/transforms.py +160 -161
- mindspore/dataset/vision/utils.py +3 -3
- mindspore/experimental/map_parameter.py +38 -26
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/callback/callback.h +9 -13
- mindspore/include/api/callback/ckpt_saver.h +2 -2
- mindspore/include/api/callback/loss_monitor.h +2 -2
- mindspore/include/api/callback/lr_scheduler.h +5 -5
- mindspore/include/api/callback/time_monitor.h +2 -2
- mindspore/include/api/callback/train_accuracy.h +4 -6
- mindspore/include/api/cfg.h +19 -6
- mindspore/include/api/context.h +44 -9
- mindspore/include/api/delegate.h +1 -1
- mindspore/include/api/metrics/accuracy.h +2 -2
- mindspore/include/api/metrics/metrics.h +4 -3
- mindspore/include/api/model.h +9 -4
- mindspore/include/api/model_parallel_runner.h +2 -2
- mindspore/include/api/net.h +12 -11
- mindspore/include/api/serialization.h +19 -3
- mindspore/include/api/types.h +3 -3
- mindspore/include/dataset/constants.h +7 -0
- mindspore/include/dataset/text.h +59 -0
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +1 -1
- mindspore/mindrecord/filereader.py +18 -0
- mindspore/mindrecord/filewriter.py +197 -34
- mindspore/mindrecord/shardreader.py +9 -0
- mindspore/mindrecord/shardwriter.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +3 -3
- mindspore/mindrecord/tools/cifar10_to_mr.py +3 -3
- mindspore/mindrecord/tools/csv_to_mr.py +3 -3
- mindspore/mindrecord/tools/imagenet_to_mr.py +16 -11
- mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
- mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/nn/__init__.py +0 -4
- mindspore/nn/cell.py +204 -132
- mindspore/nn/dynamic_lr.py +1 -1
- mindspore/nn/grad/cell_grad.py +7 -6
- mindspore/nn/layer/__init__.py +5 -4
- mindspore/nn/layer/activation.py +40 -89
- mindspore/nn/layer/basic.py +255 -624
- mindspore/nn/layer/channel_shuffle.py +7 -6
- mindspore/nn/layer/combined.py +1 -1
- mindspore/nn/layer/container.py +41 -4
- mindspore/nn/layer/conv.py +64 -28
- mindspore/nn/layer/dense.py +9 -8
- mindspore/nn/layer/embedding.py +27 -25
- mindspore/nn/layer/image.py +53 -46
- mindspore/nn/layer/math.py +97 -105
- mindspore/nn/layer/normalization.py +117 -86
- mindspore/nn/layer/padding.py +185 -95
- mindspore/nn/layer/pooling.py +817 -414
- mindspore/nn/layer/rnn_cells.py +10 -15
- mindspore/nn/layer/rnns.py +37 -38
- mindspore/nn/layer/thor_layer.py +11 -12
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +701 -0
- mindspore/nn/learning_rate_schedule.py +8 -8
- mindspore/nn/loss/__init__.py +5 -4
- mindspore/nn/loss/loss.py +334 -199
- mindspore/nn/optim/ada_grad.py +6 -6
- mindspore/nn/optim/adadelta.py +2 -3
- mindspore/nn/optim/adafactor.py +4 -5
- mindspore/nn/optim/adam.py +126 -62
- mindspore/nn/optim/adamax.py +3 -4
- mindspore/nn/optim/adasum.py +6 -6
- mindspore/nn/optim/asgd.py +2 -2
- mindspore/nn/optim/ftrl.py +67 -38
- mindspore/nn/optim/lamb.py +4 -5
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +43 -4
- mindspore/nn/optim/momentum.py +6 -5
- mindspore/nn/optim/optimizer.py +3 -1
- mindspore/nn/optim/proximal_ada_grad.py +2 -2
- mindspore/nn/optim/rmsprop.py +1 -1
- mindspore/nn/optim/rprop.py +8 -9
- mindspore/nn/optim/sgd.py +19 -13
- mindspore/nn/optim/thor.py +10 -15
- mindspore/nn/probability/__init__.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +4 -4
- mindspore/nn/probability/bijector/invert.py +1 -1
- mindspore/nn/probability/bijector/softplus.py +2 -2
- mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
- mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
- mindspore/nn/probability/distribution/_utils/utils.py +9 -15
- mindspore/nn/probability/distribution/bernoulli.py +3 -3
- mindspore/nn/probability/distribution/beta.py +1 -1
- mindspore/nn/probability/distribution/categorical.py +5 -7
- mindspore/nn/probability/distribution/cauchy.py +3 -3
- mindspore/nn/probability/distribution/distribution.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -2
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +3 -3
- mindspore/nn/probability/distribution/half_normal.py +15 -11
- mindspore/nn/probability/distribution/laplace.py +16 -13
- mindspore/nn/probability/distribution/logistic.py +2 -2
- mindspore/nn/probability/distribution/normal.py +1 -1
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/student_t.py +20 -15
- mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +3 -3
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +27 -10
- mindspore/nn/wrap/grad_reducer.py +2 -2
- mindspore/nn/wrap/loss_scale.py +40 -24
- mindspore/numpy/array_creations.py +33 -22
- mindspore/numpy/array_ops.py +35 -30
- mindspore/numpy/logic_ops.py +6 -27
- mindspore/numpy/math_ops.py +22 -19
- mindspore/numpy/utils.py +1 -1
- mindspore/numpy/utils_const.py +108 -58
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_constants.py +0 -6
- mindspore/ops/_grad/__init__.py +2 -1
- mindspore/ops/_grad/grad_array_ops.py +86 -117
- mindspore/ops/_grad/grad_base.py +23 -1
- mindspore/ops/_grad/grad_clip_ops.py +2 -3
- mindspore/ops/_grad/grad_comm_ops.py +34 -24
- mindspore/ops/_grad/grad_implementations.py +9 -45
- mindspore/ops/_grad/grad_inner_ops.py +47 -4
- mindspore/ops/_grad/grad_math_ops.py +142 -117
- mindspore/ops/_grad/grad_nn_ops.py +71 -165
- mindspore/ops/_grad/grad_sequence_ops.py +296 -0
- mindspore/ops/_grad/grad_sparse.py +7 -6
- mindspore/ops/_grad_experimental/__init__.py +1 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +150 -15
- mindspore/ops/_grad_experimental/grad_image_ops.py +16 -7
- mindspore/ops/_grad_experimental/grad_inner_ops.py +1 -22
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +4 -11
- mindspore/ops/_grad_experimental/grad_math_ops.py +210 -89
- mindspore/ops/_grad_experimental/grad_nn_ops.py +26 -22
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +49 -8
- mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +4 -4
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
- mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/correction_mul.py +2 -2
- mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -5
- mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -4
- mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +236 -4
- mindspore/ops/_op_impl/aicpu/abs.py +36 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_v1.py → adaptive_avg_pool_2d.py} +6 -5
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/add.py +43 -0
- mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
- mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
- mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -43
- mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
- mindspore/{compression/common/__init__.py → ops/_op_impl/aicpu/bessel_i0.py} +15 -8
- mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
- mindspore/ops/_op_impl/aicpu/conj.py +11 -0
- mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +0 -3
- mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_grad_v1.py → digamma.py} +7 -9
- mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
- mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
- mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
- mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
- mindspore/ops/_op_impl/aicpu/greater.py +41 -0
- mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
- mindspore/ops/_op_impl/aicpu/less.py +41 -0
- mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/lgamma.py} +16 -10
- mindspore/ops/_op_impl/aicpu/mirror_pad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mul.py +3 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
- mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
- mindspore/ops/_op_impl/aicpu/polar.py +32 -0
- mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
- mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
- mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
- mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
- mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
- mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
- mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
- mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
- mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
- mindspore/ops/_op_impl/aicpu/trans_data.py +1 -0
- mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
- mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
- mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
- mindspore/ops/_op_impl/cpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
- mindspore/ops/_op_impl/tbe/__init__.py +27 -611
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
- mindspore/ops/_op_impl/tbe/cast.py +0 -2
- mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +2 -2
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
- mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
- mindspore/ops/_op_impl/tbe/matmul_ds.py +2 -0
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/scatter_mul.py +2 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -2
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
- mindspore/ops/_register_for_op.py +1 -0
- mindspore/ops/_utils/__init__.py +1 -2
- mindspore/ops/_utils/utils.py +19 -40
- mindspore/ops/_vmap/vmap_array_ops.py +116 -38
- mindspore/ops/_vmap/vmap_base.py +16 -9
- mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
- mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +7 -5
- mindspore/ops/_vmap/vmap_image_ops.py +12 -5
- mindspore/ops/_vmap/vmap_math_ops.py +46 -5
- mindspore/ops/_vmap/vmap_nn_ops.py +15 -21
- mindspore/ops/_vmap/vmap_random_ops.py +1 -1
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +220 -106
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -23
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -17
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +39 -41
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +41 -43
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +51 -57
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +24 -25
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +53 -53
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +77 -85
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +37 -39
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +70 -72
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +17 -17
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
- mindspore/ops/bprop_mindir/generate_mindir.py +2 -0
- mindspore/ops/composite/__init__.py +7 -8
- mindspore/ops/composite/base.py +101 -47
- mindspore/ops/composite/math_ops.py +188 -158
- mindspore/ops/composite/multitype_ops/_compile_utils.py +415 -170
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +142 -87
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +2 -3
- mindspore/ops/composite/multitype_ops/getitem_impl.py +31 -3
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +21 -5
- mindspore/ops/composite/multitype_ops/not_in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
- mindspore/ops/composite/multitype_ops/setitem_impl.py +21 -3
- mindspore/ops/composite/multitype_ops/sub_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +35 -4
- mindspore/ops/function/__init__.py +152 -8
- mindspore/ops/function/array_func.py +2555 -674
- mindspore/ops/function/clip_func.py +209 -13
- mindspore/ops/function/debug_func.py +2 -2
- mindspore/ops/function/grad/__init__.py +2 -1
- mindspore/ops/function/grad/grad_func.py +147 -62
- mindspore/ops/function/image_func.py +54 -38
- mindspore/ops/function/linalg_func.py +167 -16
- mindspore/ops/function/math_func.py +4849 -1492
- mindspore/ops/function/nn_func.py +2573 -988
- mindspore/ops/function/other_func.py +115 -0
- mindspore/ops/function/parameter_func.py +3 -3
- mindspore/ops/function/random_func.py +790 -73
- mindspore/ops/function/sparse_func.py +98 -78
- mindspore/ops/function/sparse_unary_func.py +54 -53
- mindspore/ops/function/spectral_func.py +27 -24
- mindspore/ops/function/vmap_func.py +22 -2
- mindspore/ops/functional.py +97 -37
- mindspore/ops/op_info_register.py +70 -28
- mindspore/ops/operations/__init__.py +47 -14
- mindspore/ops/operations/_csr_ops.py +7 -7
- mindspore/ops/operations/_embedding_cache_ops.py +5 -5
- mindspore/ops/operations/_grad_ops.py +276 -187
- mindspore/ops/operations/_inner_ops.py +319 -113
- mindspore/ops/operations/_ms_kernel.py +10 -8
- mindspore/ops/operations/_ocr_ops.py +9 -9
- mindspore/ops/operations/_opaque_predicate_registry.py +4 -0
- mindspore/ops/operations/_quant_ops.py +137 -102
- mindspore/ops/operations/_rl_inner_ops.py +121 -60
- mindspore/ops/operations/_scalar_ops.py +466 -0
- mindspore/ops/operations/_sequence_ops.py +1004 -2
- mindspore/ops/operations/_tensor_array.py +10 -11
- mindspore/ops/operations/_thor_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +801 -466
- mindspore/ops/operations/comm_ops.py +51 -49
- mindspore/ops/operations/control_ops.py +2 -2
- mindspore/ops/operations/custom_ops.py +123 -44
- mindspore/ops/operations/debug_ops.py +24 -24
- mindspore/ops/operations/image_ops.py +240 -153
- mindspore/ops/operations/inner_ops.py +34 -50
- mindspore/ops/operations/linalg_ops.py +31 -9
- mindspore/ops/operations/math_ops.py +988 -757
- mindspore/ops/operations/nn_ops.py +965 -819
- mindspore/ops/operations/other_ops.py +51 -40
- mindspore/ops/operations/random_ops.py +204 -122
- mindspore/ops/operations/rl_ops.py +8 -9
- mindspore/ops/operations/sparse_ops.py +254 -93
- mindspore/ops/operations/spectral_ops.py +35 -3
- mindspore/ops/primitive.py +111 -9
- mindspore/parallel/_auto_parallel_context.py +189 -83
- mindspore/parallel/_offload_context.py +185 -0
- mindspore/parallel/_parallel_serialization.py +99 -7
- mindspore/parallel/_ps_context.py +9 -5
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +7 -1
- mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
- mindspore/{nn/transformer → parallel/_transformer}/layers.py +6 -37
- mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
- mindspore/{nn/transformer → parallel/_transformer}/moe.py +20 -16
- mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
- mindspore/{nn/transformer → parallel/_transformer}/transformer.py +48 -111
- mindspore/parallel/_utils.py +1 -2
- mindspore/parallel/algo_parameter_config.py +1 -1
- mindspore/parallel/checkpoint_transform.py +37 -34
- mindspore/parallel/shard.py +17 -18
- mindspore/profiler/common/validator/validate_path.py +2 -2
- mindspore/profiler/envprofiling.py +69 -47
- mindspore/profiler/parser/ascend_timeline_generator.py +49 -42
- mindspore/profiler/parser/base_timeline_generator.py +49 -56
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +98 -78
- mindspore/profiler/parser/hwts_log_parser.py +1 -1
- mindspore/profiler/parser/integrator.py +15 -14
- mindspore/profiler/parser/minddata_analyzer.py +2 -2
- mindspore/profiler/parser/msadvisor_analyzer.py +12 -25
- mindspore/profiler/parser/msadvisor_parser.py +2 -4
- mindspore/profiler/parser/optime_parser.py +17 -18
- mindspore/profiler/parser/profiler_info.py +2 -1
- mindspore/profiler/profiling.py +218 -186
- mindspore/rewrite/__init__.py +3 -1
- mindspore/rewrite/api/node.py +1 -114
- mindspore/rewrite/api/node_type.py +3 -0
- mindspore/rewrite/api/pattern_engine.py +31 -1
- mindspore/rewrite/api/scoped_value.py +4 -4
- mindspore/rewrite/api/symbol_tree.py +3 -78
- mindspore/rewrite/api/tree_node_helper.py +1 -1
- mindspore/rewrite/ast_creator_register.py +1 -0
- mindspore/rewrite/ast_helpers/__init__.py +2 -2
- mindspore/rewrite/ast_helpers/ast_creator.py +1 -2
- mindspore/rewrite/ast_helpers/ast_finder.py +65 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +11 -3
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +18 -2
- mindspore/rewrite/namespace.py +0 -2
- mindspore/rewrite/node.py +157 -11
- mindspore/rewrite/parsers/assign_parser.py +231 -53
- mindspore/rewrite/parsers/class_def_parser.py +187 -109
- mindspore/rewrite/parsers/for_parser.py +24 -14
- mindspore/rewrite/parsers/function_def_parser.py +21 -4
- mindspore/rewrite/parsers/if_parser.py +6 -2
- mindspore/rewrite/sparsify/__init__.py +0 -0
- mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
- mindspore/rewrite/sparsify/sparsify.py +109 -0
- mindspore/rewrite/sparsify/utils.py +173 -0
- mindspore/rewrite/symbol_tree.py +256 -133
- mindspore/rewrite/symbol_tree_builder.py +38 -1
- mindspore/run_check/_check_version.py +69 -63
- mindspore/run_check/run_check.py +2 -1
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +1 -1
- mindspore/train/_utils.py +28 -5
- mindspore/train/amp.py +273 -102
- mindspore/train/callback/_backup_and_restore.py +5 -5
- mindspore/train/callback/_callback.py +2 -2
- mindspore/train/callback/_checkpoint.py +3 -3
- mindspore/train/callback/_early_stop.py +3 -3
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +29 -31
- mindspore/train/callback/_loss_monitor.py +3 -3
- mindspore/train/callback/_on_request_exit.py +3 -3
- mindspore/train/callback/_reduce_lr_on_plateau.py +4 -4
- mindspore/train/callback/_summary_collector.py +23 -16
- mindspore/train/callback/_time_monitor.py +3 -3
- mindspore/train/checkpoint_pb2.py +68 -8
- mindspore/train/data_sink.py +15 -3
- mindspore/train/dataset_helper.py +10 -15
- mindspore/train/loss_scale_manager.py +8 -11
- mindspore/train/metrics/__init__.py +1 -1
- mindspore/train/metrics/bleu_score.py +1 -1
- mindspore/train/metrics/confusion_matrix.py +1 -1
- mindspore/train/metrics/cosine_similarity.py +1 -1
- mindspore/train/metrics/dice.py +2 -2
- mindspore/train/metrics/fbeta.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +4 -3
- mindspore/train/metrics/mean_surface_distance.py +2 -2
- mindspore/train/metrics/occlusion_sensitivity.py +1 -1
- mindspore/train/metrics/perplexity.py +1 -1
- mindspore/train/metrics/precision.py +1 -1
- mindspore/train/metrics/recall.py +1 -1
- mindspore/train/metrics/roc.py +2 -2
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +116 -37
- mindspore/train/model.py +45 -28
- mindspore/train/serialization.py +295 -188
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +43 -13
- mindspore/train/train_thor/convert_utils.py +2 -2
- mindspore/train/train_thor/dataset_helper.py +3 -3
- mindspore/turbojpeg.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +3 -2
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +610 -541
- mindspore/compression/__init__.py +0 -19
- mindspore/compression/common/constant.py +0 -124
- mindspore/compression/export/__init__.py +0 -19
- mindspore/compression/export/quant_export.py +0 -515
- mindspore/compression/quant/__init__.py +0 -28
- mindspore/compression/quant/qat.py +0 -634
- mindspore/compression/quant/quant_utils.py +0 -462
- mindspore/compression/quant/quantizer.py +0 -68
- mindspore/nn/layer/quant.py +0 -1868
- mindspore/nn/layer/rnn_utils.py +0 -90
- mindspore/nn/probability/dpn/__init__.py +0 -22
- mindspore/nn/probability/dpn/vae/__init__.py +0 -25
- mindspore/nn/probability/dpn/vae/cvae.py +0 -140
- mindspore/nn/probability/dpn/vae/vae.py +0 -124
- mindspore/nn/probability/infer/__init__.py +0 -22
- mindspore/nn/probability/infer/variational/elbo.py +0 -70
- mindspore/nn/probability/infer/variational/svi.py +0 -84
- mindspore/nn/probability/toolbox/__init__.py +0 -22
- mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
- mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -364
- mindspore/nn/probability/transforms/__init__.py +0 -22
- mindspore/nn/probability/transforms/transform_bnn.py +0 -262
- mindspore/nn/probability/zhusuan/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
- mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
- mindspore/ops/_op_impl/aicpu/parallel_concat.py +0 -42
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Cast_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/MatMul_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/Transpose_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -15
- mindspore/ops/composite/array_ops.py +0 -241
- mindspore/ops/composite/clip_ops.py +0 -134
- mindspore/ops/composite/random_ops.py +0 -426
- mindspore/ops/composite/vmap_ops.py +0 -38
- mindspore/parallel/nn/__init__.py +0 -42
- mindspore/parallel/nn/loss.py +0 -22
- mindspore/parallel/nn/moe.py +0 -21
- mindspore/parallel/nn/op_parallel_config.py +0 -22
- mindspore/parallel/nn/transformer.py +0 -31
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
mindspore/context.py
CHANGED
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
|
|
1
|
+
# Copyright 2020-2022 Huawei Technologies Co., Ltd
|
|
2
2
|
#
|
|
3
3
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
4
|
# you may not use this file except in compliance with the License.
|
|
@@ -27,15 +27,17 @@ from types import FunctionType
|
|
|
27
27
|
|
|
28
28
|
from mindspore import log as logger
|
|
29
29
|
from mindspore._c_expression import MSContext, ms_ctx_param
|
|
30
|
-
from mindspore
|
|
30
|
+
from mindspore import _checkparam as Validator
|
|
31
|
+
from mindspore._checkparam import args_type_check
|
|
31
32
|
from mindspore.parallel._auto_parallel_context import _set_auto_parallel_context, _get_auto_parallel_context, \
|
|
32
33
|
_reset_auto_parallel_context
|
|
33
34
|
from mindspore.parallel._ps_context import _set_ps_context, _get_ps_context, _reset_ps_context, \
|
|
34
35
|
_need_reset_device_target_for_ps
|
|
36
|
+
from mindspore.parallel._offload_context import _set_offload_context, _get_offload_context
|
|
35
37
|
|
|
36
38
|
__all__ = ['GRAPH_MODE', 'PYNATIVE_MODE', 'set_context', 'get_context', 'set_auto_parallel_context',
|
|
37
39
|
'get_auto_parallel_context', 'reset_auto_parallel_context', 'ParallelMode', 'set_ps_context',
|
|
38
|
-
'get_ps_context', 'reset_ps_context']
|
|
40
|
+
'get_ps_context', 'reset_ps_context', 'set_offload_context', 'get_offload_context']
|
|
39
41
|
|
|
40
42
|
GRAPH_MODE = 0
|
|
41
43
|
PYNATIVE_MODE = 1
|
|
@@ -231,6 +233,51 @@ class _Context:
|
|
|
231
233
|
else:
|
|
232
234
|
self.set_param(ms_ctx_param.memory_offload, False)
|
|
233
235
|
|
|
236
|
+
def set_deterministic(self, deterministic):
|
|
237
|
+
"""
|
|
238
|
+
Enable model run in deterministic, and support the values "ON" and "OFF".
|
|
239
|
+
|
|
240
|
+
Args:
|
|
241
|
+
deterministic (str): "ON", "OFF"
|
|
242
|
+
"""
|
|
243
|
+
deterministic_options = ["ON", "OFF"]
|
|
244
|
+
if deterministic not in deterministic_options:
|
|
245
|
+
raise ValueError(f"For 'context.set_context', the argument 'deterministic' must be one of "
|
|
246
|
+
f"{deterministic_options}, but got {deterministic}.")
|
|
247
|
+
self.set_param(ms_ctx_param.deterministic, deterministic)
|
|
248
|
+
|
|
249
|
+
def set_ascend_config(self, ascend_config):
|
|
250
|
+
"""
|
|
251
|
+
Enable ascend config.
|
|
252
|
+
|
|
253
|
+
Args:
|
|
254
|
+
ascend_config (dict): 'precision_mode'
|
|
255
|
+
- precision_mode (str): "force_fp16", "allow_fp32_to_fp16", "allow_mix_precision",
|
|
256
|
+
"must_keep_origin_dtype", "force_fp32", "force_lowerprecision", "allow_fp32_to_bf16",
|
|
257
|
+
"allow_fp32_to_lowprecision", "allow_mix_precision_fp16" and "allow_mix_precision_bf16".
|
|
258
|
+
"""
|
|
259
|
+
|
|
260
|
+
ascend_cfgs = {'precision_mode': ["force_fp16", "allow_fp32_to_fp16", "allow_mix_precision",
|
|
261
|
+
"must_keep_origin_dtype", "force_fp32", "force_lowerprecision",
|
|
262
|
+
"allow_fp32_to_bf16", "allow_fp32_to_lowprecision",
|
|
263
|
+
"allow_mix_precision_fp16", "allow_mix_precision_bf16"],
|
|
264
|
+
'jit_compile': [True, False]}
|
|
265
|
+
for ascend_key in ascend_config:
|
|
266
|
+
if ascend_key not in ascend_cfgs:
|
|
267
|
+
raise ValueError(f"For 'context.set_context', the key of argument 'ascend_config' must be one of "
|
|
268
|
+
f"{ascend_cfgs}, but got {ascend_key}.")
|
|
269
|
+
supported_modes = ascend_cfgs.get(ascend_key)
|
|
270
|
+
if ascend_config[ascend_key] not in supported_modes:
|
|
271
|
+
raise ValueError(f"For 'ascend_config', the value of argument {ascend_key} must be one of "
|
|
272
|
+
f"{supported_modes}, but got {ascend_config[ascend_key]}.")
|
|
273
|
+
if ascend_key == 'precision_mode':
|
|
274
|
+
self.set_param(ms_ctx_param.precision_mode, ascend_config[ascend_key])
|
|
275
|
+
if ascend_key == 'jit_compile':
|
|
276
|
+
if ascend_config[ascend_key] is True:
|
|
277
|
+
self.set_param(ms_ctx_param.jit_compile, "1")
|
|
278
|
+
else:
|
|
279
|
+
self.set_param(ms_ctx_param.jit_compile, "0")
|
|
280
|
+
|
|
234
281
|
def set_backend_policy(self, policy):
|
|
235
282
|
success = self._context_handle.set_backend_policy(policy)
|
|
236
283
|
if not success:
|
|
@@ -368,8 +415,8 @@ class _Context:
|
|
|
368
415
|
json.load(f)
|
|
369
416
|
except (TypeError, ValueError) as exo:
|
|
370
417
|
raise ValueError(str(exo) + "\nFor 'context.set_context', open or load the 'env_config_path' file {} "
|
|
371
|
-
|
|
372
|
-
|
|
418
|
+
"failed, please check whether 'env_config_path' is json file and correct, "
|
|
419
|
+
"or may not have permission to read it.".format(env_config_path))
|
|
373
420
|
self.set_param(ms_ctx_param.env_config_path, env_config_path)
|
|
374
421
|
|
|
375
422
|
def set_runtime_num_threads(self, runtime_num_threads):
|
|
@@ -383,6 +430,7 @@ class _Context:
|
|
|
383
430
|
if op_timeout < 0:
|
|
384
431
|
raise ValueError("The num of op exe timeout must bigger than or equal to 0.")
|
|
385
432
|
self.set_param(ms_ctx_param.op_timeout, op_timeout)
|
|
433
|
+
|
|
386
434
|
def set_inter_op_parallel_num(self, inter_op_parallel_num):
|
|
387
435
|
"""Check and set inter_op_parallel_num."""
|
|
388
436
|
if inter_op_parallel_num < 0:
|
|
@@ -406,7 +454,9 @@ class _Context:
|
|
|
406
454
|
'runtime_num_threads': set_runtime_num_threads,
|
|
407
455
|
'memory_optimize_level': set_memory_optimize_level,
|
|
408
456
|
'op_timeout': set_op_timeout,
|
|
409
|
-
'memory_offload': set_memory_offload
|
|
457
|
+
'memory_offload': set_memory_offload,
|
|
458
|
+
'deterministic': set_deterministic,
|
|
459
|
+
'ascend_config': set_ascend_config
|
|
410
460
|
}
|
|
411
461
|
|
|
412
462
|
@property
|
|
@@ -447,7 +497,6 @@ class _Context:
|
|
|
447
497
|
self._support_binary = support
|
|
448
498
|
|
|
449
499
|
|
|
450
|
-
|
|
451
500
|
def _context():
|
|
452
501
|
"""
|
|
453
502
|
Get the global _context, if context is not created, create a new one.
|
|
@@ -476,7 +525,7 @@ def _context():
|
|
|
476
525
|
auto_parallel_search_mode=str, search_mode=str, parameter_broadcast=bool, strategy_ckpt_load_file=str,
|
|
477
526
|
strategy_ckpt_save_file=str, full_batch=bool, enable_parallel_optimizer=bool, enable_alltoall=bool,
|
|
478
527
|
all_reduce_fusion_config=list, pipeline_stages=int, grad_accumulation_step=int,
|
|
479
|
-
parallel_optimizer_config=dict, comm_fusion=dict)
|
|
528
|
+
parallel_optimizer_config=dict, comm_fusion=dict, strategy_ckpt_config=dict)
|
|
480
529
|
def set_auto_parallel_context(**kwargs):
|
|
481
530
|
r"""
|
|
482
531
|
Set auto parallel context, only data parallel supported on CPU.
|
|
@@ -504,6 +553,7 @@ def set_auto_parallel_context(**kwargs):
|
|
|
504
553
|
enable_alltoall grad_accumulation_step
|
|
505
554
|
\ auto_parallel_search_mode
|
|
506
555
|
\ comm_fusion
|
|
556
|
+
\ strategy_ckpt_config
|
|
507
557
|
=========================== ===========================
|
|
508
558
|
|
|
509
559
|
Args:
|
|
@@ -542,15 +592,18 @@ def set_auto_parallel_context(**kwargs):
|
|
|
542
592
|
data_parallel mode, all parameters are broadcast except for the parameter whose attribute
|
|
543
593
|
layerwise_parallel is True. Hybrid_parallel, semi_auto_parallel and auto_parallel mode, the
|
|
544
594
|
segmented parameters do not participate in broadcasting. Default: False.
|
|
545
|
-
strategy_ckpt_load_file (str): The path to load parallel strategy checkpoint.
|
|
546
|
-
|
|
595
|
+
strategy_ckpt_load_file (str): The path to load parallel strategy checkpoint. The parameter is not to be
|
|
596
|
+
recommended currently, it is better using 'strategy_ckpt_config' to replace it. Default: ''
|
|
597
|
+
strategy_ckpt_save_file (str): The path to save parallel strategy checkpoint. The parameter is not to be
|
|
598
|
+
recommended currently, it is better using 'strategy_ckpt_config' to replace it. Default: ''
|
|
547
599
|
full_batch (bool): If you load whole batch datasets in auto_parallel mode, this parameter
|
|
548
600
|
should be set as True. Default: False. The interface is not to be recommended currently,
|
|
549
601
|
it is better using 'dataset_strategy' to replace it.
|
|
550
602
|
dataset_strategy (Union[str, tuple]): Dataset sharding strategy. Default: "data_parallel".
|
|
551
603
|
dataset_strategy="data_parallel" is equal to full_batch=False, dataset_strategy="full_batch" is
|
|
552
|
-
equal to full_batch=True. For dataset load into net by model
|
|
553
|
-
ds_stra ((1, 8), (1, 8)), it requires using
|
|
604
|
+
equal to full_batch=True. For execution mode is 'GRAPH_MODE' and dataset load into net by model
|
|
605
|
+
parallel strategy likes ds_stra ((1, 8), (1, 8)), it requires using
|
|
606
|
+
set_auto_parallel_context(dataset_strategy=ds_stra).
|
|
554
607
|
enable_parallel_optimizer (bool): This is a developing feature, which shards the weight update computation for
|
|
555
608
|
data parallel training in the benefit of time and memory saving. Currently, auto and semi auto
|
|
556
609
|
parallel mode support all optimizers in both Ascend and GPU. Data parallel mode only supports
|
|
@@ -591,6 +644,9 @@ def set_auto_parallel_context(**kwargs):
|
|
|
591
644
|
communication fusion config has two keys: "mode" and "config".
|
|
592
645
|
It supports following communication fusion types and configurations:
|
|
593
646
|
|
|
647
|
+
- openstate: Whether turn on the communication fusion or not. If `openstate` is `True`, turn on
|
|
648
|
+
the communication fusion, otherwise, turn off the communication fusion. Default: `True`.
|
|
649
|
+
|
|
594
650
|
- allreduce: If communication fusion type is `allreduce`. The `mode` contains: `auto`, `size`
|
|
595
651
|
and `index`. In `auto` mode, AllReduce fusion is configured by gradients size and the default
|
|
596
652
|
fusion threshold is `64` MB. In 'size' mode, AllReduce fusion is configured by gradients size
|
|
@@ -605,6 +661,24 @@ def set_auto_parallel_context(**kwargs):
|
|
|
605
661
|
- reducescatter: If communication fusion type is `reducescatter`. The `mode` contains: `auto`
|
|
606
662
|
and `size`. Config is same as `allgather`.
|
|
607
663
|
|
|
664
|
+
strategy_ckpt_config (dict): A dict contains the configurations for setting the parallel strategy file. This
|
|
665
|
+
interface contains the functions of parameter `strategy_ckpt_load_file` and
|
|
666
|
+
`strategy_ckpt_save_file`, it is recommonded to use this parameter to replace those two
|
|
667
|
+
parameters.
|
|
668
|
+
It contains following configurations:
|
|
669
|
+
|
|
670
|
+
- load_file (str): The path to load parallel strategy checkpoint. If the file name extension is
|
|
671
|
+
`.json`, the file is loaded in JSON format. Otherwise, the file is loaded in ProtoBuf
|
|
672
|
+
format.
|
|
673
|
+
Default: ''
|
|
674
|
+
|
|
675
|
+
- save_file (str): The path to save parallel strategy checkpoint. If the file name extension is
|
|
676
|
+
`.json`, the file is saved in JSON format. Otherwise, the file is saved in ProtoBuf format.
|
|
677
|
+
Default: ''
|
|
678
|
+
|
|
679
|
+
- only_trainable_params (bool): Only save/load the strategy information for trainable parameter.
|
|
680
|
+
Default: True.
|
|
681
|
+
|
|
608
682
|
Raises:
|
|
609
683
|
ValueError: If input key is not attribute in auto parallel context.
|
|
610
684
|
|
|
@@ -629,6 +703,8 @@ def set_auto_parallel_context(**kwargs):
|
|
|
629
703
|
>>> ms.set_auto_parallel_context(parallel_optimizer_config=parallel_config, enable_parallel_optimizer=True)
|
|
630
704
|
>>> config = {"allreduce": {"mode": "size", "config": 32}, "allgather": {"mode": "size", "config": 32}}
|
|
631
705
|
>>> ms.set_auto_parallel_context(comm_fusion=config)
|
|
706
|
+
>>> stra_ckpt_dict = {"load_file": "./stra0.ckpt", "save_file": "./stra1.ckpt", "only_trainable_params": False}
|
|
707
|
+
>>> ms.set_auto_parallel_context(strategy_ckpt_config=stra_ckpt_dict)
|
|
632
708
|
"""
|
|
633
709
|
_set_auto_parallel_context(**kwargs)
|
|
634
710
|
|
|
@@ -677,6 +753,46 @@ def reset_auto_parallel_context():
|
|
|
677
753
|
_reset_auto_parallel_context()
|
|
678
754
|
|
|
679
755
|
|
|
756
|
+
@args_type_check(offload_config=dict)
|
|
757
|
+
def set_offload_context(offload_config):
|
|
758
|
+
r"""
|
|
759
|
+
Set offload context.
|
|
760
|
+
Some configurations are offload specific, see the below table for details:
|
|
761
|
+
|
|
762
|
+
Args:
|
|
763
|
+
offload_config (dict): A dict contains the keys and values for setting the offload context
|
|
764
|
+
configure.It supports the following keys.
|
|
765
|
+
enable_offload (bool): The flag of whether enabling offload. Default: False.
|
|
766
|
+
offload_param (str): The param for offload destination, cpu or disk.
|
|
767
|
+
offload_path (str): The path of offload.
|
|
768
|
+
offload_checkpoint (str): The checkpoint for offload destination, cpu or disk.
|
|
769
|
+
offload_ddr_size (int): The ddr size for offload.
|
|
770
|
+
offload_disk_size (int): The disk size for offload.
|
|
771
|
+
enable_aio (bool): The flag of whether enabling aio. Default: True.
|
|
772
|
+
aio_block_size (int): The size of aio block.
|
|
773
|
+
aio_queue_depth (int): The depth of aio queue.
|
|
774
|
+
enable_pinned_mem (bool): The flag of whether enabling pinned memory.
|
|
775
|
+
|
|
776
|
+
Raises:
|
|
777
|
+
ValueError: If input key is not attribute in auto parallel context.
|
|
778
|
+
|
|
779
|
+
Examples:
|
|
780
|
+
>>> from mindspore import context
|
|
781
|
+
>>> context.set_offload_context(offload_config={"offload_param"="cpu"})
|
|
782
|
+
"""
|
|
783
|
+
_set_offload_context(offload_config)
|
|
784
|
+
|
|
785
|
+
|
|
786
|
+
def get_offload_context():
|
|
787
|
+
"""
|
|
788
|
+
Get offload context.
|
|
789
|
+
Examples:
|
|
790
|
+
>>> from mindspore import context
|
|
791
|
+
>>> offload_config = context.get_offload_context()
|
|
792
|
+
"""
|
|
793
|
+
return _get_offload_context()
|
|
794
|
+
|
|
795
|
+
|
|
680
796
|
def _check_target_specific_cfgs(device, arg_key):
|
|
681
797
|
"""Checking whether a config is suitable for a specified device"""
|
|
682
798
|
device_cfgs = {
|
|
@@ -688,7 +804,8 @@ def _check_target_specific_cfgs(device, arg_key):
|
|
|
688
804
|
'auto_tune_mode': ['Ascend'],
|
|
689
805
|
'max_device_memory': ['Ascend', 'GPU'],
|
|
690
806
|
'mempool_block_size': ['GPU', 'Ascend'],
|
|
691
|
-
'disable_format_transform': ['GPU']
|
|
807
|
+
'disable_format_transform': ['GPU'],
|
|
808
|
+
'ascend_config': ['Ascend']
|
|
692
809
|
}
|
|
693
810
|
# configs not in map device_cfgs are supposed to be suitable for all devices
|
|
694
811
|
if arg_key not in device_cfgs:
|
|
@@ -702,8 +819,7 @@ def _check_target_specific_cfgs(device, arg_key):
|
|
|
702
819
|
return False
|
|
703
820
|
|
|
704
821
|
|
|
705
|
-
@
|
|
706
|
-
@args_type_check(mode=int, precompile_only=bool, device_target=str, device_id=int, save_graphs=bool,
|
|
822
|
+
@args_type_check(mode=int, precompile_only=bool, device_target=str, device_id=int, save_graphs=(bool, int),
|
|
707
823
|
save_graphs_path=str, enable_dump=bool, auto_tune_mode=str,
|
|
708
824
|
save_dump_path=str, enable_reduce_precision=bool, variable_memory_max_size=str,
|
|
709
825
|
enable_auto_mixed_precision=bool, inter_op_parallel_num=int,
|
|
@@ -711,7 +827,7 @@ def _check_target_specific_cfgs(device, arg_key):
|
|
|
711
827
|
max_device_memory=str, print_file_path=str, max_call_depth=int, env_config_path=str,
|
|
712
828
|
graph_kernel_flags=str, save_compile_cache=bool, runtime_num_threads=int, load_compile_cache=bool,
|
|
713
829
|
grad_for_scalar=bool, pynative_synchronize=bool, mempool_block_size=str, disable_format_transform=bool,
|
|
714
|
-
op_timeout=int,
|
|
830
|
+
op_timeout=int, deterministic=str, ascend_config=dict)
|
|
715
831
|
def set_context(**kwargs):
|
|
716
832
|
"""
|
|
717
833
|
Set context for running environment.
|
|
@@ -749,6 +865,8 @@ def set_context(**kwargs):
|
|
|
749
865
|
| +------------------------------+----------------------------+
|
|
750
866
|
| | save_dump_path | Ascend |
|
|
751
867
|
| +------------------------------+----------------------------+
|
|
868
|
+
| | deterministic | Ascend |
|
|
869
|
+
| +------------------------------+----------------------------+
|
|
752
870
|
| | print_file_path | Ascend |
|
|
753
871
|
| +------------------------------+----------------------------+
|
|
754
872
|
| | env_config_path | CPU/GPU/Ascend |
|
|
@@ -790,6 +908,8 @@ def set_context(**kwargs):
|
|
|
790
908
|
| | memory_optimize_level | CPU/GPU/Ascend |
|
|
791
909
|
| +------------------------------+----------------------------+
|
|
792
910
|
| | memory_offload | GPU/Ascend |
|
|
911
|
+
| +------------------------------+----------------------------+
|
|
912
|
+
| | ascend_config | Ascend |
|
|
793
913
|
+-------------------------+------------------------------+----------------------------+
|
|
794
914
|
|
|
795
915
|
Args:
|
|
@@ -806,14 +926,31 @@ def set_context(**kwargs):
|
|
|
806
926
|
of the available memory of the device and mempool_block_size.
|
|
807
927
|
op_timeout (int): Set the maximum duration of executing an operator in seconds.
|
|
808
928
|
If the execution time exceeds this value, system will terminate the task. 0 means endless wait.
|
|
809
|
-
Default:
|
|
810
|
-
save_graphs (bool): Whether to save graphs. Default:
|
|
811
|
-
|
|
812
|
-
|
|
929
|
+
Default: 1900.
|
|
930
|
+
save_graphs (bool or int): Whether to save intermediate compilation graphs. Default: 0.
|
|
931
|
+
Available values are:
|
|
932
|
+
|
|
933
|
+
- False or 0: disable saving of intermediate compilation graphs.
|
|
934
|
+
- 1: some intermediate files will be generated during graph compliation.
|
|
935
|
+
- True or 2: Generate more ir files related to backend process.
|
|
936
|
+
- 3: Generate visualization computing graphs and detailed frontend ir graphs.
|
|
937
|
+
|
|
938
|
+
When the `save_graphs` attribute is set as True, 1, 2 or 3, attribute of `save_graphs_path` is used
|
|
939
|
+
to set the intermediate compilation graph storage path. By default, the graphs are saved in the current
|
|
940
|
+
directory.
|
|
813
941
|
save_graphs_path (str): Path to save graphs. Default: ".".
|
|
814
942
|
If the specified directory does not exist, the system will automatically create the directory.
|
|
815
943
|
During distributed training, graphs will be saved to the directory of
|
|
816
944
|
`save_graphs_path/rank_${rank_id}/`. `rank_id` is the ID of the current device in the cluster.
|
|
945
|
+
deterministic (str): Whether to enable op run in deterministic mode. The value must be in the
|
|
946
|
+
range of ['ON', 'OFF'], and the default value is 'OFF'.
|
|
947
|
+
|
|
948
|
+
- "ON": Enable operator deterministic running mode.
|
|
949
|
+
- "OFF": Disable operator deterministic running mode.
|
|
950
|
+
|
|
951
|
+
When deterministic mode is on, model ops will be deterministic in Ascend. This means that if op run multiple
|
|
952
|
+
times with the same inputs on the same hardware, it will have the exact same outputs each time. This is
|
|
953
|
+
useful for debugging models.
|
|
817
954
|
enable_dump (bool): This parameters is deprecated, and will be deleted in the next version.
|
|
818
955
|
save_dump_path (str): This parameters is deprecated, and will be deleted in the next version.
|
|
819
956
|
print_file_path (str): The path of saving print data. If this parameter is set, print data is saved to
|
|
@@ -864,7 +1001,7 @@ def set_context(**kwargs):
|
|
|
864
1001
|
If enable_graph_kernel is set to True, acceleration can be enabled.
|
|
865
1002
|
For details of graph kernel fusion, please check
|
|
866
1003
|
`Enabling Graph Kernel Fusion
|
|
867
|
-
<https://www.mindspore.cn/tutorials/experts/en/r2.0
|
|
1004
|
+
<https://www.mindspore.cn/tutorials/experts/en/r2.0/debug/graph_fusion_engine.html>`_.
|
|
868
1005
|
graph_kernel_flags (str):
|
|
869
1006
|
Optimization options of graph kernel fusion, and the priority is higher when it conflicts
|
|
870
1007
|
with enable_graph_kernel. Only for experienced users.
|
|
@@ -899,7 +1036,7 @@ def set_context(**kwargs):
|
|
|
899
1036
|
|
|
900
1037
|
For more information about the enable operator tuning tool settings, please check
|
|
901
1038
|
`Enable the operator optimization tool
|
|
902
|
-
<https://www.mindspore.cn/tutorials/experts/en/r2.0
|
|
1039
|
+
<https://www.mindspore.cn/tutorials/experts/en/r2.0/debug/auto_tune.html>`_.
|
|
903
1040
|
check_bprop (bool): Whether to check back propagation nodes. The checking ensures that the shape and dtype
|
|
904
1041
|
of back propagation node outputs is the same as input parameters. Default: False.
|
|
905
1042
|
max_call_depth (int): Specify the maximum depth of function call. Must be positive integer. Default: 1000.
|
|
@@ -917,7 +1054,7 @@ def set_context(**kwargs):
|
|
|
917
1054
|
the compile cache is loaded. Note that only limited automatic detection for the changes of
|
|
918
1055
|
python scripts is supported by now, which means that there is a correctness risk. Default: False.
|
|
919
1056
|
This is an experimental prototype that is subject to change and/or deletion.
|
|
920
|
-
compile_cache_path (str): Path to save the cache
|
|
1057
|
+
compile_cache_path (str): Path to save the compile cache. Default: ".".
|
|
921
1058
|
If the specified directory does not exist, the system will automatically create the directory.
|
|
922
1059
|
The cache will be saved to the directory of `compile_cache_path/rank_${rank_id}/`. The `rank_id` is
|
|
923
1060
|
the ID of the current device in the cluster.
|
|
@@ -946,6 +1083,36 @@ def set_context(**kwargs):
             when the environment variable "GRAPH_OP_RUN=1" is not set; This parameter does not take effect when
             memory_optimize_level is set 'O1'.
             - OFF: Turn off the memory Offload function.
+        ascend_config (dict): Set the parameters specific to the Ascend hardware platform. It is not set by default.
+            Currently, only `precision_mode` and `jit_compile` are supported on the Ascend910B hardware platform.
+            The default values of `precision_mode` and `jit_compile` are experimental and may change
+            in the future.
+
+            - precision_mode (str): Mixed precision mode setting. On the Ascend910B hardware platform, the
+              default value for training networks is taken from CANN, and the default value for inference
+              networks is force_fp16. The value range is as follows:
+
+              - force_fp16: When the operator supports both float16 and float32, select float16 directly.
+              - allow_fp32_to_fp16: When the operator does not support the float32 data type, directly reduce
+                the precision to float16.
+              - allow_mix_precision: Automatic mixed precision. Across the whole network, the precision of some
+                operators is automatically reduced to float16 or bfloat16 according to the built-in
+                optimization strategy.
+              - must_keep_origin_dtype: Keep the precision of the original graph.
+              - force_fp32: When the input of a matrix computation operator is float16 and the output supports
+                both float16 and float32, force the output to float32.
+              - force_lowerprecision: When the operator supports both float16 or bfloat16 and float32, select
+                float16 or bfloat16 directly.
+              - allow_fp32_to_bf16: When the operator does not support the float32 data type, directly reduce
+                the precision to bfloat16.
+              - allow_fp32_to_lowprecision: When the operator does not support the float32 data type, directly
+                reduce the precision to float16 or bfloat16.
+              - allow_mix_precision_fp16: Automatic mixed precision. Across the whole network, the precision of
+                some operators is automatically reduced to float16 according to the built-in optimization strategy.
+              - allow_mix_precision_bf16: Automatic mixed precision. Across the whole network, the precision of
+                some operators is automatically reduced to bfloat16 according to the built-in optimization strategy.
+
+            - jit_compile (bool): Whether to select online compilation. The default value is based on CANN.
 
         Raises:
             ValueError: If input key is not an attribute in context.
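A usage sketch tying this to the validation added further below: Ascend-specific keys must be nested inside `ascend_config` rather than passed as top-level arguments.

>>> import mindspore as ms
>>> # Correct: nest Ascend-only options under ascend_config.
>>> ms.set_context(ascend_config={"precision_mode": "allow_mix_precision"})
>>> # Incorrect: ms.set_context(precision_mode=...) raises
>>> # ValueError: Please set 'precision_mode' through parameter ascend_config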
@@ -977,6 +1144,8 @@ def set_context(**kwargs):
         >>> ms.set_context(disable_format_transform=True)
         >>> ms.set_context(memory_optimize_level='O0')
         >>> ms.set_context(memory_offload='ON')
+        >>> ms.set_context(deterministic='ON')
+        >>> ms.set_context(ascend_config={"precision_mode": "force_fp16", "jit_compile": True})
         """
     ctx = _context()
     # set device target first
@@ -992,6 +1161,15 @@ def set_context(**kwargs):
             logger.warning(f"For 'context.set_context', '{key}' parameter is deprecated. "
                            "For details, please see the interface parameter API comments")
             continue
+        if key in ('precision_mode', 'jit_compile'):
+            raise ValueError(f"Please set '{key}' through parameter ascend_config")
+        if key == 'save_graphs':
+            if value is True:
+                value = 2
+            if value is False:
+                value = 0
+            if value > 3:
+                raise ValueError(f"value for save_graphs should be 0-3 but got '{value}'")
         if not _check_target_specific_cfgs(device, key):
             continue
         if hasattr(ctx, key):
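In practice, the normalization above means boolean `save_graphs` values are mapped onto the new integer levels; a short sketch:

>>> import mindspore as ms
>>> ms.set_context(save_graphs=True)   # normalized to level 2
>>> ms.set_context(save_graphs=False)  # normalized to level 0
>>> ms.set_context(save_graphs=3)      # highest accepted level
>>> # Any value above 3 raises ValueError.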
mindspore/dataset/__init__.py
CHANGED
@@ -21,7 +21,7 @@ Besides, this module provides APIs to sample data while loading.
 
 We can enable cache in most of the datasets with the key argument 'cache'. Please note that cache is not supported
 on the Windows platform yet. Do not use it while loading and processing data on Windows. More introductions and limitations
-can refer `Single-Node Tensor Cache <https://www.mindspore.cn/tutorials/experts/en/r2.0
+can be found in `Single-Node Tensor Cache <https://www.mindspore.cn/tutorials/experts/en/r2.0/dataset/cache.html>`_ .
 
 Common imported modules in corresponding API examples are as follows:
@@ -55,11 +55,11 @@ The specific steps are as follows:
 - Dataset operation: The user uses the dataset object methods `.shuffle` / `.filter` / `.skip` / `.split` /
   `.take` / ... to further shuffle, filter, skip, and obtain the maximum number of samples of datasets;
 - Dataset sample transform operation: The user can add data transform operations
-  ( `vision transform <https://mindspore.cn/docs/en/r2.0
+  ( `vision transform <https://mindspore.cn/docs/en/r2.0/api_python/mindspore.\
   dataset.transforms.html#module-mindspore.dataset.vision>`_ ,
-  `NLP transform <https://mindspore.cn/docs/en/r2.0
+  `NLP transform <https://mindspore.cn/docs/en/r2.0/api_python/mindspore.\
   dataset.transforms.html#module-mindspore.dataset.text>`_ ,
-  `audio transform <https://mindspore.cn/docs/en/r2.0
+  `audio transform <https://mindspore.cn/docs/en/r2.0/api_python/mindspore.\
   dataset.transforms.html#module-mindspore.dataset.audio>`_ ) to the map
   operation to perform transformations. During data preprocessing, multiple map operations can be defined to
   perform different transform operations on different fields. The data transform operation can also be a
@@ -70,8 +70,7 @@ The specific steps are as follows:
   iterator, which can output the preprocessed data cyclically.
 
 The data processing pipeline example is as follows. Please refer to
-`datasets_example.py <https://gitee.com/mindspore/mindspore/tree/r2.0
-/datasets_example.py>`_
+`datasets_example.py <https://gitee.com/mindspore/mindspore/tree/r2.0/docs/api/api_python_en/datasets_example.py>`_
 for a complete example.
 
 .. code-block::
@@ -122,6 +121,7 @@ from .engine.datasets import *
 from .engine.graphdata import GraphData, SamplingStrategy, OutputFormat
 from .engine.samplers import *
 from .engine.serializer_deserializer import compare, deserialize, serialize, show
+from .utils.line_reader import LineReader
 
 __all__ = []
 __all__.extend(engine.__all__)
mindspore/dataset/audio/__init__.py
CHANGED
@@ -40,7 +40,7 @@ Descriptions of common data processing terms are as follows:
 The data transform operation can be executed in the data processing pipeline or in the eager mode:
 
 - Pipeline mode is generally used to process datasets. For examples, please refer to
-  `introduction to data processing pipeline <https://www.mindspore.cn/docs/en/r2.0
+  `introduction to data processing pipeline <https://www.mindspore.cn/docs/en/r2.0/api_python/
   mindspore.dataset.html#introduction-to-data-processing-pipeline>`_ .
 - Eager mode is generally used for scattered samples. Examples of audio preprocessing are as follows:
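A minimal eager-mode sketch, assuming NumPy input and the `Gain` transform imported in the next hunk (the waveform shape and gain value are illustrative):

>>> import numpy as np
>>> import mindspore.dataset.audio as audio
>>> # In eager mode a transform object is called directly on one sample.
>>> waveform = np.random.random([16, 2]).astype(np.float32)
>>> amplified = audio.Gain(gain_db=2.0)(waveform)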
@@ -65,12 +65,12 @@ from __future__ import absolute_import
 from mindspore.dataset.audio import transforms
 from mindspore.dataset.audio import utils
 from mindspore.dataset.audio.transforms import AllpassBiquad, AmplitudeToDB, Angle, BandBiquad, \
-    BandpassBiquad, BandrejectBiquad, BassBiquad, Biquad, \
-    Phaser, PhaseVocoder, Resample, RiaaBiquad, SlidingWindowCmn, SpectralCentroid, Spectrogram, \
-    TimeStretch, TrebleBiquad, Vad, Vol
+    BandpassBiquad, BandrejectBiquad, BassBiquad, Biquad, ComplexNorm, ComputeDeltas, Contrast, DBToAmplitude, \
+    DCShift, DeemphBiquad, DetectPitchFrequency, Dither, EqualizerBiquad, Fade, Filtfilt, Flanger, FrequencyMasking, \
+    Gain, GriffinLim, HighpassBiquad, InverseMelScale, InverseSpectrogram, LFCC, LFilter, LowpassBiquad, Magphase, \
+    MaskAlongAxis, MaskAlongAxisIID, MelScale, MelSpectrogram, MFCC, MuLawDecoding, MuLawEncoding, Overdrive, \
+    Phaser, PhaseVocoder, PitchShift, Resample, RiaaBiquad, SlidingWindowCmn, SpectralCentroid, Spectrogram, \
+    TimeMasking, TimeStretch, TrebleBiquad, Vad, Vol
 from mindspore.dataset.audio.utils import BorderType, DensityFunction, FadeShape, GainType, Interpolation, \
     MelType, Modulation, NormMode, NormType, ResampleMethod, ScaleType, WindowType, create_dct, linear_fbanks, \
     melscale_fbanks