mindspore 2.0.0a0__cp37-none-any.whl → 2.0.0rc1__cp37-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +9064 -0
- mindspore/__init__.py +4 -2
- mindspore/_akg/akg/composite/build_module.py +11 -0
- mindspore/_akg/akg/config/repository_cuda.json +11 -0
- mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -3
- mindspore/_c_dataengine.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/_check_jit_forbidden_api.py +102 -0
- mindspore/_checkparam.py +1066 -1001
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +4 -3
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -48
- mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
- mindspore/_extends/parse/__init__.py +5 -3
- mindspore/_extends/parse/namespace.py +16 -1
- mindspore/_extends/parse/parser.py +107 -22
- mindspore/_extends/parse/resources.py +0 -7
- mindspore/_extends/parse/standard_method.py +885 -413
- mindspore/_mindspore_offline_debug.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/amp.py +52 -57
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/boost.py +2 -2
- mindspore/boost/boost_cell_wrapper.py +38 -20
- mindspore/boost/dim_reduce.py +3 -3
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +4 -6
- mindspore/common/_decorator.py +2 -0
- mindspore/common/_register_for_adapter.py +55 -0
- mindspore/common/_stub_tensor.py +201 -0
- mindspore/common/_utils.py +41 -7
- mindspore/common/api.py +215 -141
- mindspore/common/dtype.py +8 -1
- mindspore/common/dump.py +2 -2
- mindspore/common/initializer.py +4 -2
- mindspore/common/jit_config.py +17 -13
- mindspore/common/mutable.py +33 -13
- mindspore/common/parameter.py +23 -21
- mindspore/common/seed.py +8 -24
- mindspore/common/sparse_tensor.py +62 -41
- mindspore/common/tensor.py +852 -1154
- mindspore/communication/__init__.py +2 -2
- mindspore/communication/_comm_helper.py +11 -4
- mindspore/communication/management.py +22 -21
- mindspore/config/op_info.config +501 -1008
- mindspore/config/super_bar_config.json +512 -0
- mindspore/context.py +201 -23
- mindspore/dataset/__init__.py +6 -6
- mindspore/dataset/audio/__init__.py +7 -7
- mindspore/dataset/audio/transforms.py +670 -30
- mindspore/dataset/audio/utils.py +47 -4
- mindspore/dataset/audio/validators.py +223 -1
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/core/config.py +210 -14
- mindspore/dataset/core/validator_helpers.py +2 -2
- mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
- mindspore/dataset/debug/debug_hook.py +65 -0
- mindspore/dataset/debug/pre_defined_hook.py +67 -0
- mindspore/dataset/engine/__init__.py +7 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +322 -66
- mindspore/dataset/engine/datasets_audio.py +80 -76
- mindspore/dataset/engine/datasets_standard_format.py +51 -38
- mindspore/dataset/engine/datasets_text.py +232 -118
- mindspore/dataset/engine/datasets_user_defined.py +41 -17
- mindspore/dataset/engine/datasets_vision.py +746 -225
- mindspore/dataset/engine/graphdata.py +75 -10
- mindspore/dataset/engine/iterators.py +45 -5
- mindspore/dataset/engine/offload.py +48 -28
- mindspore/dataset/engine/validators.py +117 -8
- mindspore/dataset/text/__init__.py +6 -5
- mindspore/dataset/text/transforms.py +86 -3
- mindspore/dataset/text/utils.py +6 -4
- mindspore/dataset/text/validators.py +25 -0
- mindspore/dataset/transforms/__init__.py +3 -2
- mindspore/dataset/transforms/c_transforms.py +1 -1
- mindspore/dataset/transforms/transforms.py +2 -2
- mindspore/dataset/utils/__init__.py +2 -1
- mindspore/dataset/utils/line_reader.py +121 -0
- mindspore/dataset/vision/__init__.py +2 -3
- mindspore/dataset/vision/c_transforms.py +9 -9
- mindspore/dataset/vision/py_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +2 -0
- mindspore/dataset/vision/transforms.py +160 -161
- mindspore/dataset/vision/utils.py +3 -3
- mindspore/experimental/map_parameter.py +38 -26
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/callback/callback.h +9 -13
- mindspore/include/api/callback/ckpt_saver.h +2 -2
- mindspore/include/api/callback/loss_monitor.h +2 -2
- mindspore/include/api/callback/lr_scheduler.h +5 -5
- mindspore/include/api/callback/time_monitor.h +2 -2
- mindspore/include/api/callback/train_accuracy.h +4 -6
- mindspore/include/api/cfg.h +19 -6
- mindspore/include/api/context.h +44 -9
- mindspore/include/api/delegate.h +1 -1
- mindspore/include/api/metrics/accuracy.h +2 -2
- mindspore/include/api/metrics/metrics.h +4 -3
- mindspore/include/api/model.h +9 -4
- mindspore/include/api/model_parallel_runner.h +2 -2
- mindspore/include/api/net.h +12 -11
- mindspore/include/api/serialization.h +19 -3
- mindspore/include/api/types.h +3 -3
- mindspore/include/dataset/constants.h +7 -0
- mindspore/include/dataset/text.h +59 -0
- mindspore/include/mindapi/base/type_id.h +1 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libicudata.so.69 +0 -0
- mindspore/lib/libicui18n.so.69 +0 -0
- mindspore/lib/libicuuc.so.69 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/cpu/libakg.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
- mindspore/lib/plugin/{libmindspore_ascend.so → libmindspore_ascend.so.2} +0 -0
- mindspore/log.py +1 -1
- mindspore/mindrecord/filereader.py +18 -0
- mindspore/mindrecord/filewriter.py +197 -34
- mindspore/mindrecord/shardreader.py +9 -0
- mindspore/mindrecord/shardwriter.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +3 -3
- mindspore/mindrecord/tools/cifar10_to_mr.py +3 -3
- mindspore/mindrecord/tools/csv_to_mr.py +3 -3
- mindspore/mindrecord/tools/imagenet_to_mr.py +16 -11
- mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
- mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
- mindspore/nn/__init__.py +0 -4
- mindspore/nn/cell.py +204 -132
- mindspore/nn/dynamic_lr.py +1 -1
- mindspore/nn/grad/cell_grad.py +7 -6
- mindspore/nn/layer/__init__.py +5 -4
- mindspore/nn/layer/activation.py +40 -89
- mindspore/nn/layer/basic.py +255 -624
- mindspore/nn/layer/channel_shuffle.py +7 -6
- mindspore/nn/layer/combined.py +1 -1
- mindspore/nn/layer/container.py +41 -4
- mindspore/nn/layer/conv.py +64 -28
- mindspore/nn/layer/dense.py +9 -8
- mindspore/nn/layer/embedding.py +27 -25
- mindspore/nn/layer/image.py +53 -46
- mindspore/nn/layer/math.py +97 -105
- mindspore/nn/layer/normalization.py +117 -86
- mindspore/nn/layer/padding.py +185 -95
- mindspore/nn/layer/pooling.py +817 -414
- mindspore/nn/layer/rnn_cells.py +10 -15
- mindspore/nn/layer/rnns.py +37 -38
- mindspore/nn/layer/thor_layer.py +11 -12
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +701 -0
- mindspore/nn/learning_rate_schedule.py +8 -8
- mindspore/nn/loss/__init__.py +5 -4
- mindspore/nn/loss/loss.py +334 -199
- mindspore/nn/optim/ada_grad.py +6 -6
- mindspore/nn/optim/adadelta.py +2 -3
- mindspore/nn/optim/adafactor.py +4 -5
- mindspore/nn/optim/adam.py +126 -62
- mindspore/nn/optim/adamax.py +3 -4
- mindspore/nn/optim/adasum.py +6 -6
- mindspore/nn/optim/asgd.py +2 -2
- mindspore/nn/optim/ftrl.py +67 -38
- mindspore/nn/optim/lamb.py +4 -5
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +43 -4
- mindspore/nn/optim/momentum.py +6 -5
- mindspore/nn/optim/optimizer.py +3 -1
- mindspore/nn/optim/proximal_ada_grad.py +2 -2
- mindspore/nn/optim/rmsprop.py +1 -1
- mindspore/nn/optim/rprop.py +8 -9
- mindspore/nn/optim/sgd.py +19 -13
- mindspore/nn/optim/thor.py +10 -15
- mindspore/nn/probability/__init__.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +4 -4
- mindspore/nn/probability/bijector/invert.py +1 -1
- mindspore/nn/probability/bijector/softplus.py +2 -2
- mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
- mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
- mindspore/nn/probability/distribution/_utils/utils.py +9 -15
- mindspore/nn/probability/distribution/bernoulli.py +3 -3
- mindspore/nn/probability/distribution/beta.py +1 -1
- mindspore/nn/probability/distribution/categorical.py +5 -7
- mindspore/nn/probability/distribution/cauchy.py +3 -3
- mindspore/nn/probability/distribution/distribution.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -2
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +3 -3
- mindspore/nn/probability/distribution/half_normal.py +15 -11
- mindspore/nn/probability/distribution/laplace.py +16 -13
- mindspore/nn/probability/distribution/logistic.py +2 -2
- mindspore/nn/probability/distribution/normal.py +1 -1
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/student_t.py +20 -15
- mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +3 -3
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +27 -10
- mindspore/nn/wrap/grad_reducer.py +2 -2
- mindspore/nn/wrap/loss_scale.py +40 -24
- mindspore/numpy/array_creations.py +33 -22
- mindspore/numpy/array_ops.py +35 -30
- mindspore/numpy/logic_ops.py +6 -27
- mindspore/numpy/math_ops.py +22 -19
- mindspore/numpy/utils.py +1 -1
- mindspore/numpy/utils_const.py +108 -58
- mindspore/ops/_constants.py +0 -6
- mindspore/ops/_grad/__init__.py +2 -1
- mindspore/ops/_grad/grad_array_ops.py +86 -117
- mindspore/ops/_grad/grad_base.py +23 -1
- mindspore/ops/_grad/grad_clip_ops.py +2 -3
- mindspore/ops/_grad/grad_comm_ops.py +34 -24
- mindspore/ops/_grad/grad_implementations.py +9 -45
- mindspore/ops/_grad/grad_inner_ops.py +47 -4
- mindspore/ops/_grad/grad_math_ops.py +142 -117
- mindspore/ops/_grad/grad_nn_ops.py +71 -165
- mindspore/ops/_grad/grad_sequence_ops.py +296 -0
- mindspore/ops/_grad/grad_sparse.py +7 -6
- mindspore/ops/_grad_experimental/__init__.py +1 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +150 -15
- mindspore/ops/_grad_experimental/grad_image_ops.py +16 -7
- mindspore/ops/_grad_experimental/grad_inner_ops.py +1 -22
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +4 -11
- mindspore/ops/_grad_experimental/grad_math_ops.py +210 -89
- mindspore/ops/_grad_experimental/grad_nn_ops.py +26 -22
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +49 -8
- mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +4 -4
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
- mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/correction_mul.py +2 -2
- mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -5
- mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -4
- mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +236 -4
- mindspore/ops/_op_impl/aicpu/abs.py +36 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_v1.py → adaptive_avg_pool_2d.py} +6 -5
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/add.py +43 -0
- mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
- mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
- mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -43
- mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
- mindspore/{compression/common/__init__.py → ops/_op_impl/aicpu/bessel_i0.py} +15 -8
- mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
- mindspore/ops/_op_impl/aicpu/conj.py +11 -0
- mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +0 -3
- mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_grad_v1.py → digamma.py} +7 -9
- mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
- mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
- mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
- mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
- mindspore/ops/_op_impl/aicpu/greater.py +41 -0
- mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
- mindspore/ops/_op_impl/aicpu/less.py +41 -0
- mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/lgamma.py} +16 -10
- mindspore/ops/_op_impl/aicpu/mirror_pad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mul.py +3 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
- mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
- mindspore/ops/_op_impl/aicpu/polar.py +32 -0
- mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
- mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
- mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
- mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
- mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
- mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
- mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
- mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
- mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
- mindspore/ops/_op_impl/aicpu/trans_data.py +1 -0
- mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
- mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
- mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
- mindspore/ops/_op_impl/cpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
- mindspore/ops/_op_impl/tbe/__init__.py +27 -611
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
- mindspore/ops/_op_impl/tbe/cast.py +0 -2
- mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +2 -2
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
- mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
- mindspore/ops/_op_impl/tbe/matmul_ds.py +2 -0
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/scatter_mul.py +2 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -2
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
- mindspore/ops/_register_for_op.py +1 -0
- mindspore/ops/_utils/__init__.py +1 -2
- mindspore/ops/_utils/utils.py +19 -40
- mindspore/ops/_vmap/vmap_array_ops.py +116 -38
- mindspore/ops/_vmap/vmap_base.py +16 -9
- mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
- mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +7 -5
- mindspore/ops/_vmap/vmap_image_ops.py +12 -5
- mindspore/ops/_vmap/vmap_math_ops.py +46 -5
- mindspore/ops/_vmap/vmap_nn_ops.py +15 -21
- mindspore/ops/_vmap/vmap_random_ops.py +1 -1
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +220 -106
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -23
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -17
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +39 -41
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +41 -43
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +51 -57
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +24 -25
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +53 -53
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +77 -85
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +37 -39
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +70 -72
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +17 -17
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
- mindspore/ops/bprop_mindir/generate_mindir.py +2 -0
- mindspore/ops/composite/__init__.py +7 -8
- mindspore/ops/composite/base.py +101 -47
- mindspore/ops/composite/math_ops.py +188 -158
- mindspore/ops/composite/multitype_ops/_compile_utils.py +415 -170
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +142 -87
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +2 -3
- mindspore/ops/composite/multitype_ops/getitem_impl.py +31 -3
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +21 -5
- mindspore/ops/composite/multitype_ops/not_in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
- mindspore/ops/composite/multitype_ops/setitem_impl.py +21 -3
- mindspore/ops/composite/multitype_ops/sub_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +35 -4
- mindspore/ops/function/__init__.py +152 -8
- mindspore/ops/function/array_func.py +2555 -674
- mindspore/ops/function/clip_func.py +209 -13
- mindspore/ops/function/debug_func.py +2 -2
- mindspore/ops/function/grad/__init__.py +2 -1
- mindspore/ops/function/grad/grad_func.py +147 -62
- mindspore/ops/function/image_func.py +54 -38
- mindspore/ops/function/linalg_func.py +167 -16
- mindspore/ops/function/math_func.py +4849 -1492
- mindspore/ops/function/nn_func.py +2573 -988
- mindspore/ops/function/other_func.py +115 -0
- mindspore/ops/function/parameter_func.py +3 -3
- mindspore/ops/function/random_func.py +790 -73
- mindspore/ops/function/sparse_func.py +98 -78
- mindspore/ops/function/sparse_unary_func.py +54 -53
- mindspore/ops/function/spectral_func.py +27 -24
- mindspore/ops/function/vmap_func.py +22 -2
- mindspore/ops/functional.py +97 -37
- mindspore/ops/op_info_register.py +70 -28
- mindspore/ops/operations/__init__.py +47 -14
- mindspore/ops/operations/_csr_ops.py +7 -7
- mindspore/ops/operations/_embedding_cache_ops.py +5 -5
- mindspore/ops/operations/_grad_ops.py +276 -187
- mindspore/ops/operations/_inner_ops.py +319 -113
- mindspore/ops/operations/_ms_kernel.py +10 -8
- mindspore/ops/operations/_ocr_ops.py +9 -9
- mindspore/ops/operations/_opaque_predicate_registry.py +4 -0
- mindspore/ops/operations/_quant_ops.py +137 -102
- mindspore/ops/operations/_rl_inner_ops.py +121 -60
- mindspore/ops/operations/_scalar_ops.py +466 -0
- mindspore/ops/operations/_sequence_ops.py +1004 -2
- mindspore/ops/operations/_tensor_array.py +10 -11
- mindspore/ops/operations/_thor_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +801 -466
- mindspore/ops/operations/comm_ops.py +51 -49
- mindspore/ops/operations/control_ops.py +2 -2
- mindspore/ops/operations/custom_ops.py +123 -44
- mindspore/ops/operations/debug_ops.py +24 -24
- mindspore/ops/operations/image_ops.py +240 -153
- mindspore/ops/operations/inner_ops.py +34 -50
- mindspore/ops/operations/linalg_ops.py +31 -9
- mindspore/ops/operations/math_ops.py +988 -757
- mindspore/ops/operations/nn_ops.py +965 -819
- mindspore/ops/operations/other_ops.py +51 -40
- mindspore/ops/operations/random_ops.py +204 -122
- mindspore/ops/operations/rl_ops.py +8 -9
- mindspore/ops/operations/sparse_ops.py +254 -93
- mindspore/ops/operations/spectral_ops.py +35 -3
- mindspore/ops/primitive.py +111 -9
- mindspore/parallel/_auto_parallel_context.py +189 -83
- mindspore/parallel/_offload_context.py +185 -0
- mindspore/parallel/_parallel_serialization.py +99 -7
- mindspore/parallel/_ps_context.py +9 -5
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +7 -1
- mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
- mindspore/{nn/transformer → parallel/_transformer}/layers.py +6 -37
- mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
- mindspore/{nn/transformer → parallel/_transformer}/moe.py +20 -16
- mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
- mindspore/{nn/transformer → parallel/_transformer}/transformer.py +48 -111
- mindspore/parallel/_utils.py +1 -2
- mindspore/parallel/algo_parameter_config.py +1 -1
- mindspore/parallel/checkpoint_transform.py +37 -34
- mindspore/parallel/shard.py +17 -18
- mindspore/profiler/common/validator/validate_path.py +2 -2
- mindspore/profiler/envprofiling.py +69 -47
- mindspore/profiler/parser/ascend_timeline_generator.py +49 -42
- mindspore/profiler/parser/base_timeline_generator.py +49 -56
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +98 -78
- mindspore/profiler/parser/hwts_log_parser.py +1 -1
- mindspore/profiler/parser/integrator.py +15 -14
- mindspore/profiler/parser/minddata_analyzer.py +2 -2
- mindspore/profiler/parser/msadvisor_analyzer.py +12 -25
- mindspore/profiler/parser/msadvisor_parser.py +2 -4
- mindspore/profiler/parser/optime_parser.py +17 -18
- mindspore/profiler/parser/profiler_info.py +2 -1
- mindspore/profiler/profiling.py +218 -186
- mindspore/rewrite/__init__.py +3 -1
- mindspore/rewrite/api/node.py +1 -114
- mindspore/rewrite/api/node_type.py +3 -0
- mindspore/rewrite/api/pattern_engine.py +31 -1
- mindspore/rewrite/api/scoped_value.py +4 -4
- mindspore/rewrite/api/symbol_tree.py +3 -78
- mindspore/rewrite/api/tree_node_helper.py +1 -1
- mindspore/rewrite/ast_creator_register.py +1 -0
- mindspore/rewrite/ast_helpers/__init__.py +2 -2
- mindspore/rewrite/ast_helpers/ast_creator.py +1 -2
- mindspore/rewrite/ast_helpers/ast_finder.py +65 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +11 -3
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +18 -2
- mindspore/rewrite/namespace.py +0 -2
- mindspore/rewrite/node.py +157 -11
- mindspore/rewrite/parsers/assign_parser.py +231 -53
- mindspore/rewrite/parsers/class_def_parser.py +187 -109
- mindspore/rewrite/parsers/for_parser.py +24 -14
- mindspore/rewrite/parsers/function_def_parser.py +21 -4
- mindspore/rewrite/parsers/if_parser.py +6 -2
- mindspore/rewrite/sparsify/__init__.py +0 -0
- mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
- mindspore/rewrite/sparsify/sparsify.py +109 -0
- mindspore/rewrite/sparsify/utils.py +173 -0
- mindspore/rewrite/symbol_tree.py +256 -133
- mindspore/rewrite/symbol_tree_builder.py +38 -1
- mindspore/run_check/_check_version.py +69 -63
- mindspore/run_check/run_check.py +2 -1
- mindspore/scipy/linalg.py +10 -114
- mindspore/scipy/ops.py +2 -2
- mindspore/scipy/ops_wrapper.py +1 -1
- mindspore/scipy/optimize/_bfgs.py +1 -1
- mindspore/scipy/optimize/_lagrange.py +200 -0
- mindspore/scipy/optimize/line_search.py +3 -2
- mindspore/scipy/optimize/minimize.py +41 -2
- mindspore/scipy/sparse/__init__.py +2 -2
- mindspore/scipy/sparse/linalg.py +4 -464
- mindspore/scipy/utils.py +1 -1
- mindspore/scipy/utils_const.py +7 -1
- mindspore/train/__init__.py +1 -1
- mindspore/train/_utils.py +28 -5
- mindspore/train/amp.py +273 -102
- mindspore/train/callback/_backup_and_restore.py +5 -5
- mindspore/train/callback/_callback.py +2 -2
- mindspore/train/callback/_checkpoint.py +3 -3
- mindspore/train/callback/_early_stop.py +3 -3
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +29 -31
- mindspore/train/callback/_loss_monitor.py +3 -3
- mindspore/train/callback/_on_request_exit.py +3 -3
- mindspore/train/callback/_reduce_lr_on_plateau.py +4 -4
- mindspore/train/callback/_summary_collector.py +23 -16
- mindspore/train/callback/_time_monitor.py +3 -3
- mindspore/train/checkpoint_pb2.py +68 -8
- mindspore/train/data_sink.py +15 -3
- mindspore/train/dataset_helper.py +10 -15
- mindspore/train/loss_scale_manager.py +8 -11
- mindspore/train/metrics/__init__.py +1 -1
- mindspore/train/metrics/bleu_score.py +1 -1
- mindspore/train/metrics/confusion_matrix.py +1 -1
- mindspore/train/metrics/cosine_similarity.py +1 -1
- mindspore/train/metrics/dice.py +2 -2
- mindspore/train/metrics/fbeta.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +4 -3
- mindspore/train/metrics/mean_surface_distance.py +2 -2
- mindspore/train/metrics/occlusion_sensitivity.py +1 -1
- mindspore/train/metrics/perplexity.py +1 -1
- mindspore/train/metrics/precision.py +1 -1
- mindspore/train/metrics/recall.py +1 -1
- mindspore/train/metrics/roc.py +2 -2
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +116 -37
- mindspore/train/model.py +45 -28
- mindspore/train/serialization.py +295 -188
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +43 -13
- mindspore/train/train_thor/convert_utils.py +2 -2
- mindspore/train/train_thor/dataset_helper.py +3 -3
- mindspore/version.py +1 -1
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +3 -2
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +648 -574
- mindspore/compression/__init__.py +0 -19
- mindspore/compression/common/constant.py +0 -124
- mindspore/compression/export/__init__.py +0 -19
- mindspore/compression/export/quant_export.py +0 -515
- mindspore/compression/quant/__init__.py +0 -28
- mindspore/compression/quant/qat.py +0 -634
- mindspore/compression/quant/quant_utils.py +0 -462
- mindspore/compression/quant/quantizer.py +0 -68
- mindspore/nn/layer/quant.py +0 -1868
- mindspore/nn/layer/rnn_utils.py +0 -90
- mindspore/nn/probability/dpn/__init__.py +0 -22
- mindspore/nn/probability/dpn/vae/__init__.py +0 -25
- mindspore/nn/probability/dpn/vae/cvae.py +0 -140
- mindspore/nn/probability/dpn/vae/vae.py +0 -124
- mindspore/nn/probability/infer/__init__.py +0 -22
- mindspore/nn/probability/infer/variational/elbo.py +0 -70
- mindspore/nn/probability/infer/variational/svi.py +0 -84
- mindspore/nn/probability/toolbox/__init__.py +0 -22
- mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
- mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -364
- mindspore/nn/probability/transforms/__init__.py +0 -22
- mindspore/nn/probability/transforms/transform_bnn.py +0 -262
- mindspore/nn/probability/zhusuan/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
- mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
- mindspore/ops/_op_impl/aicpu/parallel_concat.py +0 -42
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Cast_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/MatMul_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/Transpose_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -15
- mindspore/ops/composite/array_ops.py +0 -241
- mindspore/ops/composite/clip_ops.py +0 -134
- mindspore/ops/composite/random_ops.py +0 -426
- mindspore/ops/composite/vmap_ops.py +0 -38
- mindspore/parallel/nn/__init__.py +0 -42
- mindspore/parallel/nn/loss.py +0 -22
- mindspore/parallel/nn/moe.py +0 -21
- mindspore/parallel/nn/op_parallel_config.py +0 -22
- mindspore/parallel/nn/transformer.py +0 -31
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
mindspore/dataset/audio/utils.py
CHANGED
@@ -223,6 +223,16 @@ def create_dct(n_mfcc, n_mels, norm=NormMode.NONE):
     Returns:
         numpy.ndarray, the transformation matrix, to be right-multiplied to row-wise data of size (n_mels, n_mfcc).
 
+    Raises:
+        TypeError: If `n_mfcc` is not of type int.
+        ValueError: If `n_mfcc` is not positive.
+        TypeError: If `n_mels` is not of type int.
+        ValueError: If `n_mels` is not positive.
+        TypeError: If `norm` is not of type :class:`mindspore.dataset.audio.NormMode` .
+
+    Supported Platforms:
+        ``CPU``
+
     Examples:
         >>> from mindspore.dataset.audio import create_dct, NormMode
         >>>
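The Raises and Supported Platforms blocks added above document the same create_dct API that the docstring example imports. A minimal usage sketch consistent with that docstring (the shape follows from the Returns line; the values below are arbitrary illustrations, not asserted output):

import numpy as np
from mindspore.dataset.audio import create_dct, NormMode

# Transformation matrix of size (n_mels, n_mfcc), meant to be right-multiplied
# against row-wise mel data, per the Returns section above.
dct_matrix = create_dct(n_mfcc=40, n_mels=128, norm=NormMode.NONE)

# Hypothetical use: project 128 mel bins down to 40 cepstral coefficients.
mel_frames = np.random.rand(10, 128).astype(np.float32)   # (frames, n_mels)
mfcc_frames = mel_frames @ dct_matrix                      # expected (frames, n_mfcc)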
@@ -257,15 +267,31 @@ def linear_fbanks(n_freqs, f_min, f_max, n_filter, sample_rate):
     Creates a linear triangular filterbank.
 
     Args:
-        n_freqs (int): Number of
-        f_min (float): Minimum
-        f_max (float): Maximum
+        n_freqs (int): Number of frequencies to highlight/apply.
+        f_min (float): Minimum frequency in Hz.
+        f_max (float): Maximum frequency in Hz.
         n_filter (int): Number of (linear) triangular filter.
-        sample_rate (int): Sample rate.
+        sample_rate (int): Sample rate of the waveform.
 
     Returns:
         numpy.ndarray, the linear triangular filterbank.
 
+    Raises:
+        TypeError: If `n_freqs` is not of type int.
+        ValueError: If `n_freqs` is negative.
+        TypeError: If `f_min` is not of type float.
+        ValueError: If `f_min` is negative.
+        TypeError: If `f_max` is not of type float.
+        ValueError: If `f_max` is negative.
+        ValueError: If `f_min` is larger than `f_max`.
+        TypeError: If `n_filter` is not of type int.
+        ValueError: If `n_filter` is not positive.
+        TypeError: If `sample_rate` is not of type int.
+        ValueError: If `sample_rate` is not positive.
+
+    Supported Platforms:
+        ``CPU``
+
     Examples:
         >>> from mindspore.dataset.audio import linear_fbanks
         >>>
@@ -310,6 +336,23 @@ def melscale_fbanks(n_freqs, f_min, f_max, n_mels, sample_rate, norm=NormType.NO
     Returns:
         numpy.ndarray, the frequency transformation matrix with shape ( `n_freqs` , `n_mels` ).
 
+    Raises:
+        TypeError: If `n_freqs` is not of type int.
+        ValueError: If `n_freqs` is a negative number.
+        TypeError: If `f_min` is not of type float.
+        ValueError: If `f_min` is greater than `f_max` .
+        TypeError: If `f_max` is not of type float.
+        ValueError: If `f_max` is a negative number.
+        TypeError: If `n_mels` is not of type int.
+        ValueError: If `n_mels` is not positive.
+        TypeError: If `sample_rate` is not of type int.
+        ValueError: If `sample_rate` is not positive.
+        TypeError: If `norm` is not of type :class:`mindspore.dataset.audio.NormType` .
+        TypeError: If `mel_type` is not of type :class:`mindspore.dataset.audio.MelType` .
+
+    Supported Platforms:
+        ``CPU``
+
     Examples:
         >>> from mindspore.dataset.audio import melscale_fbanks
         >>>
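melscale_fbanks gains an analogous Raises section. A hedged call sketch (keyword usage and the norm/mel_type values are my assumption; the (n_freqs, n_mels) shape comes from the Returns line above):

from mindspore.dataset.audio import MelType, NormType, melscale_fbanks

# Frequency-to-mel transformation matrix; n_freqs is typically n_fft // 2 + 1.
fbanks = melscale_fbanks(n_freqs=201, f_min=0.0, f_max=8000.0, n_mels=64,
                         sample_rate=16000, norm=NormType.NONE, mel_type=MelType.HTK)
print(fbanks.shape)  # expected: (201, 64)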
mindspore/dataset/audio/validators.py
CHANGED

@@ -23,7 +23,7 @@ from mindspore.dataset.core.validator_helpers import check_float32, check_float3
     check_int32_not_zero, check_list_same_size, check_non_negative_float32, check_non_negative_int32, \
     check_pos_float32, check_pos_int32, check_value, INT32_MAX, parse_user_args, type_check
 from mindspore.dataset.audio.utils import BorderType, DensityFunction, FadeShape, GainType, \
-    Interpolation, MelType, Modulation, NormType, ResampleMethod, ScaleType, WindowType
+    Interpolation, MelType, Modulation, NormMode, NormType, ResampleMethod, ScaleType, WindowType
 
 
 def check_amplitude_to_db(method):
@@ -366,6 +366,35 @@ def check_inverse_mel_scale(method):
     return new_method
 
 
+def check_inverse_spectrogram(method):
+    """Wrapper method to check the parameters of InverseSpectrogram."""
+
+    @wraps(method)
+    def new_method(self, *args, **kwargs):
+        [length, n_fft, win_length, hop_length, pad, window, normalized, center, \
+            pad_mode, onesided], _ = parse_user_args(method, *args, **kwargs)
+        if length is not None:
+            check_non_negative_int32(length, "length")
+        check_pos_int32(n_fft, "n_fft")
+        type_check(window, (WindowType,), "window")
+        type_check(normalized, (bool,), "normalized")
+        type_check(center, (bool,), "center")
+        type_check(pad_mode, (BorderType,), "pad_mode")
+        type_check(onesided, (bool,), "onesided")
+        check_non_negative_int32(pad, "pad")
+        if hop_length is not None:
+            check_pos_int32(hop_length, "hop_length")
+        if win_length is not None:
+            check_pos_int32(win_length, "win_length")
+            if win_length > n_fft:
+                raise ValueError(
+                    "Input win_length should be no more than n_fft, but got win_length: {0} and n_fft: {1}.".format(
+                        win_length, n_fft))
+        return method(self, *args, **kwargs)
+
+    return new_method
+
+
 def check_lfilter(method):
     """Wrapper method to check the parameters of LFilter."""
 
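check_inverse_spectrogram follows the validator-decorator pattern used throughout these dataset validators: unpack the caller's arguments with parse_user_args, type- and range-check them, then call the wrapped method. A self-contained sketch of that pattern (the helper and class names below are illustrative stand-ins, not the actual MindSpore helpers):

from functools import wraps

def check_pos_int(value, name):
    # Stand-in for helpers such as check_pos_int32 in validator_helpers.
    if not isinstance(value, int) or isinstance(value, bool):
        raise TypeError("Argument {0} must be of type int, but got {1}.".format(name, type(value)))
    if value <= 0:
        raise ValueError("Argument {0} must be positive, but got {1}.".format(name, value))

def check_spectrogram_like(method):
    """Validate n_fft/win_length before running the decorated __init__ (illustrative only)."""
    @wraps(method)
    def new_method(self, n_fft=400, win_length=None):
        check_pos_int(n_fft, "n_fft")
        if win_length is not None:
            check_pos_int(win_length, "win_length")
            if win_length > n_fft:
                raise ValueError("Input win_length should be no more than n_fft, but got "
                                 "win_length: {0} and n_fft: {1}.".format(win_length, n_fft))
        return method(self, n_fft, win_length)
    return new_method

class SpectrogramLike:
    @check_spectrogram_like
    def __init__(self, n_fft=400, win_length=None):
        self.n_fft = n_fft
        self.win_length = win_length

SpectrogramLike(n_fft=400, win_length=400)      # passes validation
# SpectrogramLike(n_fft=400, win_length=512)    # would raise ValueError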
@@ -925,6 +954,33 @@ def check_phase_vocoder(method):
     return new_method
 
 
+def check_pitch_shift(method):
+    """Wrapper method to check the parameters of PitchShift."""
+
+    @wraps(method)
+    def new_method(self, *args, **kwargs):
+        [sample_rate, n_steps, bins_per_octave, n_fft, win_length, hop_length, window], _ = parse_user_args(
+            method, *args, **kwargs)
+
+        check_non_negative_int32(sample_rate, "sample_rate")
+        check_int32(n_steps, "n_steps")
+        check_int32_not_zero(bins_per_octave, "bins_per_octave")
+        check_pos_int32(n_fft, "n_fft")
+        type_check(window, (WindowType,), "window")
+
+        if win_length is not None:
+            check_pos_int32(win_length, "win_length")
+            if win_length > n_fft:
+                raise ValueError(
+                    "Input win_length should be no more than n_fft, but got win_length: {0} and n_fft: {1}.".format(
+                        win_length, n_fft))
+        if hop_length is not None:
+            check_pos_int32(hop_length, "hop_length")
+        return method(self, *args, **kwargs)
+
+    return new_method
+
+
 def check_resample(method):
     """Wrapper method to check the parameters of Resample."""
 
@@ -948,3 +1004,169 @@ def check_resample(method):
         return method(self, *args, **kwargs)
 
     return new_method
+
+
+def check_lfcc(method):
+    """Wrapper method to check the parameters of LFCC."""
+
+    @wraps(method)
+    def new_method(self, *args, **kwargs):
+        [sample_rate, n_filter, n_lfcc, f_min, f_max, dct_type, norm, log_lf, speckwargs], _ = parse_user_args(
+            method, *args, **kwargs)
+        type_check(sample_rate, (int,), "sample_rate")
+        check_non_negative_int32(sample_rate, "sample_rate")
+        type_check(n_filter, (int,), "n_filter")
+        check_pos_int32(n_filter, "n_filter")
+        type_check(n_lfcc, (int,), "n_lfcc")
+        check_pos_int32(n_lfcc, "n_lfcc")
+        type_check(log_lf, (bool,), "log_lf")
+        type_check(norm, (NormMode,), "norm")
+        type_check(f_min, (int, float), "f_min")
+        check_non_negative_float32(f_min, "f_min")
+        if f_max is not None:
+            type_check(f_max, (int, float), "f_max")
+            check_non_negative_float32(f_max, "f_max")
+            if f_min > f_max:
+                raise ValueError(
+                    "f_max should be greater than or equal to f_min, but got f_min: {0} and f_max: {1}.".format(
+                        f_min, f_max))
+        else:
+            if f_min >= sample_rate // 2:
+                raise ValueError(
+                    "Input sample_rate // 2 should be greater than f_min when f_max is set to None, but got f_min: {0} "
+                    "and sample_rate: {1}.".format(f_min, sample_rate))
+        if dct_type != 2:
+            raise ValueError("Input dct_type must be 2, but got : {0}.".format(dct_type))
+        if speckwargs is not None:
+            type_check(speckwargs, (dict,), "speckwargs")
+            window = speckwargs["window"]
+            pad_mode = speckwargs["pad_mode"]
+            n_fft = speckwargs["n_fft"]
+            win_length = speckwargs["win_length"]
+            pad = speckwargs["pad"]
+            power = speckwargs["power"]
+            type_check(window, (WindowType,), "window")
+            type_check(pad_mode, (BorderType,), "pad_mode")
+            type_check(pad, (int,), "pad")
+            check_non_negative_int32(pad, "pad")
+            type_check(power, (float,), "power")
+            check_non_negative_float32(power, "power")
+            if n_fft < n_lfcc:
+                raise ValueError(
+                    "n_fft should be greater than or equal to n_lfcc, but got n_fft: {0} and n_lfcc: {1}.".format(
+                        n_fft, n_lfcc))
+            if win_length > n_fft:
+                raise ValueError(
+                    "win_length must be less than or equal to n_fft, but got win_length: {0} and n_fft: {1}.".format(
+                        win_length, n_fft))
+        return method(self, *args, **kwargs)
+
+    return new_method
+
+
+def check_mfcc(method):
+    """Wrapper method to check the parameters of MFCC."""
+
+    @wraps(method)
+    def new_method(self, *args, **kwargs):
+        [sample_rate, n_mfcc, dct_type, norm, log_mels, melkwargs], _ = parse_user_args(method, *args, **kwargs)
+        check_non_negative_int32(sample_rate, "sample_rate")
+        type_check(log_mels, (bool,), "log_mels")
+        type_check(norm, (NormMode,), "norm")
+        check_non_negative_int32(n_mfcc, "n_mfcc")
+        if dct_type != 2:
+            raise ValueError("Input dct_type must be 2, but got : {0}.".format(dct_type))
+
+        if melkwargs is not None:
+            type_check(melkwargs, (dict,), "melkwargs")
+            n_fft = melkwargs["n_fft"]
+            win_length = melkwargs["win_length"]
+            hop_length = melkwargs["hop_length"]
+            f_min = melkwargs["f_min"]
+            f_max = melkwargs["f_max"]
+            pad = melkwargs["pad"]
+            power = melkwargs["power"]
+            normalized = melkwargs["normalized"]
+            center = melkwargs["center"]
+            onesided = melkwargs["onesided"]
+            window = melkwargs["window"]
+            pad_mode = melkwargs["pad_mode"]
+            norm_mel = melkwargs["norm"]
+            mel_scale = melkwargs["mel_scale"]
+            n_mels = melkwargs["n_mels"]
+
+            check_pos_int32(n_fft, "n_fft")
+            check_mel_scale_n_mels(n_mels)
+            check_mel_scale_freq(f_min, f_max, sample_rate)
+            check_mel_scale_norm(norm_mel)
+            check_mel_scale_mel_type(mel_scale)
+            check_power(power)
+            type_check(window, (WindowType,), "window")
+            type_check(normalized, (bool,), "normalized")
+            type_check(center, (bool,), "center")
+            type_check(pad_mode, (BorderType,), "pad_mode")
+            type_check(onesided, (bool,), "onesided")
+            check_non_negative_int32(pad, "pad")
+            if hop_length is not None:
+                check_pos_int32(hop_length, "hop_length")
+            if f_max is not None:
+                check_non_negative_float32(f_max, "f_max")
+            if win_length is not None:
+                check_non_negative_int32(win_length, "win_length")
+            if n_mels < n_mfcc:
+                raise ValueError("Input n_mels should be greater than or equal to n_mfcc, but got n_mfcc: {0} and " \
+                                 "n_mels: {1}.".format(n_mfcc, n_mels))
+
+        return method(self, *args, **kwargs)
+
+    return new_method
+
+
+def check_mel_spectrogram_freq(f_min, f_max, sample_rate):
+    """Wrapper method to check the parameters of f_min and f_max."""
+    type_check(f_min, (float,), "f_min")
+
+    if f_max is not None:
+        check_non_negative_float32(f_max, "f_max")
+        if f_min > f_max:
+            raise ValueError("f_max should be greater than or equal to f_min, but got f_min: {0} and f_max: {1}."
+                             .format(f_min, f_max))
+    else:
+        if f_min >= sample_rate // 2:
+            raise ValueError(
+                "MelSpectrogram: sample_rate // 2 should be greater than f_min when f_max is set to None, "
+                "but got f_min: {0}.".format(f_min))
+
+
+def check_mel_spectrogram(method):
+    """Wrapper method to check the parameters of MelSpectrogram."""
+
+    @wraps(method)
+    def new_method(self, *args, **kwargs):
+        [sample_rate, n_fft, win_length, hop_length, f_min, f_max, pad, n_mels, window, power, normalized, center, \
+            pad_mode, onesided, norm, mel_scale], _ = parse_user_args(method, *args, **kwargs)
+        check_non_negative_int32(sample_rate, "sample_rate")
+        check_pos_int32(n_fft, "n_fft")
+        check_non_negative_int32(n_mels, "n_mels")
+        check_mel_spectrogram_freq(f_min, f_max, sample_rate)
+        check_mel_scale_norm(norm)
+        check_mel_scale_mel_type(mel_scale)
+        check_pos_float32(power, "power")
+        type_check(window, (WindowType,), "window")
+        type_check(normalized, (bool,), "normalized")
+        type_check(center, (bool,), "center")
+        type_check(pad_mode, (BorderType,), "pad_mode")
+        type_check(onesided, (bool,), "onesided")
+        check_non_negative_int32(pad, "pad")
+        if hop_length is not None:
+            check_pos_int32(hop_length, "hop_length")
+        if win_length is not None:
+            check_pos_int32(win_length, "win_length")
+            if win_length > n_fft:
+                raise ValueError(
+                    "Input win_length should be no more than n_fft, but got win_length: {0} and n_fft: {1}.".format(
+                        win_length, n_fft))
+
+        return method(self, *args, **kwargs)
+
+    return new_method
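check_mfcc above reads a fixed set of keys out of melkwargs before validating them, so the dict a caller supplies has to carry exactly those keys. A sketch of such a dict (the key set is taken from the validator code above; the import path mirrors the docstring examples earlier in this diff, and the enum members and numeric values are just plausible examples):

from mindspore.dataset.audio import BorderType, MelType, NormType, WindowType

melkwargs = {
    "n_fft": 400,                    # check_pos_int32
    "win_length": 400,               # optional, non-negative int
    "hop_length": 200,               # optional, positive int
    "f_min": 0.0,                    # checked via check_mel_scale_freq
    "f_max": 8000.0,
    "pad": 0,                        # non-negative int
    "power": 2.0,                    # checked via check_power
    "normalized": False,
    "center": True,
    "onesided": True,
    "window": WindowType.HANN,       # must be a WindowType member
    "pad_mode": BorderType.REFLECT,  # must be a BorderType member
    "norm": NormType.NONE,           # checked via check_mel_scale_norm
    "mel_scale": MelType.HTK,        # checked via check_mel_scale_mel_type
    "n_mels": 128,                   # must be >= n_mfcc
}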
mindspore/dataset/callback/ds_callback.py
CHANGED

@@ -131,7 +131,7 @@ class WaitedDSCallback(Callback, DSCallback):
     r"""
     Abstract base class used to build dataset callback classes that are synchronized with the training callback class
     `mindspore.train.Callback \
-    <https://www.mindspore.cn/docs/en/r2.0
+    <https://www.mindspore.cn/docs/en/r2.0/api_python/train/
     mindspore.train.Callback.html#mindspore.train.Callback>`_ .
 
     It can be used to execute a custom callback method before a step or an epoch, such as
@@ -142,7 +142,7 @@ class WaitedDSCallback(Callback, DSCallback):
     `device_number` , `list_callback` , `cur_epoch_num` , `cur_step_num` , `dataset_sink_mode` ,
     `net_outputs` , etc., see
     `mindspore.train.Callback \
-    <https://www.mindspore.cn/docs/en/r2.0
+    <https://www.mindspore.cn/docs/en/r2.0/api_python/train/
     mindspore.train.Callback.html#mindspore.train.Callback>`_ .
 
     Users can obtain the dataset pipeline context through `ds_run_context` , including
mindspore/dataset/core/config.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright 2019-
+# Copyright 2019-2023 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,14 +23,15 @@ Common imported modules in corresponding API examples are as follows:
     import mindspore.dataset as ds
 """
 from __future__ import absolute_import
-
+from enum import IntEnum
 import os
 import platform
 import random
 import numpy
 import mindspore._c_dataengine as cde
 from mindspore import log as logger
-from mindspore.dataset.core.validator_helpers import replace_none
+from mindspore.dataset.core.validator_helpers import replace_none, type_check
+from mindspore.dataset.debug import DebugHook, PrintMetaDataHook
 
 __all__ = ['set_sending_batches', 'load', '_init_device_info',
            'set_seed', 'get_seed',
@@ -46,12 +47,15 @@ __all__ = ['set_sending_batches', 'load', '_init_device_info',
            'set_auto_offload', 'get_auto_offload',
            'set_enable_watchdog', 'get_enable_watchdog',
            'set_fast_recovery', 'get_fast_recovery',
+           'set_debug_mode', 'get_debug_mode',
+           'set_error_samples_mode', 'get_error_samples_mode', 'ErrorSamplesMode',
            'set_multiprocessing_timeout_interval', 'get_multiprocessing_timeout_interval']
 
 INT32_MAX = 2147483647
 UINT32_MAX = 4294967295
 
 _config = cde.GlobalContext.config_manager()
+_debug_context = {}
 
 
 def _init_device_info():
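The enlarged __all__ list exposes debug-mode and error-samples-mode switches as public config APIs. A hedged sketch of calling them (ErrorSamplesMode.RETURN is the only enum member named in this diff; the rest of the enum and the exact argument handling are not shown here):

import mindspore.dataset as ds

# New 2.0.0rc1 configuration entry points exported from mindspore.dataset.config.
ds.config.set_error_samples_mode(ds.config.ErrorSamplesMode.RETURN)
print(ds.config.get_error_samples_mode())

ds.config.set_debug_mode(True)   # run the pipeline synchronously on one thread
print(ds.config.get_debug_mode())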
@@ -134,7 +138,7 @@ def get_seed():
|
|
|
134
138
|
"""
|
|
135
139
|
Get random number seed. If the seed has been set, then will
|
|
136
140
|
return the set value, otherwise it will return the default seed value
|
|
137
|
-
which equals to std::mt19937::default_seed.
|
|
141
|
+
which equals to `std::mt19937::default_seed <http://www.cplusplus.com/reference/random/mt19937/>`_ .
|
|
138
142
|
|
|
139
143
|
Returns:
|
|
140
144
|
int, random number seed.
|
|
@@ -152,11 +156,12 @@ def set_prefetch_size(size):
|
|
|
152
156
|
Set the queue capacity of the thread in pipeline.
|
|
153
157
|
|
|
154
158
|
Args:
|
|
155
|
-
size (int): The length of the cache queue.
|
|
159
|
+
size (int): The length of the cache queue. The `size` must be greater than 0, otherwise the queue capacity of
|
|
160
|
+
the thread is invalid.
|
|
156
161
|
|
|
157
162
|
Raises:
|
|
158
163
|
TypeError: If `size` is not of type int.
|
|
159
|
-
ValueError: If `size`
|
|
164
|
+
ValueError: If `size` is not a positive number.
|
|
160
165
|
|
|
161
166
|
Note:
|
|
162
167
|
Since total memory used for prefetch can grow very large with high number of workers,
|
|
@@ -234,7 +239,8 @@ def get_num_parallel_workers():
|
|
|
234
239
|
|
|
235
240
|
def set_numa_enable(numa_enable):
|
|
236
241
|
"""
|
|
237
|
-
Set the default state of numa enabled. If numa_enable is True, need to
|
|
242
|
+
Set the default state of numa enabled. If `numa_enable` is True, need to
|
|
243
|
+
ensure `numa library <http://rpmfind.net/linux/rpm2html/search.php?query=libnuma-devel>`_ is installed.
|
|
238
244
|
|
|
239
245
|
Args:
|
|
240
246
|
numa_enable (bool): Whether to use numa bind feature.
|
|
@@ -375,14 +381,15 @@ def get_auto_num_workers():
 
 def set_callback_timeout(timeout):
     """
-    Set the default timeout (in seconds) for
+    Set the default timeout (in seconds) for :class:`mindspore.dataset.WaitedDSCallback` .
 
     Args:
-        timeout (int): Timeout (in seconds) to be used to end the wait in
+        timeout (int): Timeout (in seconds) to be used to end the wait in :class:`mindspore.dataset.WaitedDSCallback`
+            in case of a deadlock. The `timeout` must be greater than 0.
 
     Raises:
         TypeError: If `timeout` is not type int.
-        ValueError: If `timeout`
+        ValueError: If `timeout` is not a positive number.
 
     Examples:
         >>> # Set a new global configuration value for the timeout value.
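A small sketch of the setter/getter pair for the callback timeout (60 seconds is an illustrative value; the timeout bounds how long `WaitedDSCallback` waits before giving up on a deadlock):

```python
import mindspore.dataset as ds

# The timeout must be a positive number of seconds.
ds.config.set_callback_timeout(60)
print(ds.config.get_callback_timeout())   # 60
```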
@@ -397,10 +404,11 @@ def set_callback_timeout(timeout):
 
 def get_callback_timeout():
     """
-    Get the default timeout for WaitedDSCallback.
+    Get the default timeout for :class:`mindspore.dataset.WaitedDSCallback` .
 
     Returns:
-        int, Timeout (in seconds) to be used to end the wait in
+        int, Timeout (in seconds) to be used to end the wait in :class:`mindspore.dataset.WaitedDSCallback` in case of
+            a deadlock.
 
     Examples:
         >>> # Get the global configuration of callback timeout.
@@ -805,10 +813,10 @@ def get_dynamic_shape():
 def set_fast_recovery(fast_recovery):
     """
     Set whether dataset pipeline should recover in fast mode during failover
-    (
+    (In fast mode, random augmentations may not get same results as before the failure occurred).
 
     Args:
-        fast_recovery (bool): Whether the dataset pipeline recovers in fast mode.
+        fast_recovery (bool): Whether the dataset pipeline recovers in fast mode. System default: True.
 
     Raises:
         TypeError: If `fast_recovery` is not a boolean data type.
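A minimal sketch of the trade-off documented above: fast recovery is on by default, and turning it off trades recovery speed for bit-exact random augmentations after a failover.

```python
import mindspore.dataset as ds

# Disable fast recovery so random augmentations match the pre-failure run.
ds.config.set_fast_recovery(False)
print(ds.config.get_fast_recovery())   # False
```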
@@ -832,3 +840,191 @@ def get_fast_recovery():
         >>> is_fast_recovery = ds.config.get_fast_recovery()
     """
     return _config.get_fast_recovery()
+
+
+def set_debug_mode(debug_mode_flag: bool, debug_hook_list: list = None):
+    """
+    Set the debug_mode flag of the dataset pipeline. When enabled, the dataset pipeline is run synchronously and
+    sequentially with a single thread.
+
+    Note:
+        - When debug_mode is enabled, if set_seed has not yet been issued, MindData will internally set the seed to 1
+          so that debug mode execution of the dataset pipeline can produce deterministic results.
+        - When debug_mode is enabled, many configuration settings are ignored, including the following noteworthy
+          settings:
+          - auto_offload (False is used.)
+          - enable_autotune (False is used.)
+          - error_samples_mode (ErrorSamplesMode.RETURN is used.)
+          - num_parallel_workers (Value 1 is used.)
+        - If both debug_mode is enabled and a dataset pipeline has Map operation with offload set, then offloading is
+          ignored.
+        - If both debug_mode is enabled and a dataset pipeline has Map operation or Batch operation with
+          python_multiprocessing=True, then Python multiprocessing is ignored.
+        - If both debug_mode is enabled and a dataset pipeline has GeneratorDataset with
+          python_multiprocessing=True (the default value), then Python multiprocessing is ignored.
+        - If both debug_mode is enabled and a dataset operation has cache set, then the cache is dropped.
+        - If both debug_mode and profiling are enabled, then dataset profiling is ignored.
+
+    Args:
+        debug_mode_flag (bool): Whether dataset pipeline debug mode is enabled, which forces the pipeline
+            to run synchronously and sequentially.
+        debug_hook_list (list[DebugHook]): a list of debug hook objects to be inserted before and after each
+            transform operation in map operation. Default: None, which means to use `[PrintMetaDataHook]`,
+            which prints shape/size/type of each input/output data of each transformation.
+
+    Raises:
+        TypeError: If `debug_mode_flag` is not a boolean data type.
+        TypeError: If `debug_hook_list` is not a list type.
+        TypeError: If any item in `debug_hook_list` is not DebugHook type.
+
+    Examples:
+        1. Enable dataset pipeline debug mode and use default debug hook.
+
+        >>> # Print shape and type of input/output data of each transform op in map operator.
+        >>> ds.config.set_debug_mode(True)
+
+        2. Enable dataset pipeline debug mode and use pre-defined debug hook provided by MindData.
+
+        >>> import mindspore.dataset.debug as debug
+        >>>
+        >>> ds.config.set_debug_mode(True, debug_hook_list=[debug.PrintDataHook()])
+
+        3. Enable dataset pipeline debug mode and use user-defined debug hook. It must define a
+           class inherited from DebugHook.
+
+        >>> import mindspore.dataset.debug as debug
+        >>>
+        >>> class CustomizedHook(debug.DebugHook):
+        ...     def __init__(self):
+        ...         super().__init__()
+        ...
+        ...     def compute(self, *args):
+        ...         # Add your debugging code here.
+        ...         return args
+        >>>
+        >>> ds.config.set_debug_mode(True, debug_hook_list=[CustomizedHook()])
+
+        4. Enable dataset pipeline debug mode and use user-defined debug hook and insert by users manually.
+
+        >>> ds.config.set_debug_mode(True)
+        >>> dataset = ds.ImageFolderDataset(dataset_dir="/path/to/image_folder_dataset_directory")
+        >>> # the debug hook is added after `Decode` operation.
+        >>> dataset = dataset.map([vision.Decode(), CustomizedHook(), vision.CenterCrop(100)])
+    """
+    if not isinstance(debug_mode_flag, bool):
+        raise TypeError("debug_mode_flag isn't of type boolean.")
+    if not debug_hook_list:
+        debug_hook_list = [PrintMetaDataHook()]
+    if not isinstance(debug_hook_list, list):
+        raise TypeError("debug_hook_list is not a list.")
+    for debug_func in debug_hook_list:
+        if not isinstance(debug_func, DebugHook):
+            raise TypeError("All items in debug_hook_list must be of type DebugHook.")
+    if debug_mode_flag:
+        logger.warning("Dataset pipeline debug mode is enabled. Performance will be impacted because the pipeline"
+                       " will be running in a single thread.")
+    if debug_hook_list:
+        _debug_context["debug_hook_list"] = debug_hook_list
+
+    _config.set_debug_mode(debug_mode_flag)
+
+
+def get_debug_mode():
+    """
+    Get the debug_mode flag of the dataset pipeline
+
+    Returns:
+        bool, whether dataset pipeline debug mode is enabled
+
+    Examples:
+        >>> debug_mode = ds.config.get_debug_mode()
+    """
+    return _config.get_debug_mode()
+
+
+def _get_debug_hook_list():
+    """
+    INTERNAL USE ONLY!
+    Get value of debug_hook_list.
+
+    Returns:
+        list, the debug hook objects to be inserted in map operation to debug inputs/outputs of each transform.
+    """
+    return _debug_context.get("debug_hook_list")
+
+
+class ErrorSamplesMode(IntEnum):
+    """
+    An enumeration for `error_samples_mode` .
+
+    Possible enumeration values are: ErrorSamplesMode.RETURN, ErrorSamplesMode.REPLACE, ErrorSamplesMode.SKIP.
+
+    - ErrorSamplesMode.RETURN: means erroneous sample results in error raised and returned.
+    - ErrorSamplesMode.REPLACE: means erroneous sample is replaced with an internally determined sample.
+    - ErrorSamplesMode.SKIP: means erroneous sample is skipped.
+    """
+
+    RETURN = 0
+    REPLACE = 1
+    SKIP = 2
+
+
+# Convert ErrorSamplesMode from Python enum format to CDE enum format
+_PYTHON_TO_CDE_ERROR_SAMPLES_MODE = {
+    ErrorSamplesMode.RETURN: cde.ErrorSamplesMode.DE_ERROR_SAMPLES_MODE_RETURN,
+    ErrorSamplesMode.REPLACE: cde.ErrorSamplesMode.DE_ERROR_SAMPLES_MODE_REPLACE,
+    ErrorSamplesMode.SKIP: cde.ErrorSamplesMode.DE_ERROR_SAMPLES_MODE_SKIP
+}
+
+# Convert ErrorSamplesMode from CDE int format to Python enum format
+_CDE_TO_PYTHON_ERROR_SAMPLES_MODE = {
+    0: ErrorSamplesMode.RETURN,
+    1: ErrorSamplesMode.REPLACE,
+    2: ErrorSamplesMode.SKIP
+}
+
+
+def set_error_samples_mode(error_samples_mode):
+    """
+    Set the method in which erroneous samples should be processed in a dataset pipeline.
+
+    Note:
+        - This error samples feature is only applicable to the Map operation in a dataset pipeline.
+        - For 'ErrorSamplesMode.REPLACE' mode, a cache of other samples will be used.
+        - If 'ErrorSamplesMode.SKIP' mode is used in a distributed setting, beware to manually ensure the
+          number of valid samples are the same for each shard (otherwise one may encounter hangs).
+          One technique is to manually concat a dataset of all valid samples plus a
+          take operation for the number of skipped erroneous samples.
+
+    Args:
+        error_samples_mode (ErrorSamplesMode): The method in which erroneous samples should be processed in a dataset
+            pipeline. It can be any of [ErrorSamplesMode.RETURN, ErrorSamplesMode.REPLACE, ErrorSamplesMode.SKIP].
+            System default: ErrorSamplesMode.RETURN.
+
+            - ErrorSamplesMode.RETURN: means erroneous sample results in error raised and returned.
+
+            - ErrorSamplesMode.REPLACE: means erroneous sample is replaced with a correct sample.
+
+            - ErrorSamplesMode.SKIP: means erroneous sample is skipped.
+
+    Raises:
+        TypeError: If `error_samples_mode` is not of type ErrorSamplesMode.
+
+    Examples:
+        >>> ds.config.set_error_samples_mode(ds.config.ErrorSamplesMode.SKIP)
+    """
+    type_check(error_samples_mode, (ErrorSamplesMode,), "error_samples_mode")
+    _config.set_error_samples_mode(_PYTHON_TO_CDE_ERROR_SAMPLES_MODE.get(error_samples_mode))
+
+
+def get_error_samples_mode():
+    """
+    Get the current configuration for strategy for processing erroneous samples in a dataset pipeline.
+
+    Returns:
+        ErrorSamplesMode, The method in which erroneous samples should be processed in a dataset pipeline.
+
+        - ErrorSamplesMode.RETURN: means erroneous sample results in error raised and returned.
+        - ErrorSamplesMode.REPLACE: means erroneous sample is replaced with an internally determined sample.
+        - ErrorSamplesMode.SKIP: means erroneous sample is skipped.
+
+    Examples:
+        >>> error_samples_mode = ds.config.get_error_samples_mode()
+    """
+    return _CDE_TO_PYTHON_ERROR_SAMPLES_MODE.get(_config.get_error_samples_mode())
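The new debug-mode and error-samples APIs above can be combined in one pipeline; the sketch below assumes an illustrative image dataset path and transforms, and recall from the docstring note that `error_samples_mode` is overridden to RETURN while debug mode is active:

```python
import mindspore.dataset as ds
import mindspore.dataset.vision as vision

# Run the pipeline single-threaded; with no hook list given, the default
# PrintMetaDataHook prints shape/size/type around every transform.
ds.config.set_debug_mode(True)

# Outside debug mode, skip samples whose transforms raise instead of failing
# the whole pipeline.
ds.config.set_error_samples_mode(ds.config.ErrorSamplesMode.SKIP)

dataset = ds.ImageFolderDataset(dataset_dir="/path/to/dataset")  # illustrative path
dataset = dataset.map(operations=[vision.Decode(), vision.CenterCrop(100)],
                      input_columns=["image"])

for _ in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
    pass  # hooks report per-transform metadata as data flows through
```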
mindspore/dataset/core/validator_helpers.py
CHANGED
@@ -746,9 +746,9 @@ def check_gnn_list_or_ndarray(param, param_name, data_type=int):
     elif data_type == str:
         data_type = np.str_
 
-    if param.dtype
+    if param.dtype != data_type:
         raise TypeError("Each member in {0} should be of type {1}. Got {2}.".format(
-            param_name, data_type, param.dtype
+            param_name, data_type, param.dtype))
 
 
 def check_tensor_op(param, param_name):