mindspore 2.0.0a0__cp38-cp38-win_amd64.whl → 2.0.0rc1__cp38-cp38-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +4 -2
- mindspore/_c_dataengine.cp38-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp38-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp38-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +102 -0
- mindspore/_checkparam.py +1066 -1001
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +4 -3
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -48
- mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
- mindspore/_extends/parse/__init__.py +5 -3
- mindspore/_extends/parse/namespace.py +16 -1
- mindspore/_extends/parse/parser.py +107 -22
- mindspore/_extends/parse/resources.py +0 -7
- mindspore/_extends/parse/standard_method.py +885 -413
- mindspore/amp.py +52 -57
- mindspore/boost/boost.py +2 -2
- mindspore/boost/boost_cell_wrapper.py +38 -20
- mindspore/boost/dim_reduce.py +3 -3
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +4 -6
- mindspore/common/_decorator.py +2 -0
- mindspore/common/_register_for_adapter.py +55 -0
- mindspore/common/_stub_tensor.py +201 -0
- mindspore/common/_utils.py +41 -7
- mindspore/common/api.py +215 -141
- mindspore/common/dtype.py +8 -1
- mindspore/common/dump.py +2 -2
- mindspore/common/initializer.py +4 -2
- mindspore/common/jit_config.py +17 -13
- mindspore/common/mutable.py +33 -13
- mindspore/common/parameter.py +23 -21
- mindspore/common/seed.py +8 -24
- mindspore/common/sparse_tensor.py +62 -41
- mindspore/common/tensor.py +852 -1154
- mindspore/communication/__init__.py +2 -2
- mindspore/communication/_comm_helper.py +11 -4
- mindspore/communication/management.py +22 -21
- mindspore/config/op_info.config +501 -1008
- mindspore/context.py +201 -23
- mindspore/dataset/__init__.py +6 -6
- mindspore/dataset/audio/__init__.py +7 -7
- mindspore/dataset/audio/transforms.py +670 -30
- mindspore/dataset/audio/utils.py +47 -4
- mindspore/dataset/audio/validators.py +223 -1
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/core/config.py +210 -14
- mindspore/dataset/core/validator_helpers.py +2 -2
- mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
- mindspore/dataset/debug/debug_hook.py +65 -0
- mindspore/dataset/debug/pre_defined_hook.py +67 -0
- mindspore/dataset/engine/__init__.py +7 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +322 -66
- mindspore/dataset/engine/datasets_audio.py +80 -76
- mindspore/dataset/engine/datasets_standard_format.py +51 -38
- mindspore/dataset/engine/datasets_text.py +232 -118
- mindspore/dataset/engine/datasets_user_defined.py +41 -17
- mindspore/dataset/engine/datasets_vision.py +746 -225
- mindspore/dataset/engine/graphdata.py +75 -10
- mindspore/dataset/engine/iterators.py +45 -5
- mindspore/dataset/engine/offload.py +48 -28
- mindspore/dataset/engine/validators.py +117 -8
- mindspore/dataset/text/__init__.py +6 -5
- mindspore/dataset/text/transforms.py +86 -3
- mindspore/dataset/text/utils.py +6 -4
- mindspore/dataset/text/validators.py +25 -0
- mindspore/dataset/transforms/__init__.py +3 -2
- mindspore/dataset/transforms/c_transforms.py +1 -1
- mindspore/dataset/transforms/transforms.py +2 -2
- mindspore/dataset/utils/__init__.py +2 -1
- mindspore/dataset/utils/line_reader.py +121 -0
- mindspore/dataset/vision/__init__.py +2 -3
- mindspore/dataset/vision/c_transforms.py +9 -9
- mindspore/dataset/vision/py_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +2 -0
- mindspore/dataset/vision/transforms.py +160 -161
- mindspore/dataset/vision/utils.py +3 -3
- mindspore/experimental/map_parameter.py +38 -26
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/callback/callback.h +9 -13
- mindspore/include/api/callback/ckpt_saver.h +2 -2
- mindspore/include/api/callback/loss_monitor.h +2 -2
- mindspore/include/api/callback/lr_scheduler.h +5 -5
- mindspore/include/api/callback/time_monitor.h +2 -2
- mindspore/include/api/callback/train_accuracy.h +4 -6
- mindspore/include/api/cfg.h +19 -6
- mindspore/include/api/context.h +44 -9
- mindspore/include/api/delegate.h +1 -1
- mindspore/include/api/metrics/accuracy.h +2 -2
- mindspore/include/api/metrics/metrics.h +4 -3
- mindspore/include/api/model.h +9 -4
- mindspore/include/api/model_parallel_runner.h +2 -2
- mindspore/include/api/net.h +12 -11
- mindspore/include/api/serialization.h +19 -3
- mindspore/include/api/types.h +3 -3
- mindspore/include/dataset/constants.h +7 -0
- mindspore/include/dataset/text.h +59 -0
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +1 -1
- mindspore/mindrecord/filereader.py +18 -0
- mindspore/mindrecord/filewriter.py +197 -34
- mindspore/mindrecord/shardreader.py +9 -0
- mindspore/mindrecord/shardwriter.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +3 -3
- mindspore/mindrecord/tools/cifar10_to_mr.py +3 -3
- mindspore/mindrecord/tools/csv_to_mr.py +3 -3
- mindspore/mindrecord/tools/imagenet_to_mr.py +16 -11
- mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
- mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/nn/__init__.py +0 -4
- mindspore/nn/cell.py +204 -132
- mindspore/nn/dynamic_lr.py +1 -1
- mindspore/nn/grad/cell_grad.py +7 -6
- mindspore/nn/layer/__init__.py +5 -4
- mindspore/nn/layer/activation.py +40 -89
- mindspore/nn/layer/basic.py +255 -624
- mindspore/nn/layer/channel_shuffle.py +7 -6
- mindspore/nn/layer/combined.py +1 -1
- mindspore/nn/layer/container.py +41 -4
- mindspore/nn/layer/conv.py +64 -28
- mindspore/nn/layer/dense.py +9 -8
- mindspore/nn/layer/embedding.py +27 -25
- mindspore/nn/layer/image.py +53 -46
- mindspore/nn/layer/math.py +97 -105
- mindspore/nn/layer/normalization.py +117 -86
- mindspore/nn/layer/padding.py +185 -95
- mindspore/nn/layer/pooling.py +817 -414
- mindspore/nn/layer/rnn_cells.py +10 -15
- mindspore/nn/layer/rnns.py +37 -38
- mindspore/nn/layer/thor_layer.py +11 -12
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +701 -0
- mindspore/nn/learning_rate_schedule.py +8 -8
- mindspore/nn/loss/__init__.py +5 -4
- mindspore/nn/loss/loss.py +334 -199
- mindspore/nn/optim/ada_grad.py +6 -6
- mindspore/nn/optim/adadelta.py +2 -3
- mindspore/nn/optim/adafactor.py +4 -5
- mindspore/nn/optim/adam.py +126 -62
- mindspore/nn/optim/adamax.py +3 -4
- mindspore/nn/optim/adasum.py +6 -6
- mindspore/nn/optim/asgd.py +2 -2
- mindspore/nn/optim/ftrl.py +67 -38
- mindspore/nn/optim/lamb.py +4 -5
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +43 -4
- mindspore/nn/optim/momentum.py +6 -5
- mindspore/nn/optim/optimizer.py +3 -1
- mindspore/nn/optim/proximal_ada_grad.py +2 -2
- mindspore/nn/optim/rmsprop.py +1 -1
- mindspore/nn/optim/rprop.py +8 -9
- mindspore/nn/optim/sgd.py +19 -13
- mindspore/nn/optim/thor.py +10 -15
- mindspore/nn/probability/__init__.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +4 -4
- mindspore/nn/probability/bijector/invert.py +1 -1
- mindspore/nn/probability/bijector/softplus.py +2 -2
- mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
- mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
- mindspore/nn/probability/distribution/_utils/utils.py +9 -15
- mindspore/nn/probability/distribution/bernoulli.py +3 -3
- mindspore/nn/probability/distribution/beta.py +1 -1
- mindspore/nn/probability/distribution/categorical.py +5 -7
- mindspore/nn/probability/distribution/cauchy.py +3 -3
- mindspore/nn/probability/distribution/distribution.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -2
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +3 -3
- mindspore/nn/probability/distribution/half_normal.py +15 -11
- mindspore/nn/probability/distribution/laplace.py +16 -13
- mindspore/nn/probability/distribution/logistic.py +2 -2
- mindspore/nn/probability/distribution/normal.py +1 -1
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/student_t.py +20 -15
- mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +3 -3
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +27 -10
- mindspore/nn/wrap/grad_reducer.py +2 -2
- mindspore/nn/wrap/loss_scale.py +40 -24
- mindspore/numpy/array_creations.py +33 -22
- mindspore/numpy/array_ops.py +35 -30
- mindspore/numpy/logic_ops.py +6 -27
- mindspore/numpy/math_ops.py +22 -19
- mindspore/numpy/utils.py +1 -1
- mindspore/numpy/utils_const.py +108 -58
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_constants.py +0 -6
- mindspore/ops/_grad/__init__.py +2 -1
- mindspore/ops/_grad/grad_array_ops.py +86 -117
- mindspore/ops/_grad/grad_base.py +23 -1
- mindspore/ops/_grad/grad_clip_ops.py +2 -3
- mindspore/ops/_grad/grad_comm_ops.py +34 -24
- mindspore/ops/_grad/grad_implementations.py +9 -45
- mindspore/ops/_grad/grad_inner_ops.py +47 -4
- mindspore/ops/_grad/grad_math_ops.py +142 -117
- mindspore/ops/_grad/grad_nn_ops.py +71 -165
- mindspore/ops/_grad/grad_sequence_ops.py +296 -0
- mindspore/ops/_grad/grad_sparse.py +7 -6
- mindspore/ops/_grad_experimental/__init__.py +1 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +150 -15
- mindspore/ops/_grad_experimental/grad_image_ops.py +16 -7
- mindspore/ops/_grad_experimental/grad_inner_ops.py +1 -22
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +4 -11
- mindspore/ops/_grad_experimental/grad_math_ops.py +210 -89
- mindspore/ops/_grad_experimental/grad_nn_ops.py +26 -22
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +49 -8
- mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +4 -4
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
- mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/correction_mul.py +2 -2
- mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -5
- mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -4
- mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +236 -4
- mindspore/ops/_op_impl/aicpu/abs.py +36 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_v1.py → adaptive_avg_pool_2d.py} +6 -5
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/add.py +43 -0
- mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
- mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
- mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -43
- mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
- mindspore/{compression/common/__init__.py → ops/_op_impl/aicpu/bessel_i0.py} +15 -8
- mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
- mindspore/ops/_op_impl/aicpu/conj.py +11 -0
- mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +0 -3
- mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_grad_v1.py → digamma.py} +7 -9
- mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
- mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
- mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
- mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
- mindspore/ops/_op_impl/aicpu/greater.py +41 -0
- mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
- mindspore/ops/_op_impl/aicpu/less.py +41 -0
- mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/lgamma.py} +16 -10
- mindspore/ops/_op_impl/aicpu/mirror_pad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mul.py +3 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
- mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
- mindspore/ops/_op_impl/aicpu/polar.py +32 -0
- mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
- mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
- mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
- mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
- mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
- mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
- mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
- mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
- mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
- mindspore/ops/_op_impl/aicpu/trans_data.py +1 -0
- mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
- mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
- mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
- mindspore/ops/_op_impl/cpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
- mindspore/ops/_op_impl/tbe/__init__.py +27 -611
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
- mindspore/ops/_op_impl/tbe/cast.py +0 -2
- mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +2 -2
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
- mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
- mindspore/ops/_op_impl/tbe/matmul_ds.py +2 -0
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/scatter_mul.py +2 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -2
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
- mindspore/ops/_register_for_op.py +1 -0
- mindspore/ops/_utils/__init__.py +1 -2
- mindspore/ops/_utils/utils.py +19 -40
- mindspore/ops/_vmap/vmap_array_ops.py +116 -38
- mindspore/ops/_vmap/vmap_base.py +16 -9
- mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
- mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +7 -5
- mindspore/ops/_vmap/vmap_image_ops.py +12 -5
- mindspore/ops/_vmap/vmap_math_ops.py +46 -5
- mindspore/ops/_vmap/vmap_nn_ops.py +15 -21
- mindspore/ops/_vmap/vmap_random_ops.py +1 -1
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +220 -106
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -23
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -17
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +39 -41
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +41 -43
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +51 -57
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +24 -25
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +53 -53
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +77 -85
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +37 -39
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +70 -72
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +17 -17
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
- mindspore/ops/bprop_mindir/generate_mindir.py +2 -0
- mindspore/ops/composite/__init__.py +7 -8
- mindspore/ops/composite/base.py +101 -47
- mindspore/ops/composite/math_ops.py +188 -158
- mindspore/ops/composite/multitype_ops/_compile_utils.py +415 -170
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +142 -87
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +2 -3
- mindspore/ops/composite/multitype_ops/getitem_impl.py +31 -3
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +21 -5
- mindspore/ops/composite/multitype_ops/not_in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
- mindspore/ops/composite/multitype_ops/setitem_impl.py +21 -3
- mindspore/ops/composite/multitype_ops/sub_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +35 -4
- mindspore/ops/function/__init__.py +152 -8
- mindspore/ops/function/array_func.py +2555 -674
- mindspore/ops/function/clip_func.py +209 -13
- mindspore/ops/function/debug_func.py +2 -2
- mindspore/ops/function/grad/__init__.py +2 -1
- mindspore/ops/function/grad/grad_func.py +147 -62
- mindspore/ops/function/image_func.py +54 -38
- mindspore/ops/function/linalg_func.py +167 -16
- mindspore/ops/function/math_func.py +4849 -1492
- mindspore/ops/function/nn_func.py +2573 -988
- mindspore/ops/function/other_func.py +115 -0
- mindspore/ops/function/parameter_func.py +3 -3
- mindspore/ops/function/random_func.py +790 -73
- mindspore/ops/function/sparse_func.py +98 -78
- mindspore/ops/function/sparse_unary_func.py +54 -53
- mindspore/ops/function/spectral_func.py +27 -24
- mindspore/ops/function/vmap_func.py +22 -2
- mindspore/ops/functional.py +97 -37
- mindspore/ops/op_info_register.py +70 -28
- mindspore/ops/operations/__init__.py +47 -14
- mindspore/ops/operations/_csr_ops.py +7 -7
- mindspore/ops/operations/_embedding_cache_ops.py +5 -5
- mindspore/ops/operations/_grad_ops.py +276 -187
- mindspore/ops/operations/_inner_ops.py +319 -113
- mindspore/ops/operations/_ms_kernel.py +10 -8
- mindspore/ops/operations/_ocr_ops.py +9 -9
- mindspore/ops/operations/_opaque_predicate_registry.py +4 -0
- mindspore/ops/operations/_quant_ops.py +137 -102
- mindspore/ops/operations/_rl_inner_ops.py +121 -60
- mindspore/ops/operations/_scalar_ops.py +466 -0
- mindspore/ops/operations/_sequence_ops.py +1004 -2
- mindspore/ops/operations/_tensor_array.py +10 -11
- mindspore/ops/operations/_thor_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +801 -466
- mindspore/ops/operations/comm_ops.py +51 -49
- mindspore/ops/operations/control_ops.py +2 -2
- mindspore/ops/operations/custom_ops.py +123 -44
- mindspore/ops/operations/debug_ops.py +24 -24
- mindspore/ops/operations/image_ops.py +240 -153
- mindspore/ops/operations/inner_ops.py +34 -50
- mindspore/ops/operations/linalg_ops.py +31 -9
- mindspore/ops/operations/math_ops.py +988 -757
- mindspore/ops/operations/nn_ops.py +965 -819
- mindspore/ops/operations/other_ops.py +51 -40
- mindspore/ops/operations/random_ops.py +204 -122
- mindspore/ops/operations/rl_ops.py +8 -9
- mindspore/ops/operations/sparse_ops.py +254 -93
- mindspore/ops/operations/spectral_ops.py +35 -3
- mindspore/ops/primitive.py +111 -9
- mindspore/parallel/_auto_parallel_context.py +189 -83
- mindspore/parallel/_offload_context.py +185 -0
- mindspore/parallel/_parallel_serialization.py +99 -7
- mindspore/parallel/_ps_context.py +9 -5
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +7 -1
- mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
- mindspore/{nn/transformer → parallel/_transformer}/layers.py +6 -37
- mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
- mindspore/{nn/transformer → parallel/_transformer}/moe.py +20 -16
- mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
- mindspore/{nn/transformer → parallel/_transformer}/transformer.py +48 -111
- mindspore/parallel/_utils.py +1 -2
- mindspore/parallel/algo_parameter_config.py +1 -1
- mindspore/parallel/checkpoint_transform.py +37 -34
- mindspore/parallel/shard.py +17 -18
- mindspore/profiler/common/validator/validate_path.py +2 -2
- mindspore/profiler/envprofiling.py +69 -47
- mindspore/profiler/parser/ascend_timeline_generator.py +49 -42
- mindspore/profiler/parser/base_timeline_generator.py +49 -56
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +98 -78
- mindspore/profiler/parser/hwts_log_parser.py +1 -1
- mindspore/profiler/parser/integrator.py +15 -14
- mindspore/profiler/parser/minddata_analyzer.py +2 -2
- mindspore/profiler/parser/msadvisor_analyzer.py +12 -25
- mindspore/profiler/parser/msadvisor_parser.py +2 -4
- mindspore/profiler/parser/optime_parser.py +17 -18
- mindspore/profiler/parser/profiler_info.py +2 -1
- mindspore/profiler/profiling.py +218 -186
- mindspore/rewrite/__init__.py +3 -1
- mindspore/rewrite/api/node.py +1 -114
- mindspore/rewrite/api/node_type.py +3 -0
- mindspore/rewrite/api/pattern_engine.py +31 -1
- mindspore/rewrite/api/scoped_value.py +4 -4
- mindspore/rewrite/api/symbol_tree.py +3 -78
- mindspore/rewrite/api/tree_node_helper.py +1 -1
- mindspore/rewrite/ast_creator_register.py +1 -0
- mindspore/rewrite/ast_helpers/__init__.py +2 -2
- mindspore/rewrite/ast_helpers/ast_creator.py +1 -2
- mindspore/rewrite/ast_helpers/ast_finder.py +65 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +11 -3
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +18 -2
- mindspore/rewrite/namespace.py +0 -2
- mindspore/rewrite/node.py +157 -11
- mindspore/rewrite/parsers/assign_parser.py +231 -53
- mindspore/rewrite/parsers/class_def_parser.py +187 -109
- mindspore/rewrite/parsers/for_parser.py +24 -14
- mindspore/rewrite/parsers/function_def_parser.py +21 -4
- mindspore/rewrite/parsers/if_parser.py +6 -2
- mindspore/rewrite/sparsify/__init__.py +0 -0
- mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
- mindspore/rewrite/sparsify/sparsify.py +109 -0
- mindspore/rewrite/sparsify/utils.py +173 -0
- mindspore/rewrite/symbol_tree.py +256 -133
- mindspore/rewrite/symbol_tree_builder.py +38 -1
- mindspore/run_check/_check_version.py +69 -63
- mindspore/run_check/run_check.py +2 -1
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +1 -1
- mindspore/train/_utils.py +28 -5
- mindspore/train/amp.py +273 -102
- mindspore/train/callback/_backup_and_restore.py +5 -5
- mindspore/train/callback/_callback.py +2 -2
- mindspore/train/callback/_checkpoint.py +3 -3
- mindspore/train/callback/_early_stop.py +3 -3
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +29 -31
- mindspore/train/callback/_loss_monitor.py +3 -3
- mindspore/train/callback/_on_request_exit.py +3 -3
- mindspore/train/callback/_reduce_lr_on_plateau.py +4 -4
- mindspore/train/callback/_summary_collector.py +23 -16
- mindspore/train/callback/_time_monitor.py +3 -3
- mindspore/train/checkpoint_pb2.py +68 -8
- mindspore/train/data_sink.py +15 -3
- mindspore/train/dataset_helper.py +10 -15
- mindspore/train/loss_scale_manager.py +8 -11
- mindspore/train/metrics/__init__.py +1 -1
- mindspore/train/metrics/bleu_score.py +1 -1
- mindspore/train/metrics/confusion_matrix.py +1 -1
- mindspore/train/metrics/cosine_similarity.py +1 -1
- mindspore/train/metrics/dice.py +2 -2
- mindspore/train/metrics/fbeta.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +4 -3
- mindspore/train/metrics/mean_surface_distance.py +2 -2
- mindspore/train/metrics/occlusion_sensitivity.py +1 -1
- mindspore/train/metrics/perplexity.py +1 -1
- mindspore/train/metrics/precision.py +1 -1
- mindspore/train/metrics/recall.py +1 -1
- mindspore/train/metrics/roc.py +2 -2
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +116 -37
- mindspore/train/model.py +45 -28
- mindspore/train/serialization.py +295 -188
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +43 -13
- mindspore/train/train_thor/convert_utils.py +2 -2
- mindspore/train/train_thor/dataset_helper.py +3 -3
- mindspore/turbojpeg.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +3 -2
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +610 -541
- mindspore/compression/__init__.py +0 -19
- mindspore/compression/common/constant.py +0 -124
- mindspore/compression/export/__init__.py +0 -19
- mindspore/compression/export/quant_export.py +0 -515
- mindspore/compression/quant/__init__.py +0 -28
- mindspore/compression/quant/qat.py +0 -634
- mindspore/compression/quant/quant_utils.py +0 -462
- mindspore/compression/quant/quantizer.py +0 -68
- mindspore/nn/layer/quant.py +0 -1868
- mindspore/nn/layer/rnn_utils.py +0 -90
- mindspore/nn/probability/dpn/__init__.py +0 -22
- mindspore/nn/probability/dpn/vae/__init__.py +0 -25
- mindspore/nn/probability/dpn/vae/cvae.py +0 -140
- mindspore/nn/probability/dpn/vae/vae.py +0 -124
- mindspore/nn/probability/infer/__init__.py +0 -22
- mindspore/nn/probability/infer/variational/elbo.py +0 -70
- mindspore/nn/probability/infer/variational/svi.py +0 -84
- mindspore/nn/probability/toolbox/__init__.py +0 -22
- mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
- mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -364
- mindspore/nn/probability/transforms/__init__.py +0 -22
- mindspore/nn/probability/transforms/transform_bnn.py +0 -262
- mindspore/nn/probability/zhusuan/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
- mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
- mindspore/ops/_op_impl/aicpu/parallel_concat.py +0 -42
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Cast_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/MatMul_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/Transpose_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -15
- mindspore/ops/composite/array_ops.py +0 -241
- mindspore/ops/composite/clip_ops.py +0 -134
- mindspore/ops/composite/random_ops.py +0 -426
- mindspore/ops/composite/vmap_ops.py +0 -38
- mindspore/parallel/nn/__init__.py +0 -42
- mindspore/parallel/nn/loss.py +0 -22
- mindspore/parallel/nn/moe.py +0 -21
- mindspore/parallel/nn/op_parallel_config.py +0 -22
- mindspore/parallel/nn/transformer.py +0 -31
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
mindspore/common/dtype.py
CHANGED

```diff
@@ -20,7 +20,6 @@ from __future__ import absolute_import
 import enum
 from inspect import isfunction
 import numpy as np
-from mindspore import log as logger
 from mindspore._c_expression import typing
 from mindspore._c_expression.typing import Type

@@ -146,6 +145,7 @@ float_type = (float16, float32, float64,)
 signed_type = (int8, byte, int16, short, int32, intc, int64,
                intp, float16, half, float32, single, float64,
                double, complex64, complex128)
+complex_type = (complex64, complex128,)
 all_types = (bool_, int8, uint8, int16, int32, int64, float16, float32, float64, complex64, complex128)
 implicit_conversion_seq = {t: idx for idx, t in enumerate(all_types)}

@@ -320,6 +320,13 @@ def type_size_in_bytes(dtype):
 class QuantDtype(enum.Enum):
     """
     An enum for quant datatype, contains `INT1` ~ `INT16`, `UINT1` ~ `UINT16`.
+
+    `QuantDtype` is defined in `mindspore/common/dtype.py`, use command below to import:
+
+    .. code-block::
+
+        from mindspore import QuantDtype
+
     """
     INT1 = 0
     INT2 = 1
```
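The two additions above are user-visible: a module-level `complex_type` tuple and a documented import path for `QuantDtype`. A minimal sketch, assuming MindSpore 2.0.0rc1 exposes both through the usual import paths (not taken from the diff itself):

```python
# Hedged sketch: assumes the new module-level complex_type tuple and the
# documented `from mindspore import QuantDtype` path both resolve in 2.0.0rc1.
from mindspore import QuantDtype
from mindspore.common import dtype as mstype

print(QuantDtype.INT1)                           # QuantDtype.INT1
print(mstype.complex64 in mstype.complex_type)   # True
```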
mindspore/common/dump.py
CHANGED

```diff
@@ -27,11 +27,11 @@ def set_dump(target, enabled=True):
     `target` should be an instance of :class:`mindspore.nn.Cell` or :class:`mindspore.ops.Primitive` .
     Please note that this API takes effect only when Asynchronous Dump is enabled and the `dump_mode`
     field in dump config file is "2". See the `dump document <https://www.mindspore.cn/tutorials/
-    experts/en/r2.0
+    experts/en/r2.0/debug/dump.html>`_ for details. The default enabled status for
     a :class:`mindspore.nn.Cell` or :class:`mindspore.ops.Primitive` is False.

     .. warning::
-        This is an experimental
+        This is an experimental API that is subject to change or deletion.

     Note:
         1. This API is only effective for GRAPH_MODE with Ascend backend.
```
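For context, a minimal sketch of how the documented API is typically called; it assumes an Ascend backend in GRAPH_MODE with asynchronous dump enabled and `dump_mode` set to "2" in the dump config file, as the docstring requires:

```python
# Hedged sketch: without the Ascend/async-dump environment described in the
# docstring, these calls simply have no effect.
import mindspore as ms
from mindspore import nn, set_dump

ms.set_context(mode=ms.GRAPH_MODE, device_target="Ascend")

net = nn.Dense(4, 3)            # any Cell or Primitive can be targeted
set_dump(net)                   # enable dump for this cell
set_dump(net, enabled=False)    # disable it again
```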
mindspore/common/initializer.py
CHANGED

```diff
@@ -262,7 +262,7 @@ def _calculate_in_and_out(arr):
 class XavierNormal(Initializer):
     r"""
     Generates an array with values sampled from Xavier normal distribution
-    :math
+    :math:`{N}(0, \text{sigma}^2)` in order to initialize a tensor, where

     .. math::
         sigma = gain * \sqrt{\frac{2}{n_{in} + n_{out}}}

@@ -610,7 +610,9 @@ class VarianceScaling(Initializer):
     When `distribution` is 'truncated_normal' or 'untruncated_normal', the value will be sampled from truncated or
     untruncated normal distribution with a mean of 0 and a scaled standard deviation
     :math:`stddev = \sqrt{\frac{scale}{n}}`. :math:`n` will be the number of input units if `mode` is 'fan_in',
-
+    while :math:`n` will be
+    the number of output units if `mode` is 'fan_out'. :math:`n` will be the average of 'fan_in' and 'fan_out'
+    if `mode` is 'fan_avg'.
     When `distribution` is 'uniform', the value will be sampled from a uniform distribution within the limit of
     :math:`[-\sqrt{\frac{3*scale}{n}}, \sqrt{\frac{3*scale}{n}}]`.

```
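A minimal sketch of the two initializers whose docstrings are corrected above, assuming the public `mindspore.common.initializer` API of 2.0.0rc1:

```python
# Hedged sketch: XavierNormal samples N(0, sigma^2) with
# sigma = gain * sqrt(2 / (n_in + n_out)); VarianceScaling scales by fan_in,
# fan_out, or their average ('fan_avg'), as described in the fixed docstring.
import mindspore as ms
from mindspore.common.initializer import initializer, XavierNormal, VarianceScaling

w1 = initializer(XavierNormal(gain=1.0), [64, 32], ms.float32)
w2 = initializer(VarianceScaling(scale=2.0, mode='fan_avg',
                                 distribution='truncated_normal'),
                 [64, 32], ms.float32)
print(w1.shape, w2.shape)   # (64, 32) (64, 32)
```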
mindspore/common/jit_config.py
CHANGED

```diff
@@ -19,18 +19,24 @@ class JitConfig:
     """
     Jit config for compile.

-
-    This is an experimental
+    .. warning::
+        This is an experimental API that is subject to change or deletion.

     Args:
         jit_level (str): Option for argument `level` for Optimization of lift graph.
-            Supports ["O0", "O1", "O2"]. Default: "O1".
+            Supports ["O0", "O1", "O2", "O3"]. Default: "O1".

             - "O0": Basic optimization.
             - "O1": Manual optimization.
             - "O2": Manual optimization and graph computation fusion.
+            - "O3": Performance optimization, no generalization guaranteed.
+
+        exc_mode (str): Mode for execute the network. Supports ["auto", "sink", "no_sink"]. Default: "auto".
+
+            - "auto": Automatic Policies.
+            - "sink": Build computational graphs with the sink mode.
+            - "no_sink": Build computational graphs with no sink mode.

-        task_sink (bool): Determines whether to pass the data through dataset channel. Default: True.
         **kwargs (dict): A dictionary of keyword arguments that the class needs.

     Examples:
@@ -41,13 +47,11 @@ class JitConfig:
         >>>
         >>> net.set_jit_config(jitconfig)
     """
-    def __init__(self, jit_level="O1",
-        if jit_level not in ["O0", "O1", "O2"]:
-            raise ValueError("For 'jit_level' must be one of ['O0', 'O1', 'O2'].")
-        if not
-            raise
-        self.jit_config_dict =
+    def __init__(self, jit_level="O1", exc_mode="auto", **kwargs):
+        if jit_level not in ["O0", "O1", "O2", "O3"]:
+            raise ValueError("For 'jit_level' must be one of ['O0', 'O1', 'O2', 'O3'].")
+        if exc_mode not in ['auto', 'sink', 'no_sink']:
+            raise ValueError("For 'exc_mode' must be one of '['auto', 'sink', 'no_sink']'.")
+        self.jit_config_dict = kwargs
         self.jit_config_dict["jit_level"] = jit_level
-        self.jit_config_dict["
-        for key, value in kwargs.items():
-            self.jit_config_dict[key] = value
+        self.jit_config_dict["exc_mode"] = exc_mode
```
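Based on the new constructor above, a minimal usage sketch; the `net.set_jit_config` call mirrors the class's own Examples section, and jit_level "O3" plus `exc_mode` are new in 2.0.0rc1:

```python
# Hedged sketch: assumes mindspore 2.0.0rc1, where JitConfig accepts
# jit_level in {"O0", "O1", "O2", "O3"} and exc_mode in {"auto", "sink", "no_sink"}.
from mindspore import nn, JitConfig

net = nn.Dense(16, 8)
jitconfig = JitConfig(jit_level="O1", exc_mode="auto")
net.set_jit_config(jitconfig)

# JitConfig(jit_level="O4") would raise ValueError under the new check.
```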
mindspore/common/mutable.py
CHANGED

```diff
@@ -19,6 +19,14 @@ from mindspore.common.tensor import Tensor
 from mindspore._c_expression import Tensor as Tensor_


+class _Int(int):
+    pass
+
+
+class _Float(float):
+    pass
+
+
 class _Tuple(tuple):
     pass

@@ -43,7 +51,7 @@ def _check_element_type(value):
             if not _check_element_type(element):
                 return False
         return True
-    return isinstance(value, Tensor_)
+    return isinstance(value, (Tensor, Tensor_, int, float)) and not isinstance(value, bool)


 def mutable(input_data, dynamic_len=False):
@@ -68,15 +76,17 @@ def mutable(input_data, dynamic_len=False):
     the length of the tuple or list is different for each run, it does not need to be re-compiled.

     Args:
-        input_data (Union[int, float, Tensor, tuple, list, dict): The input data to be made mutable. If
+        input_data (Union[int, float, Tensor, tuple, list, dict]): The input data to be made mutable. If
             'input_data' is list/tuple/dict, the type of each element should also in the valid types.
         dynamic_len (bool): Whether to set the whole sequence to be dynamic length. In graph compilation, if
             `dynamic_len` is True, the `input_data` must be list or tuple and the elements of `input_data` must have
             the same type and shape. Default: False.

     .. warning::
-
-
+        This is an experimental API that is subject to change or deletion.
+
+    Note:
+        Currently this api only works in GRAPH mode.

     Returns:
         The origin input data which has been set mutable.
@@ -96,6 +106,8 @@ def mutable(input_data, dynamic_len=False):
        >>> from mindspore.common import mutable
        >>> from mindspore.common import dtype as mstype
        >>> from mindspore import Tensor
+       >>> from mindspore import context
+       >>> context.set_context(mode=context.GRAPH_MODE)
        >>> class Net(nn.Cell):
        ...     def __init__(self):
        ...         super(Net, self).__init__()
@@ -122,20 +134,28 @@ def mutable(input_data, dynamic_len=False):
        >>> output = GradNetWrtX(Net())(z)
        >>> print(output)
        (Tensor(shape=[2, 3], dtype=Float32, value=
-       [[ 1.41000009e+00,
-       [ 1.41000009e+00,
-       [[ 1.70000005e+00,
-       [ 1.89999998e+00,
-       [ 1.50000000e+00,
+       [[ 1.41000009e+00, 1.60000002e+00, 6.59999943e+00],
+       [ 1.41000009e+00, 1.60000002e+00, 6.59999943e+00]]), Tensor(shape=[3, 3], dtype=Float32, value=
+       [[ 1.70000005e+00, 1.70000005e+00, 1.70000005e+00],
+       [ 1.89999998e+00, 1.89999998e+00, 1.89999998e+00],
+       [ 1.50000000e+00, 1.50000000e+00, 1.50000000e+00]]))
     """
-
-    if not dynamic_len and not _check_element_type(input_data):
+    if not _check_element_type(input_data):
         raise TypeError(
-            f"For 'mutable', the 'input_data' should be one of (Tensor, tuple
+            f"For 'mutable', the 'input_data' should be one of (int, float, bool, Tensor, tuple, list, dict) "
            f"or their nested structures, but got {input_data}.")

+    if dynamic_len and not isinstance(input_data, (tuple, list)):
+        raise TypeError(
+            f"For mutable, when the variable_len is True, the first input should be list or tuple,"
+            f" but got {input_data}")
+
     ret = input_data
-    if isinstance(input_data,
+    if isinstance(input_data, int):
+        ret = _Int(input_data)
+    elif isinstance(input_data, float):
+        ret = _Float(input_data)
+    elif isinstance(input_data, list):
         ret = _List(input_data)
     elif isinstance(input_data, tuple):
         ret = _Tuple(input_data)
```
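The widened `_check_element_type` and the new `_Int`/`_Float` wrappers mean scalars can now be passed through `mutable`. A minimal sketch under GRAPH_MODE, per the new Note in the docstring:

```python
# Hedged sketch: assumes the mindspore 2.0.0rc1 semantics shown in the hunks above.
import mindspore as ms
from mindspore import Tensor, mutable

ms.set_context(mode=ms.GRAPH_MODE)

scalar = mutable(3)        # int/float accepted since 2.0.0rc1 (bool is not)
seq = mutable([Tensor([1.0]), Tensor([2.0])], dynamic_len=True)
# mutable(3, dynamic_len=True) raises TypeError: dynamic_len needs a list/tuple
```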
mindspore/common/parameter.py
CHANGED

```diff
@@ -27,18 +27,17 @@ from mindspore._c_expression import ParamInfo
 from mindspore.common import dtype as mstype
 from mindspore import context
 from mindspore.parallel._utils import _get_parallel_mode
-from mindspore.common._utils import
+from mindspore.common._utils import get_slice_num, get_slice_shape
 from mindspore.common.initializer import initializer
 from mindspore.common.tensor import Tensor
-from mindspore
+from mindspore import _checkparam as Validator
+from mindspore._check_jit_forbidden_api import jit_forbidden_register
 from mindspore._c_expression import Tensor as Tensor_
 from mindspore.parallel._tensor import _get_slice_index
 from mindspore.parallel._auto_parallel_context import auto_parallel_context
 from mindspore.parallel._ps_context import _is_role_worker, _is_role_pserver, _is_role_sched, _clone_hash_table, \
     _is_ps_mode
-from mindspore.parallel._ps_context import _reinsert_hash_table_size
-from mindspore.parallel._ps_context import _insert_weight_init_info, _insert_accumu_init_info
-from mindspore.common.seed import _get_global_and_op_seed
+from mindspore.parallel._ps_context import _reinsert_hash_table_size, _insert_accumu_init_info, _cache_enable
 import mindspore.common._monad as monad

 __all__ = ['Parameter', 'ParameterTuple']
@@ -222,20 +221,23 @@ class Parameter(Tensor_):
         self.is_in_parallel = _is_in_parallel_mode()
         self.is_in_shard = False
         self._pipeline_stage_list = []
+        self.slice_num = 1
         if -1 in self.shape:
             raise ValueError(f"All shape elements of the Parameter must be positive. But got None.")
         if isinstance(default_input, (Tensor_, Tensor)):
-            Tensor_.__init__(self, default_input.dtype, default_input.shape)
-
             # At embedding cache scenes, we need limit the size of memory for parameter.
             # And save out range data to persistent storage to support TB-Level size parameter.
-            slice_num_of_persistent_data =
+            slice_num_of_persistent_data = get_slice_num(default_input.dtype, default_input.shape)
             if slice_num_of_persistent_data > 1:
                 data_shape = list(default_input.shape)
                 slice_first_dim = math.ceil(data_shape[0] / slice_num_of_persistent_data)
                 data_shape[0] = slice_first_dim
-                self.param_info.parameter_persistent_slice_shape = data_shape
                 self.param_info.use_persistent_storage = True
+                self.param_info.origin_shape = default_input.shape
+                self.slice_num = slice_num_of_persistent_data
+                Tensor_.__init__(self, default_input.dtype, tuple(data_shape))
+            else:
+                Tensor_.__init__(self, default_input.dtype, default_input.shape)

         elif isinstance(default_input, int):
             Tensor_.__init__(self, mstype.int64, ())
@@ -291,10 +293,10 @@ class Parameter(Tensor_):
                 # make a copy of Tensor to init the parameter.
                 return (Tensor, data.asnumpy())

-            not_init_data = _is_role_sched() or _is_in_parallel_mode()
+            not_init_data = _is_role_sched() or (_is_role_pserver() and _cache_enable()) or _is_in_parallel_mode()
             if not_init_data:
                 # do not init data while in auto parallel.
-                return (Tensor, None, data.dtype, data.shape, data.init)
+                return (Tensor, None, data.dtype, get_slice_shape(data.dtype, data.shape), data.init)
             return (Tensor, data.init_data())
         if isinstance(data, int):
             return (Tensor, data, mstype.int32)
@@ -504,7 +506,7 @@ class Parameter(Tensor_):
         if self.cache_shape:
             x.cache_shape = self.cache_shape
         if init != 'same':
-            shape = self.shape
+            shape = self.shape if self.slice_num == 1 else self.param_info.origin_shape
             dtype = self.dtype
             x.set_data(initializer(init, shape=shape, dtype=dtype))
         return x
@@ -631,13 +633,13 @@ class Parameter(Tensor_):

     @staticmethod
     def _set_data_check_input_valid(current_shape, data_shape, current_tensor_is_init,
-                                    incoming_tensor_is_init, slice_shape=False):
+                                    incoming_tensor_is_init, slice_shape=False, slice_num=1):
         if incoming_tensor_is_init and not current_tensor_is_init:
             raise TypeError("The original tensor data is initialized, but the argument 'data' is not initialized."
                             "Please initialize 'data' before call this method.")
         if tuple(current_shape) != tuple(data_shape):
             # If Slice create Parameter shape can be change.
-            if not slice_shape:
+            if not slice_shape and slice_num == 1:
                 raise ValueError(f"Can not change the shape of Parameter which has been initialized."
                                  f" Current shape is {current_shape}, and incoming is {data_shape}.")

@@ -654,6 +656,7 @@ class Parameter(Tensor_):
         Parameter.__init__(param, tensor, *args, **kwargs)
         return param

+    @jit_forbidden_register
     def set_data(self, data, slice_shape=False):
         """
         Set Parameter's data.
@@ -677,7 +680,7 @@ class Parameter(Tensor_):
         incoming_tensor_is_init = isinstance(data, Tensor) and not data.has_init
         current_tensor_is_init = isinstance(self, Tensor) and not self.has_init
         Parameter._set_data_check_input_valid(self.shape, data.shape, current_tensor_is_init, incoming_tensor_is_init,
-                                              slice_shape)
+                                              slice_shape, self.slice_num)
         if self.dtype != data.dtype:
             if mstype.implicit_conversion_seq[self.dtype] < mstype.implicit_conversion_seq[data.dtype]:
                 self._raise_type_error(data.dtype)
@@ -709,7 +712,7 @@ class Parameter(Tensor_):
             raise TypeError("The argument 'layout' should be tuple, but got {}.".format(type(layout)))
         if len(layout) < 6:
             raise ValueError("The length of 'layout' must be larger than 5, but got {}.".format(len(layout)))
-        slice_index = int(_get_slice_index(layout[0], layout[1]))
+        slice_index = int(_get_slice_index(layout[0], layout[1], layout[5]))
         init_data_args += (slice_index, layout[2], layout[5])
         return init_data_args

@@ -742,19 +745,18 @@ class Parameter(Tensor_):
             TypeError: If `layout` is not tuple.
         """
         if self.is_default_input_init and self.is_in_parallel != _is_in_parallel_mode():
-            raise RuntimeError("Must set or change parallel mode before any Tensor created.")
+            raise RuntimeError("Must set or change parallel mode before any initializer Tensor created.")
         if self.init_mode is None:
             return self
         if self.inited_param is not None:
             return self.inited_param
-        if _is_role_worker() and self.cache_enable:
-            global_seed, op_seed = _get_global_and_op_seed()
-            _insert_weight_init_info(self.name, global_seed, op_seed)

         init_data_args = self._get_init_data_args(layout)

+        if _is_role_sched():
+            return self
         if self.init_in_server and self.is_param_ps and isinstance(self.init_mode, Tensor) and \
-                self.init_mode.init is not None and
+                self.init_mode.init is not None and _is_role_worker():
             if self.cache_enable:
                 data = self.init_mode.init_data(*init_data_args)
             else:
```
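Most of the changes above are internal to the persistent-storage/slice path; the public `set_data` behaviour is unchanged apart from the extra `slice_num` plumbing. A small sketch of the unchanged user-facing call, for reference:

```python
# Hedged sketch of Parameter.set_data: shape changes are still rejected unless
# slice_shape (or, internally, slice_num > 1) allows them.
import numpy as np
import mindspore as ms
from mindspore import Parameter, Tensor

weight = Parameter(Tensor(np.zeros((2, 3), np.float32)), name="weight")
weight.set_data(Tensor(np.ones((2, 3), np.float32)))     # same shape: OK
# weight.set_data(Tensor(np.ones((4, 3), np.float32)))   # would raise ValueError
```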
mindspore/common/seed.py
CHANGED

```diff
@@ -16,7 +16,7 @@
 from __future__ import absolute_import

 import numpy as np
-from mindspore
+from mindspore import _checkparam as Validator

 # constants
 DEFAULT_GRAPH_SEED = 87654321
@@ -41,11 +41,11 @@ def set_seed(seed):
     Set global seed.

     Note:
-        The global seed is used by numpy.random, mindspore.common.Initializer, mindspore.ops.
+        The global seed is used by numpy.random, mindspore.common.Initializer, mindspore.ops.function.random_func and
         mindspore.nn.probability.distribution.

         If global seed is not set, these packages will use their own default seed independently, numpy.random and
-        mindspore.common.Initializer will choose a random seed, mindspore.ops.
+        mindspore.common.Initializer will choose a random seed, mindspore.ops.function.random_func and
         mindspore.nn.probability.distribution will use zero.

         Seed set by numpy.random.seed() only used by numpy.random, while seed set by this API will also used by
@@ -98,7 +98,7 @@ def set_seed(seed):
        >>> w1 = Parameter(initializer("uniform", [2, 2], ms.float32), name="w1") # W1
        >>> w1 = Parameter(initializer("uniform", [2, 2], ms.float32), name="w1") # W2
        >>>
-       >>> # 3. If neither global seed nor op seed is set, mindspore.ops.
+       >>> # 3. If neither global seed nor op seed is set, mindspore.ops.function.random_func and
        >>> # mindspore.nn.probability.distribution will choose a random seed:
        >>> c1 = ops.uniform((1, 4), minval, maxval) # C1
        >>> c2 = ops.uniform((1, 4), minval, maxval) # C2
@@ -106,7 +106,7 @@ def set_seed(seed):
        >>> c1 = ops.uniform((1, 4), minval, maxval) # C3
        >>> c2 = ops.uniform((1, 4), minval, maxval) # C4
        >>>
-       >>> # 4. If global seed is set, but op seed is not set, mindspore.ops.
+       >>> # 4. If global seed is set, but op seed is not set, mindspore.ops.function.random_func and
        >>> # mindspore.nn.probability.distribution will calculate a seed according to global seed and
        >>> # default op seed. Each call will change the default op seed, thus each call get different
        >>> # results.
@@ -118,7 +118,7 @@ def set_seed(seed):
        >>> c1 = ops.uniform((1, 4), minval, maxval) # C1
        >>> c2 = ops.uniform((1, 4), minval, maxval) # C2
        >>>
-       >>> # 5. If both global seed and op seed are set, mindspore.ops.
+       >>> # 5. If both global seed and op seed are set, mindspore.ops.function.random_func and
        >>> # mindspore.nn.probability.distribution will calculate a seed according to global seed and
        >>> # op seed counter. Each call will change the op seed counter, thus each call get different
        >>> # results.
@@ -131,7 +131,7 @@ def set_seed(seed):
        >>> c2 = ops.uniform((1, 4), minval, maxval, seed=2) # C2
        >>>
        >>> # 6. If op seed is set but global seed is not set, 0 will be used as global seed. Then
-       >>> # mindspore.ops.
+       >>> # mindspore.ops.function.random_func and mindspore.nn.probability.distribution act as in
        >>> # condition 5.
        >>> c1 = ops.uniform((1, 4), minval, maxval, seed=2) # C1
        >>> c2 = ops.uniform((1, 4), minval, maxval, seed=2) # C2
@@ -140,7 +140,7 @@ def set_seed(seed):
        >>> c2 = ops.uniform((1, 4), minval, maxval, seed=2) # C2
        >>>
        >>> # 7. Recall set_seed() in the program will reset numpy seed and op seed counter of
-       >>> # mindspore.ops.
+       >>> # mindspore.ops.function.random_func and mindspore.nn.probability.distribution.
        >>> set_seed(1234)
        >>> np_1 = np.random.normal(0, 1, [1]).astype(np.float32) # A1
        >>> c1 = ops.uniform((1, 4), minval, maxval, seed=2) # C1
@@ -209,22 +209,6 @@ def _get_op_seed(op_seed, kernel_name):
     return _KERNEL_SEED[(kernel_name, op_seed)]


-def _get_global_and_op_seed():
-    """Get global_seed and op_seed."""
-    global_seed = get_seed()
-    op_seed = get_seed()
-    if global_seed == 0:
-        global_seed = DEFAULT_GRAPH_SEED
-    elif global_seed is None:
-        global_seed = 0
-    if op_seed is None:
-        op_seed = 0
-    Validator.check_non_negative_int(op_seed, "seed", "init")
-    temp_seed = _get_op_seed(op_seed, "init")
-    seeds = _truncate_seed(global_seed), _truncate_seed(temp_seed)
-    return seeds
-
-
 def _get_graph_seed(op_seed, kernel_name):
     """
     Get the graph-level seed.
```
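The docstring fixes above spell out the `mindspore.ops.function.random_func` path. As a quick illustration of cases 5 and 7 from that docstring, a hedged sketch:

```python
# Hedged sketch: repeated ops.uniform calls with the same op seed advance an
# internal counter, so C1 differs from C2, while re-running after set_seed(1234)
# reproduces C1 (cases 5 and 7 in the docstring above).
import mindspore as ms
from mindspore import ops, Tensor

ms.set_seed(1234)
minval, maxval = Tensor(1, ms.float32), Tensor(2, ms.float32)
c1 = ops.uniform((1, 4), minval, maxval, seed=2)   # C1
c2 = ops.uniform((1, 4), minval, maxval, seed=2)   # C2, differs from C1

ms.set_seed(1234)
c1_again = ops.uniform((1, 4), minval, maxval, seed=2)   # equals C1
```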