mindspore 2.0.0a0__cp38-cp38-win_amd64.whl → 2.0.0rc1__cp38-cp38-win_amd64.whl
This diff reflects the content of publicly available package versions as released to their public registries, and is provided for informational purposes only.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +4 -2
- mindspore/_c_dataengine.cp38-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp38-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp38-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +102 -0
- mindspore/_checkparam.py +1066 -1001
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +4 -3
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -48
- mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
- mindspore/_extends/parse/__init__.py +5 -3
- mindspore/_extends/parse/namespace.py +16 -1
- mindspore/_extends/parse/parser.py +107 -22
- mindspore/_extends/parse/resources.py +0 -7
- mindspore/_extends/parse/standard_method.py +885 -413
- mindspore/amp.py +52 -57
- mindspore/boost/boost.py +2 -2
- mindspore/boost/boost_cell_wrapper.py +38 -20
- mindspore/boost/dim_reduce.py +3 -3
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +4 -6
- mindspore/common/_decorator.py +2 -0
- mindspore/common/_register_for_adapter.py +55 -0
- mindspore/common/_stub_tensor.py +201 -0
- mindspore/common/_utils.py +41 -7
- mindspore/common/api.py +215 -141
- mindspore/common/dtype.py +8 -1
- mindspore/common/dump.py +2 -2
- mindspore/common/initializer.py +4 -2
- mindspore/common/jit_config.py +17 -13
- mindspore/common/mutable.py +33 -13
- mindspore/common/parameter.py +23 -21
- mindspore/common/seed.py +8 -24
- mindspore/common/sparse_tensor.py +62 -41
- mindspore/common/tensor.py +852 -1154
- mindspore/communication/__init__.py +2 -2
- mindspore/communication/_comm_helper.py +11 -4
- mindspore/communication/management.py +22 -21
- mindspore/config/op_info.config +501 -1008
- mindspore/context.py +201 -23
- mindspore/dataset/__init__.py +6 -6
- mindspore/dataset/audio/__init__.py +7 -7
- mindspore/dataset/audio/transforms.py +670 -30
- mindspore/dataset/audio/utils.py +47 -4
- mindspore/dataset/audio/validators.py +223 -1
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/core/config.py +210 -14
- mindspore/dataset/core/validator_helpers.py +2 -2
- mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
- mindspore/dataset/debug/debug_hook.py +65 -0
- mindspore/dataset/debug/pre_defined_hook.py +67 -0
- mindspore/dataset/engine/__init__.py +7 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +322 -66
- mindspore/dataset/engine/datasets_audio.py +80 -76
- mindspore/dataset/engine/datasets_standard_format.py +51 -38
- mindspore/dataset/engine/datasets_text.py +232 -118
- mindspore/dataset/engine/datasets_user_defined.py +41 -17
- mindspore/dataset/engine/datasets_vision.py +746 -225
- mindspore/dataset/engine/graphdata.py +75 -10
- mindspore/dataset/engine/iterators.py +45 -5
- mindspore/dataset/engine/offload.py +48 -28
- mindspore/dataset/engine/validators.py +117 -8
- mindspore/dataset/text/__init__.py +6 -5
- mindspore/dataset/text/transforms.py +86 -3
- mindspore/dataset/text/utils.py +6 -4
- mindspore/dataset/text/validators.py +25 -0
- mindspore/dataset/transforms/__init__.py +3 -2
- mindspore/dataset/transforms/c_transforms.py +1 -1
- mindspore/dataset/transforms/transforms.py +2 -2
- mindspore/dataset/utils/__init__.py +2 -1
- mindspore/dataset/utils/line_reader.py +121 -0
- mindspore/dataset/vision/__init__.py +2 -3
- mindspore/dataset/vision/c_transforms.py +9 -9
- mindspore/dataset/vision/py_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +2 -0
- mindspore/dataset/vision/transforms.py +160 -161
- mindspore/dataset/vision/utils.py +3 -3
- mindspore/experimental/map_parameter.py +38 -26
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/callback/callback.h +9 -13
- mindspore/include/api/callback/ckpt_saver.h +2 -2
- mindspore/include/api/callback/loss_monitor.h +2 -2
- mindspore/include/api/callback/lr_scheduler.h +5 -5
- mindspore/include/api/callback/time_monitor.h +2 -2
- mindspore/include/api/callback/train_accuracy.h +4 -6
- mindspore/include/api/cfg.h +19 -6
- mindspore/include/api/context.h +44 -9
- mindspore/include/api/delegate.h +1 -1
- mindspore/include/api/metrics/accuracy.h +2 -2
- mindspore/include/api/metrics/metrics.h +4 -3
- mindspore/include/api/model.h +9 -4
- mindspore/include/api/model_parallel_runner.h +2 -2
- mindspore/include/api/net.h +12 -11
- mindspore/include/api/serialization.h +19 -3
- mindspore/include/api/types.h +3 -3
- mindspore/include/dataset/constants.h +7 -0
- mindspore/include/dataset/text.h +59 -0
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +1 -1
- mindspore/mindrecord/filereader.py +18 -0
- mindspore/mindrecord/filewriter.py +197 -34
- mindspore/mindrecord/shardreader.py +9 -0
- mindspore/mindrecord/shardwriter.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +3 -3
- mindspore/mindrecord/tools/cifar10_to_mr.py +3 -3
- mindspore/mindrecord/tools/csv_to_mr.py +3 -3
- mindspore/mindrecord/tools/imagenet_to_mr.py +16 -11
- mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
- mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/nn/__init__.py +0 -4
- mindspore/nn/cell.py +204 -132
- mindspore/nn/dynamic_lr.py +1 -1
- mindspore/nn/grad/cell_grad.py +7 -6
- mindspore/nn/layer/__init__.py +5 -4
- mindspore/nn/layer/activation.py +40 -89
- mindspore/nn/layer/basic.py +255 -624
- mindspore/nn/layer/channel_shuffle.py +7 -6
- mindspore/nn/layer/combined.py +1 -1
- mindspore/nn/layer/container.py +41 -4
- mindspore/nn/layer/conv.py +64 -28
- mindspore/nn/layer/dense.py +9 -8
- mindspore/nn/layer/embedding.py +27 -25
- mindspore/nn/layer/image.py +53 -46
- mindspore/nn/layer/math.py +97 -105
- mindspore/nn/layer/normalization.py +117 -86
- mindspore/nn/layer/padding.py +185 -95
- mindspore/nn/layer/pooling.py +817 -414
- mindspore/nn/layer/rnn_cells.py +10 -15
- mindspore/nn/layer/rnns.py +37 -38
- mindspore/nn/layer/thor_layer.py +11 -12
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +701 -0
- mindspore/nn/learning_rate_schedule.py +8 -8
- mindspore/nn/loss/__init__.py +5 -4
- mindspore/nn/loss/loss.py +334 -199
- mindspore/nn/optim/ada_grad.py +6 -6
- mindspore/nn/optim/adadelta.py +2 -3
- mindspore/nn/optim/adafactor.py +4 -5
- mindspore/nn/optim/adam.py +126 -62
- mindspore/nn/optim/adamax.py +3 -4
- mindspore/nn/optim/adasum.py +6 -6
- mindspore/nn/optim/asgd.py +2 -2
- mindspore/nn/optim/ftrl.py +67 -38
- mindspore/nn/optim/lamb.py +4 -5
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +43 -4
- mindspore/nn/optim/momentum.py +6 -5
- mindspore/nn/optim/optimizer.py +3 -1
- mindspore/nn/optim/proximal_ada_grad.py +2 -2
- mindspore/nn/optim/rmsprop.py +1 -1
- mindspore/nn/optim/rprop.py +8 -9
- mindspore/nn/optim/sgd.py +19 -13
- mindspore/nn/optim/thor.py +10 -15
- mindspore/nn/probability/__init__.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +4 -4
- mindspore/nn/probability/bijector/invert.py +1 -1
- mindspore/nn/probability/bijector/softplus.py +2 -2
- mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
- mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
- mindspore/nn/probability/distribution/_utils/utils.py +9 -15
- mindspore/nn/probability/distribution/bernoulli.py +3 -3
- mindspore/nn/probability/distribution/beta.py +1 -1
- mindspore/nn/probability/distribution/categorical.py +5 -7
- mindspore/nn/probability/distribution/cauchy.py +3 -3
- mindspore/nn/probability/distribution/distribution.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -2
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +3 -3
- mindspore/nn/probability/distribution/half_normal.py +15 -11
- mindspore/nn/probability/distribution/laplace.py +16 -13
- mindspore/nn/probability/distribution/logistic.py +2 -2
- mindspore/nn/probability/distribution/normal.py +1 -1
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/student_t.py +20 -15
- mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +3 -3
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +27 -10
- mindspore/nn/wrap/grad_reducer.py +2 -2
- mindspore/nn/wrap/loss_scale.py +40 -24
- mindspore/numpy/array_creations.py +33 -22
- mindspore/numpy/array_ops.py +35 -30
- mindspore/numpy/logic_ops.py +6 -27
- mindspore/numpy/math_ops.py +22 -19
- mindspore/numpy/utils.py +1 -1
- mindspore/numpy/utils_const.py +108 -58
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_constants.py +0 -6
- mindspore/ops/_grad/__init__.py +2 -1
- mindspore/ops/_grad/grad_array_ops.py +86 -117
- mindspore/ops/_grad/grad_base.py +23 -1
- mindspore/ops/_grad/grad_clip_ops.py +2 -3
- mindspore/ops/_grad/grad_comm_ops.py +34 -24
- mindspore/ops/_grad/grad_implementations.py +9 -45
- mindspore/ops/_grad/grad_inner_ops.py +47 -4
- mindspore/ops/_grad/grad_math_ops.py +142 -117
- mindspore/ops/_grad/grad_nn_ops.py +71 -165
- mindspore/ops/_grad/grad_sequence_ops.py +296 -0
- mindspore/ops/_grad/grad_sparse.py +7 -6
- mindspore/ops/_grad_experimental/__init__.py +1 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +150 -15
- mindspore/ops/_grad_experimental/grad_image_ops.py +16 -7
- mindspore/ops/_grad_experimental/grad_inner_ops.py +1 -22
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +4 -11
- mindspore/ops/_grad_experimental/grad_math_ops.py +210 -89
- mindspore/ops/_grad_experimental/grad_nn_ops.py +26 -22
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +49 -8
- mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +4 -4
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
- mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/correction_mul.py +2 -2
- mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -5
- mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -4
- mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +236 -4
- mindspore/ops/_op_impl/aicpu/abs.py +36 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_v1.py → adaptive_avg_pool_2d.py} +6 -5
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/add.py +43 -0
- mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
- mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
- mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -43
- mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
- mindspore/{compression/common/__init__.py → ops/_op_impl/aicpu/bessel_i0.py} +15 -8
- mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
- mindspore/ops/_op_impl/aicpu/conj.py +11 -0
- mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +0 -3
- mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
- mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_grad_v1.py → digamma.py} +7 -9
- mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
- mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
- mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
- mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
- mindspore/ops/_op_impl/aicpu/greater.py +41 -0
- mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
- mindspore/ops/_op_impl/aicpu/less.py +41 -0
- mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/lgamma.py} +16 -10
- mindspore/ops/_op_impl/aicpu/mirror_pad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mul.py +3 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
- mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
- mindspore/ops/_op_impl/aicpu/polar.py +32 -0
- mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
- mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
- mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
- mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
- mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
- mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
- mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
- mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
- mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
- mindspore/ops/_op_impl/aicpu/trans_data.py +1 -0
- mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
- mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
- mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
- mindspore/ops/_op_impl/cpu/sparse_slice.py +4 -0
- mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +6 -0
- mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
- mindspore/ops/_op_impl/tbe/__init__.py +27 -611
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
- mindspore/ops/_op_impl/tbe/cast.py +0 -2
- mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +2 -2
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
- mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
- mindspore/ops/_op_impl/tbe/matmul_ds.py +2 -0
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/scatter_mul.py +2 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -2
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
- mindspore/ops/_register_for_op.py +1 -0
- mindspore/ops/_utils/__init__.py +1 -2
- mindspore/ops/_utils/utils.py +19 -40
- mindspore/ops/_vmap/vmap_array_ops.py +116 -38
- mindspore/ops/_vmap/vmap_base.py +16 -9
- mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
- mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +7 -5
- mindspore/ops/_vmap/vmap_image_ops.py +12 -5
- mindspore/ops/_vmap/vmap_math_ops.py +46 -5
- mindspore/ops/_vmap/vmap_nn_ops.py +15 -21
- mindspore/ops/_vmap/vmap_random_ops.py +1 -1
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +220 -106
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -23
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -17
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +39 -41
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +41 -43
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +51 -57
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +24 -25
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +53 -53
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +77 -85
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +37 -39
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +70 -72
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +17 -17
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
- mindspore/ops/bprop_mindir/generate_mindir.py +2 -0
- mindspore/ops/composite/__init__.py +7 -8
- mindspore/ops/composite/base.py +101 -47
- mindspore/ops/composite/math_ops.py +188 -158
- mindspore/ops/composite/multitype_ops/_compile_utils.py +415 -170
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +142 -87
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +2 -3
- mindspore/ops/composite/multitype_ops/getitem_impl.py +31 -3
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +21 -5
- mindspore/ops/composite/multitype_ops/not_in_impl.py +9 -0
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
- mindspore/ops/composite/multitype_ops/setitem_impl.py +21 -3
- mindspore/ops/composite/multitype_ops/sub_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +35 -4
- mindspore/ops/function/__init__.py +152 -8
- mindspore/ops/function/array_func.py +2555 -674
- mindspore/ops/function/clip_func.py +209 -13
- mindspore/ops/function/debug_func.py +2 -2
- mindspore/ops/function/grad/__init__.py +2 -1
- mindspore/ops/function/grad/grad_func.py +147 -62
- mindspore/ops/function/image_func.py +54 -38
- mindspore/ops/function/linalg_func.py +167 -16
- mindspore/ops/function/math_func.py +4849 -1492
- mindspore/ops/function/nn_func.py +2573 -988
- mindspore/ops/function/other_func.py +115 -0
- mindspore/ops/function/parameter_func.py +3 -3
- mindspore/ops/function/random_func.py +790 -73
- mindspore/ops/function/sparse_func.py +98 -78
- mindspore/ops/function/sparse_unary_func.py +54 -53
- mindspore/ops/function/spectral_func.py +27 -24
- mindspore/ops/function/vmap_func.py +22 -2
- mindspore/ops/functional.py +97 -37
- mindspore/ops/op_info_register.py +70 -28
- mindspore/ops/operations/__init__.py +47 -14
- mindspore/ops/operations/_csr_ops.py +7 -7
- mindspore/ops/operations/_embedding_cache_ops.py +5 -5
- mindspore/ops/operations/_grad_ops.py +276 -187
- mindspore/ops/operations/_inner_ops.py +319 -113
- mindspore/ops/operations/_ms_kernel.py +10 -8
- mindspore/ops/operations/_ocr_ops.py +9 -9
- mindspore/ops/operations/_opaque_predicate_registry.py +4 -0
- mindspore/ops/operations/_quant_ops.py +137 -102
- mindspore/ops/operations/_rl_inner_ops.py +121 -60
- mindspore/ops/operations/_scalar_ops.py +466 -0
- mindspore/ops/operations/_sequence_ops.py +1004 -2
- mindspore/ops/operations/_tensor_array.py +10 -11
- mindspore/ops/operations/_thor_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +801 -466
- mindspore/ops/operations/comm_ops.py +51 -49
- mindspore/ops/operations/control_ops.py +2 -2
- mindspore/ops/operations/custom_ops.py +123 -44
- mindspore/ops/operations/debug_ops.py +24 -24
- mindspore/ops/operations/image_ops.py +240 -153
- mindspore/ops/operations/inner_ops.py +34 -50
- mindspore/ops/operations/linalg_ops.py +31 -9
- mindspore/ops/operations/math_ops.py +988 -757
- mindspore/ops/operations/nn_ops.py +965 -819
- mindspore/ops/operations/other_ops.py +51 -40
- mindspore/ops/operations/random_ops.py +204 -122
- mindspore/ops/operations/rl_ops.py +8 -9
- mindspore/ops/operations/sparse_ops.py +254 -93
- mindspore/ops/operations/spectral_ops.py +35 -3
- mindspore/ops/primitive.py +111 -9
- mindspore/parallel/_auto_parallel_context.py +189 -83
- mindspore/parallel/_offload_context.py +185 -0
- mindspore/parallel/_parallel_serialization.py +99 -7
- mindspore/parallel/_ps_context.py +9 -5
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +7 -1
- mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
- mindspore/{nn/transformer → parallel/_transformer}/layers.py +6 -37
- mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
- mindspore/{nn/transformer → parallel/_transformer}/moe.py +20 -16
- mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
- mindspore/{nn/transformer → parallel/_transformer}/transformer.py +48 -111
- mindspore/parallel/_utils.py +1 -2
- mindspore/parallel/algo_parameter_config.py +1 -1
- mindspore/parallel/checkpoint_transform.py +37 -34
- mindspore/parallel/shard.py +17 -18
- mindspore/profiler/common/validator/validate_path.py +2 -2
- mindspore/profiler/envprofiling.py +69 -47
- mindspore/profiler/parser/ascend_timeline_generator.py +49 -42
- mindspore/profiler/parser/base_timeline_generator.py +49 -56
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +98 -78
- mindspore/profiler/parser/hwts_log_parser.py +1 -1
- mindspore/profiler/parser/integrator.py +15 -14
- mindspore/profiler/parser/minddata_analyzer.py +2 -2
- mindspore/profiler/parser/msadvisor_analyzer.py +12 -25
- mindspore/profiler/parser/msadvisor_parser.py +2 -4
- mindspore/profiler/parser/optime_parser.py +17 -18
- mindspore/profiler/parser/profiler_info.py +2 -1
- mindspore/profiler/profiling.py +218 -186
- mindspore/rewrite/__init__.py +3 -1
- mindspore/rewrite/api/node.py +1 -114
- mindspore/rewrite/api/node_type.py +3 -0
- mindspore/rewrite/api/pattern_engine.py +31 -1
- mindspore/rewrite/api/scoped_value.py +4 -4
- mindspore/rewrite/api/symbol_tree.py +3 -78
- mindspore/rewrite/api/tree_node_helper.py +1 -1
- mindspore/rewrite/ast_creator_register.py +1 -0
- mindspore/rewrite/ast_helpers/__init__.py +2 -2
- mindspore/rewrite/ast_helpers/ast_creator.py +1 -2
- mindspore/rewrite/ast_helpers/ast_finder.py +65 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +11 -3
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +18 -2
- mindspore/rewrite/namespace.py +0 -2
- mindspore/rewrite/node.py +157 -11
- mindspore/rewrite/parsers/assign_parser.py +231 -53
- mindspore/rewrite/parsers/class_def_parser.py +187 -109
- mindspore/rewrite/parsers/for_parser.py +24 -14
- mindspore/rewrite/parsers/function_def_parser.py +21 -4
- mindspore/rewrite/parsers/if_parser.py +6 -2
- mindspore/rewrite/sparsify/__init__.py +0 -0
- mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
- mindspore/rewrite/sparsify/sparsify.py +109 -0
- mindspore/rewrite/sparsify/utils.py +173 -0
- mindspore/rewrite/symbol_tree.py +256 -133
- mindspore/rewrite/symbol_tree_builder.py +38 -1
- mindspore/run_check/_check_version.py +69 -63
- mindspore/run_check/run_check.py +2 -1
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +1 -1
- mindspore/train/_utils.py +28 -5
- mindspore/train/amp.py +273 -102
- mindspore/train/callback/_backup_and_restore.py +5 -5
- mindspore/train/callback/_callback.py +2 -2
- mindspore/train/callback/_checkpoint.py +3 -3
- mindspore/train/callback/_early_stop.py +3 -3
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +29 -31
- mindspore/train/callback/_loss_monitor.py +3 -3
- mindspore/train/callback/_on_request_exit.py +3 -3
- mindspore/train/callback/_reduce_lr_on_plateau.py +4 -4
- mindspore/train/callback/_summary_collector.py +23 -16
- mindspore/train/callback/_time_monitor.py +3 -3
- mindspore/train/checkpoint_pb2.py +68 -8
- mindspore/train/data_sink.py +15 -3
- mindspore/train/dataset_helper.py +10 -15
- mindspore/train/loss_scale_manager.py +8 -11
- mindspore/train/metrics/__init__.py +1 -1
- mindspore/train/metrics/bleu_score.py +1 -1
- mindspore/train/metrics/confusion_matrix.py +1 -1
- mindspore/train/metrics/cosine_similarity.py +1 -1
- mindspore/train/metrics/dice.py +2 -2
- mindspore/train/metrics/fbeta.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +4 -3
- mindspore/train/metrics/mean_surface_distance.py +2 -2
- mindspore/train/metrics/occlusion_sensitivity.py +1 -1
- mindspore/train/metrics/perplexity.py +1 -1
- mindspore/train/metrics/precision.py +1 -1
- mindspore/train/metrics/recall.py +1 -1
- mindspore/train/metrics/roc.py +2 -2
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +116 -37
- mindspore/train/model.py +45 -28
- mindspore/train/serialization.py +295 -188
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +43 -13
- mindspore/train/train_thor/convert_utils.py +2 -2
- mindspore/train/train_thor/dataset_helper.py +3 -3
- mindspore/turbojpeg.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +3 -2
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +610 -541
- mindspore/compression/__init__.py +0 -19
- mindspore/compression/common/constant.py +0 -124
- mindspore/compression/export/__init__.py +0 -19
- mindspore/compression/export/quant_export.py +0 -515
- mindspore/compression/quant/__init__.py +0 -28
- mindspore/compression/quant/qat.py +0 -634
- mindspore/compression/quant/quant_utils.py +0 -462
- mindspore/compression/quant/quantizer.py +0 -68
- mindspore/nn/layer/quant.py +0 -1868
- mindspore/nn/layer/rnn_utils.py +0 -90
- mindspore/nn/probability/dpn/__init__.py +0 -22
- mindspore/nn/probability/dpn/vae/__init__.py +0 -25
- mindspore/nn/probability/dpn/vae/cvae.py +0 -140
- mindspore/nn/probability/dpn/vae/vae.py +0 -124
- mindspore/nn/probability/infer/__init__.py +0 -22
- mindspore/nn/probability/infer/variational/elbo.py +0 -70
- mindspore/nn/probability/infer/variational/svi.py +0 -84
- mindspore/nn/probability/toolbox/__init__.py +0 -22
- mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
- mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -364
- mindspore/nn/probability/transforms/__init__.py +0 -22
- mindspore/nn/probability/transforms/transform_bnn.py +0 -262
- mindspore/nn/probability/zhusuan/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
- mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
- mindspore/ops/_op_impl/aicpu/parallel_concat.py +0 -42
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Cast_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/MatMul_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/Transpose_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -15
- mindspore/ops/composite/array_ops.py +0 -241
- mindspore/ops/composite/clip_ops.py +0 -134
- mindspore/ops/composite/random_ops.py +0 -426
- mindspore/ops/composite/vmap_ops.py +0 -38
- mindspore/parallel/nn/__init__.py +0 -42
- mindspore/parallel/nn/loss.py +0 -22
- mindspore/parallel/nn/moe.py +0 -21
- mindspore/parallel/nn/op_parallel_config.py +0 -22
- mindspore/parallel/nn/transformer.py +0 -31
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
The diff excerpt below appears to come from mindspore/ops/operations/random_ops.py (listed above as +204 -122); it is reproduced here in unified-diff form.

@@ -16,7 +16,7 @@
 from __future__ import absolute_import
 
 from mindspore.common._decorator import deprecated
-from mindspore
+from mindspore import _checkparam as Validator
 from mindspore.common import dtype as mstype
 from mindspore.ops.primitive import PrimitiveWithInfer, prim_attr_register, Primitive
 from mindspore.ops._utils import get_broadcast_shape
@@ -53,15 +53,14 @@ class NonDeterministicInts(Primitive):
 ValueError: If the number of elements of output is more than 1000000.
 
 Supported Platforms:
-``CPU``
+``Ascend`` ``GPU`` ``CPU``
 
 Examples:
->>> shape = Tensor(
+>>> shape = Tensor((3,4), mstype.int32)
 >>> ndints = ops.NonDeterministicInts(dtype=mstype.int32)
 >>> output = ndints(shape)
->>> print(output)
-
-[ 140364228 290834494 ]]
+>>> print(output.shape)
+(3, 4)
 """
 
 @prim_attr_register
@@ -70,28 +69,29 @@ class NonDeterministicInts(Primitive):
 self.dtype = dtype
 self.add_prim_attr("max_length", 1000000)
 self.init_prim_io_names(inputs=["shape"], outputs=["output"])
-valid_values = (mstype.int32, mstype.int64)
-Validator.check_type_name("dtype", dtype, valid_values, self.name)
 self.add_prim_attr("side_effect_hidden", True)
+valid_values = (mstype.int32, mstype.int64, mstype.uint32, mstype.uint64)
+Validator.check_type_name("dtype", dtype, valid_values, self.name)
 
 
 class TruncatedNormal(Primitive):
 """
-Returns a
+Returns a Tensor of the specified shape filled with truncated normal values.
 
-The generated values
+The generated values conform to a Gaussian distribution.
 
-
-The value of `shape` must be greater than zero. The output length can not exceed 1000000.
+Note:
+- The value of `shape` must be greater than zero. The output length can not exceed 1000000.
+- When `seed` or `seed2` is assigned a non-zero value, that value will be used as the seed.
+Otherwise, a random seed will be used instead.
 
 Args:
-seed (int, optional):
-
-seed2 (int, optional): An optional int. Defaults to 0. A second seed to avoid seed collision.
+seed (int, optional): Random number seed. Default: 0.
+seed2 (int, optional): The second seed to avoid seed collision. Default: 0.
 dtype (mindspore.dtype, optional): Specified output data type. Must be one of the following types:
 mindspore.float16, mindspore.float32 and mindspore.float64. Default: mindspore.float32.
 
-Inputs
+Inputs:
 - **shape** (Tensor) - The shape of random tensor to be generated. Its type must be one of the following types:
 mindspore.int32 and mindspore.int64.
 
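The hunk above widens the dtypes accepted by `NonDeterministicInts` to include uint32/uint64 and rewrites its example around `output.shape`. A minimal usage sketch based on that docstring, assuming MindSpore 2.0.0rc1 is installed; the generated values are, by design, nondeterministic:

```python
from mindspore import Tensor, ops
from mindspore.common import dtype as mstype

# 1-D tensor holding the requested output shape, as the docstring requires.
shape = Tensor((3, 4), mstype.int32)

# uint32 is one of the dtypes newly accepted by the validator above.
ndints = ops.NonDeterministicInts(dtype=mstype.uint32)
output = ndints(shape)

# The values are nondeterministic; only the shape is predictable.
print(output.shape)  # (3, 4)
```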
@@ -108,7 +108,7 @@ class TruncatedNormal(Primitive):
 ValueError: If the number of elements of output is more than 1000000.
 
 Supported Platforms:
-``GPU`` ``CPU``
+``Ascend`` ``GPU`` ``CPU``
 
 Examples:
 >>> shape = Tensor(np.array([2, 2]), mstype.int32)
@@ -126,12 +126,12 @@ class TruncatedNormal(Primitive):
 """Initialize TruncatedNormal"""
 self.dtype = dtype
 self.add_prim_attr("max_length", 1000000)
+self.add_prim_attr("side_effect_hidden", True)
 self.init_prim_io_names(inputs=["shape"], outputs=["output"])
 Validator.check_value_type('seed', seed, [int], self.name)
 Validator.check_value_type('seed2', seed2, [int], self.name)
 valid_values = (mstype.float16, mstype.float32, mstype.float64)
 Validator.check_type_name("dtype", dtype, valid_values, self.name)
-self.add_prim_attr("side_effect_hidden", True)
 
 
 class StandardNormal(Primitive):
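`TruncatedNormal` gains a rewritten summary (truncated Gaussian samples; `seed`, `seed2` and `dtype` arguments) and Ascend support above. A short sketch of that documented interface; the constructor arguments mirror the validators shown in the `__init__` hunk:

```python
import numpy as np
from mindspore import Tensor, ops
from mindspore.common import dtype as mstype

shape = Tensor(np.array([2, 2]), mstype.int32)

# seed/seed2 default to 0; dtype must be float16, float32 or float64.
truncated_normal = ops.TruncatedNormal(seed=1, seed2=2, dtype=mstype.float32)
output = truncated_normal(shape)
print(output.shape)  # (2, 2)
```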
@@ -140,6 +140,16 @@ class StandardNormal(Primitive):
 
 Refer to :func:`mindspore.ops.standard_normal` for more details.
 
+Args:
+seed (int): Random seed, must be non-negative. Default: 0.
+seed2 (int): Random seed2, must be non-negative. A second seed to avoid seed collision. Default: 0.
+
+Inputs:
+- **shape** (tuple) - The shape of random tensor to be generated. Only constant value is allowed.
+
+Outputs:
+Tensor. The shape is the same as the input `shape`. The dtype is float32.
+
 Supported Platforms:
 ``Ascend`` ``GPU`` ``CPU``
 
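The Args/Inputs/Outputs block added to `StandardNormal` states that the op takes a constant shape tuple and returns float32 samples. A minimal sketch of that contract:

```python
from mindspore import ops

# seed and seed2 must be non-negative; the shape must be a constant tuple.
stdnormal = ops.StandardNormal(seed=2)
output = stdnormal((3, 4))
print(output.shape, output.dtype)  # (3, 4) Float32
```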
@@ -169,7 +179,7 @@ class StandardLaplace(Primitive):
 It is defined as:
 
 .. math::
-\text{f}(x) = \frac{1}{2}\exp(-|x|)
+\text{f}(x) = \frac{1}{2}\exp(-|x|)
 
 Args:
 seed (int): Random seed. Default: 0.
@@ -277,11 +287,13 @@ class LogNormalReverse(Primitive):
 .. math::
 \text{f}(x;1.0,2.0)=\frac{1}{x\delta \sqrt[]{2\pi} }e^{-\frac{(\ln x-\mu )^2}{2\delta ^2} }
 
+where \mu, \delta is mean and standard deviation of lognormal distribution respectively.
+
 Args:
 mean (float, optional): the mean of normal distribution. With float data type.
-Default: 2.0.
-std (float, optional): the std of normal distribution. With float data type.
 Default: 1.0.
+std (float, optional): the std of normal distribution. With float data type.
+Default: 2.0.
 
 Inputs:
 - **input** (Tensor) - The tensor to be generated with log-normal distribution.
@@ -311,47 +323,9 @@ class LogNormalReverse(Primitive):
 @prim_attr_register
 def __init__(self, mean=1.0, std=2.0):
 """Initialize LogNormalReverse"""
+self.add_prim_attr("side_effect_hidden", True)
 Validator.check_value_type("mean", mean, [float], self.name)
 Validator.check_value_type("std", std, [float], self.name)
-self.add_prim_attr("side_effect_hidden", True)
-
-
-class RandomGammaGrad(Primitive):
-r"""
-Computes the derivative of a random sample of Gamma with respect to alpha.:
-
-Inputs:
-- **alpha** (Tensor) - α is the shape parameter of RandomGamma distribution.
-It must be greater than 0. Must be one of the following types: float32, float64.
-- **sample** (Tensor) - The sample of random gamma tensor. Must be one of the
-following types: float32, float64.
-
-Outputs:
-The dtype is the same type as alpha.
-The output shape is derived from the input through broadcasting.
-
-Raises:
-TypeError: If data type of `alpha` and `sample` is not float32 or float64.
-TypeError: If data type of `alpha` and `sample` is not same.
-ValueError: If the shape last dim of `sample` and `alpha` is not equal.
-
-Supported Platforms:
-``GPU``
-
-Examples:
->>> alpha = Tensor(np.array([1., 0.6, 3., 26.]), mstype.float32)
->>> sample = Tensor(np.array([6., 7, 11., 0.5]), mstype.float32)
->>> randomgammagrad = ops.RandomGammaGrad()
->>> output = randomgammagrad(alpha, sample)
->>> print(output)
-[2.5142431 3.4334087 1.8847835 0.07780622]
-"""
-
-@prim_attr_register
-def __init__(self):
-"""Initialize RandomGammaGrad"""
-self.init_prim_io_names(inputs=['alpha', 'sample'], outputs=['output'])
-self.add_prim_attr("side_effect_hidden", True)
 
 
 class Gamma(PrimitiveWithInfer):
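`LogNormalReverse` now explains the \mu/\delta parameters and swaps the documented defaults so they match `__init__(mean=1.0, std=2.0)`, while `RandomGammaGrad` is removed from this module. A sketch of `LogNormalReverse`, assuming it is exported via `mindspore.ops` like the other operators in this file; the output has the same shape as the input:

```python
import numpy as np
from mindspore import Tensor, ops
from mindspore.common import dtype as mstype

# Placeholder input; its shape and dtype determine the output tensor.
x = Tensor(np.zeros((3, 4)), mstype.float32)

# Defaults are now documented as mean=1.0, std=2.0, matching __init__.
log_normal = ops.LogNormalReverse(mean=1.0, std=2.0)
output = log_normal(x)
print(output.shape)  # (3, 4)
```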
@@ -445,24 +419,29 @@ class ParameterizedTruncatedNormal(Primitive):
 `min` and `max` should be :math:`()` or :math:`(batch\_size, )`.
 
 Note:
-The value in tensor `min` must be strictly less than `max` at any position after broadcasting.
+- The value in tensor `min` must be strictly less than `max` at any position after broadcasting.
+- When `seed` or `seed2` is assigned a non-zero value, that value will be used as the seed.
+Otherwise, a random seed will be used instead.
 
 Args:
-seed (int, optional): Random number seed.
-
-seed2 (int, optional): A second seed to avoid seed collision. Default: 0.
+seed (int, optional): Random number seed. Default: 0.
+seed2 (int, optional): The second seed to avoid seed collision. Default: 0.
 
 Inputs:
-- **shape** (Tensor) - The shape of random tensor to be generated.
-
+- **shape** (Tensor) - The shape of random tensor to be generated.
+It has shape :math:`(batch\_size, *)` where :math:`*` is an additional
+dimension with a length of no less than 1.
+Its type must be one of the following types: int32 and int64.
 - **mean** (Tensor) - The parameter defines the mean of truncated normal distribution.
+It has shape :math:`()` or :math:`(batch\_size, )`.
 Its type must be one of the following types:float16, float32, float64.
 - **stdevs** (Tensor) - The parameter defines the standard deviation for truncation of
-the normal distribution.
+the normal distribution.
+It must be greater than 0 and have the same shape and type as means.
 - **min** (Tensor) - The parameter defines the minimum of
-truncated normal distribution. It must have the same type as means.
+truncated normal distribution. It must have the same shape and type as means.
 - **max** (Tensor) - The parameter defines the maximum of
-truncated normal distribution. It must have the same type as means.
+truncated normal distribution. It must have the same shape and type as means.
 
 Outputs:
 Tensor. Its shape is specified by the input `shape` and it must have the same type as means.
@@ -479,14 +458,14 @@ class ParameterizedTruncatedNormal(Primitive):
 ValueError: If `shape` is not a 1-D tensor.
 
 Supported Platforms:
-``CPU``
+``Ascend`` ``GPU`` ``CPU``
 
 Examples:
 >>> shape = Tensor(np.array([2, 3]), mstype.int32)
->>> mean = Tensor(np.array([0], mstype.float32)
->>> stdevs = Tensor(np.array([1], mstype.float32)
->>> min = Tensor(np.array([-100], mstype.float32)
->>> max = Tensor(np.array([100], mstype.float32)
+>>> mean = Tensor(np.array([0]), mstype.float32)
+>>> stdevs = Tensor(np.array([1]), mstype.float32)
+>>> min = Tensor(np.array([-100]), mstype.float32)
+>>> max = Tensor(np.array([100]), mstype.float32)
 >>> seed = 1
 >>> seed2 = 2
 >>> parameterized_truncated_normal = ops.ParameterizedTruncatedNormal(seed=seed, seed2=seed2)
@@ -501,9 +480,9 @@ class ParameterizedTruncatedNormal(Primitive):
 """Initialize ParameterizedTruncatedNormal"""
 self.init_prim_io_names(
 inputs=['shape', 'mean', 'stdevs', 'min', 'max'], outputs=['y'])
+self.add_prim_attr("side_effect_hidden", True)
 Validator.check_value_type('seed', seed, [int], self.name)
 Validator.check_value_type('seed2', seed2, [int], self.name)
-self.add_prim_attr("side_effect_hidden", True)
 
 
 class Poisson(PrimitiveWithInfer):
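The `ParameterizedTruncatedNormal` example above fixes the misplaced parentheses in its `Tensor(...)` calls and adds Ascend/GPU support. Consolidating that docstring example into a self-contained sketch (variables renamed from `min`/`max` to avoid shadowing Python builtins):

```python
import numpy as np
from mindspore import Tensor, ops
from mindspore.common import dtype as mstype

shape = Tensor(np.array([2, 3]), mstype.int32)
mean = Tensor(np.array([0]), mstype.float32)
stdevs = Tensor(np.array([1]), mstype.float32)
minval = Tensor(np.array([-100]), mstype.float32)
maxval = Tensor(np.array([100]), mstype.float32)

op = ops.ParameterizedTruncatedNormal(seed=1, seed2=2)
output = op(shape, mean, stdevs, minval, maxval)
print(output.shape)  # (2, 3)
```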
@@ -601,7 +580,7 @@ class RandomPoisson(Primitive):
 ValueError: If `shape` elements are negative.
 
 Supported Platforms:
-``
+``GPU`` ``CPU``
 
 Examples:
 >>> shape = Tensor(np.array([2, 3]), mstype.int32)
@@ -620,10 +599,10 @@ class RandomPoisson(Primitive):
 self.init_prim_io_names(inputs=['shape', 'rate'], outputs=['output'])
 Validator.check_value_type('seed', seed, [int], self.name)
 Validator.check_value_type('seed2', seed2, [int], self.name)
+self.add_prim_attr("side_effect_hidden", True)
 valid_values = (mstype.int64, mstype.int32,
 mstype.float16, mstype.float32, mstype.float64)
 Validator.check_type_name("dtype", dtype, valid_values, self.name)
-self.add_prim_attr("side_effect_hidden", True)
 
 
 class UniformInt(Primitive):
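`RandomPoisson` is documented above as GPU/CPU-only, with `shape` and `rate` inputs and a configurable output dtype. A usage sketch under those assumptions; the `seed` and `dtype` keyword names follow the validators in the `__init__` hunk, and the exact output shape depends on how `shape` and `rate` combine:

```python
import numpy as np
from mindspore import Tensor, ops
from mindspore.common import dtype as mstype

shape = Tensor(np.array([2, 3]), mstype.int32)
rate = Tensor(np.array([5.0, 10.0]), mstype.float32)

# dtype may be int32/int64/float16/float32/float64 per the validator above.
random_poisson = ops.RandomPoisson(seed=5, dtype=mstype.int32)
output = random_poisson(shape, rate)
print(output.shape)
```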
@@ -638,17 +617,18 @@ class UniformInt(Primitive):
 the :math:`b` indicates the max distribution parameter.
 
 Note:
-The number in tensor minval must be strictly less than maxval at any position after broadcasting.
+- The number in tensor minval must be strictly less than maxval at any position after broadcasting.
+- If neither `seed` nor `seed2` is assigned a non-zero value, a randomly generated seed is used instead.
 
 Args:
 seed (int): Random seed, must be non-negative. Default: 0.
 seed2 (int): Random seed2, must be non-negative. A second seed to avoid seed collision. Default: 0.
 
 Inputs:
-- **shape** (tuple) - The shape of random tensor to be generated. Only constant value is allowed.
-- **minval** (Tensor) - The distribution parameter, a
+- **shape** (Union[tuple, Tensor]) - The shape of random tensor to be generated. Only constant value is allowed.
+- **minval** (Tensor) - The distribution parameter, :math:`a`.
 It defines the minimum possibly generated value, with int32 data type. Only one number is supported.
-- **maxval** (Tensor) - The distribution parameter, b
+- **maxval** (Tensor) - The distribution parameter, :math:`b`.
 It defines the maximum possibly generated value, with int32 data type. Only one number is supported.
 
 Outputs:
@@ -656,7 +636,7 @@ class UniformInt(Primitive):
 
 Raises:
 TypeError: If neither `seed` nor `seed2` is an int.
-TypeError: If `shape` is
+TypeError: If `shape` is neither a tuple nor a Tensor.
 TypeError: If neither `minval` nor `maxval` is a Tensor.
 ValueError: If `shape` is not a constant value.
 
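`UniformInt` now documents that `shape` may be a tuple or a Tensor and writes the bounds as :math:`a`/:math:`b`. A sketch of the documented call, drawing integers between `minval` (inclusive) and `maxval` (exclusive):

```python
from mindspore import Tensor, ops
from mindspore.common import dtype as mstype

minval = Tensor(1, mstype.int32)
maxval = Tensor(5, mstype.int32)

# shape may now be a constant tuple or a Tensor.
uniform_int = ops.UniformInt(seed=10)
output = uniform_int((2, 4), minval, maxval)
print(output.shape)  # (2, 4)
```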
@@ -694,7 +674,7 @@ class UniformReal(Primitive):
 final generated random number, must be non-negative. Default: 0.
 
 .. note::
-- Global random seed and operator-level random seed are not set: Use
+- Global random seed and operator-level random seed are not set: Use a randomly generated seed.
 - Global random seed is set, but operator-level random seed is not set: A global random seed will splice
 with a randomly generated seed.
 - Global random seed is not set, operator-level random seed is set: The default global random seed is used,
@@ -703,14 +683,14 @@ class UniformReal(Primitive):
 operator-level random seed.
 
 Inputs:
-- **shape** (tuple) - The shape of tensor to be generated. Only constant value is allowed.
+- **shape** (Union[tuple, Tensor]) - The shape of tensor to be generated. Only constant value is allowed.
 
 Outputs:
 Tensor. The shape that the input 'shape' denotes. The dtype is float32.
 
 Raises:
 TypeError: If `seed` or `seed2` is not an int.
-TypeError: If `shape` is
+TypeError: If `shape` is neither a tuple nor a Tensor.
 ValueError: If `shape` is not a constant value.
 
 Supported Platforms:
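`UniformReal` gets the same `Union[tuple, Tensor]` treatment for `shape` and the clarified seeding notes. A minimal sketch; per the docstring above, the result is always float32:

```python
from mindspore import ops

uniformreal = ops.UniformReal(seed=2)
output = uniformreal((2, 2))
print(output.shape, output.dtype)  # (2, 2) Float32
```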
@@ -739,6 +719,22 @@ class RandomChoiceWithMask(Primitive):
 
 Refer to :func:`mindspore.ops.choice_with_mask` for more details.
 
+Args:
+count (int, optional): Number of items expected to get and the number must be greater than 0. Default: 256.
+seed (int, optional): Seed is used as entropy source for Random number engines generating
+pseudo-random numbers. Default: 0.
+seed2 (int, optional): Second seed to avoid collision. Default: 0.
+
+Inputs:
+- **input_x** (Tensor[bool]) - The input tensor.
+The input tensor rank must be greater than or equal to 1 and less than or equal to 5.
+
+Outputs:
+Two tensors, the first one is the index tensor and the other one is the mask tensor.
+
+- **index** (Tensor) - The output shape is 2-D.
+- **mask** (Tensor) - The output shape is 1-D.
+
 Supported Platforms:
 ``Ascend`` ``GPU`` ``CPU``
 
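The restored Args/Inputs/Outputs block for `RandomChoiceWithMask` describes a boolean input of rank 1 to 5 and a pair of outputs: a 2-D index tensor and a 1-D mask. A sketch under those assumptions, with `count` selecting how many positions to draw:

```python
import numpy as np
from mindspore import Tensor, ops

# Boolean input of rank 2; count selects how many True positions to sample.
input_x = Tensor(np.ones((240000, 4)).astype(np.bool_))
rnd_choice_mask = ops.RandomChoiceWithMask(count=4, seed=1)
index, mask = rnd_choice_mask(input_x)
print(index.shape)  # (4, 2)
print(mask.shape)   # (4,)
```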
@@ -765,7 +761,7 @@ class RandomChoiceWithMask(Primitive):
 
 
 class RandomCategorical(PrimitiveWithInfer):
-"""
+r"""
 Generates random samples from a given categorical distribution tensor.
 
 Args:
@@ -773,12 +769,12 @@ class RandomCategorical(PrimitiveWithInfer):
 mindspore.int32 and mindspore.int64. Default: mindspore.int64.
 
 Inputs:
-- **logits** (Tensor) - The input tensor. 2-D Tensor with shape
+- **logits** (Tensor) - The input tensor. 2-D Tensor with shape :math:`(batch\_size, num\_classes)`.
 - **num_sample** (int) - Number of sample to be drawn. Only constant values is allowed.
 - **seed** (int) - Random seed. Default: 0. Only constant values is allowed.
 
 Outputs:
-- **output** (Tensor) - The output Tensor with shape
+- **output** (Tensor) - The output Tensor with shape :math:`(batch_size, num_samples)`.
 
 Raises:
 TypeError: If `dtype` is not one of the following: mindspore.int16, mindspore.int32, mindspore.int64.
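`RandomCategorical` now spells out the logits shape :math:`(batch\_size, num\_classes)` and the output shape :math:`(batch\_size, num\_samples)`. A sketch following that docstring, calling the primitive directly in PyNative-style usage:

```python
import numpy as np
from mindspore import Tensor, ops
from mindspore.common import dtype as mstype

# batch_size=2 rows of logits over num_classes=5 categories.
logits = Tensor(np.random.random((2, 5)).astype(np.float32))

random_categorical = ops.RandomCategorical(mstype.int64)
output = random_categorical(logits, 3, 0)  # num_sample=3, seed=0
print(output.shape)  # (2, 3)
```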
@@ -833,8 +829,7 @@ class Multinomial(Primitive):
 
 Inputs:
 - **x** (Tensor) - the input tensor containing the cumsum of probabilities, must be 1 or 2
-dimensions.
-supports x 1 or 2 dimensions and Ascend only supports 2 dimensions.
+dimensions.
 - **num_samples** (int) - number of samples to draw, must be a nonnegative number.
 
 Outputs:
@@ -842,19 +837,18 @@ class Multinomial(Primitive):
 
 Raises:
 TypeError: If neither `seed` nor `seed2` is an int.
-TypeError: If `x` is not a Tensor whose dtype is float16, float32, float64.
 TypeError: If dtype of `num_samples` is not int.
 TypeError: If `dtype` is not int32 or int64.
 ValueError: If `seed` or `seed2` is less than 0.
 
 Supported Platforms:
-``GPU`` ``CPU``
+``Ascend`` ``GPU`` ``CPU``
 
 Examples:
 >>> x = Tensor([[0., 9., 4., 0.]], mstype.float32)
 >>> multinomial = ops.Multinomial(seed=10)
 >>> output = multinomial(x, 2)
->>> print(output)
+>>> print(output)
 [[1 1]]
 """
 
@@ -873,21 +867,42 @@ class Multinomial(Primitive):

 class MultinomialWithReplacement(Primitive):
     r"""
-    Returns a tensor where each row contains numsamples indices sampled from the multinomial distribution
+    Returns a tensor where each row contains `numsamples` indices sampled from the multinomial distribution
+    with replacement. It diffs from `Multinomial` in that it allows the same outcome to be chosen multiple times.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Refer to :func:`mindspore.ops.multinomial_with_replacement` for more details.

     Note:
         The rows of input do not need to sum to one (in which case we use the values as weights),
         but must be non-negative, finite and have a non-zero sum.

-
+    Args:
+        numsamples (int): number of samples to draw, must be a nonnegative number.
+        replacement (bool, optional): Whether to draw with replacement or not. Default: False.
+
+    Inputs:
+        - **x** (Tensor) - the input tensor containing the cumsum of probabilities, must be 1 or 2
+          dimensions.
+        - **seed** (Tensor) - If `seed` is set to -1, and `offset` is set to 0, the random number
+          generator is seeded by a random seed. Otherwise, it is seeded by the given seed.
+          Supported dtype: int64.
+        - **offset** (Tensor) - Offset used to avoid seed collision. Supported dtype: int64.
+
+    Outputs:
+        Tensor with the same rows as `x`, each row has `numsamples` sampled indices.

     Supported Platforms:
-        ``
+        ``CPU``

     Examples:
         >>> x = Tensor([[0., 9., 4., 0.]], mstype.float32)
+        >>> seed = Tensor(2, mstype.int64)
+        >>> offset = Tensor(5, mstype.int64)
         >>> multinomialwithreplacement = ops.MultinomialWithReplacement(numsamples=2,replacement=True)
-        >>> output = multinomialwithreplacement(x,
+        >>> output = multinomialwithreplacement(x, seed, offset)
         >>> print(output)
         [[1 1]]
     """
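The rewritten `MultinomialWithReplacement` docstring above already lists an example; the sketch below only adds explicit imports so it runs on its own. Import paths are the standard public `mindspore` ones and are assumed, not taken from this file.

```python
from mindspore import Tensor, ops
from mindspore import dtype as mstype

# Input row, seed and offset tensors mirror the docstring example above.
x = Tensor([[0., 9., 4., 0.]], mstype.float32)
seed = Tensor(2, mstype.int64)
offset = Tensor(5, mstype.int64)

# Draw 2 indices per row, allowing the same outcome to be picked repeatedly.
op = ops.MultinomialWithReplacement(numsamples=2, replacement=True)
output = op(x, seed, offset)
print(output.shape)  # (1, 2)
```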
@@ -901,7 +916,7 @@ class MultinomialWithReplacement(Primitive):
         self.add_prim_attr("side_effect_hidden", True)


-class UniformCandidateSampler(
+class UniformCandidateSampler(Primitive):
     r"""
     Uniform candidate sampler.

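The constructor checks in the next hunk validate `num_true`, `num_sampled`, `unique`, `range_max`, `seed` and `remove_accidental_hits`. A rough usage sketch follows; the positional argument order and the three returned tensors follow the released `mindspore.ops.UniformCandidateSampler` API and are assumptions here, since this diff only shows the parameter validation.

```python
import numpy as np
from mindspore import Tensor, ops

# One true class id per row (num_true=1); ids lie in [0, range_max).
true_classes = Tensor(np.array([[1], [3], [2], [0], [3]], dtype=np.int64))

# Positional order (num_true, num_sampled, unique, range_max, seed) is assumed
# from the released API; only the parameter names are visible in this diff.
sampler = ops.UniformCandidateSampler(1, 3, False, 4, 1)

# Returns the sampled candidate ids plus expected counts for the true and
# sampled classes (three outputs, matching the removed infer_shape below).
sampled, true_expected, sampled_expected = sampler(true_classes)
print(sampled.shape)  # (3,)
```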
@@ -935,30 +950,19 @@ class UniformCandidateSampler(PrimitiveWithInfer):
         Validator.check_value_type(
             "remove_accidental_hits", remove_accidental_hits, [bool], self.name)
         Validator.check("value of num_true", num_true,
-                        '', 0,
+                        '', 0, Validator.GT, self.name)
         Validator.check("value of num_sampled", num_sampled,
-                        '', 0,
+                        '', 0, Validator.GT, self.name)
         Validator.check("value of range_max", range_max,
-                        '', 0,
+                        '', 0, Validator.GT, self.name)
         self.num_true = num_true
         if unique:
             Validator.check('value of num_sampled', num_sampled,
-                            "value of range_max", range_max,
-        Validator.check("value of seed", seed, '', 0,
+                            "value of range_max", range_max, Validator.LE, self.name)
+        Validator.check("value of seed", seed, '', 0, Validator.GE, self.name)
         self.num_sampled = num_sampled
         self.add_prim_attr("side_effect_hidden", True)

-    def infer_dtype(self, true_classes_type):
-        Validator.check_subclass(
-            "true_classes_type", true_classes_type, mstype.tensor, self.name)
-        Validator.check_tensor_dtype_valid("true_classes_type", true_classes_type,
-                                           (mstype.int32, mstype.int64), self.name)
-        return true_classes_type, mstype.float32, mstype.float32
-
-    def infer_shape(self, true_classes_shape):
-        Validator.check("true_class.shape[1]", true_classes_shape[1],
-                        "num_true", self.num_true, Rel.EQ, self.name)
-        return [self.num_sampled], true_classes_shape, [self.num_sampled]


 class LogUniformCandidateSampler(Primitive):
@@ -996,16 +1000,16 @@ class LogUniformCandidateSampler(Primitive):
         Validator.check_value_type("range_max", range_max, [int], self.name)
         Validator.check_value_type("seed", seed, [int], self.name)
         self.num_true = Validator.check_number(
-            "num_true", num_true, 1,
+            "num_true", num_true, 1, Validator.GE, self.name)
         self.num_sampled = Validator.check_number(
-            "num_sampled", num_sampled, 1,
-        Validator.check_number("range_max", range_max, 1,
+            "num_sampled", num_sampled, 1, Validator.GE, self.name)
+        Validator.check_number("range_max", range_max, 1, Validator.GE, self.name)
         if unique:
             Validator.check("range_max", range_max, "num_sampled",
-                            num_sampled,
+                            num_sampled, Validator.GE, self.name)
         self.range_max = range_max
         self.unique = unique
-        self.seed = Validator.check_number("seed", seed, 0,
+        self.seed = Validator.check_number("seed", seed, 0, Validator.GE, self.name)
         self.add_prim_attr("side_effect_hidden", True)


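The hunk above shows the tightened argument checks for `LogUniformCandidateSampler`. A hedged usage sketch follows; the keyword names match those checks, while the call signature and the three outputs are assumed from the released API rather than shown in this diff.

```python
import numpy as np
from mindspore import Tensor, ops

# Each row holds num_true target class ids drawn from [0, range_max).
true_classes = Tensor(np.array([[1, 7], [0, 4], [3, 3]], dtype=np.int64))

# Keyword names match the Validator checks above; the signature itself and the
# three returned tensors are assumed from the released API, not from this hunk.
sampler = ops.LogUniformCandidateSampler(num_true=2, num_sampled=5, unique=True,
                                         range_max=10, seed=1)

sampled, true_expected, sampled_expected = sampler(true_classes)
print(sampled.shape)  # (5,)
```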
@@ -1014,9 +1018,10 @@ class RandomShuffle(Primitive):
     Randomly shuffles a Tensor along its first dimension.

     Args:
-        seed (int): Random seed. If `seed` or `seed2` is set to non-zero, the random number generator
-            by the given seed. Otherwise, it will be seeded randomly.
-
+        seed (int, optional): Random seed. If `seed` or `seed2` is set to non-zero, the random number generator
+            will be seeded by the given seed. Otherwise, it will be seeded randomly.
+            The `seed` must be non-negative. Default: 0.
+        seed2 (int, optional): A second seed to avoid seed collision. If `seed` is 0, the `seed2` will be used as
             the seed of the random generator. It must be non-negative. Default: 0.

     Inputs:
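The `Args:` text above describes `seed` and `seed2`; a minimal sketch of the documented shuffle call is below. Only the keyword names appear in this hunk, so the input handling is assumed from the released `ops.RandomShuffle` API.

```python
import numpy as np
from mindspore import Tensor, ops
from mindspore import dtype as mstype

# Shuffling permutes entries along the first dimension only.
x = Tensor(np.array([1, 2, 3, 4]), mstype.float32)

# Non-zero seed/seed2 give a reproducible permutation; both must be >= 0.
shuffle = ops.RandomShuffle(seed=1, seed2=1)
output = shuffle(x)
print(output.shape)  # (4,)
```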
@@ -1046,3 +1051,80 @@ class RandomShuffle(Primitive):
         self.add_prim_attr("side_effect_hidden", True)
         Validator.check_non_negative_int(seed, "seed", self.name)
         Validator.check_non_negative_int(seed2, "seed2", self.name)
+
+
+class Uniform(Primitive):
+    r"""
+    Generates random numbers according to the Uniform random number distribution.
+
+    Args:
+        minval(float):must be non-negative. Default: 0.0.
+        maxval(float):must be non-negative. Default: 1.0.
+
+    Inputs:
+        - **x** (Tensor) - The x of random tensor to be generated.
+          Only constant value is allowed, and the date type is float16, float32, float64.
+
+    Raises:
+        TypeError: If `minval` or `maxval` is not a float.
+        TypeError: If `x`is not a Tensor.
+        ValueError: If `minval` is larger than `maxval`.
+
+    Outputs:
+        - **output** (Tensor) - With the same type and shape as the 'x'.
+
+    Supported Platforms:
+        ``GPU`` ``CPU``
+
+    Examples:
+        >>> x = Tensor(np.random.randn(3,4), mstype.float64)
+        >>> uniform = Uniform(minval=1.0, maxval=2.0)
+        >>> y = uniform(x)
+        >>> print(y.shape)
+        (3, 4)
+    """
+
+    @prim_attr_register
+    def __init__(self, minval=0., maxval=1., seed=0, offset=0):
+        """Initialize Uniform"""
+        self.init_prim_io_names(inputs=['x'], outputs=['y'])
+        self.add_prim_attr("from", minval)
+        self.add_prim_attr("to", maxval)
+        Validator.check_value_type('seed', seed, [int], self.name)
+        Validator.check_value_type('offset', offset, [int], self.name)
+        Validator.check('minval', minval, 'maxval', maxval, Validator.LE, self.name)
+        Validator.check_non_negative_float(minval, "minval", self.name)
+        Validator.check_non_negative_float(maxval, "maxval", self.name)
+        self.add_prim_attr("side_effect_hidden", True)
+
+
+class RandpermV2(Primitive):
+    r"""
+    Generates random permutation of integers from 0 to n-1 without repeating.
+
+    Refer to :func:`mindspore.ops.randperm` for more detail.
+
+    Supported Platforms:
+        ``CPU``
+
+    Examples:
+        >>> n = Tensor([4], mstype.int64)
+        >>> seed = 0
+        >>> offset = 0
+        >>> randperm = ops.RandpermV2(layout=0, dtype=mstype.int64)
+        >>> output = randperm(n, seed, offset)
+        >>> print(output)
+        [1 0 2 3]
+    """
+
+    @prim_attr_register
+    def __init__(self, layout=0, dtype=mstype.int64):
+        """Initialize RandpermV2"""
+        self.dtype = dtype
+        self.layout = layout
+        Validator.check_value_type('layout', layout, [int], self.name)
+        Validator.check_non_negative_int(layout, 'layout', self.name)
+        valid_values = (mstype.int32, mstype.int64, mstype.int16, mstype.int8, mstype.uint8, mstype.float64
+                        , mstype.float32, mstype.float16)
+        Validator.check_type_name("dtype", dtype, valid_values, self.name)
+        self.add_prim_attr("side_effect_hidden", True)