mindspore 1.10.0-cp38-cp38-win_amd64.whl → 2.0.0rc1-cp38-cp38-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/ConcurrencyCheck.dll +0 -0
- mindspore/CppBuildInsights.dll +0 -0
- mindspore/CppCoreCheck.dll +0 -0
- mindspore/EnumIndex.dll +0 -0
- mindspore/EspXEngine.dll +0 -0
- mindspore/HResultCheck.dll +0 -0
- mindspore/KernelTraceControl.dll +0 -0
- mindspore/LocalESPC.dll +0 -0
- mindspore/Microsoft.Diagnostics.Tracing.EventSource.dll +0 -0
- mindspore/Microsoft.VisualStudio.RemoteControl.dll +0 -0
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Microsoft.VisualStudio.Utilities.Internal.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/System.Runtime.CompilerServices.Unsafe.dll +0 -0
- mindspore/VariantClear.dll +0 -0
- mindspore/__init__.py +9 -4
- mindspore/_c_dataengine.cp38-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp38-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp38-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +102 -0
- mindspore/_checkparam.py +1066 -1001
- mindspore/_extends/builtin_operations.py +32 -4
- mindspore/_extends/graph_kernel/model/graph_split.py +66 -222
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +12 -9
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +119 -26
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -50
- mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -6
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -25
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -27
- mindspore/_extends/parse/__init__.py +5 -3
- mindspore/_extends/parse/namespace.py +17 -2
- mindspore/_extends/parse/parser.py +193 -34
- mindspore/_extends/parse/resources.py +7 -8
- mindspore/_extends/parse/standard_method.py +1780 -435
- mindspore/_extends/parse/trope.py +3 -1
- mindspore/amp.py +53 -58
- mindspore/atlprov.dll +0 -0
- mindspore/boost/adasum.py +3 -2
- mindspore/boost/boost.py +2 -2
- mindspore/boost/boost_cell_wrapper.py +46 -26
- mindspore/boost/dim_reduce.py +6 -5
- mindspore/boost/grad_accumulation.py +2 -1
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/cfgpersist.dll +0 -0
- mindspore/clang_rt.asan_dbg_dynamic-x86_64.dll +0 -0
- mindspore/clang_rt.asan_dynamic-x86_64.dll +0 -0
- mindspore/common/__init__.py +11 -10
- mindspore/common/_decorator.py +2 -0
- mindspore/common/_register_for_adapter.py +55 -0
- mindspore/common/_stub_tensor.py +201 -0
- mindspore/common/_utils.py +57 -0
- mindspore/common/api.py +582 -297
- mindspore/common/dtype.py +66 -18
- mindspore/common/dump.py +2 -2
- mindspore/common/initializer.py +38 -1
- mindspore/common/jit_config.py +25 -13
- mindspore/common/mutable.py +53 -24
- mindspore/common/parameter.py +60 -37
- mindspore/common/seed.py +8 -24
- mindspore/common/sparse_tensor.py +927 -0
- mindspore/common/tensor.py +1627 -3900
- mindspore/communication/__init__.py +10 -5
- mindspore/communication/_comm_helper.py +78 -214
- mindspore/communication/_hccl_management.py +2 -1
- mindspore/communication/management.py +136 -47
- mindspore/config/op_info.config +501 -1008
- mindspore/context.py +291 -56
- mindspore/d3dcompiler_47.dll +0 -0
- mindspore/dataset/__init__.py +12 -8
- mindspore/dataset/audio/__init__.py +9 -9
- mindspore/dataset/audio/transforms.py +1090 -228
- mindspore/dataset/audio/utils.py +87 -39
- mindspore/dataset/audio/validators.py +223 -1
- mindspore/dataset/callback/ds_callback.py +17 -15
- mindspore/dataset/core/config.py +246 -17
- mindspore/dataset/core/py_util_helpers.py +4 -3
- mindspore/dataset/core/validator_helpers.py +10 -10
- mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
- mindspore/dataset/debug/debug_hook.py +65 -0
- mindspore/dataset/debug/pre_defined_hook.py +67 -0
- mindspore/dataset/engine/__init__.py +7 -3
- mindspore/dataset/engine/cache_client.py +9 -9
- mindspore/dataset/engine/datasets.py +648 -477
- mindspore/dataset/engine/datasets_audio.py +165 -167
- mindspore/dataset/engine/datasets_standard_format.py +93 -67
- mindspore/dataset/engine/datasets_text.py +492 -342
- mindspore/dataset/engine/datasets_user_defined.py +85 -50
- mindspore/dataset/engine/datasets_vision.py +1224 -699
- mindspore/dataset/engine/graphdata.py +134 -69
- mindspore/dataset/engine/iterators.py +50 -9
- mindspore/dataset/engine/offload.py +52 -31
- mindspore/dataset/engine/samplers.py +27 -24
- mindspore/dataset/engine/serializer_deserializer.py +14 -15
- mindspore/dataset/engine/validators.py +213 -52
- mindspore/dataset/text/__init__.py +10 -8
- mindspore/dataset/text/transforms.py +152 -57
- mindspore/dataset/text/utils.py +98 -49
- mindspore/dataset/text/validators.py +25 -0
- mindspore/dataset/transforms/__init__.py +4 -2
- mindspore/dataset/transforms/c_transforms.py +11 -13
- mindspore/dataset/transforms/py_transforms.py +2 -2
- mindspore/dataset/transforms/py_transforms_util.py +10 -0
- mindspore/dataset/transforms/transforms.py +13 -15
- mindspore/dataset/transforms/validators.py +7 -7
- mindspore/dataset/utils/__init__.py +2 -1
- mindspore/dataset/utils/browse_dataset.py +13 -13
- mindspore/dataset/utils/line_reader.py +121 -0
- mindspore/dataset/vision/__init__.py +8 -7
- mindspore/dataset/vision/c_transforms.py +125 -126
- mindspore/dataset/vision/py_transforms.py +37 -37
- mindspore/dataset/vision/py_transforms_util.py +23 -20
- mindspore/dataset/vision/transforms.py +316 -315
- mindspore/dataset/vision/utils.py +313 -17
- mindspore/dataset/vision/validators.py +6 -6
- mindspore/default_config.py +0 -1
- mindspore/dpcmi.dll +0 -0
- mindspore/{compression → experimental}/__init__.py +6 -5
- mindspore/experimental/map_parameter.py +275 -0
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/callback/callback.h +9 -13
- mindspore/include/api/callback/ckpt_saver.h +2 -2
- mindspore/include/api/callback/loss_monitor.h +2 -2
- mindspore/include/api/callback/lr_scheduler.h +5 -5
- mindspore/include/api/callback/time_monitor.h +2 -2
- mindspore/include/api/callback/train_accuracy.h +4 -6
- mindspore/include/api/cfg.h +19 -6
- mindspore/include/api/context.h +70 -9
- mindspore/include/api/delegate.h +8 -1
- mindspore/include/api/dual_abi_helper.h +8 -24
- mindspore/include/api/metrics/accuracy.h +2 -2
- mindspore/include/api/metrics/metrics.h +4 -3
- mindspore/include/api/model.h +9 -4
- mindspore/include/api/model_group.h +68 -0
- mindspore/include/api/model_parallel_runner.h +17 -17
- mindspore/include/api/net.h +12 -11
- mindspore/include/api/serialization.h +20 -4
- mindspore/include/api/status.h +7 -1
- mindspore/include/api/types.h +25 -21
- mindspore/include/api/visible.h +4 -0
- mindspore/include/c_api/model_c.h +5 -0
- mindspore/include/c_api/status_c.h +1 -1
- mindspore/include/dataset/config.h +1 -1
- mindspore/include/dataset/constants.h +14 -0
- mindspore/include/dataset/text.h +59 -0
- mindspore/include/dataset/vision.h +56 -117
- mindspore/include/dataset/vision_lite.h +102 -0
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +28 -28
- mindspore/mindrecord/common/exceptions.py +2 -4
- mindspore/mindrecord/filereader.py +19 -1
- mindspore/mindrecord/filewriter.py +250 -88
- mindspore/mindrecord/mindpage.py +13 -13
- mindspore/mindrecord/shardheader.py +15 -15
- mindspore/mindrecord/shardreader.py +9 -0
- mindspore/mindrecord/shardwriter.py +29 -29
- mindspore/mindrecord/tools/cifar100_to_mr.py +9 -9
- mindspore/mindrecord/tools/cifar10_to_mr.py +9 -9
- mindspore/mindrecord/tools/csv_to_mr.py +4 -4
- mindspore/mindrecord/tools/imagenet_to_mr.py +70 -65
- mindspore/mindrecord/tools/mnist_to_mr.py +41 -41
- mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
- mindspore/{libmindspore_backend.dll → mindspore_backend.dll} +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/__init__.py +1 -5
- mindspore/nn/cell.py +297 -234
- mindspore/nn/dynamic_lr.py +1 -1
- mindspore/nn/grad/cell_grad.py +17 -42
- mindspore/nn/layer/__init__.py +7 -4
- mindspore/nn/layer/activation.py +131 -88
- mindspore/nn/layer/basic.py +313 -613
- mindspore/nn/layer/channel_shuffle.py +103 -0
- mindspore/nn/layer/combined.py +1 -1
- mindspore/nn/layer/container.py +52 -6
- mindspore/nn/layer/conv.py +112 -43
- mindspore/nn/layer/dense.py +10 -9
- mindspore/nn/layer/embedding.py +36 -34
- mindspore/nn/layer/image.py +123 -27
- mindspore/nn/layer/math.py +108 -107
- mindspore/nn/layer/normalization.py +212 -366
- mindspore/nn/layer/padding.py +370 -42
- mindspore/nn/layer/pooling.py +1443 -219
- mindspore/nn/layer/rnn_cells.py +11 -16
- mindspore/nn/layer/rnns.py +38 -39
- mindspore/nn/layer/thor_layer.py +24 -25
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +701 -0
- mindspore/nn/learning_rate_schedule.py +8 -8
- mindspore/nn/loss/__init__.py +9 -6
- mindspore/nn/loss/loss.py +678 -142
- mindspore/nn/metrics.py +53 -0
- mindspore/nn/optim/_dist_optimizer_registry.py +2 -2
- mindspore/nn/optim/ada_grad.py +8 -8
- mindspore/nn/optim/adadelta.py +2 -3
- mindspore/nn/optim/adafactor.py +18 -14
- mindspore/nn/optim/adam.py +429 -87
- mindspore/nn/optim/adamax.py +5 -6
- mindspore/nn/optim/adasum.py +10 -8
- mindspore/nn/optim/asgd.py +7 -7
- mindspore/nn/optim/ftrl.py +81 -11
- mindspore/nn/optim/lamb.py +7 -8
- mindspore/nn/optim/lars.py +4 -4
- mindspore/nn/optim/lazyadam.py +82 -7
- mindspore/nn/optim/momentum.py +8 -7
- mindspore/nn/optim/optimizer.py +19 -10
- mindspore/nn/optim/proximal_ada_grad.py +6 -5
- mindspore/nn/optim/rmsprop.py +3 -3
- mindspore/nn/optim/rprop.py +20 -16
- mindspore/nn/optim/sgd.py +21 -15
- mindspore/nn/optim/thor.py +23 -21
- mindspore/nn/probability/__init__.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +7 -6
- mindspore/nn/probability/bijector/invert.py +4 -2
- mindspore/nn/probability/bijector/softplus.py +2 -2
- mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
- mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
- mindspore/nn/probability/distribution/__init__.py +6 -0
- mindspore/nn/probability/distribution/_utils/custom_ops.py +3 -2
- mindspore/nn/probability/distribution/_utils/utils.py +11 -17
- mindspore/nn/probability/distribution/bernoulli.py +6 -6
- mindspore/nn/probability/distribution/beta.py +1 -1
- mindspore/nn/probability/distribution/categorical.py +9 -9
- mindspore/nn/probability/distribution/cauchy.py +8 -8
- mindspore/nn/probability/distribution/distribution.py +12 -6
- mindspore/nn/probability/distribution/exponential.py +5 -5
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +6 -5
- mindspore/nn/probability/distribution/gumbel.py +5 -5
- mindspore/nn/probability/distribution/half_normal.py +133 -0
- mindspore/nn/probability/distribution/laplace.py +128 -0
- mindspore/nn/probability/distribution/log_normal.py +0 -1
- mindspore/nn/probability/distribution/logistic.py +4 -5
- mindspore/nn/probability/distribution/normal.py +11 -15
- mindspore/nn/probability/distribution/poisson.py +6 -2
- mindspore/nn/probability/distribution/student_t.py +150 -0
- mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
- mindspore/nn/probability/distribution/uniform.py +5 -5
- mindspore/nn/reinforcement/_tensors_queue.py +3 -3
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +8 -1
- mindspore/nn/wrap/cell_wrapper.py +55 -27
- mindspore/nn/wrap/grad_reducer.py +20 -11
- mindspore/nn/wrap/loss_scale.py +47 -30
- mindspore/numpy/array_creations.py +33 -22
- mindspore/numpy/array_ops.py +46 -42
- mindspore/numpy/logic_ops.py +6 -27
- mindspore/numpy/math_ops.py +26 -19
- mindspore/numpy/utils.py +1 -8
- mindspore/numpy/utils_const.py +112 -62
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +6 -3
- mindspore/ops/_constants.py +0 -6
- mindspore/ops/_grad/__init__.py +2 -1
- mindspore/ops/_grad/grad_array_ops.py +209 -152
- mindspore/ops/_grad/grad_base.py +55 -17
- mindspore/ops/_grad/grad_clip_ops.py +11 -3
- mindspore/ops/_grad/grad_comm_ops.py +58 -47
- mindspore/ops/_grad/grad_implementations.py +21 -61
- mindspore/ops/_grad/grad_inner_ops.py +48 -6
- mindspore/ops/_grad/grad_math_ops.py +306 -161
- mindspore/ops/_grad/grad_nn_ops.py +192 -181
- mindspore/ops/_grad/grad_other_ops.py +1 -1
- mindspore/ops/_grad/grad_quant_ops.py +5 -5
- mindspore/ops/_grad/grad_sequence_ops.py +296 -0
- mindspore/ops/_grad/grad_sparse.py +15 -9
- mindspore/ops/_grad_experimental/__init__.py +1 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +441 -55
- mindspore/ops/_grad_experimental/grad_image_ops.py +25 -7
- mindspore/ops/_grad_experimental/grad_inner_ops.py +3 -44
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +16 -21
- mindspore/ops/_grad_experimental/grad_math_ops.py +979 -49
- mindspore/ops/_grad_experimental/grad_nn_ops.py +78 -8
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +197 -13
- mindspore/ops/_op_impl/__init__.py +3 -3
- mindspore/ops/_op_impl/_custom_op/__init__.py +0 -1
- mindspore/ops/_op_impl/_custom_op/_basic.py +0 -1
- mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +4 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +5 -5
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
- mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/correction_mul.py +3 -3
- mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +4 -8
- mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_dds_grad_impl.py +0 -1
- mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -1
- mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +238 -3
- mindspore/ops/_op_impl/aicpu/abs.py +36 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d.py +34 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d.py +39 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d.py +42 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d_grad.py +152 -0
- mindspore/ops/_op_impl/aicpu/add.py +43 -0
- mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
- mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
- mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/arg_max.py +75 -0
- mindspore/ops/_op_impl/aicpu/arg_min.py +75 -0
- mindspore/ops/_op_impl/aicpu/argmin_with_value.py +43 -0
- mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -0
- mindspore/ops/_op_impl/aicpu/batch_norm_grad_grad.py +49 -0
- mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
- mindspore/ops/_op_impl/aicpu/bessel_i0.py +31 -0
- mindspore/ops/_op_impl/aicpu/bias_add.py +44 -0
- mindspore/ops/_op_impl/aicpu/bias_add_grad.py +43 -0
- mindspore/ops/_op_impl/aicpu/bincount.py +33 -0
- mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/cauchy.py} +17 -10
- mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
- mindspore/ops/_op_impl/aicpu/cholesky.py +1 -1
- mindspore/ops/_op_impl/{cpu/bias_add.py → aicpu/choleskygrad.py} +9 -7
- mindspore/ops/_op_impl/aicpu/combined_non_max_suppression.py +42 -0
- mindspore/ops/_op_impl/aicpu/concat_offset.py +42 -0
- mindspore/ops/_op_impl/aicpu/concat_offset_v1.py +31 -0
- mindspore/ops/_op_impl/aicpu/conj.py +11 -0
- mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_image.py +38 -0
- mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +36 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +2 -2
- mindspore/ops/_op_impl/aicpu/dense_to_sparse_set_operation.py +48 -0
- mindspore/ops/_op_impl/aicpu/diag.py +36 -0
- mindspore/ops/_op_impl/aicpu/diag_part.py +36 -0
- mindspore/ops/_op_impl/aicpu/diagonal.py +35 -0
- mindspore/ops/_op_impl/{cpu/bias_add_grad.py → aicpu/digamma.py} +9 -7
- mindspore/ops/_op_impl/aicpu/eig.py +35 -0
- mindspore/ops/_op_impl/aicpu/fft_with_size.py +41 -0
- mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
- mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
- mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
- mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
- mindspore/ops/_op_impl/aicpu/glu.py +33 -0
- mindspore/ops/_op_impl/aicpu/glu_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/greater.py +41 -0
- mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
- mindspore/ops/_op_impl/{tbe/scatter_add_ds.py → aicpu/inplace_index_add.py} +17 -21
- mindspore/ops/_op_impl/aicpu/instance_norm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/instance_norm_v2_grad.py +44 -0
- mindspore/ops/_op_impl/aicpu/layer_norm_grad_grad.py +47 -0
- mindspore/ops/_op_impl/aicpu/less.py +41 -0
- mindspore/ops/_op_impl/aicpu/less_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/lgamma.py +32 -0
- mindspore/ops/_op_impl/aicpu/log_normal_reverse.py +33 -0
- mindspore/ops/_op_impl/aicpu/logit.py +33 -0
- mindspore/ops/_op_impl/aicpu/logit_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/masked_fill.py +42 -0
- mindspore/ops/_op_impl/aicpu/masked_scatter.py +39 -0
- mindspore/ops/_op_impl/aicpu/matmul.py +39 -0
- mindspore/ops/_op_impl/aicpu/matrix_logarithm.py +31 -0
- mindspore/ops/_op_impl/aicpu/matrix_power.py +32 -0
- mindspore/ops/_op_impl/aicpu/matrix_solve_ls.py +36 -0
- mindspore/ops/_op_impl/aicpu/matrix_triangular_solve.py +36 -0
- mindspore/ops/_op_impl/aicpu/mirror_pad.py +2 -0
- mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mul.py +3 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
- mindspore/ops/_op_impl/aicpu/multinomial_with_replacement.py +35 -0
- mindspore/ops/_op_impl/aicpu/nan_to_num.py +34 -0
- mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
- mindspore/ops/_op_impl/aicpu/polar.py +32 -0
- mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
- mindspore/ops/_op_impl/aicpu/qr.py +36 -0
- mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
- mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_tensor.py +74 -0
- mindspore/ops/_op_impl/aicpu/random_shuffle.py +3 -0
- mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/range.py +36 -0
- mindspore/ops/_op_impl/aicpu/reciprocal.py +34 -0
- mindspore/ops/_op_impl/aicpu/reciprocal_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/reduce_sum.py +57 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
- mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
- mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
- mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
- mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
- mindspore/ops/_op_impl/aicpu/search_sorted.py +12 -6
- mindspore/ops/_op_impl/aicpu/self_adjoint_eig.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
- mindspore/ops/_op_impl/aicpu/slice_grad.py +76 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/sort.py +39 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows.py +63 -0
- mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows_grad.py +45 -0
- mindspore/ops/_op_impl/aicpu/sparse_matrix_mat_mul.py +56 -0
- mindspore/ops/_op_impl/{tbe/slice_ds.py → aicpu/sparse_segment_sum.py} +16 -24
- mindspore/ops/_op_impl/aicpu/sparse_segment_sum_with_num_segments.py +68 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice.py +63 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +61 -0
- mindspore/ops/_op_impl/aicpu/squared_difference.py +2 -0
- mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +93 -0
- mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +66 -0
- mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
- mindspore/ops/_op_impl/{tbe/gather_v2.py → aicpu/tile.py} +24 -24
- mindspore/ops/_op_impl/aicpu/tridiagonal_solve.py +35 -0
- mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/triu_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
- mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
- mindspore/ops/_op_impl/cpu/__init__.py +1 -2
- mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
- mindspore/ops/_op_impl/cpu/maximum_grad.py +2 -0
- mindspore/{compression/common/__init__.py → ops/_op_impl/cpu/pyexecute.py} +13 -8
- mindspore/ops/_op_impl/cpu/reduce_sum.py +8 -0
- mindspore/ops/_op_impl/cpu/sparse_slice.py +62 -0
- mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +60 -0
- mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
- mindspore/ops/_op_impl/tbe/__init__.py +27 -608
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +42 -0
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +44 -0
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -1
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +41 -0
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +1 -0
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +2 -0
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +40 -0
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
- mindspore/ops/_op_impl/tbe/cast.py +0 -2
- mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -2
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -2
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +1 -0
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +1 -1
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
- mindspore/ops/_op_impl/tbe/greater.py +2 -0
- mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -1
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -6
- mindspore/ops/_op_impl/tbe/{greater_ds.py → reduce_all_ds.py} +13 -16
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +39 -0
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +44 -0
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +44 -0
- mindspore/ops/_op_impl/tbe/scatter_add.py +2 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +2 -2
- mindspore/ops/_op_impl/tbe/slice.py +26 -15
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +1 -0
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +1 -1
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +2 -0
- mindspore/ops/_primitive_cache.py +3 -2
- mindspore/ops/_register_for_op.py +11 -0
- mindspore/ops/_utils/__init__.py +1 -1
- mindspore/ops/_utils/utils.py +20 -41
- mindspore/ops/_vmap/__init__.py +2 -2
- mindspore/ops/_vmap/vmap_array_ops.py +170 -78
- mindspore/ops/_vmap/vmap_base.py +24 -10
- mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
- mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -9
- mindspore/ops/_vmap/vmap_image_ops.py +52 -0
- mindspore/ops/_vmap/vmap_math_ops.py +77 -6
- mindspore/ops/_vmap/vmap_nn_ops.py +78 -29
- mindspore/ops/_vmap/vmap_other_ops.py +3 -1
- mindspore/ops/_vmap/vmap_random_ops.py +55 -3
- mindspore/ops/_vmap/vmap_sparse_ops.py +1 -0
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ApproximateEqual_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Argmax_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/Argmin_bprop.mindir +14 -13
- mindspore/ops/bprop_mindir/AssignSub_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/Assign_bprop.mindir +16 -16
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchToSpaceND_bprop.mindir +28 -0
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +306 -0
- mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +12 -8
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Concat_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
- mindspore/ops/bprop_mindir/DType_bprop.mindir +12 -12
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/Depend_bprop.mindir +12 -13
- mindspore/ops/bprop_mindir/DepthToSpace_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
- mindspore/ops/bprop_mindir/DiagPart_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -24
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -14
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicShape_bprop.mindir +12 -12
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Equal_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +54 -0
- mindspore/ops/bprop_mindir/FloorDiv_bprop.mindir +18 -15
- mindspore/ops/bprop_mindir/GatherD_bprop.mindir +26 -0
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +57 -0
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/GreaterEqual_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/Greater_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/IOU_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/IsFinite_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/IsInf_bprop.mindir +13 -10
- mindspore/ops/bprop_mindir/IsNan_bprop.mindir +14 -11
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LessEqual_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Less_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/LinSpace_bprop.mindir +22 -19
- mindspore/ops/bprop_mindir/Load_bprop.mindir +12 -13
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/LogicalAnd_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/LogicalNot_bprop.mindir +14 -13
- mindspore/ops/bprop_mindir/MaskedSelect_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Maximum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Minimum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NonZero_bprop.mindir +14 -0
- mindspore/ops/bprop_mindir/NotEqual_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +25 -23
- mindspore/ops/bprop_mindir/OnesLike_bprop.mindir +13 -13
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
- mindspore/ops/bprop_mindir/Range_bprop.mindir +21 -19
- mindspore/ops/bprop_mindir/Rank_bprop.mindir +11 -11
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReduceAll_bprop.mindir +18 -17
- mindspore/ops/bprop_mindir/ReduceAny_bprop.mindir +18 -17
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +19 -23
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +60 -0
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +89 -0
- mindspore/ops/bprop_mindir/ReverseSequence_bprop.mindir +52 -0
- mindspore/ops/bprop_mindir/ReverseV2_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/Round_bprop.mindir +14 -13
- mindspore/ops/bprop_mindir/ScatterMax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterMin_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterNdUpdate_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/ScatterNd_bprop.mindir +24 -0
- mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/ScatterUpdate_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/Select_bprop.mindir +30 -34
- mindspore/ops/bprop_mindir/Shape_bprop.mindir +12 -12
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Sign_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/Slice_bprop.mindir +26 -0
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/Sort_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SpaceToBatchND_bprop.mindir +28 -0
- mindspore/ops/bprop_mindir/SpaceToDepth_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Split_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +54 -0
- mindspore/ops/bprop_mindir/StridedSliceGrad_bprop.mindir +95 -0
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +98 -0
- mindspore/ops/bprop_mindir/Switch_bprop.mindir +28 -32
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/TensorScatterAdd_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/TensorScatterUpdate_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/TensorShape_bprop.mindir +14 -0
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TransShape_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/TruncateDiv_bprop.mindir +18 -15
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +11 -13
- mindspore/ops/bprop_mindir/Unique_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Unstack_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
- mindspore/ops/bprop_mindir/ZerosLike_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/__init__.py +1 -4
- mindspore/ops/bprop_mindir/generate_mindir.py +32 -20
- mindspore/ops/composite/__init__.py +12 -13
- mindspore/ops/composite/base.py +261 -254
- mindspore/ops/composite/env_ops.py +41 -0
- mindspore/ops/composite/math_ops.py +197 -156
- mindspore/ops/composite/multitype_ops/_compile_utils.py +428 -176
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +188 -87
- mindspore/ops/composite/multitype_ops/add_impl.py +23 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +3 -3
- mindspore/ops/composite/multitype_ops/equal_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +52 -5
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +15 -3
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +33 -2
- mindspore/ops/composite/multitype_ops/less_impl.py +33 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +21 -7
- mindspore/ops/composite/multitype_ops/not_in_impl.py +15 -3
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
- mindspore/ops/composite/multitype_ops/pow_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +62 -70
- mindspore/ops/composite/multitype_ops/sub_impl.py +3 -3
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +41 -4
- mindspore/ops/function/__init__.py +323 -8
- mindspore/ops/function/array_func.py +3511 -780
- mindspore/ops/function/clip_func.py +329 -0
- mindspore/ops/function/debug_func.py +6 -6
- mindspore/ops/function/grad/__init__.py +5 -1
- mindspore/ops/function/grad/grad_func.py +736 -65
- mindspore/ops/function/image_func.py +270 -0
- mindspore/ops/function/linalg_func.py +268 -8
- mindspore/ops/function/math_func.py +8032 -3164
- mindspore/ops/function/nn_func.py +5619 -1855
- mindspore/ops/function/other_func.py +115 -0
- mindspore/ops/function/parameter_func.py +11 -10
- mindspore/ops/function/random_func.py +939 -77
- mindspore/ops/function/sparse_func.py +249 -84
- mindspore/ops/function/sparse_unary_func.py +2303 -0
- mindspore/ops/function/spectral_func.py +146 -0
- mindspore/ops/function/vmap_func.py +114 -0
- mindspore/ops/functional.py +182 -254
- mindspore/ops/op_info_register.py +79 -34
- mindspore/ops/operations/__init__.py +210 -118
- mindspore/ops/operations/_csr_ops.py +7 -7
- mindspore/ops/operations/_embedding_cache_ops.py +25 -15
- mindspore/ops/operations/_grad_ops.py +447 -322
- mindspore/ops/operations/_inner_ops.py +547 -176
- mindspore/ops/operations/_map_tensor_ops.py +112 -0
- mindspore/ops/operations/_ms_kernel.py +29 -27
- mindspore/ops/operations/_ocr_ops.py +11 -11
- mindspore/ops/operations/_opaque_predicate_registry.py +41 -0
- mindspore/ops/operations/_quant_ops.py +186 -101
- mindspore/ops/operations/_rl_inner_ops.py +122 -61
- mindspore/ops/operations/_scalar_ops.py +466 -0
- mindspore/ops/operations/_sequence_ops.py +1047 -0
- mindspore/ops/operations/_tensor_array.py +10 -11
- mindspore/ops/operations/_thor_ops.py +4 -4
- mindspore/ops/operations/array_ops.py +1428 -1226
- mindspore/ops/operations/comm_ops.py +180 -117
- mindspore/ops/operations/control_ops.py +4 -2
- mindspore/ops/operations/custom_ops.py +185 -98
- mindspore/ops/operations/debug_ops.py +92 -54
- mindspore/ops/operations/image_ops.py +406 -211
- mindspore/ops/operations/inner_ops.py +42 -53
- mindspore/ops/operations/linalg_ops.py +32 -29
- mindspore/ops/operations/math_ops.py +2076 -897
- mindspore/ops/operations/nn_ops.py +1282 -1252
- mindspore/ops/operations/other_ops.py +124 -278
- mindspore/ops/operations/random_ops.py +345 -178
- mindspore/ops/operations/rl_ops.py +8 -9
- mindspore/ops/operations/sparse_ops.py +502 -157
- mindspore/ops/operations/spectral_ops.py +107 -0
- mindspore/ops/primitive.py +192 -15
- mindspore/ops/vm_impl_registry.py +23 -2
- mindspore/parallel/__init__.py +6 -1
- mindspore/parallel/_auto_parallel_context.py +199 -92
- mindspore/parallel/_cell_wrapper.py +4 -2
- mindspore/parallel/_cost_model_context.py +3 -0
- mindspore/parallel/_dp_allreduce_fusion.py +2 -1
- mindspore/parallel/_offload_context.py +185 -0
- mindspore/parallel/_parallel_serialization.py +167 -28
- mindspore/parallel/_ps_context.py +9 -5
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +9 -1
- mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
- mindspore/{nn/transformer → parallel/_transformer}/layers.py +59 -37
- mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
- mindspore/{nn/transformer → parallel/_transformer}/moe.py +160 -35
- mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
- mindspore/{nn/transformer → parallel/_transformer}/transformer.py +235 -196
- mindspore/parallel/_utils.py +47 -7
- mindspore/parallel/algo_parameter_config.py +5 -1
- mindspore/parallel/checkpoint_transform.py +329 -0
- mindspore/parallel/shard.py +229 -0
- mindspore/perf_msvcbuildinsights.dll +0 -0
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/common/util.py +4 -3
- mindspore/profiler/common/validator/validate_path.py +2 -2
- mindspore/profiler/envprofiling.py +249 -0
- mindspore/profiler/parser/aicpu_data_parser.py +38 -39
- mindspore/profiler/parser/ascend_timeline_generator.py +497 -0
- mindspore/profiler/parser/base_timeline_generator.py +471 -0
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +684 -0
- mindspore/profiler/parser/framework_parser.py +42 -16
- mindspore/profiler/parser/hccl_parser.py +158 -158
- mindspore/profiler/parser/hwts_log_parser.py +7 -6
- mindspore/profiler/parser/integrator.py +18 -1579
- mindspore/profiler/parser/minddata_analyzer.py +8 -8
- mindspore/profiler/parser/msadvisor_analyzer.py +14 -27
- mindspore/profiler/parser/msadvisor_parser.py +2 -4
- mindspore/profiler/parser/optime_parser.py +17 -18
- mindspore/profiler/parser/profiler_info.py +108 -0
- mindspore/profiler/parser/step_trace_parser.py +1 -1
- mindspore/profiler/profiling.py +396 -194
- mindspore/rewrite/__init__.py +6 -2
- mindspore/rewrite/api/node.py +51 -110
- mindspore/rewrite/api/node_type.py +10 -6
- mindspore/rewrite/api/pattern_engine.py +51 -7
- mindspore/rewrite/api/scoped_value.py +64 -53
- mindspore/rewrite/api/symbol_tree.py +108 -61
- mindspore/rewrite/api/tree_node_helper.py +2 -3
- mindspore/{compression/quant/__init__.py → rewrite/ast_creator_register.py} +20 -11
- mindspore/rewrite/ast_helpers/__init__.py +6 -3
- mindspore/rewrite/ast_helpers/ast_creator.py +115 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +99 -1
- mindspore/rewrite/ast_helpers/ast_modifier.py +17 -4
- mindspore/rewrite/ast_helpers/ast_replacer.py +1 -1
- mindspore/rewrite/ast_transformers/__init__.py +0 -1
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +46 -5
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +6 -3
- mindspore/rewrite/common/__init__.py +2 -0
- mindspore/rewrite/common/event.py +1 -1
- mindspore/rewrite/common/observable.py +1 -1
- mindspore/rewrite/common/observer.py +1 -1
- mindspore/rewrite/common/rewrite_elog.py +35 -0
- mindspore/rewrite/namer.py +2 -2
- mindspore/rewrite/namespace.py +14 -4
- mindspore/rewrite/node.py +161 -13
- mindspore/rewrite/parser.py +0 -1
- mindspore/rewrite/parser_register.py +0 -1
- mindspore/rewrite/parsers/arguments_parser.py +3 -2
- mindspore/rewrite/parsers/assign_parser.py +267 -67
- mindspore/rewrite/parsers/attribute_parser.py +56 -0
- mindspore/rewrite/parsers/class_def_parser.py +191 -108
- mindspore/rewrite/parsers/constant_parser.py +101 -0
- mindspore/rewrite/parsers/container_parser.py +88 -0
- mindspore/rewrite/parsers/for_parser.py +28 -15
- mindspore/rewrite/parsers/function_def_parser.py +21 -5
- mindspore/rewrite/parsers/if_parser.py +11 -28
- mindspore/rewrite/parsers/module_parser.py +9 -6
- mindspore/rewrite/parsers/return_parser.py +3 -2
- mindspore/rewrite/sparsify/__init__.py +0 -0
- mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
- mindspore/rewrite/sparsify/sparsify.py +109 -0
- mindspore/rewrite/sparsify/utils.py +173 -0
- mindspore/rewrite/symbol_tree.py +322 -109
- mindspore/rewrite/symbol_tree_builder.py +45 -8
- mindspore/rewrite/symbol_tree_dumper.py +0 -1
- mindspore/rewrite/topological_manager.py +1 -2
- mindspore/run_check/_check_version.py +209 -112
- mindspore/run_check/run_check.py +2 -1
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +6 -4
- mindspore/train/_utils.py +28 -5
- mindspore/train/amp.py +321 -50
- mindspore/train/callback/__init__.py +3 -1
- mindspore/train/callback/_backup_and_restore.py +120 -0
- mindspore/train/callback/_callback.py +8 -8
- mindspore/train/callback/_checkpoint.py +12 -9
- mindspore/train/callback/_early_stop.py +13 -7
- mindspore/train/callback/_history.py +8 -8
- mindspore/train/callback/_lambda_callback.py +6 -6
- mindspore/train/callback/_landscape.py +36 -38
- mindspore/train/callback/_loss_monitor.py +12 -6
- mindspore/train/callback/_lr_scheduler_callback.py +2 -4
- mindspore/train/callback/_on_request_exit.py +212 -0
- mindspore/train/callback/_reduce_lr_on_plateau.py +13 -7
- mindspore/train/callback/_summary_collector.py +27 -19
- mindspore/train/callback/_time_monitor.py +13 -7
- mindspore/train/checkpoint_pb2.py +68 -8
- mindspore/train/data_sink.py +122 -33
- mindspore/train/dataset_helper.py +28 -87
- mindspore/train/loss_scale_manager.py +4 -7
- mindspore/{nn → train}/metrics/__init__.py +20 -20
- mindspore/{nn → train}/metrics/accuracy.py +12 -10
- mindspore/{nn → train}/metrics/auc.py +4 -4
- mindspore/{nn → train}/metrics/bleu_score.py +4 -4
- mindspore/{nn → train}/metrics/confusion_matrix.py +10 -8
- mindspore/{nn → train}/metrics/cosine_similarity.py +4 -4
- mindspore/{nn → train}/metrics/dice.py +6 -5
- mindspore/{nn → train}/metrics/error.py +7 -5
- mindspore/{nn → train}/metrics/fbeta.py +9 -7
- mindspore/{nn → train}/metrics/hausdorff_distance.py +8 -6
- mindspore/{nn → train}/metrics/loss.py +4 -3
- mindspore/{nn → train}/metrics/mean_surface_distance.py +6 -5
- mindspore/{nn → train}/metrics/metric.py +6 -5
- mindspore/{nn → train}/metrics/occlusion_sensitivity.py +4 -3
- mindspore/{nn → train}/metrics/perplexity.py +5 -4
- mindspore/{nn → train}/metrics/precision.py +5 -4
- mindspore/{nn → train}/metrics/recall.py +5 -4
- mindspore/{nn → train}/metrics/roc.py +7 -6
- mindspore/{nn → train}/metrics/root_mean_square_surface_distance.py +6 -5
- mindspore/{nn → train}/metrics/topk.py +7 -5
- mindspore/train/mind_ir_pb2.py +339 -32
- mindspore/train/model.py +113 -84
- mindspore/train/serialization.py +547 -167
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +43 -12
- mindspore/train/train_thor/convert_utils.py +7 -1
- mindspore/train/train_thor/dataset_helper.py +3 -3
- mindspore/train/train_thor/model_thor.py +0 -4
- mindspore/turbojpeg.dll +0 -0
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +4 -3
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +901 -660
- mindspore/compression/common/constant.py +0 -124
- mindspore/compression/export/__init__.py +0 -19
- mindspore/compression/export/quant_export.py +0 -514
- mindspore/compression/quant/qat.py +0 -636
- mindspore/compression/quant/quant_utils.py +0 -462
- mindspore/compression/quant/quantizer.py +0 -68
- mindspore/libatomic-1.dll +0 -0
- mindspore/libgcc_s_seh-1.dll +0 -0
- mindspore/libgfortran-4.dll +0 -0
- mindspore/libgomp-1.dll +0 -0
- mindspore/libjpeg-62.dll +0 -0
- mindspore/libmindspore.dll +0 -0
- mindspore/libmindspore_common.dll +0 -0
- mindspore/libmindspore_core.dll +0 -0
- mindspore/libmindspore_glog.dll +0 -0
- mindspore/libnnacl.dll +0 -0
- mindspore/libopencv_core452.dll +0 -0
- mindspore/libopencv_imgcodecs452.dll +0 -0
- mindspore/libopencv_imgproc452.dll +0 -0
- mindspore/libquadmath-0.dll +0 -0
- mindspore/libsqlite3.dll +0 -0
- mindspore/libssp-0.dll +0 -0
- mindspore/libstdc++-6.dll +0 -0
- mindspore/libtinyxml2.dll +0 -0
- mindspore/libturbojpeg.dll +0 -0
- mindspore/libwinpthread-1.dll +0 -0
- mindspore/nn/layer/quant.py +0 -1868
- mindspore/nn/layer/rnn_utils.py +0 -90
- mindspore/nn/probability/dpn/__init__.py +0 -22
- mindspore/nn/probability/dpn/vae/__init__.py +0 -25
- mindspore/nn/probability/dpn/vae/cvae.py +0 -138
- mindspore/nn/probability/dpn/vae/vae.py +0 -122
- mindspore/nn/probability/infer/__init__.py +0 -22
- mindspore/nn/probability/infer/variational/elbo.py +0 -70
- mindspore/nn/probability/infer/variational/svi.py +0 -84
- mindspore/nn/probability/toolbox/__init__.py +0 -22
- mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
- mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -363
- mindspore/nn/probability/transforms/__init__.py +0 -22
- mindspore/nn/probability/transforms/transform_bnn.py +0 -262
- mindspore/nn/probability/zhusuan/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
- mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
- mindspore/ops/_op_impl/tbe/bias_add_grad_ds.py +0 -52
- mindspore/ops/_op_impl/tbe/scatter_nd_add_ds.py +0 -43
- mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/Identity_bprop.mindir +0 -9
- mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/stop_gradient_bprop.mindir +0 -12
- mindspore/ops/composite/array_ops.py +0 -210
- mindspore/ops/composite/clip_ops.py +0 -238
- mindspore/ops/composite/random_ops.py +0 -426
- mindspore/ops/composite/vmap_ops.py +0 -38
- mindspore/ops/operations/sponge_ops.py +0 -3531
- mindspore/ops/operations/sponge_update_ops.py +0 -2546
- mindspore/parallel/nn/__init__.py +0 -42
- mindspore/parallel/nn/loss.py +0 -22
- mindspore/parallel/nn/moe.py +0 -21
- mindspore/parallel/nn/op_parallel_config.py +0 -22
- mindspore/parallel/nn/transformer.py +0 -31
- mindspore/run_check/_check_deps_version.py +0 -84
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
|
@@ -12,23 +12,25 @@
|
|
|
12
12
|
# See the License for the specific language governing permissions and
|
|
13
13
|
# limitations under the License.
|
|
14
14
|
# ============================================================================
|
|
15
|
-
|
|
16
15
|
"""Defines parameter operators with functional form."""
|
|
17
16
|
|
|
18
|
-
|
|
17
|
+
from __future__ import absolute_import
|
|
18
|
+
|
|
19
|
+
from mindspore import context
|
|
19
20
|
from mindspore.ops import operations as P
|
|
20
21
|
from mindspore.ops import functional as F
|
|
21
|
-
from mindspore.ops.primitive import constexpr
|
|
22
|
+
from mindspore.ops.primitive import constexpr, _primexpr
|
|
22
23
|
from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
|
|
23
24
|
from mindspore.common import dtype as mstype
|
|
24
25
|
from mindspore.common.seed import _get_graph_seed
|
|
25
26
|
from mindspore.common.tensor import Tensor
|
|
26
|
-
from mindspore.ops.operations.random_ops import RandomShuffle
|
|
27
|
+
from mindspore.ops.operations.random_ops import RandomShuffle, RandomChoiceWithMask, RandpermV2
|
|
27
28
|
from mindspore.ops._primitive_cache import _get_cache_prim
|
|
28
|
-
from mindspore.
|
|
29
|
+
from mindspore.common.api import _function_forbid_reuse
|
|
29
30
|
|
|
30
31
|
|
|
31
|
-
|
|
32
|
+
@_function_forbid_reuse
|
|
33
|
+
def random_gamma(shape, alpha, seed=None):
|
|
32
34
|
r"""
|
|
33
35
|
Outputs random values from the Gamma distribution(s) described by alpha.
|
|
34
36
|
|
|
@@ -36,12 +38,10 @@ def random_gamma(shape, alpha, seed=0, seed2=0):
|
|
|
36
38
|
Args:
|
|
37
39
|
shape (Tensor): The shape of random tensor to be generated.
|
|
38
40
|
Must be one of the following types: int32, int64. 1-D integer tensor.
|
|
39
|
-
alpha (Tensor): The alpha
|
|
41
|
+
alpha (Tensor): The :math:`\alpha` distribution parameter.
|
|
40
42
|
A Tensor. Must be one of the following types: half, float32, float64.
|
|
41
|
-
seed (int): Seed is used as entropy source for
|
|
42
|
-
|
|
43
|
-
seed2 (int): Seed2 is used as entropy source for the random number engines to generate
|
|
44
|
-
pseudo-random numbers, must be non-negative. Default: None, which will be treated as 0.
|
|
43
|
+
seed (int, optional): Seed is used as entropy source for Random number engines generating pseudo-random numbers.
|
|
44
|
+
Default: None, which will be treated as 0.
|
|
45
45
|
|
|
46
46
|
Returns:
|
|
47
47
|
Tensor. The shape should be equal to the concat shape between the input `shape` and the broadcast
|
|
@@ -68,50 +68,40 @@ def random_gamma(shape, alpha, seed=0, seed2=0):
|
|
|
68
68
|
>>> print(result)
|
|
69
69
|
(7, 5, 2)
|
|
70
70
|
"""
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
alpha_shape = P.Shape()(alpha)
|
|
75
|
-
beta_shape = P.Shape()(beta)
|
|
76
|
-
broadcast_shape = get_broadcast_shape(alpha_shape, beta_shape, "random_gamma",
|
|
77
|
-
arg_name1="alpha", arg_name2="beta")
|
|
78
|
-
broadcast_shape_t = tuple(broadcast_shape)
|
|
79
|
-
broadcast_to = P.BroadcastTo(broadcast_shape_t)
|
|
80
|
-
alpha_broadcast = broadcast_to(alpha)
|
|
81
|
-
random_gamma_op = _get_cache_prim(P.RandomGamma)(seed=seed, seed2=seed2)
|
|
82
|
-
output = random_gamma_op(shape, alpha_broadcast)
|
|
83
|
-
|
|
71
|
+
seed1, seed2 = _get_seed(seed, "random_gamma")
|
|
72
|
+
random_gamma_op = _get_cache_prim(P.RandomGamma)(seed1, seed2)
|
|
73
|
+
output = random_gamma_op(shape, alpha)
|
|
84
74
|
return output
|
|
85
75
|
|
|
86
76
|
|
|
87
77
|
@constexpr(reuse_result=False)
|
|
88
78
|
def _get_seed(op_seed, kernel_name):
|
|
89
|
-
"Get the graph-level seed."
|
|
79
|
+
"""Get the graph-level seed."""
|
|
90
80
|
return _get_graph_seed(op_seed, kernel_name)
|
|
91
81
|
|
|
92
82
|
|
|
93
|
-
|
|
83
|
+
@_function_forbid_reuse
|
|
84
|
+
def standard_laplace(shape, seed=None):
|
|
94
85
|
r"""
|
|
95
86
|
Generates random numbers according to the Laplace random number distribution (mean=0, lambda=1).
|
|
96
87
|
It is defined as:
|
|
97
88
|
|
|
98
89
|
.. math::
|
|
99
|
-
\text{f}(x) = \frac{1}{2}\exp(-|x|)
|
|
90
|
+
\text{f}(x) = \frac{1}{2}\exp(-|x|)
|
|
100
91
|
|
|
101
92
|
Args:
|
|
102
93
|
shape (Union[tuple, Tensor]): The shape of random tensor to be generated. Only constant value is allowed
|
|
103
94
|
when the input type is tuple. And the operator supports dynamic shape only when the input type is Tensor.
|
|
104
|
-
seed (int): Random
|
|
105
|
-
|
|
95
|
+
seed (int, optional): Seed is used as entropy source for Random number engines generating pseudo-random numbers.
|
|
96
|
+
Default: None, which will be treated as 0.
|
|
106
97
|
|
|
107
98
|
Returns:
|
|
108
99
|
Tensor. The shape that the input 'shape' denotes. The dtype is float32.
|
|
109
100
|
|
|
110
101
|
Raises:
|
|
111
|
-
TypeError: If seed or seed2 is not an int.
|
|
112
102
|
TypeError: If shape is neither a tuple nor a Tensor.
|
|
113
|
-
ValueError: If seed or seed2 is not a non-negative int.
|
|
114
103
|
ValueError: If shape is a tuple containing non-positive items.
|
|
104
|
+
ValueError: If shape is a Tensor, and the rank of the Tensor is not equal to 1.
|
|
115
105
|
|
|
116
106
|
Supported Platforms:
|
|
117
107
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -124,10 +114,12 @@ def standard_laplace(shape, seed=0, seed2=0):
  >>> print(result)
  (4, 4)
  """
-
+ seed1, seed2 = _get_seed(seed, "standard_laplace")
+ standard_laplace_op = _get_cache_prim(P.StandardLaplace)(seed=seed1, seed2=seed2)
  return standard_laplace_op(shape)


+ @_function_forbid_reuse
  def random_categorical(logits, num_sample, seed=0, dtype=mstype.int64):
  r"""
  Generates random samples from a given categorical distribution tensor.
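Note: the rewritten bodies above map the single `seed` argument to a (graph seed, op seed) pair via `_get_seed` before building the primitive. A minimal usage sketch of the 2.0.0rc1 signature shown in these hunks, assuming a working MindSpore install; output values are not reproduced here:

    import mindspore as ms
    from mindspore import ops

    ms.set_seed(10)                              # global seed, consumed by _get_seed
    out = ops.standard_laplace((4, 4), seed=2)   # per-call op seed, new single-seed signature
    print(out.shape)                             # (4, 4)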
@@ -165,6 +157,64 @@ def random_categorical(logits, num_sample, seed=0, dtype=mstype.int64):
  return random_categorical_(logits, num_sample, seed)


+ @_function_forbid_reuse
+ def multinomial_with_replacement(x, seed, offset, numsamples, replacement=False):
+ r"""
+ Returns a tensor where each row contains numsamples indices sampled from the
+ multinomial distribution with replacement. It is different from `multinomial` in that it allows
+ the same outcome to be chosen multiple times.
+
+ Note:
+ The rows of input do not need to sum to one (in which case we use the values as weights),
+ but must be non-negative, finite and have a non-zero sum.
+
+ Args:
+ x (Tensor): the input tensor containing the cumsum of probabilities, must be 1 or 2
+ dimensions. Must be one of the following types: float16, float32, float64.
+ seed (int): If seed is set to be -1, and offset is set to be 0, the random number
+ generator is seeded by a random seed. Otherwise, it is seeded by the given seed.
+ offset (int): Offset used to avoid seed collision.
+ numsamples (int): the number of samples to draw.
+ replacement (bool, optional): Whether to draw with replacement or not. Default: False.
+
+ Returns:
+ Tensor with the same rows as `x`, each row has `numsamples` sampled indices.
+
+ Raises:
+ TypeError: If `x` is not a 1D or 2D Tensor.
+ TypeError: If dtype of `x` is not float16, float32 or float64.
+ TypeError: If `numsamples` is not an int.
+ TypeError: If `replacement` is not a bool.
+ ValueError: If the value of `numsamples` is not greater than x_shape[-1] when `replacement` is False.
+ ValueError: If the sum of one row of `x` is less than 0.
+ ValueError: If one of the elements of a row of `x` is less than 0.
+ ValueError: If `numsamples` is equal to or less than 0.
+
+ Supported Platforms:
+ ``CPU``
+
+ Examples:
+ >>> x = Tensor([[0., 9., 4., 0.]], mstype.float32)
+ >>> output = ops.multinomial_with_replacement(x, 2, 5, 2, True)
+ >>> print(output)
+ [[1 1]]
+ """
+ if not isinstance(seed, Tensor):
+ if not isinstance(seed, int):
+ raise TypeError("For multinomial_with_replacement,",
+ "the input[seed] must be int, but got {}.".format(type(seed)))
+ seed = Tensor(seed, dtype=mstype.int64)
+ if not isinstance(offset, Tensor):
+ if not isinstance(offset, int):
+ raise TypeError("For multinomial_with_replacement,",
+ "the input[offset] must be int, but got {}.".format(type(offset)))
+ offset = Tensor(offset, dtype=mstype.int64)
+ multinomial_with_replacement_ = _get_cache_prim(P.MultinomialWithReplacement)(numsamples=numsamples,
+ replacement=replacement)
+ return multinomial_with_replacement_(x, seed, offset)
+
+
+ @_function_forbid_reuse
  def uniform(shape, minval, maxval, seed=None, dtype=mstype.float32):
  """
  Generates random numbers according to the Uniform random number distribution.
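Note: a NumPy sketch of the per-row with-replacement sampling described above, treating each row as unnormalized weights per the Note in the docstring; this illustrates the semantics only, not the `MultinomialWithReplacement` kernel:

    import numpy as np

    rng = np.random.default_rng(5)
    x = np.array([[0.0, 9.0, 4.0, 0.0]])               # rows need not sum to 1
    probs = x / x.sum(axis=1, keepdims=True)
    samples = np.stack([rng.choice(x.shape[1], size=2, replace=True, p=row) for row in probs])
    print(samples.shape)                               # (1, 2): numsamples indices per row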
@@ -173,9 +223,7 @@ def uniform(shape, minval, maxval, seed=None, dtype=mstype.float32):
  The number in tensor minval should be strictly less than maxval at any position after broadcasting.

  Args:
- shape (tuple): The shape of random tensor to be generated.
- The format is :math:`(N,*)` where :math:`*` means, any number of additional dimensions
- and the length of :math:`(N,*)` should be less than 8 in broadcast operation.
+ shape (Union[tuple, Tensor]): The shape of random tensor to be generated.
  minval (Tensor): The distribution parameter `a`.
  It defines the minimum possible generated value, with int32 or float32 data type.
  If dtype is int32, only one number is allowed.
@@ -194,14 +242,14 @@ def uniform(shape, minval, maxval, seed=None, dtype=mstype.float32):
  The dtype is designated as the input `dtype`.

  Raises:
- TypeError: If `shape` is
+ TypeError: If `shape` is neither a tuple nor a Tensor.
  TypeError: If 'minval' or 'maxval' is neither int32 nor float32
  and dtype of 'minval' is not the same as 'maxval'.
  TypeError: If `seed` is not an int.
  TypeError: If 'dtype' is neither int32 nor float32.

  Supported Platforms:
- ``
+ ``GPU`` ``CPU``

  Examples:
  >>> from mindspore import Tensor, ops
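Note: a short usage sketch of `ops.uniform` with the relaxed `shape` type documented above; shapes match the docstring, values are illustrative only:

    import mindspore
    from mindspore import Tensor, ops

    minval = Tensor(1, mindspore.float32)
    maxval = Tensor(4, mindspore.float32)
    out = ops.uniform((3, 4), minval, maxval, seed=5, dtype=mindspore.float32)
    print(out.shape)   # (3, 4); every element lies in [1, 4)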
@@ -241,7 +289,8 @@ def uniform(shape, minval, maxval, seed=None, dtype=mstype.float32):
  return value


-
+ @_function_forbid_reuse
+ def standard_normal(shape, seed=None):
  r"""
  Generates random numbers according to the standard Normal (or Gaussian) random number distribution.

@@ -252,17 +301,17 @@ def standard_normal(shape, seed=0, seed2=0):
  f(x)=\frac{1}{\sqrt{2 \pi}} e^{\left(-\frac{x^{2}}{2}\right)}

  Args:
- shape (tuple): The shape of random tensor to be generated. Only constant value is allowed
-
-
+ shape (Union[tuple, Tensor]): The shape of random tensor to be generated. Only constant value is allowed
+ when the input type is tuple. And the operator supports dynamic shape only when the input type is Tensor.
+ seed (int, optional): Seed is used as entropy source for Random number engines generating pseudo-random numbers.
+ Default: None, which will be treated as 0.

  Returns:
- Tensor. The shape
+ Tensor. The shape that the input 'shape' denotes. The dtype is float32.

  Raises:
- TypeError: If `
-
- ValueError: If `shape` is not a constant value.
+ TypeError: If `shape` is neither a tuple nor a Tensor.
+ ValueError: If `shape` is a tuple containing non-positive items.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -275,11 +324,18 @@ def standard_normal(shape, seed=0, seed2=0):
  >>> print(result)
  (4, 4)
  """
-
+ seed1, seed2 = _get_seed(seed, "standard_normal")
+ standard_normal_op = _get_cache_prim(P.StandardNormal)(seed=seed1, seed2=seed2)
  return standard_normal_op(shape)


-
+ @_function_forbid_reuse
+ def uniform_candidate_sampler(true_classes,
+ num_true,
+ num_sampled,
+ unique,
+ range_max,
+ seed=0,
  remove_accidental_hits=False):
  r"""
  Uniform candidate sampler.
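Note: the new bodies in this file repeatedly build primitives through `_get_cache_prim(...)`. As a rough analogy only (not MindSpore's actual implementation), the effect is similar to memoizing primitive construction per argument tuple:

    from functools import lru_cache

    @lru_cache(maxsize=None)
    def get_cached_prim(prim_cls, *args):
        # hypothetical helper: reuse one primitive instance per (class, constructor args)
        return prim_cls(*args)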
@@ -288,7 +344,7 @@ def uniform_candidate_sampler(true_classes, num_true, num_sampled, unique, range
  If unique=True, candidates are drawn without replacement, else unique=False with replacement.

  Args:
- true_classes (Tensor): A Tensor. The target classes with a Tensor shape of (batch_size, num_true).
+ true_classes (Tensor): A Tensor. The target classes with a Tensor shape of :math:`(batch_size, num_true)` .
  num_true (int): The number of target classes in each training example.
  num_sampled (int): The number of classes to randomly sample. The sampled_candidates will have a shape
  of num_sampled. If unique=True, num_sampled must be less than or equal to range_max.
@@ -300,11 +356,11 @@ def uniform_candidate_sampler(true_classes, num_true, num_sampled, unique, range

  Returns:
  - **sampled_candidates** (Tensor) - The sampled_candidates is independent of the true classes.
- Shape: (num_sampled, ).
+ Shape: :math:`(num_sampled, )` .
  - **true_expected_count** (Tensor) - The expected counts under the sampling distribution of each
- of true_classes. Shape: (batch_size, num_true).
+ of true_classes. Shape: :math:`(batch_size, num_true)` .
  - **sampled_expected_count** (Tensor) - The expected counts under the sampling distribution of
- each of sampled_candidates. Shape: (num_sampled, ).
+ each of sampled_candidates. Shape: :math:`(num_sampled, )` .

  Raises:
  TypeError: If neither `num_true` nor `num_sampled` is an int.
@@ -316,7 +372,7 @@ def uniform_candidate_sampler(true_classes, num_true, num_sampled, unique, range
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
- >>> data = Tensor(np.array([[1], [3], [4], [6], [3]], dtype=np.
+ >>> data = Tensor(np.array([[1], [3], [4], [6], [3]], dtype=np.int64))
  >>> output1, output2, output3 = ops.uniform_candidate_sampler(data, 1, 3, False, 4, 1)
  >>> print(output1.shape)
  (3,)
@@ -325,15 +381,21 @@ def uniform_candidate_sampler(true_classes, num_true, num_sampled, unique, range
  >>> print(output3.shape)
  (3,)
  """
- sampler_op = _get_cache_prim(P.UniformCandidateSampler)(num_true,
+ sampler_op = _get_cache_prim(P.UniformCandidateSampler)(num_true,
+ num_sampled,
+ unique,
+ range_max,
+ seed=seed,
  remove_accidental_hits=remove_accidental_hits)
  sampled_candidates, true_expected_count, sampled_expected_count = sampler_op(true_classes)
  return sampled_candidates, true_expected_count, sampled_expected_count


+ @_function_forbid_reuse
  def random_poisson(shape, rate, seed=None, dtype=mstype.float32):
  r"""
- Generates random
+ Generates random number Tensor with shape `shape` according to a Poisson distribution with mean `rate`.
+

  .. math::

@@ -342,11 +404,12 @@ def random_poisson(shape, rate, seed=None, dtype=mstype.float32):
  Args:
  shape (Tensor): The shape of random tensor to be sampled from each poisson distribution, 1-D `Tensor` whose
  dtype is mindspore.dtype.int32 or mindspore.dtype.int64.
- rate (Tensor): The
+ rate (Tensor): The :math:`μ` parameter the distribution is constructed with.
+ It represents the mean of the distribution
  and also the variance of the distribution. It should be a `Tensor` whose dtype is mindspore.dtype.int64,
  mindspore.dtype.int32, mindspore.dtype.float64, mindspore.dtype.float32 or mindspore.dtype.float16.
- seed (int): Seed is used as entropy source for the random number engines to generate pseudo-random
- and must be non-negative. Default: None, which will be treated as 0.
+ seed (int, optional): Seed is used as entropy source for the random number engines to generate pseudo-random
+ numbers and must be non-negative. Default: None, which will be treated as 0.
  dtype (mindspore.dtype): The data type of output: mindspore.dtype.int64, mindspore.dtype.int32,
  mindspore.dtype.float64, mindspore.dtype.float32 or mindspore.dtype.float16. Default: mindspore.dtype.float32.

@@ -367,7 +430,7 @@ def random_poisson(shape, rate, seed=None, dtype=mstype.float32):
  ValueError: If any element of input `shape` tensor is not positive.

  Supported Platforms:
- ``CPU``
+ ``GPU`` ``CPU``

  Examples:
  >>> import mindspore
@@ -378,7 +441,7 @@ def random_poisson(shape, rate, seed=None, dtype=mstype.float32):
  >>> rate = Tensor(np.array([[5.0, 10.0], [5.0, 1.0]]), mindspore.float32)
  >>> output = ops.random_poisson(shape, rate, seed=5, dtype=mindspore.float64)
  >>> print(output.shape, output.dtype)
- (2, 2, 2, 2)
+ (2, 2, 2, 2) Float64
  >>> # case 2: 1-D shape, scalar rate, int64 output
  >>> shape = Tensor(np.array([2, 2]), mindspore.int64)
  >>> rate = Tensor(5.0, mindspore.float64)
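Note: to make the shape rule concrete, here is a NumPy-only sketch of the documented behaviour (the sample `shape` is prepended to the broadcast shape of `rate`); this mirrors the semantics, not MindSpore's kernel:

    import numpy as np

    rng = np.random.default_rng(5)
    sample_shape = (2, 2)
    rate = np.array([[5.0, 10.0], [5.0, 1.0]])           # per-distribution means
    out = rng.poisson(lam=rate, size=sample_shape + rate.shape)
    print(out.shape)                                      # (2, 2, 2, 2)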
@@ -392,45 +455,844 @@ def random_poisson(shape, rate, seed=None, dtype=mstype.float32):
  return value


-
+ @_function_forbid_reuse
+ def shuffle(x, seed=None):
  r"""
  Randomly shuffles a Tensor along its first dimension.

  Args:
  x (Tensor): The Tensor to be shuffled.
- seed (int):
-
- final generated random number, must be non-negative. Default: 0.
+ seed (int, optional): Random seed used for random number generation, must be non-negative. If `seed` is 0,
+ it will be replaced with a randomly generated value. Default: None, which will be treated as 0.

  Returns:
  Tensor. The shape and type are the same as the input `x`.

  Raises:
- TypeError: If data type of `seed`
+ TypeError: If data type of `seed` is not None or non-negative int.

  Supported Platforms:
- ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
  >>> x = Tensor(np.array([1, 2, 3, 4]), mstype.float32)
- >>>
- >>> output
-
- (4,)
+ >>> output = ops.shuffle(x, seed=1)
+ >>> print(output)
+ [3. 4. 2. 1.]
  """
+ seed, seed2 = _get_seed(seed, "shuffle")
  random_shuffle_ = _get_cache_prim(RandomShuffle)(seed=seed, seed2=seed2)
  output = random_shuffle_(x)
  return output

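Note: `shuffle` permutes only the first dimension of its input; a NumPy sketch of that semantics, for comparison only:

    import numpy as np

    rng = np.random.default_rng(1)
    x = np.arange(12).reshape(4, 3)
    perm = rng.permutation(x.shape[0])   # shuffle the row order, keep each row intact
    print(x[perm].shape)                 # (4, 3)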
|
|
489
|
+
@_function_forbid_reuse
|
|
490
|
+
def log_uniform_candidate_sampler(true_classes, num_true=1, num_sampled=5, unique=True, range_max=5, seed=0):
|
|
491
|
+
r"""
|
|
492
|
+
Generates random labels with a log-uniform distribution for sampled_candidates.
|
|
493
|
+
|
|
494
|
+
Randomly samples a tensor of sampled classes from the range of integers [0, range_max).
|
|
495
|
+
|
|
496
|
+
Args:
|
|
497
|
+
true_classes (Tensor): The target classes. With data type of int64 and
|
|
498
|
+
shape :math:`(batch\_size, num\_true)` .
|
|
499
|
+
num_true (int): The number of target classes per training example. Default: 1.
|
|
500
|
+
num_sampled (int): The number of classes to randomly sample. Default: 5.
|
|
501
|
+
unique (bool): Determines whether sample with rejection. If `unique` is True,
|
|
502
|
+
all sampled classes in a batch are unique. Default: True.
|
|
503
|
+
range_max (int): The number of possible classes. When `unique` is True,
|
|
504
|
+
`range_max` must be greater than or equal to `num_sampled`. Default: 5.
|
|
505
|
+
seed (int): Random seed, must be non-negative. Default: 0.
|
|
506
|
+
|
|
507
|
+
Returns:
|
|
508
|
+
Tuple of 3 Tensors.
|
|
509
|
+
|
|
510
|
+
- **sampled_candidates** (Tensor) - A Tensor with shape :math:`(num\_sampled,)`
|
|
511
|
+
and the same type as `true_classes`.
|
|
512
|
+
- **true_expected_count** (Tensor) - A Tensor with the same shape as `true_classes` and type float32.
|
|
513
|
+
- **sampled_expected_count** (Tensor) - A Tensor with the same shape as `sampled_candidates` and type float32.
|
|
514
|
+
|
|
515
|
+
Raises:
|
|
516
|
+
TypeError: If neither `num_true` nor `num_sampled` is an int.
|
|
517
|
+
TypeError: If `unique` is not a bool.
|
|
518
|
+
TypeError: If neither `range_max` nor `seed` is an int.
|
|
519
|
+
TypeError: If `true_classes` is not a Tensor.
|
|
520
|
+
|
|
521
|
+
Supported Platforms:
|
|
522
|
+
``Ascend`` ``CPU``
|
|
523
|
+
|
|
524
|
+
Examples:
|
|
525
|
+
>>> import numpy as np
|
|
526
|
+
>>> from mindspore import Tensor, ops
|
|
527
|
+
>>> output1, output2, output3 = ops.log_uniform_candidate_sampler(
|
|
528
|
+
... Tensor(np.array([[1, 7], [0, 4], [3, 3]])), 2, 5, True, 5)
|
|
529
|
+
>>> print(output1, output2, output3)
|
|
530
|
+
[3 2 0 4 1]
|
|
531
|
+
[[0.92312991 0.49336370]
|
|
532
|
+
[0.99248987 0.65806371]
|
|
533
|
+
[0.73553443 0.73553443]]
|
|
534
|
+
[0.73553443 0.82625800 0.99248987 0.65806371 0.92312991]
|
|
535
|
+
|
|
536
|
+
"""
|
|
537
|
+
|
|
538
|
+
sampler = _get_cache_prim(P.LogUniformCandidateSampler)(num_true, num_sampled, unique, range_max, seed)
|
|
539
|
+
return sampler(true_classes)
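Note: the diff above only shows the `LogUniformCandidateSampler` primitive call. For intuition, the base distribution behind this kind of log-uniform (Zipfian) sampler is commonly defined as P(c) = (log(c+2) - log(c+1)) / log(range_max+1); treat that exact formula as an assumption here. A NumPy sketch:

    import numpy as np

    range_max = 5
    c = np.arange(range_max)
    p = (np.log(c + 2) - np.log(c + 1)) / np.log(range_max + 1)
    print(p.round(3), p.sum())   # smaller class ids get higher probability; probabilities sum to 1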
|
|
540
|
+
|
|
541
|
+
|
|
542
|
+
@_function_forbid_reuse
|
|
543
|
+
def choice_with_mask(input_x, count=256, seed=None):
|
|
544
|
+
"""
|
|
545
|
+
Generates a random sample as index tensor with a mask tensor from a given tensor.
|
|
546
|
+
|
|
547
|
+
The `input_x` must be a tensor whose dimension is not less than 1. If its dimension is greater than or equal to 2,
|
|
548
|
+
the first dimension specifies the number of samples.
|
|
549
|
+
The returned index tensor denotes the index of the nonzero
|
|
550
|
+
sample, the mask tensor denotes which elements in the index tensor are valid.
|
|
551
|
+
|
|
552
|
+
Args:
|
|
553
|
+
input_x (Tensor[bool]): The input tensor.
|
|
554
|
+
The input tensor rank must be greater than or equal to 1 and less than or equal to 5.
|
|
555
|
+
count (int, optional): Number of items expected to get and the number must be greater than 0. Default: 256.
|
|
556
|
+
seed (int, optional): Seed is used as entropy source for Random number engines generating pseudo-random numbers.
|
|
557
|
+
Default: None, which will be treated as 0.
|
|
558
|
+
|
|
559
|
+
Returns:
|
|
560
|
+
Two tensors, the first one is the index tensor and the other one is the mask tensor.
|
|
561
|
+
|
|
562
|
+
- **index** (Tensor) - The output shape is 2-D.
|
|
563
|
+
- **mask** (Tensor) - The output shape is 1-D.
|
|
564
|
+
|
|
565
|
+
Raises:
|
|
566
|
+
TypeError: If `count` is not an int.
|
|
567
|
+
TypeError: If `seed` is not an int.
|
|
568
|
+
TypeError: If `input_x` is not a Tensor.
|
|
569
|
+
|
|
570
|
+
Supported Platforms:
|
|
571
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
572
|
+
|
|
573
|
+
Examples:
|
|
574
|
+
>>> input_x = Tensor(np.ones(shape=[240000, 4]).astype(np.bool))
|
|
575
|
+
>>> output_y, output_mask = ops.choice_with_mask(input_x)
|
|
576
|
+
>>> result = output_y.shape
|
|
577
|
+
>>> print(result)
|
|
578
|
+
(256, 2)
|
|
579
|
+
>>> result = output_mask.shape
|
|
580
|
+
>>> print(result)
|
|
581
|
+
(256,)
|
|
582
|
+
"""
|
|
583
|
+
seed1, seed2 = _get_seed(seed, "choice_with_mask")
|
|
584
|
+
choice_with_mask_ = _get_cache_prim(RandomChoiceWithMask)(count=count, seed=seed1, seed2=seed2)
|
|
585
|
+
output = choice_with_mask_(input_x)
|
|
586
|
+
return output
|
|
587
|
+
|
|
588
|
+
|
|
589
|
+
@constexpr
|
|
590
|
+
def is_cpu_backend():
|
|
591
|
+
"""Check if the CPU is used"""
|
|
592
|
+
return context.get_context('device_target') == 'CPU'
|
|
593
|
+
|
|
594
|
+
|
|
595
|
+
@_function_forbid_reuse
|
|
596
|
+
def randperm(n, seed=0, offset=0, dtype=mstype.int64):
|
|
597
|
+
r"""
|
|
598
|
+
Generates random permutation of integers from 0 to n-1.
|
|
599
|
+
|
|
600
|
+
Returns a tensor with the shape determined by `n`, containing random numbers drawn from the data range
|
|
601
|
+
that a given type can represent.
|
|
602
|
+
|
|
603
|
+
Args:
|
|
604
|
+
n (Union[Tensor, int]): The input n Tensor with shape: () or (1,) and with data type of int64.
|
|
605
|
+
The value of `n` must be greater than zero.
|
|
606
|
+
seed (int, optional): Random seed. Default: 0. When `seed` is -1 (the only valid negative value) and `offset` is 0,
|
|
607
|
+
it's determined by time.
|
|
608
|
+
offset (int, optional): Offset to generate random numbers. Priority is higher than random seed.
|
|
609
|
+
Default: 0. It must be non-negative.
|
|
610
|
+
dtype (mindspore.dtype, optional): The type of output.
|
|
611
|
+
Its value must be one of the following types: int32, int16, int8,
|
|
612
|
+
uint8, int64, float64, float32, float16. Default: int64.
|
|
613
|
+
|
|
614
|
+
Returns:
|
|
615
|
+
Tensor. Its shape is specified by the required argument `n`. Its type is specified by `dtype`; if `dtype` is not given, the default is used.
|
|
616
|
+
|
|
617
|
+
Raises:
|
|
618
|
+
TypeError: If `dtype` is not allowed.
|
|
619
|
+
ValueError: If `n` is a negative or 0 element.
|
|
620
|
+
ValueError: If `seed` is a negative element.
|
|
621
|
+
ValueError: If `n` is larger than the maximal data of the set dtype.
|
|
622
|
+
|
|
623
|
+
Supported Platforms:
|
|
624
|
+
``CPU``
|
|
625
|
+
|
|
626
|
+
Examples:
|
|
627
|
+
>>> n = 4
|
|
628
|
+
>>> seed = 0
|
|
629
|
+
>>> offset = 0
|
|
630
|
+
>>> output = ops.randperm(n, seed, offset, dtype=mstype.int64)
|
|
631
|
+
>>> print(output)
|
|
632
|
+
[1 0 2 3]
|
|
633
|
+
"""
|
|
634
|
+
if not isinstance(n, Tensor):
|
|
635
|
+
n = Tensor(n)
|
|
636
|
+
randperm_ = _get_cache_prim(RandpermV2)(dtype=dtype)
|
|
637
|
+
return randperm_(n, seed, offset)
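Note: the semantics of `randperm` (a random permutation of the integers 0..n-1) in a NumPy one-liner, for comparison only:

    import numpy as np

    print(np.random.default_rng(0).permutation(4))   # some permutation of [0, 1, 2, 3]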
|
|
638
|
+
|
|
639
|
+
|
|
640
|
+
@_function_forbid_reuse
|
|
641
|
+
def normal(shape, mean, stddev, seed=None):
|
|
642
|
+
"""
|
|
643
|
+
Generates random numbers according to the Normal (or Gaussian) random number distribution.
|
|
644
|
+
|
|
645
|
+
Args:
|
|
646
|
+
shape (tuple): The shape of random tensor to be generated.
|
|
647
|
+
The format is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
|
|
648
|
+
mean (Union[Tensor, int, float]): The mean μ distribution parameter, which specifies the location of the peak,
|
|
649
|
+
with data type in [int8, int16, int32, int64, float16, float32].
|
|
650
|
+
stddev (Union[Tensor, int, float]): The deviation σ distribution parameter. It should be greater than 0,
|
|
651
|
+
with data type in [int8, int16, int32, int64, float16, float32].
|
|
652
|
+
seed (int): Seed is used as entropy source for the Random number engines to generate pseudo-random numbers.
|
|
653
|
+
The value must be non-negative. Default: None, which will be treated as 0.
|
|
654
|
+
|
|
655
|
+
Returns:
|
|
656
|
+
Tensor. The shape should be equal to the broadcasted shape between the input `shape` and shapes
|
|
657
|
+
of `mean` and `stddev`.
|
|
658
|
+
The dtype is float32.
|
|
659
|
+
|
|
660
|
+
Supported Platforms:
|
|
661
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
662
|
+
|
|
663
|
+
Examples:
|
|
664
|
+
>>> import mindspore
|
|
665
|
+
>>> import numpy as np
|
|
666
|
+
>>> from mindspore import Tensor, ops
|
|
667
|
+
>>> shape = (3, 1, 2)
|
|
668
|
+
>>> mean = Tensor(np.array([[3, 4], [5, 6]]), mindspore.float32)
|
|
669
|
+
>>> stddev = Tensor(1.0, mindspore.float32)
|
|
670
|
+
>>> output = ops.normal(shape, mean, stddev, seed=5)
|
|
671
|
+
>>> result = output.shape
|
|
672
|
+
>>> print(result)
|
|
673
|
+
(3, 2, 2)
|
|
674
|
+
>>> shape = (3, 1, 3)
|
|
675
|
+
>>> mean = Tensor(np.array([[3, 4, 3], [3, 5, 6]]), mindspore.float32)
|
|
676
|
+
>>> stddev = Tensor(1.0, mindspore.float32)
|
|
677
|
+
>>> output = ops.normal(shape, mean, stddev, seed=5)
|
|
678
|
+
>>> result = output.shape
|
|
679
|
+
>>> print(result)
|
|
680
|
+
(3, 2, 3)
|
|
681
|
+
>>> shape = (3, 1, 3)
|
|
682
|
+
>>> mean = Tensor(np.array([[1, 2, 3], [3, 4, 3], [3, 5, 6]]), mindspore.float32)
|
|
683
|
+
>>> stddev = Tensor(1.0, mindspore.float32)
|
|
684
|
+
>>> output = ops.normal(shape, mean, stddev, seed=5)
|
|
685
|
+
>>> result = output.shape
|
|
686
|
+
>>> print(result)
|
|
687
|
+
(3, 3, 3)
|
|
688
|
+
"""
|
|
689
|
+
_check_param("normal", "mean", mean)
|
|
690
|
+
_check_param("normal", "stddev", stddev)
|
|
691
|
+
if not isinstance(mean, Tensor):
|
|
692
|
+
mean = Tensor(mean)
|
|
693
|
+
if not isinstance(stddev, Tensor):
|
|
694
|
+
stddev = Tensor(stddev)
|
|
695
|
+
mean_dtype = F.dtype(mean)
|
|
696
|
+
stddev_dtype = F.dtype(stddev)
|
|
697
|
+
const_utils.check_type_valid(mean_dtype, mstype.int_type + (mstype.float16, mstype.float32), 'normal')
|
|
698
|
+
const_utils.check_type_valid(stddev_dtype, mstype.int_type + (mstype.float16, mstype.float32), 'normal')
|
|
699
|
+
seed1, seed2 = _get_seed(seed, "normal")
|
|
700
|
+
stdnormal = P.StandardNormal(seed1, seed2)
|
|
701
|
+
_check_shape(shape)
|
|
702
|
+
random_normal = stdnormal(shape)
|
|
703
|
+
value = random_normal * stddev + mean
|
|
704
|
+
return value
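Note: the body above is a scale-and-shift of standard normal samples (value = z * stddev + mean), so the output shape is the broadcast of `shape`, `mean` and `stddev`. A NumPy sketch of that rule:

    import numpy as np

    rng = np.random.default_rng(5)
    shape = (3, 1, 2)
    mean = np.array([[3.0, 4.0], [5.0, 6.0]])
    stddev = 1.0
    z = rng.standard_normal(shape)   # N(0, 1) samples
    out = z * stddev + mean          # same scale-and-shift as in the diff
    print(out.shape)                 # (3, 2, 2) after broadcasting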
|
|
705
|
+
|
|
706
|
+
|
|
707
|
+
@_function_forbid_reuse
|
|
708
|
+
def laplace(shape, mean, lambda_param, seed=None):
|
|
709
|
+
r"""
|
|
710
|
+
Generates random numbers according to the Laplace random number distribution.
|
|
711
|
+
It is defined as:
|
|
712
|
+
|
|
713
|
+
.. math::
|
|
714
|
+
\text{f}(x;\mu,\lambda) = \frac{1}{2\lambda}\exp(-\frac{|x-\mu|}{\lambda}),
|
|
715
|
+
|
|
716
|
+
Args:
|
|
717
|
+
shape (tuple): The shape of random tensor to be generated.
|
|
718
|
+
The format is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
|
|
719
|
+
mean (Tensor): The mean μ distribution parameter, which specifies the location of the peak.
|
|
720
|
+
With float32 data type.
|
|
721
|
+
lambda_param (Tensor): The parameter used for controlling the variance of this random distribution. The
|
|
722
|
+
variance of Laplace distribution is equal to twice the square of lambda_param. With float32 data type.
|
|
723
|
+
seed (int, optional): Seed is used as entropy source for Random number engines generating pseudo-random numbers.
|
|
724
|
+
Default: None, which will be treated as 0.
|
|
725
|
+
|
|
726
|
+
Returns:
|
|
727
|
+
Tensor. The shape should be the broadcasted shape of input `shape` and shapes of `mean` and `lambda_param`.
|
|
728
|
+
The dtype is float32.
|
|
729
|
+
|
|
730
|
+
Supported Platforms:
|
|
731
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
732
|
+
|
|
733
|
+
Examples:
|
|
734
|
+
>>> import mindspore
|
|
735
|
+
>>> from mindspore import Tensor
|
|
736
|
+
>>> from mindspore import ops as ops
|
|
737
|
+
>>> shape = (2, 3)
|
|
738
|
+
>>> mean = Tensor(1.0, mindspore.float32)
|
|
739
|
+
>>> lambda_param = Tensor(1.0, mindspore.float32)
|
|
740
|
+
>>> output = ops.laplace(shape, mean, lambda_param, seed=5)
|
|
741
|
+
>>> print(output.shape)
|
|
742
|
+
(2, 3)
|
|
743
|
+
"""
|
|
744
|
+
mean_dtype = F.dtype(mean)
|
|
745
|
+
lambda_param_dtype = F.dtype(lambda_param)
|
|
746
|
+
const_utils.check_tensors_dtype_same(mean_dtype, mstype.float32, "laplace")
|
|
747
|
+
const_utils.check_tensors_dtype_same(lambda_param_dtype, mstype.float32, "laplace")
|
|
748
|
+
seed1, seed2 = _get_seed(seed, "laplace")
|
|
749
|
+
stdlaplace = P.StandardLaplace(seed1, seed2)
|
|
750
|
+
_check_shape(shape)
|
|
751
|
+
rnd = stdlaplace(shape)
|
|
752
|
+
value = rnd * lambda_param + mean
|
|
753
|
+
return value
|
|
754
|
+
|
|
755
|
+
|
|
756
|
+
@_function_forbid_reuse
|
|
757
|
+
def gamma(shape, alpha, beta, seed=None):
|
|
758
|
+
r"""
|
|
759
|
+
Generates random numbers according to the Gamma random number distribution.
|
|
760
|
+
|
|
761
|
+
Args:
|
|
762
|
+
shape (tuple): The shape of random tensor to be generated.
|
|
763
|
+
alpha (Tensor): The :math:`\alpha` distribution parameter. It should be greater than 0 with float32 data type.
|
|
764
|
+
beta (Tensor): The :math:`\beta` distribution parameter. It should be greater than 0 with float32 data type.
|
|
765
|
+
seed (int): Seed is used as entropy source for the random number engines to generate
|
|
766
|
+
pseudo-random numbers, must be non-negative. Default: None, which will be treated as 0.
|
|
767
|
+
|
|
768
|
+
Returns:
|
|
769
|
+
Tensor. The shape should be equal to the broadcasted shape between the input `shape` and shapes
|
|
770
|
+
of `alpha` and `beta`.
|
|
771
|
+
The dtype is float32.
|
|
772
|
+
|
|
773
|
+
Raises:
|
|
774
|
+
TypeError: If `shape` is not a tuple.
|
|
775
|
+
TypeError: If neither `alpha` nor `beta` is a Tensor.
|
|
776
|
+
TypeError: If `seed` is not an int.
|
|
777
|
+
TypeError: If dtype of `alpha` and `beta` is not float32.
|
|
778
|
+
|
|
779
|
+
Supported Platforms:
|
|
780
|
+
``Ascend``
|
|
781
|
+
|
|
782
|
+
Examples:
|
|
783
|
+
>>> import mindspore
|
|
784
|
+
>>> import numpy as np
|
|
785
|
+
>>> from mindspore import Tensor, ops
|
|
786
|
+
>>> # case 1: alpha_shape is (2, 2)
|
|
787
|
+
>>> shape = (3, 1, 2)
|
|
788
|
+
>>> alpha = Tensor(np.array([[3, 4], [5, 6]]), mindspore.float32)
|
|
789
|
+
>>> beta = Tensor(np.array([1.0]), mindspore.float32)
|
|
790
|
+
>>> output = ops.gamma(shape, alpha, beta, seed=5)
|
|
791
|
+
>>> result = output.shape
|
|
792
|
+
>>> print(result)
|
|
793
|
+
(3, 2, 2)
|
|
794
|
+
>>> # case 2: alpha_shape is (2, 3), so shape is (3, 1, 3)
|
|
795
|
+
>>> shape = (3, 1, 3)
|
|
796
|
+
>>> alpha = Tensor(np.array([[1, 3, 4], [2, 5, 6]]), mindspore.float32)
|
|
797
|
+
>>> beta = Tensor(np.array([1.0]), mindspore.float32)
|
|
798
|
+
>>> output = ops.gamma(shape, alpha, beta, seed=5)
|
|
799
|
+
>>> result = output.shape
|
|
800
|
+
>>> print(result)
|
|
801
|
+
(3, 2, 3)
|
|
802
|
+
>>> # case 3: beta_shape is (1, 2), the output is different.
|
|
803
|
+
>>> shape = (3, 1, 2)
|
|
804
|
+
>>> alpha = Tensor(np.array([[3, 4], [5, 6]]), mindspore.float32)
|
|
805
|
+
>>> beta = Tensor(np.array([1.0, 2]), mindspore.float32)
|
|
806
|
+
>>> output = ops.gamma(shape, alpha, beta, seed=5)
|
|
807
|
+
>>> result = output.shape
|
|
808
|
+
>>> print(output)
|
|
809
|
+
[[[ 2.2132034 5.8855834]]
|
|
810
|
+
[ 3.3981476 7.5805717]
|
|
811
|
+
[[ 3.3981476 7.5805717]]
|
|
812
|
+
[ 3.7190282 19.941492]
|
|
813
|
+
[[ 2.9512358 2.5969937]]
|
|
814
|
+
[ 3.786061 5.160872 ]]]
|
|
815
|
+
>>> # case 4: beta_shape is (2, 1), the output is different.
|
|
816
|
+
>>> shape = (3, 1, 2)
|
|
817
|
+
>>> alpha = Tensor(np.array([[3, 4], [5, 6]]), mindspore.float32)
|
|
818
|
+
>>> beta = Tensor(np.array([[1.0], [2.0]]), mindspore.float32)
|
|
819
|
+
>>> output = ops.gamma(shape, alpha, beta, seed=5)
|
|
820
|
+
>>> result = output.shape
|
|
821
|
+
>>> print(output)
|
|
822
|
+
[[[ 5.6085486 7.8280783]]
|
|
823
|
+
[ 15.97684 16.116285]
|
|
824
|
+
[[ 1.8347423 1.713663]]
|
|
825
|
+
[ 3.2434065 15.667398]
|
|
826
|
+
[[ 4.2922077 7.3365674]]
|
|
827
|
+
[ 5.3876944 13.159832 ]]]
|
|
828
|
+
"""
|
|
829
|
+
seed1, seed2 = _get_seed(seed, "gamma")
|
|
830
|
+
gamma_v = P.Gamma(seed1, seed2)
|
|
831
|
+
value = gamma_v(shape, alpha, beta)
|
|
832
|
+
return value
|
|
833
|
+
|
|
834
|
+
|
|
835
|
+
@_primexpr
|
|
836
|
+
def _generate_shapes(shape):
|
|
837
|
+
"""Generate shapes for randn and rand."""
|
|
838
|
+
if not shape:
|
|
839
|
+
size = (1,)
|
|
840
|
+
elif len(shape) == 1:
|
|
841
|
+
if isinstance(shape[0], int):
|
|
842
|
+
size = shape
|
|
843
|
+
elif isinstance(shape[0], list):
|
|
844
|
+
size = tuple(shape[0])
|
|
845
|
+
elif isinstance(shape[0], tuple):
|
|
846
|
+
size = shape[0]
|
|
847
|
+
else:
|
|
848
|
+
raise TypeError("If the length of the argument 'shape' is 1, the type of the argument 'shape' must be "
|
|
849
|
+
"one of ['int', 'list', 'tuple'], but got ", shape[0])
|
|
850
|
+
else:
|
|
851
|
+
for value in shape:
|
|
852
|
+
if not isinstance(value, int):
|
|
853
|
+
raise TypeError("If the length of the argument 'shape' is > 1, the type of the argument 'shape' must "
|
|
854
|
+
"all be int, but got ", value)
|
|
855
|
+
size = shape
|
|
856
|
+
return size
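Note: `_generate_shapes` lets `rand`/`randn` accept either separate ints or a single tuple/list. A plain-Python sketch of the accepted spellings (hypothetical helper name, same rule as the code above minus the error checks):

    def normalize_size(*size):
        # () -> (1,); a single tuple/list -> that tuple; otherwise the ints as given
        if not size:
            return (1,)
        if len(size) == 1 and isinstance(size[0], (tuple, list)):
            return tuple(size[0])
        return size

    print(normalize_size(2, 3), normalize_size((2, 3)), normalize_size([2, 3]))   # (2, 3) each time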
|
|
857
|
+
|
|
858
|
+
|
|
859
|
+
@_function_forbid_reuse
|
|
860
|
+
def rand(*size, dtype=None, seed=None):
|
|
861
|
+
r"""
|
|
862
|
+
Returns a new tensor that fills numbers from the uniform distribution over an interval :math:`[0, 1)`
|
|
863
|
+
based on the given shape and dtype.
|
|
864
|
+
|
|
865
|
+
Args:
|
|
866
|
+
size (Union[int, tuple(int), list(int)]): Shape of the new tensor, e.g. :math:`(2, 3)` or :math:`2`.
|
|
867
|
+
|
|
868
|
+
Keyword Args:
|
|
869
|
+
dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype, it must be float type. If None,
|
|
870
|
+
`mindspore.float32` will be applied. Default: None.
|
|
871
|
+
seed (int, optional): Random seed, must be greater or equal to 0. Default: None, and 0 will be used.
|
|
872
|
+
|
|
873
|
+
Returns:
|
|
874
|
+
Tensor, with the designated shape and dtype, filled with random numbers from the uniform distribution on
|
|
875
|
+
the interval :math:`[0, 1)`.
|
|
876
|
+
|
|
877
|
+
Raises:
|
|
878
|
+
TypeError: `seed` is not a non-negative integer.
|
|
879
|
+
ValueError: If `dtype` is not a `mstype.float_type` type.
|
|
880
|
+
|
|
881
|
+
Supported Platforms:
|
|
882
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
883
|
+
|
|
884
|
+
Examples:
|
|
885
|
+
>>> import mindspore.ops as ops
|
|
886
|
+
>>> print(ops.rand((2,3)))
|
|
887
|
+
[[4.1702199e-01 9.9718481e-01 7.2032452e-01]
|
|
888
|
+
[9.3255734e-01 1.1438108e-04 1.2812445e-01]]
|
|
889
|
+
"""
|
|
890
|
+
if dtype is None:
|
|
891
|
+
dtype = mstype.float32
|
|
892
|
+
elif dtype not in mstype.float_type:
|
|
893
|
+
raise ValueError(f"For 'rand', the 'dtype' must be a float type, but got {dtype}.")
|
|
894
|
+
shape = _generate_shapes(size)
|
|
895
|
+
cast_ = P.Cast()
|
|
896
|
+
seed1, seed2 = _get_seed(seed, 'rand')
|
|
897
|
+
rand_op = P.UniformReal(seed1, seed2)
|
|
898
|
+
output = rand_op(shape)
|
|
899
|
+
return cast_(output, dtype)
|
|
900
|
+
|
|
901
|
+
|
|
902
|
+
@_function_forbid_reuse
|
|
903
|
+
def rand_like(input, seed=None, *, dtype=None):
|
|
904
|
+
r"""
|
|
905
|
+
Returns a new tensor that fills numbers from the uniform distribution over an interval :math:`[0, 1)`
|
|
906
|
+
based on the given shape and dtype.
|
|
907
|
+
|
|
908
|
+
Args:
|
|
909
|
+
input (Tensor): Input Tensor to specify the output shape and its default dtype.
|
|
910
|
+
seed (int, optional): Random seed, must be greater or equal to 0. Default: None, and 0 will be used.
|
|
911
|
+
|
|
912
|
+
Keyword Args:
|
|
913
|
+
dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype, it must be float type. If None,
|
|
914
|
+
the same dtype of `input` will be applied. Default: None.
|
|
915
|
+
|
|
916
|
+
Returns:
|
|
917
|
+
Tensor, with the designated shape and dtype, filled with random numbers from the uniform distribution on
|
|
918
|
+
the interval :math:`[0, 1)`.
|
|
919
|
+
|
|
920
|
+
Raises:
|
|
921
|
+
TypeError: If `seed` is not a non-negative integer.
|
|
922
|
+
ValueError: If `dtype` is not a `mstype.float_type` type.
|
|
923
|
+
|
|
924
|
+
Supported Platforms:
|
|
925
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
926
|
+
|
|
927
|
+
Examples:
|
|
928
|
+
>>> import mindspore as ms
|
|
929
|
+
>>> from mindspore import Tensor, ops
|
|
930
|
+
>>> a = Tensor([[2, 3, 4], [1, 2, 3]])
|
|
931
|
+
>>> print(ops.rand_like(a, dtype=ms.float32))
|
|
932
|
+
[[4.1702199e-01 9.9718481e-01 7.2032452e-01]
|
|
933
|
+
[9.3255734e-01 1.1438108e-04 1.2812445e-01]]
|
|
934
|
+
"""
|
|
935
|
+
|
|
936
|
+
if dtype is None:
|
|
937
|
+
dtype = input.dtype
|
|
938
|
+
elif dtype not in mstype.float_type:
|
|
939
|
+
raise ValueError(f"For 'rand_like', the 'dtype' must be a float type, but got {dtype}.")
|
|
940
|
+
shape = input.shape
|
|
941
|
+
cast_ = P.Cast()
|
|
942
|
+
seed1, seed2 = _get_seed(seed, 'rand_like')
|
|
943
|
+
rand_op = P.UniformReal(seed1, seed2)
|
|
944
|
+
output = rand_op(shape)
|
|
945
|
+
return cast_(output, dtype)
|
|
946
|
+
|
|
947
|
+
|
|
948
|
+
@_function_forbid_reuse
|
|
949
|
+
def randn(*size, dtype=None, seed=None):
|
|
950
|
+
r"""
|
|
951
|
+
Returns a new Tensor with given shape and dtype, filled with a sample (or samples)
|
|
952
|
+
from the standard normal distribution.
|
|
953
|
+
|
|
954
|
+
Args:
|
|
955
|
+
size (Union[int, tuple(int), list(int)]): Shape of the new tensor, e.g., :math:`(2, 3)` or :math:`2`.
|
|
956
|
+
|
|
957
|
+
Keyword Args:
|
|
958
|
+
dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype, it must be float type. If None,
|
|
959
|
+
`mindspore.float32` will be used. Default: None.
|
|
960
|
+
seed (int, optional): Random seed, must be greater or equal to 0. Default: None, and 0 will be used.
|
|
961
|
+
|
|
962
|
+
Returns:
|
|
963
|
+
Tensor, with the designated shape and dtype, filled with a sample (or samples) from the
|
|
964
|
+
"standard normal" distribution.
|
|
965
|
+
|
|
966
|
+
Raises:
|
|
967
|
+
TypeError: `seed` is not a non-negative integer.
|
|
968
|
+
ValueError: If `dtype` is not a `mstype.float_type`.
|
|
969
|
+
ValueError: If `size` contains invalid number.
|
|
970
|
+
|
|
971
|
+
Supported Platforms:
|
|
972
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
973
|
+
|
|
974
|
+
Examples:
|
|
975
|
+
>>> import mindspore.ops as ops
|
|
976
|
+
>>> print(ops.randn((2, 2)))
|
|
977
|
+
[[ 0.30639967 -0.42438635]
|
|
978
|
+
[-0.4287376 1.3054721 ]]
|
|
979
|
+
"""
|
|
980
|
+
if dtype is None:
|
|
981
|
+
dtype = mstype.float32
|
|
982
|
+
elif dtype not in mstype.float_type:
|
|
983
|
+
raise ValueError(f"For 'randn', the 'dtype' must be a float type, but got {dtype}.")
|
|
984
|
+
shape = _generate_shapes(size)
|
|
985
|
+
cast_ = P.Cast()
|
|
986
|
+
seed1, seed2 = _get_seed(seed, 'randn')
|
|
987
|
+
rand_op = P.StandardNormal(seed1, seed2)
|
|
988
|
+
output = rand_op(shape)
|
|
989
|
+
return cast_(output, dtype)
|
|
990
|
+
|
|
991
|
+
|
|
992
|
+
@_function_forbid_reuse
|
|
993
|
+
def randn_like(input, seed=None, *, dtype=None):
|
|
994
|
+
r"""
|
|
995
|
+
Returns a new Tensor with given shape and dtype, filled with a sample (or samples) from the standard normal
|
|
996
|
+
distribution.
|
|
997
|
+
|
|
998
|
+
Args:
|
|
999
|
+
input (Tensor): Input Tensor to specify the output shape and its default dtype.
|
|
1000
|
+
seed (int, optional): Random seed, must be greater or equal to 0. Default: None, and 0 will be used.
|
|
1001
|
+
|
|
1002
|
+
Keyword Args:
|
|
1003
|
+
dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype, it must be float type. If None,
|
|
1004
|
+
`mindspore.float32` will be used. Default: None.
|
|
1005
|
+
|
|
1006
|
+
Returns:
|
|
1007
|
+
Tensor, with the designated shape and dtype, filled with a sample (or samples) from the
|
|
1008
|
+
"standard normal" distribution.
|
|
1009
|
+
|
|
1010
|
+
Raises:
|
|
1011
|
+
TypeError: `seed` is not a non-negative integer.
|
|
1012
|
+
ValueError: If `dtype` is not a `mstype.float_type`.
|
|
1013
|
+
|
|
1014
|
+
Supported Platforms:
|
|
1015
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
1016
|
+
|
|
1017
|
+
Examples:
|
|
1018
|
+
>>> import mindspore as ms
|
|
1019
|
+
>>> from mindspore import Tensor, ops
|
|
1020
|
+
>>> a = Tensor([[1, 2, 3], [4, 5, 6]])
|
|
1021
|
+
>>> print(ops.randn_like(a, dtype=ms.float32))
|
|
1022
|
+
[[ 0.30639967 -0.42438635 -0.20454668]
|
|
1023
|
+
[-0.4287376 1.3054721 0.64747655]]
|
|
1024
|
+
"""
|
|
1025
|
+
if dtype is None:
|
|
1026
|
+
dtype = input.dtype
|
|
1027
|
+
elif dtype not in mstype.float_type:
|
|
1028
|
+
raise ValueError(f"For 'randn_like', the 'dtype' must be a float type, but got {dtype}.")
|
|
1029
|
+
shape = input.shape
|
|
1030
|
+
cast_ = P.Cast()
|
|
1031
|
+
seed1, seed2 = _get_seed(seed, 'randn_like')
|
|
1032
|
+
rand_op = P.StandardNormal(seed1, seed2)
|
|
1033
|
+
output = rand_op(shape)
|
|
1034
|
+
return cast_(output, dtype)
|
|
1035
|
+
|
|
1036
|
+
|
|
1037
|
+
@_function_forbid_reuse
|
|
1038
|
+
def randint(low, high, size, seed=None, *, dtype=None):
|
|
1039
|
+
r"""
|
|
1040
|
+
Returns a Tensor whose elements are random integers in the range of [ `low` , `high` ) .
|
|
1041
|
+
|
|
1042
|
+
Args:
|
|
1043
|
+
low (int): Start value of interval.
|
|
1044
|
+
high (int): End value of interval.
|
|
1045
|
+
size (tuple): Shape of the new tensor.
|
|
1046
|
+
seed (int, optional): Random seed, must be greater or equal to 0. Default: None, and 0 will be used.
|
|
1047
|
+
|
|
1048
|
+
Keyword Args:
|
|
1049
|
+
dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype, it must be int type. If None,
|
|
1050
|
+
`mindspore.int64` will be used. Default: None.
|
|
1051
|
+
|
|
1052
|
+
Returns:
|
|
1053
|
+
Tensor, with the designated shape and dtype, filled with random integers from low (inclusive)
|
|
1054
|
+
to high (exclusive).
|
|
1055
|
+
|
|
1056
|
+
Raises:
|
|
1057
|
+
TypeError: `seed` is not a non-negative integer.
|
|
1058
|
+
TypeError: `size` is not a tuple.
|
|
1059
|
+
TypeError: `low` or `high` is not an integer.
|
|
1060
|
+
ValueError: If `dtype` is not a `mstype.int_type`.
|
|
1061
|
+
|
|
1062
|
+
|
|
1063
|
+
Supported Platforms:
|
|
1064
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
1065
|
+
|
|
1066
|
+
Examples:
|
|
1067
|
+
>>> import mindspore.ops as ops
|
|
1068
|
+
>>> print(ops.randint(1, 10, (2,3)))
|
|
1069
|
+
[[4 9 7]
|
|
1070
|
+
[9 1 2]]
|
|
1071
|
+
"""
|
|
1072
|
+
if dtype is None:
|
|
1073
|
+
dtype = mstype.int64
|
|
1074
|
+
elif dtype not in mstype.int_type:
|
|
1075
|
+
raise ValueError(f"For 'randint', the 'dtype' must be an int type, but got {dtype}.")
|
|
1076
|
+
if not isinstance(size, tuple):
|
|
1077
|
+
raise ValueError(f"For 'randint', the input 'size' must be a tuple, but got {size}.")
|
|
1078
|
+
if not isinstance(low, int) or not isinstance(high, int):
|
|
1079
|
+
raise TypeError(f"For 'randint', 'low' and 'high' must be an int, but got {type(low)} and {type(high)}.")
|
|
1080
|
+
seed1, seed2 = _get_seed(seed, 'randint')
|
|
1081
|
+
cast_ = P.Cast()
|
|
1082
|
+
rand_op = P.UniformInt(seed1, seed2)
|
|
1083
|
+
low_ = Tensor(low, mstype.int32)
|
|
1084
|
+
high_ = Tensor(high, mstype.int32)
|
|
1085
|
+
output = rand_op(size, low_, high_)
|
|
1086
|
+
return cast_(output, dtype)
|
|
1087
|
+
|
|
1088
|
+
|
|
1089
|
+
@_function_forbid_reuse
|
|
1090
|
+
def randint_like(input, low, high, seed=None, *, dtype=None):
|
|
1091
|
+
r"""
|
|
1092
|
+
Returns a tensor with the same shape as Tensor `input` whose elements are random integers in the range
|
|
1093
|
+
of [ `low` , `high` ) .
|
|
1094
|
+
|
|
1095
|
+
Args:
|
|
1096
|
+
input (Tensor): Input Tensor to specify the output shape and its default dtype.
|
|
1097
|
+
low(int): Start value of interval.
|
|
1098
|
+
high(int): End value of interval.
|
|
1099
|
+
seed (int, optional): Random seed, must be greater or equal to 0. Default: None, and 0 will be used.
|
|
1100
|
+
|
|
1101
|
+
Keyword Args:
|
|
1102
|
+
dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype, it must be int type. If None,
|
|
1103
|
+
`mindspore.int64` will be used. Default is `mindspore.int64`.
|
|
1104
|
+
|
|
1105
|
+
Returns:
|
|
1106
|
+
Tensor, with the designated shape and dtype, filled with random integers from low (inclusive)
|
|
1107
|
+
to high (exclusive).
|
|
1108
|
+
|
|
1109
|
+
Raises:
|
|
1110
|
+
TypeError: `seed` is not a non-negative integer.
|
|
1111
|
+
TypeError: `low` or `high` is not an integer.
|
|
1112
|
+
ValueError: If `dtype` is not a `mstype.int_type`.
|
|
1113
|
+
|
|
1114
|
+
Supported Platforms:
|
|
1115
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
1116
|
+
|
|
1117
|
+
Examples:
|
|
1118
|
+
>>> from mindspore import Tensor, ops
|
|
1119
|
+
>>> a = Tensor([[1, 2, 3], [3, 2, 1]])
|
|
1120
|
+
>>> print(ops.randint_like(a, 1, 10))
|
|
1121
|
+
[[4 9 7]
|
|
1122
|
+
[9 1 2]]
|
|
1123
|
+
"""
|
|
1124
|
+
if dtype is None:
|
|
1125
|
+
dtype = input.dtype
|
|
1126
|
+
elif dtype not in mstype.int_type:
|
|
1127
|
+
raise ValueError(f"For 'randint_like', the 'dtype' must be an int type, but got {dtype}.")
|
|
1128
|
+
if not isinstance(low, int) or not isinstance(high, int):
|
|
1129
|
+
raise TypeError(f"For 'randint_like', 'low' and 'high' must be an int, but got {type(low)} and {type(high)}.")
|
|
1130
|
+
size = input.shape
|
|
1131
|
+
seed1, seed2 = _get_seed(seed, 'randint_like')
|
|
1132
|
+
rand_op = P.UniformInt(seed1, seed2)
|
|
1133
|
+
cast_ = P.Cast()
|
|
1134
|
+
low_ = Tensor(low, mstype.int32)
|
|
1135
|
+
high_ = Tensor(high, mstype.int32)
|
|
1136
|
+
output = rand_op(size, low_, high_)
|
|
1137
|
+
return cast_(output, dtype)
|
|
1138
|
+
|
|
1139
|
+
|
|
1140
|
+
@_function_forbid_reuse
|
|
1141
|
+
def poisson(shape, mean, seed=None):
|
|
1142
|
+
r"""
|
|
1143
|
+
The ops.poisson is deprecated, please use :class:`mindspore.ops.random_poisson`
|
|
1144
|
+
Generates random numbers according to the Poisson random number distribution.
|
|
1145
|
+
|
|
1146
|
+
.. math::
|
|
1147
|
+
|
|
1148
|
+
\text{P}(i|\mu) = \frac{\exp(-\mu)\mu^{i}}{i!}
|
|
1149
|
+
|
|
1150
|
+
Args:
|
|
1151
|
+
shape (tuple): The shape of random tensor to be generated.
|
|
1152
|
+
The format is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
|
|
1153
|
+
mean (Tensor): The mean μ distribution parameter. It should be greater than 0 with float32 data type.
|
|
1154
|
+
seed (int): Seed is used as entropy source for the random number engines to generate pseudo-random numbers
|
|
1155
|
+
and must be non-negative. Default: None, which will be treated as 0.
|
|
1156
|
+
|
|
1157
|
+
Returns:
|
|
1158
|
+
Tensor. The shape should be equal to the broadcasted shape between the input "shape" and shapes of `mean`.
|
|
1159
|
+
The dtype is float32.
|
|
1160
|
+
|
|
1161
|
+
Raises:
|
|
1162
|
+
TypeError: If `shape` is not a tuple.
|
|
1163
|
+
TypeError: If `mean` is not a Tensor whose dtype is not float32.
|
|
1164
|
+
TypeError: If `seed` is not an int.
|
|
1165
|
+
|
|
1166
|
+
Supported Platforms:
|
|
1167
|
+
deprecated
|
|
1168
|
+
|
|
1169
|
+
Examples:
|
|
1170
|
+
>>> from mindspore import Tensor, ops
|
|
1171
|
+
>>> import mindspore
|
|
1172
|
+
>>> # case 1: It can be broadcast.
|
|
1173
|
+
>>> shape = (4, 1)
|
|
1174
|
+
>>> mean = Tensor(np.array([5.0, 10.0]), mindspore.float32)
|
|
1175
|
+
>>> output = ops.poisson(shape, mean, seed=5)
|
|
1176
|
+
>>> result = output.shape
|
|
1177
|
+
>>> print(result)
|
|
1178
|
+
(4, 2)
|
|
1179
|
+
>>> # case 2: It can not be broadcast. It is recommended to use the same shape.
|
|
1180
|
+
>>> shape = (2, 2)
|
|
1181
|
+
>>> mean = Tensor(np.array([[5.0, 10.0], [5.0, 1.0]]), mindspore.float32)
|
|
1182
|
+
>>> output = ops.poisson(shape, mean, seed=5)
|
|
1183
|
+
>>> result = output.shape
|
|
1184
|
+
>>> print(result)
|
|
1185
|
+
(2, 2)
|
|
1186
|
+
"""
|
|
1187
|
+
seed1, seed2 = _get_seed(seed, "poisson")
|
|
1188
|
+
random_poisson_op = P.Poisson(seed1, seed2)
|
|
1189
|
+
value = random_poisson_op(shape, mean)
|
|
1190
|
+
return value
|
|
1191
|
+
|
|
1192
|
+
|
|
1193
|
+
@_function_forbid_reuse
|
|
1194
|
+
def multinomial(input, num_samples, replacement=True, seed=None):
|
|
1195
|
+
r"""
|
|
1196
|
+
Returns a tensor sampled from the multinomial probability distribution located in the corresponding
|
|
1197
|
+
row of the input tensor.
|
|
1198
|
+
|
|
1199
|
+
Note:
|
|
1200
|
+
The rows of input do not need to sum to one (in which case we use the values as weights),
|
|
1201
|
+
but must be non-negative, finite and have a non-zero sum.
|
|
1202
|
+
|
|
1203
|
+
Args:
|
|
1204
|
+
input (Tensor): The input tensor containing probabilities, must be 1 or 2 dimensions, with
|
|
1205
|
+
float32 data type.
|
|
1206
|
+
num_samples (int): Number of samples to draw.
|
|
1207
|
+
replacement (bool, optional): Whether to draw with replacement or not, default: True.
|
|
1208
|
+
seed (int, optional): Seed is used as entropy source for the random number engines to generate
|
|
1209
|
+
pseudo-random numbers, must be non-negative. Default: None.
|
|
1210
|
+
|
|
1211
|
+
Returns:
|
|
1212
|
+
Tensor, has the same rows with input. The number of sampled indices of each row is `num_samples`.
|
|
1213
|
+
The dtype is float32.
|
|
1214
|
+
|
|
1215
|
+
Raises:
|
|
1216
|
+
TypeError: If `input` is not a Tensor whose dtype is not float32.
|
|
1217
|
+
TypeError: If `num_samples` is not an int.
|
|
1218
|
+
TypeError: If `seed` is neither an int nor None.
|
|
1219
|
+
|
|
1220
|
+
Supported Platforms:
|
|
1221
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
1222
|
+
|
|
1223
|
+
Examples:
|
|
1224
|
+
>>> import mindspore
|
|
1225
|
+
>>> from mindspore import Tensor, ops
|
|
1226
|
+
>>> from mindspore import dtype as mstype
|
|
1227
|
+
>>> # case 1: The output is random, and the length of the output is the same as num_sample.
|
|
1228
|
+
>>> input = Tensor([0, 9, 4, 0], mindspore.float32)
|
|
1229
|
+
>>> output = ops.multinomial(input, 2)
|
|
1230
|
+
>>> # print(output)
|
|
1231
|
+
>>> # [1 2] or [2 1]
|
|
1232
|
+
>>> # The result may be [2 1] across multiple runs.
|
|
1233
|
+
>>> # This is because the value corresponding to the index 1 is larger than the value of the index 2.
|
|
1234
|
+
>>> print(len(output))
|
|
1235
|
+
2
|
|
1236
|
+
>>> # case 2: The output is random, and the length of the output is the same as num_sample.
|
|
1237
|
+
>>> # replacement is False(Default).
|
|
1238
|
+
>>> # If the extracted value is 0, the index value of 1 will be returned.
|
|
1239
|
+
>>> input = Tensor([0, 9, 4, 0], mstype.float32)
|
|
1240
|
+
>>> output = ops.multinomial(input, 4)
|
|
1241
|
+
>>> print(output)
|
|
1242
|
+
[1 1 2 1]
|
|
1243
|
+
>>> # case 3: The output is random, num_sample == x_length = 4, and replacement is True,
|
|
1244
|
+
>>> # Can extract the same elements.
|
|
1245
|
+
>>> input = Tensor([0, 9, 4, 0], mstype.float32)
|
|
1246
|
+
>>> output = ops.multinomial(input, 4, True)
|
|
1247
|
+
>>> print(output)
|
|
1248
|
+
[1 1 2 2]
|
|
1249
|
+
"""
|
|
1250
|
+
shape = P.Shape()
|
|
1251
|
+
reshape = P.Reshape()
|
|
1252
|
+
const_utils.check_valid_dim(len(shape(input)), "multinomial")
|
|
1253
|
+
seed1, seed2 = _get_seed(seed, "multinomial")
|
|
1254
|
+
if not replacement:
|
|
1255
|
+
if shape(input)[-1] < num_samples:
|
|
1256
|
+
const_utils.raise_value_error("For 'multinomial', the 'num_samples' must be less than "
|
|
1257
|
+
"the last dimension of input without 'replacement', "
|
|
1258
|
+
"but got 'num_samples': {} and "
|
|
1259
|
+
"'replacement': {}".format(num_samples, replacement))
|
|
1260
|
+
n_dist = 1
|
|
1261
|
+
if len(shape(input)) > 1:
|
|
1262
|
+
n_dist = shape(input)[-2]
|
|
1263
|
+
random_uniform = P.UniformReal(seed1, seed2)((n_dist * shape(input)[-1],))
|
|
1264
|
+
if n_dist != 1:
|
|
1265
|
+
random_uniform = reshape(random_uniform, (n_dist, shape(input)[-1]))
|
|
1266
|
+
vals = P.RealDiv()(P.Log()(random_uniform), input + 1e-6)
|
|
1267
|
+
_, indices = P.TopK()(vals, num_samples)
|
|
1268
|
+
return indices
|
|
1269
|
+
return P.Multinomial(seed1, seed2)(input, num_samples)
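Note: the without-replacement branch above ranks log(U)/w and keeps the top-k indices, i.e. the Efraimidis-Spirakis weighted sampling trick (equivalent to ranking U**(1/w)). A NumPy re-derivation of that step, for illustration only:

    import numpy as np

    rng = np.random.default_rng(0)
    weights = np.array([0.0, 9.0, 4.0, 0.0]) + 1e-6   # same epsilon as the code above
    keys = np.log(rng.random(weights.shape)) / weights
    picked = np.argsort(keys)[::-1][:2]               # indices of the 2 largest keys
    print(picked)                                     # two distinct indices, biased toward larger weights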
|
|
1270
|
+
|
|
1271
|
+
|
|
1272
|
+
def _check_shape(input_shape):
|
|
1273
|
+
"""Check 'shape' value."""
|
|
1274
|
+
if not isinstance(input_shape, tuple):
|
|
1275
|
+
const_utils.raise_type_error("Type of 'shape' must be tuple, but got: {}".format(type(input_shape)))
|
|
1276
|
+
for item in input_shape:
|
|
1277
|
+
if not isinstance(item, int):
|
|
1278
|
+
const_utils.raise_type_error("Elements of 'shape' must be int, but got: {}".format(type(item)))
|
|
1279
|
+
if item < 1:
|
|
1280
|
+
const_utils.raise_value_error("Elements of 'shape' must be positive int, but got: {}".format(item))
|
|
1281
|
+
return True
|
|
1282
|
+
|
|
1283
|
+
|
|
1284
|
+
def _check_param(op_name, param_name, param_value):
|
|
1285
|
+
"""Check type of param_value is Tensor, int, or float."""
|
|
1286
|
+
if not isinstance(param_value, (Tensor, int, float)):
|
|
1287
|
+
const_utils.raise_type_error("For '{}', the type of '{}' must be Tensor, int, or float, "
|
|
1288
|
+
"but got: {}".format(op_name, param_name, type(param_value)))
|
|
1289
|
+
return True
|
|
1290
|
+
|
|
1291
|
+
|
|
426
1292
|
  __all__ = [
- 'standard_laplace',
- '
- '
- '
- 'random_gamma',
- 'uniform_candidate_sampler',
- 'random_poisson',
- 'random_shuffle',
+ 'standard_laplace', 'random_categorical', 'uniform', 'standard_normal', 'random_gamma',
+ 'uniform_candidate_sampler', 'random_poisson', 'log_uniform_candidate_sampler', 'shuffle', 'choice_with_mask',
+ 'normal', 'laplace', 'gamma', 'poisson', 'multinomial', 'rand', 'rand_like', 'randn', 'randn_like', 'randint',
+ 'randint_like', 'multinomial_with_replacement', 'randperm'
  ]
  __all__.sort()