mindspore 2.4.10__cp310-cp310-win_amd64.whl → 2.6.0__cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore has been flagged as possibly problematic.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +13 -6
- mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -38
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +83 -0
- mindspore/_extends/parse/deprecated/__init__.py +0 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +394 -0
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +47 -198
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +229 -99
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +17 -12
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +11 -5
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +138 -43
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +6 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -2
- mindspore/common/_stub_tensor.py +30 -14
- mindspore/common/_tensor_cpp_method.py +17 -0
- mindspore/common/_tensor_docs.py +4760 -0
- mindspore/common/api.py +480 -372
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +39 -36
- mindspore/common/dump.py +9 -6
- mindspore/common/file_system.py +9 -1
- mindspore/common/generator.py +5 -0
- mindspore/common/hook_handle.py +6 -2
- mindspore/common/initializer.py +13 -10
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +9 -3
- mindspore/common/mindir_util.py +10 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +135 -52
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +975 -1981
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +52 -2
- mindspore/communication/comm_func.py +240 -181
- mindspore/communication/management.py +95 -26
- mindspore/context.py +324 -573
- mindspore/dataset/__init__.py +65 -37
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/callback/ds_callback.py +2 -1
- mindspore/dataset/core/config.py +87 -6
- mindspore/dataset/engine/cache_admin.py +3 -3
- mindspore/dataset/engine/cache_client.py +6 -5
- mindspore/dataset/engine/datasets.py +292 -267
- mindspore/dataset/engine/datasets_audio.py +22 -8
- mindspore/dataset/engine/datasets_standard_format.py +46 -27
- mindspore/dataset/engine/datasets_text.py +78 -48
- mindspore/dataset/engine/datasets_user_defined.py +183 -117
- mindspore/dataset/engine/datasets_vision.py +120 -44
- mindspore/dataset/engine/iterators.py +283 -63
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/obs/util.py +8 -0
- mindspore/dataset/engine/queue.py +40 -0
- mindspore/dataset/engine/samplers.py +289 -43
- mindspore/dataset/engine/serializer_deserializer.py +3 -2
- mindspore/dataset/engine/validators.py +53 -11
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/py_transforms_util.py +17 -0
- mindspore/dataset/transforms/transforms.py +31 -14
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/dataset/vision/validators.py +1 -2
- mindspore/device_context/__init__.py +21 -0
- mindspore/device_context/ascend/__init__.py +25 -0
- mindspore/device_context/ascend/device.py +72 -0
- mindspore/device_context/ascend/op_debug.py +153 -0
- mindspore/device_context/ascend/op_precision.py +193 -0
- mindspore/device_context/ascend/op_tuning.py +123 -0
- mindspore/{ops_generate/gen_constants.py → device_context/cpu/__init__.py} +6 -17
- mindspore/device_context/cpu/device.py +62 -0
- mindspore/device_context/cpu/op_tuning.py +43 -0
- mindspore/device_context/gpu/__init__.py +21 -0
- mindspore/device_context/gpu/device.py +70 -0
- mindspore/device_context/gpu/op_precision.py +67 -0
- mindspore/device_context/gpu/op_tuning.py +175 -0
- mindspore/device_manager.py +170 -0
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/llm_boost/__init__.py +1 -0
- mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +209 -0
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
- mindspore/experimental/llm_boost/register.py +1 -0
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +6 -6
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +7 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +73 -46
- mindspore/experimental/optim/radam.py +34 -31
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -53
- mindspore/hal/event.py +52 -52
- mindspore/hal/memory.py +179 -120
- mindspore/hal/stream.py +150 -109
- mindspore/include/api/context.h +0 -1
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +50 -0
- mindspore/mindrecord/__init__.py +21 -8
- mindspore/mindrecord/config.py +17 -316
- mindspore/mindrecord/filereader.py +1 -9
- mindspore/mindrecord/filewriter.py +5 -15
- mindspore/mindrecord/mindpage.py +1 -9
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +798 -761
- mindspore/mint/distributed/__init__.py +70 -4
- mindspore/mint/distributed/distributed.py +2679 -44
- mindspore/mint/linalg/__init__.py +8 -0
- mindspore/mint/nn/__init__.py +743 -22
- mindspore/mint/nn/functional.py +716 -23
- mindspore/mint/nn/layer/__init__.py +21 -4
- mindspore/mint/nn/layer/_functions.py +334 -0
- mindspore/mint/nn/layer/activation.py +276 -1
- mindspore/mint/nn/layer/basic.py +123 -0
- mindspore/mint/nn/layer/conv.py +933 -0
- mindspore/mint/nn/layer/normalization.py +223 -28
- mindspore/mint/nn/layer/padding.py +797 -0
- mindspore/mint/nn/layer/pooling.py +235 -0
- mindspore/mint/optim/__init__.py +3 -1
- mindspore/mint/optim/adam.py +223 -0
- mindspore/mint/optim/adamw.py +26 -19
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/mint/special/__init__.py +2 -1
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/multiprocessing/__init__.py +5 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1373 -192
- mindspore/nn/dynamic_lr.py +2 -1
- mindspore/nn/layer/activation.py +29 -27
- mindspore/nn/layer/basic.py +51 -35
- mindspore/nn/layer/channel_shuffle.py +3 -3
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +53 -42
- mindspore/nn/layer/embedding.py +12 -11
- mindspore/nn/layer/normalization.py +56 -49
- mindspore/nn/layer/padding.py +4 -3
- mindspore/nn/layer/pooling.py +120 -42
- mindspore/nn/layer/rnn_cells.py +1 -1
- mindspore/nn/layer/rnns.py +2 -1
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +59 -36
- mindspore/nn/learning_rate_schedule.py +8 -4
- mindspore/nn/loss/loss.py +58 -55
- mindspore/nn/optim/ada_grad.py +7 -5
- mindspore/nn/optim/adadelta.py +11 -9
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +19 -15
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +3 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lars.py +1 -4
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +3 -3
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +11 -9
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/optim/thor.py +2 -1
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/utils/init.py +13 -11
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +181 -122
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +6 -7
- mindspore/numpy/array_creations.py +63 -65
- mindspore/numpy/array_ops.py +149 -144
- mindspore/numpy/logic_ops.py +41 -42
- mindspore/numpy/math_ops.py +361 -359
- mindspore/numpy/utils.py +17 -18
- mindspore/numpy/utils_const.py +5 -6
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +5 -3
- mindspore/ops/_grad_experimental/grad_comm_ops.py +112 -16
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -2
- mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -65
- mindspore/ops/_vmap/vmap_array_ops.py +52 -25
- mindspore/ops/_vmap/vmap_base.py +0 -2
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +21 -14
- mindspore/ops/_vmap/vmap_math_ops.py +15 -16
- mindspore/ops/_vmap/vmap_nn_ops.py +29 -42
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +258 -46
- mindspore/ops/auto_generate/gen_extend_func.py +757 -185
- mindspore/ops/auto_generate/gen_ops_def.py +4197 -2243
- mindspore/ops/auto_generate/gen_ops_prim.py +16976 -6055
- mindspore/ops/auto_generate/pyboost_inner_prim.py +221 -87
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +20 -25
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +228 -30
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -30
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +40 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +2089 -2403
- mindspore/ops/function/clip_func.py +80 -23
- mindspore/ops/function/debug_func.py +57 -57
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +2 -2
- mindspore/ops/function/linalg_func.py +47 -78
- mindspore/ops/function/math_func.py +4351 -3813
- mindspore/ops/function/nn_func.py +1712 -637
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +18 -84
- mindspore/ops/function/random_func.py +452 -387
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +6 -6
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +26 -18
- mindspore/ops/functional.py +23 -7
- mindspore/ops/functional_overload.py +1548 -0
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +23 -15
- mindspore/ops/operations/_custom_ops_utils.py +235 -0
- mindspore/ops/operations/_embedding_cache_ops.py +4 -4
- mindspore/ops/operations/_grad_ops.py +2 -43
- mindspore/ops/operations/_infer_ops.py +2 -1
- mindspore/ops/operations/_inner_ops.py +43 -84
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +81 -324
- mindspore/ops/operations/comm_ops.py +154 -108
- mindspore/ops/operations/custom_ops.py +298 -87
- mindspore/ops/operations/debug_ops.py +157 -59
- mindspore/ops/operations/inner_ops.py +7 -5
- mindspore/ops/operations/linalg_ops.py +1 -57
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +928 -180
- mindspore/ops/operations/math_ops.py +32 -234
- mindspore/ops/operations/nn_ops.py +212 -531
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +66 -53
- mindspore/ops/tensor_method.py +1895 -0
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +135 -0
- mindspore/ops_generate/aclnn/gen_aclnn_implement.py +257 -0
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/api/add_tensor_docs_generator.py +56 -0
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +105 -0
- mindspore/ops_generate/api/functional_map_cpp_generator.py +504 -0
- mindspore/ops_generate/api/functional_overload_py_generator.py +112 -0
- mindspore/ops_generate/api/functions_cc_generator.py +237 -0
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/api/op_api_proto.py +235 -0
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +461 -0
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/base_generator.py +11 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/common/gen_utils.py +348 -0
- mindspore/ops_generate/common/op_proto.py +473 -0
- mindspore/ops_generate/common/template.py +523 -0
- mindspore/ops_generate/gen_ops.py +22 -1069
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +191 -0
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +296 -0
- mindspore/ops_generate/op_def/ops_def_h_generator.py +74 -0
- mindspore/ops_generate/op_def/ops_name_h_generator.py +83 -0
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/op_def_py/op_def_py_generator.py +132 -0
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +489 -0
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +139 -0
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +93 -0
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +175 -0
- mindspore/ops_generate/pyboost/op_template_parser.py +517 -0
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +407 -0
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +100 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +148 -0
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +155 -0
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +132 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +272 -0
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +938 -0
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +357 -0
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +179 -36
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +7 -3
- mindspore/parallel/_auto_parallel_context.py +159 -40
- mindspore/parallel/_cell_wrapper.py +132 -15
- mindspore/parallel/_parallel_serialization.py +107 -5
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +199 -23
- mindspore/parallel/algo_parameter_config.py +4 -4
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +700 -35
- mindspore/parallel/cluster/process_entity/_api.py +276 -50
- mindspore/parallel/cluster/process_entity/_utils.py +41 -6
- mindspore/parallel/cluster/run.py +21 -4
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +258 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +25 -14
- mindspore/parallel/shard.py +137 -59
- mindspore/parallel/transform_safetensors.py +364 -305
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +22 -5
- mindspore/profiler/analysis/__init__.py +0 -0
- mindspore/profiler/analysis/parser/__init__.py +0 -0
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +170 -0
- mindspore/profiler/analysis/parser/base_parser.py +158 -0
- mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
- mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
- mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +264 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +109 -0
- mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
- mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
- mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
- mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +415 -0
- mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
- mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
- mindspore/profiler/analysis/task_manager.py +131 -0
- mindspore/profiler/analysis/time_converter.py +84 -0
- mindspore/profiler/analysis/viewer/__init__.py +0 -0
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +372 -0
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +250 -0
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +320 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +327 -0
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +376 -0
- mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
- mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +96 -0
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
- mindspore/profiler/analysis/work_flow.py +73 -0
- mindspore/profiler/common/ascend_msprof_exporter.py +139 -0
- mindspore/profiler/common/command_executor.py +90 -0
- mindspore/profiler/common/constant.py +186 -3
- mindspore/profiler/common/file_manager.py +208 -0
- mindspore/profiler/common/log.py +130 -0
- mindspore/profiler/common/msprof_cmd_tool.py +221 -0
- mindspore/profiler/common/path_manager.py +395 -0
- mindspore/profiler/common/process_bar.py +168 -0
- mindspore/profiler/common/process_pool.py +9 -3
- mindspore/profiler/common/profiler_context.py +500 -0
- mindspore/profiler/common/profiler_info.py +304 -0
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_output_path.py +284 -0
- mindspore/profiler/common/profiler_parameters.py +251 -0
- mindspore/profiler/common/profiler_path_manager.py +179 -0
- mindspore/profiler/common/record_function.py +76 -0
- mindspore/profiler/common/tlv_decoder.py +76 -0
- mindspore/profiler/common/util.py +75 -2
- mindspore/profiler/dynamic_profiler.py +341 -75
- mindspore/profiler/envprofiler.py +163 -0
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +242 -0
- mindspore/profiler/platform/__init__.py +21 -0
- mindspore/profiler/platform/base_profiler.py +40 -0
- mindspore/profiler/platform/cpu_profiler.py +124 -0
- mindspore/profiler/platform/gpu_profiler.py +74 -0
- mindspore/profiler/platform/npu_profiler.py +335 -0
- mindspore/profiler/profiler.py +1073 -90
- mindspore/profiler/profiler_action_controller.py +187 -0
- mindspore/profiler/profiler_interface.py +118 -0
- mindspore/profiler/schedule.py +243 -0
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +2 -3
- mindspore/run_check/_check_version.py +27 -20
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +37 -0
- mindspore/runtime/device.py +27 -0
- mindspore/runtime/event.py +209 -0
- mindspore/runtime/executor.py +177 -0
- mindspore/runtime/memory.py +416 -0
- mindspore/runtime/stream.py +460 -0
- mindspore/runtime/thread_bind_core.py +401 -0
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +96 -27
- mindspore/train/amp.py +9 -5
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +53 -55
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_early_stop.py +1 -1
- mindspore/train/callback/_flops_collector.py +103 -68
- mindspore/train/callback/_history.py +8 -5
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +52 -19
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +228 -108
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +15 -16
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +11 -10
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +174 -46
- mindspore/train/model.py +269 -136
- mindspore/train/serialization.py +622 -978
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +2 -3
- mindspore/train/train_thor/model_thor.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +6 -3
- mindspore/utils/dryrun.py +140 -0
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/runtime_execution_order_check.py +552 -0
- mindspore/utils/utils.py +138 -4
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/METADATA +3 -3
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/RECORD +587 -418
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/entry_points.txt +1 -1
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/common/_tensor_overload.py +0 -139
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -197
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_aclnn_implement.py +0 -263
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/gen_pyboost_func.py +0 -1052
- mindspore/ops_generate/gen_utils.py +0 -209
- mindspore/ops_generate/op_proto.py +0 -145
- mindspore/ops_generate/template.py +0 -261
- mindspore/profiler/envprofiling.py +0 -254
- mindspore/profiler/profiling.py +0 -1926
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/WHEEL +0 -0
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/top_level.txt +0 -0
mindspore/ops/function/random_func.py

@@ -31,8 +31,10 @@ from mindspore.ops.auto_generate import randperm
 from mindspore.common.generator import default_generator
 from mindspore.ops.auto_generate import UniformExt, NormalTensorTensor, \
     NormalTensorFloat, NormalFloatTensor, NormalFloatFloat, RandExt, RandLikeExt, MultinomialExt, \
-    Randn, RandnLike, RandInt, RandIntLike, RandpermExt
+    Randn, RandnLike, RandInt, RandIntLike, RandpermExt, InplaceRandom, InplaceNormal
+from mindspore.ops.auto_generate.gen_ops_prim import inplace_uniform_op, inplace_exponential_op

+inplace_normal_ = InplaceNormal()
 normal_tensor_tensor_op = NormalTensorTensor()
 normal_tensor_float_op = NormalTensorFloat()
 normal_float_tensor_op = NormalFloatTensor()
@@ -44,7 +46,7 @@ reshape_ = P.Reshape()
 shape_ = P.Shape()
 top_k_ = P.TopK()
 randperm_ext_ = RandpermExt()
-
+uniform_ext_ = UniformExt()
 rand_ext_ = RandExt()
 rand_like_ext_ = RandLikeExt()
 multinomial_ext_ = MultinomialExt()
@@ -52,7 +54,8 @@ randn_ = Randn()
 randn_like_ = RandnLike()
 randint_ = RandInt()
 randint_like_ = RandIntLike()
-
+inplace_random_ = InplaceRandom()
+generator_step_ = Tensor(12, mstype.int64)


 @constexpr
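Every in-place random helper added in the hunks below reuses the plumbing set up here: the module-level `generator_step_` constant is passed to `Generator._step`, which advances the generator state and returns a `(seed, offset)` pair that the `inplace_*` primitives consume. A minimal sketch of that pattern, assuming the 2.6.0 layout shown in this diff (`_step` is an internal helper and is reproduced only to mirror the diffed code, not as a recommended public API):

    from mindspore import Tensor
    import mindspore.common.dtype as mstype
    from mindspore.common.generator import default_generator

    # Step size consumed per in-place random call, as added in this hunk.
    generator_step_ = Tensor(12, mstype.int64)

    # Advance the default generator; the resulting (seed, offset) pair is what the
    # new primitives such as inplace_uniform_op and inplace_normal_ receive.
    seed, offset = default_generator._step(generator_step_)  # internal API, illustration only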
@@ -64,41 +67,28 @@ def _set_prim_op_user_data(prim, key, value):
 @_function_forbid_reuse
 def random_gamma(shape, alpha, seed=None):
     r"""
-
+    Generate random numbers from the Gamma distribution(s).


     Args:
         shape (Tensor): The shape of random tensor to be generated.
-            Must be one of the following types: int32, int64. 1-D integer tensor.
         alpha (Tensor): The :math:`\alpha` distribution parameter.
-
-        seed (int, optional): Seed is used as entropy source for Random number engines generating pseudo-random numbers.
-            Default: ``None`` , which will be treated as 0.
+        seed (int, optional): Random seed, must be non-negative. Default ``None`` .

     Returns:
-        Tensor
-
-        The dtype is the same type as alpha.
-
-    Raises:
-        TypeError: If `shape` is not a Tensor.
-        TypeError: If `alpha` is not a Tensor.
-        TypeError: If `seed` is not an int.
-        TypeError: If dtype of `alpha` is not half, float32 or float64.
+        Tensor, the shape is `mindspore.ops.concat([shape, rate.shape], axis=0)`.
+        The data type is the same as `alpha`.

     Supported Platforms:
         ``CPU``

     Examples:
-        >>> import numpy as np
         >>> import mindspore
-        >>>
-        >>>
-        >>>
-        >>> output
-
-        >>> print(result)
-        (7, 5, 2)
+        >>> shape = mindspore.tensor([7, 5], mindspore.int32)
+        >>> alpha = mindspore.tensor([0.5, 1.5], mindspore.float32)
+        >>> output = mindspore.ops.random_gamma(shape, alpha, seed=5)
+        >>> print(output.shape, output.dtype)
+        (7, 5, 2) Float32
     """
     seed1, seed2 = _get_seed(seed, "random_gamma")
     random_gamma_op = P.RandomGamma(seed1, seed2)
@@ -118,7 +108,6 @@ def _get_seed(op_seed, kernel_name):
 def standard_laplace(shape, seed=None):
     r"""
     Generates random numbers according to the Laplace random number distribution (mean=0, lambda=1).
-    It is defined as:

     .. math::
         \text{f}(x) = \frac{1}{2}\exp(-|x|)
@@ -128,16 +117,13 @@ def standard_laplace(shape, seed=None):
         the `seed` parameter has no effect.

     Args:
-        shape (Union[tuple, Tensor]): The shape of
-
-        seed (int, optional): Seed is used as entropy source for Random number engines generating pseudo-random numbers.
-            Default: ``None`` .
+        shape (Union[tuple, Tensor]): The shape of returned tensor.
+        seed (int, optional): Random number seed. Default ``None`` .

     Returns:
-        Tensor
+        Tensor

     Raises:
-        TypeError: If shape is neither a tuple nor a Tensor.
         ValueError: If shape is a tuple containing non-positive items.
         ValueError: If shape is a Tensor, and the rank of the Tensor is not equal to 1.

@@ -145,12 +131,11 @@ def standard_laplace(shape, seed=None):
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>>
+        >>> import mindspore
         >>> shape = (4, 4)
-        >>> output = ops.standard_laplace(shape)
-        >>>
-
-        (4, 4)
+        >>> output = mindspore.ops.standard_laplace(shape, seed=5)
+        >>> print(f'output shape is {output.shape}')
+        output shape is (4, 4)
     """
     seed1, seed2 = _get_seed(seed, "standard_laplace")
     standard_laplace_op = P.StandardLaplace(seed=seed1, seed2=seed2)
@@ -206,46 +191,33 @@ def random_categorical(logits, num_sample, seed=0, dtype=mstype.int64):
 @_function_forbid_reuse
 def multinomial_with_replacement(x, seed, offset, numsamples, replacement=False):
     r"""
-
-    multinomial distribution with replacement. It is different from `multinomial` in that it allows
-    the same outcome to be chosen multiple times.
+    Generate a tensor from a multinomial distribution.

     Note:
-        The rows of input do not need to sum to one (in which case we use the values as weights),
-
+        - The rows of input do not need to sum to one (in which case we use the values as weights),
+          but must be non-negative, finite and have a non-zero sum.
+        - If `seed` is set to be ``-1`` , and `offset` is set to be ``0``, the random number
+          generator is seeded by a random seed.

     Args:
-        x (Tensor):
-
-
-
-
-        numsamples (int): the number of samples to draw.
-        replacement (bool, optional): Whether to draw with replacement or not. Default: ``False`` .
+        x (Tensor): The 1-D or 2-D input tensor containing probabilities.
+        seed (int): Random seed.
+        offset (int): Offset.
+        numsamples (int): The number of samples to draw.
+        replacement (bool, optional): Whether to draw with replacement or not. Default ``False`` .

     Returns:
-        Tensor
-
-    Raises:
-        TypeError: If `x` is not a 1D or 2D Tensor.
-        TypeError: If dtype of `x` is not float16, float32 or float64.
-        TypeError: If `numsamples` is not an int.
-        TypeError: If `replacement` is not a bool.
-        ValueError: If the value of `numsamples` is not greater than x_shape[-1] when `replacement` is False.
-        ValueError: If the sum of one row of `x` less than 0.
-        ValueError: If one of the element of each row of `x` less than 0.
-        ValueError: If `numsamples` equal or less than 0.
+        Tensor

     Supported Platforms:
         ``CPU``

     Examples:
-        >>>
-        >>>
-        >>> x
-
-
-        [[1 1]]
+        >>> import mindspore
+        >>> x = mindspore.tensor([[0., 9., 4., 0.]], mindspore.float32)
+        >>> mindspore.ops.multinomial_with_replacement(x, 2, 5, 2, True)
+        Tensor(shape=[1, 2], dtype=Int64, value=
+        [[1, 1]])
     """
     if not isinstance(seed, Tensor):
         if not isinstance(seed, int):
@@ -295,7 +267,58 @@ def uniform_ext(tensor, a, b, generator=None):
         generator = default_generator
     seed, offset = generator._step( # pylint: disable=protected-access
         generator_step_)
-    return
+    return uniform_ext_(tensor, a, b, seed, offset)
+
+
+@_function_forbid_reuse
+def uniform_(input, from_=0, to=1, *, generator=None):
+    r"""
+    Update the `input` tensor in place by generating random numbers sampled from uniform distribution in the half-open
+    interval :math:`[from\_, to)`.
+
+    .. math::
+        P(x)= \frac{1}{to - from\_}
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The origin input tensor.
+        from_ (Union[number.Number, Tensor], optional): The lower bound of the uniform distribution, it can be a scalar
+            value or a tensor of any dimension with a single element. Default: ``0``.
+        to (Union[number.Number, Tensor], optional): The upper bound of the uniform distribution, it can be a scalar
+            value or a tensor of any dimension with a single element. Default: ``1``.
+
+    Keyword Args:
+        generator (:class:`mindspore.Generator`, optional): a pseudorandom number generator.
+            Default: ``None``, uses the default pseudorandom number generator.
+
+    Returns:
+        Tensor, with the same shape and dtype as `input` tensor.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If dtype of `input` is not one of: bool, int8, int16, int32, int64, uint8, float16, float32, float64,
+            bfloat16.
+        TypeError: If `from_` or `to` is neither a number nor a Tensor.
+        TypeError: If dtype of `from` or `to` is not one of: bool, int8, int16, int32, int64, uint8, float32, float64.
+        ValueError: If `from_` or `to` is Tensor but contains multiple elements.
+        RuntimeError: If `from_` is larger than `to`.
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import ops
+        >>> x = ops.ones((4, 2))
+        >>> generator = mindspore.Generator()
+        >>> generator.manual_seed(100)
+        >>> result = ops.function.random_func.uniform_(x, 1., 2., generator=generator)
+        >>> print(result.shape)
+        (4, 2)
+    """
+    if generator is None:
+        generator = default_generator
+    seed, offset = generator._step(generator_step_) # pylint: disable=protected-access
+    return inplace_uniform_op(input, from_, to, seed, offset)


 @_function_forbid_reuse
@@ -307,49 +330,31 @@ def uniform(shape, minval, maxval, seed=None, dtype=mstype.float32):
     The number in tensor minval should be strictly less than maxval at any position after broadcasting.

     Args:
-        shape (Union[tuple, Tensor]): The shape of
-        minval (Tensor):
-
-
-
-            It defines the maximum possible generated value, with int32 or float32 data type.
-            If dtype is int32, only one number is allowed.
-        seed (int): Seed is used as entropy source for the random number engines to generate pseudo-random numbers,
-            must be non-negative. Default: ``None`` , which will be treated as 0.
-        dtype (mindspore.dtype): Type of the Uniform distribution. If it is int32, it generates numbers from discrete
-            uniform distribution; if it is float32, it generates numbers from continuous uniform distribution. It only
-            supports these two data types. Default: mstype.float32.
+        shape (Union[tuple, Tensor]): The shape of returned tensor.
+        minval (Tensor): Defines the minimum possible generated value.
+        maxval (Tensor): Defines the maximum possible generated value.
+        seed (int): Random number seed. Default ``None`` .
+        dtype (mindspore.dtype): Type of the returned tensor.

     Returns:
-        Tensor
-        of `minval` and `maxval`.
-        The dtype is designated as the input `dtype`.
-
-    Raises:
-        TypeError: If `shape` is neither a tuple nor a Tensor.
-        TypeError: If 'minval' or 'maxval' is neither int32 nor float32
-            and dtype of 'minval' is not the same as 'maxval'.
-        TypeError: If `seed` is not an int.
-        TypeError: If 'dtype' is neither int32 nor float32.
+        Tensor

     Supported Platforms:
         ``GPU`` ``CPU``

     Examples:
-        >>> from mindspore import Tensor, ops
         >>> import mindspore
-        >>> import numpy as np
         >>> # For discrete uniform distribution, only one number is allowed for both minval and maxval:
         >>> shape = (4, 2)
-        >>> minval =
-        >>> maxval =
-        >>> output = ops.uniform(shape, minval, maxval, seed=5, dtype=mindspore.int32)
+        >>> minval = mindspore.tensor(1, mindspore.int32)
+        >>> maxval = mindspore.tensor(2, mindspore.int32)
+        >>> output = mindspore.ops.uniform(shape, minval, maxval, seed=5, dtype=mindspore.int32)
         >>>
         >>> # For continuous uniform distribution, minval and maxval can be multi-dimentional:
         >>> shape = (3, 1, 2)
-        >>> minval =
-        >>> maxval =
-        >>> output = ops.uniform(shape, minval, maxval, seed=5)
+        >>> minval = mindspore.tensor([[3, 4], [5, 6]], mindspore.float32)
+        >>> maxval = mindspore.tensor([8.0, 10.0], mindspore.float32)
+        >>> output = mindspore.ops.uniform(shape, minval, maxval, seed=5)
         >>> result = output.shape
         >>> print(result)
         (3, 2, 2)
@@ -379,14 +384,23 @@ def uniform(shape, minval, maxval, seed=None, dtype=mstype.float32):
     return value


+
+@_function_forbid_reuse
+def exponential_(input, lambd=1, *, generator=None):
+    r"""
+    exponential
+    """
+    if generator is None:
+        generator = default_generator
+    seed, offset = generator._step(generator_step_) # pylint: disable=protected-access
+    return inplace_exponential_op(input, lambd, seed, offset)
+
+
 @_function_forbid_reuse
 def standard_normal(shape, seed=None):
     r"""
     Generates random numbers according to the standard Normal (or Gaussian) random number distribution.

-    Returns the tensor with the given shape, the random numbers in it drawn from normal distributions
-    whose mean is 0 and standard deviation is 1.
-
     .. math::
         f(x)=\frac{1}{\sqrt{2 \pi}} e^{\left(-\frac{x^{2}}{2}\right)}

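The new in-place `exponential_` ships with only a placeholder docstring in this release, so here is a hedged usage sketch inferred from the signature shown in the hunk above; the call path through `ops.function.random_func` mirrors the `uniform_` docstring example and is an assumption, not documented API:

    import mindspore
    from mindspore import ops

    x = ops.ones((4, 2))
    generator = mindspore.Generator()
    generator.manual_seed(100)
    # Fills x in place with samples from an exponential distribution with rate lambd.
    out = ops.function.random_func.exponential_(x, lambd=2.0, generator=generator)
    print(out.shape)  # (4, 2)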
@@ -395,28 +409,25 @@ def standard_normal(shape, seed=None):
         the `seed` parameter has no effect.

     Args:
-        shape (Union[tuple, Tensor]): The shape of
-
-        seed (int, optional): Seed is used as entropy source for Random number engines generating pseudo-random numbers.
-            Default: ``None`` , which will be treated as 0.
+        shape (Union[tuple, Tensor]): The shape of returned tensor.
+        seed (int, optional): Random number Seed. Default ``None`` .

     Returns:
-        Tensor
+        Tensor

     Raises:
-        TypeError: If `shape` is neither a tuple nor a Tensor.
         ValueError: If `shape` is a tuple containing non-positive items.
+        ValueError: If shape is a Tensor, and the rank of the Tensor is not equal to 1.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>>
+        >>> import mindspore
         >>> shape = (4, 4)
-        >>> output = ops.standard_normal(shape)
-        >>>
-
-        (4, 4)
+        >>> output = mindspore.ops.standard_normal(shape, seed=5)
+        >>> print(f'output shape is {output.shape}')
+        output shape is (4, 4)
     """
     seed1, seed2 = _get_seed(seed, "standard_normal")
     standard_normal_op = P.StandardNormal(seed=seed1, seed2=seed2)
@@ -502,7 +513,7 @@ def uniform_candidate_sampler(true_classes,
 @_function_forbid_reuse
 def random_poisson(shape, rate, seed=None, dtype=mstype.float32):
     r"""
-
+    Generate random number Tensor with `shape` according to a Poisson distribution with mean `rate`.


     .. math::
@@ -514,50 +525,31 @@ def random_poisson(shape, rate, seed=None, dtype=mstype.float32):
         the `seed` parameter has no effect.

     Args:
-        shape (Tensor): The shape of random tensor to be sampled from each poisson distribution, 1-D
-            dtype is mstype.int32 or mstype.int64.
+        shape (Tensor): The shape of random tensor to be sampled from each poisson distribution, 1-D integer tensor.
         rate (Tensor): The :math:`μ` parameter the distribution is constructed with.
             It represents the mean of poisson distribution
-            and also the variance of the distribution.
-
-
-            numbers and must be non-negative. Default: ``None`` , which will be treated as 0.
-        dtype (mindspore.dtype): The data type of output: ``mstype.int64``, ``mstype.int32``,
-            ``mstype.float64``, ``mstype.float32`` or ``mstype.float16``. Default: ``mstype.float32``.
+            and also the variance of the distribution.
+        seed (int, optional): Random seed, must be non-negative. Default ``None`` .
+        dtype (mindspore.dtype): The data type returned. Default ``mstype.float32``.

     Returns:
-
-        argument `dtype`.
-
-    Raises:
-        TypeError: If `shape` is not a Tensor.
-        TypeError: If datatype of `shape` is not mstype.int64 nor mstype.int32.
-        ValueError: If shape of `shape` is not 1-D.
-        TypeError: If `rate` is not a Tensor nor a scalar.
-        TypeError: If datatype of `rate` is not in [mstype.int64, mstype.int32,
-            mstype.float64, mstype.float32 or mstype.float16].
-        TypeError: If `seed` is not a non-negtive int.
-        TypeError: If `dtype` is not in [mstype.int64, mstype.int32, mstype.float64,
-            mstype.float32 nor mstype.float16].
-        ValueError: If any element of input `shape` tensor is not positive.
+        Tensor, the shape is `mindspore.ops.concat([shape, rate.shape], axis=0)`.

     Supported Platforms:
         ``GPU`` ``CPU``

     Examples:
         >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
         >>> # case 1: 1-D shape, 2-D rate, float64 output
-        >>> shape =
-        >>> rate =
-        >>> output = ops.random_poisson(shape, rate, seed=5, dtype=mindspore.float64)
+        >>> shape = mindspore.tensor([2, 2], mindspore.int64)
+        >>> rate = mindspore.tensor([[5.0, 10.0], [5.0, 1.0]], mindspore.float32)
+        >>> output = mindspore.ops.random_poisson(shape, rate, seed=5, dtype=mindspore.float64)
         >>> print(output.shape, output.dtype)
         (2, 2, 2, 2) Float64
         >>> # case 2: 1-D shape, scalar rate, int64 output
-        >>> shape =
-        >>> rate =
-        >>> output = ops.random_poisson(shape, rate, seed=5, dtype=mindspore.int64)
+        >>> shape = mindspore.tensor([2, 2], mindspore.int64)
+        >>> rate = mindspore.tensor(5.0, mindspore.float64)
+        >>> output = mindspore.ops.random_poisson(shape, rate, seed=5, dtype=mindspore.int64)
         >>> print(output.shape, output.dtype)
         (2, 2) Int64
     """
@@ -572,28 +564,22 @@ def random_poisson(shape, rate, seed=None, dtype=mstype.float32):
 @_function_forbid_reuse
 def shuffle(x, seed=None):
     r"""
-    Randomly
+    Randomly shuffle a tensor along its first dimension.

     Args:
-        x (Tensor): The
-        seed (int, optional): Random seed
-            which will be replaced with a randomly generated value. Default: ``None`` , which will be treated as 0.
+        x (Tensor): The input tensor.
+        seed (int, optional): Random seed. Default ``None`` , which is equivalent to 0.

     Returns:
-        Tensor
-
-    Raises:
-        TypeError: If data type of `seed` is not None or non-negative int.
+        Tensor

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> import
-        >>>
-        >>>
-        >>> x = Tensor(np.array([1, 2, 3, 4]), mstype.float32)
-        >>> output = ops.shuffle(x, seed=1)
+        >>> import mindspore
+        >>> x = mindspore.tensor([1, 2, 3, 4], mindspore.float32)
+        >>> output = mindspore.ops.shuffle(x, seed=1)
         >>> print(output)
         [3. 4. 2. 1.]
     """
@@ -619,13 +605,13 @@ def log_uniform_candidate_sampler(true_classes, num_true=1, num_sampled=5, uniqu
     Args:
         true_classes (Tensor): The target classes. With data type of int64 and
             shape :math:`(batch\_size, num\_true)` .
-        num_true (int): The number of target classes per training example. Default: ``1`` .
-        num_sampled (int): The number of classes to randomly sample. Default: ``5`` .
-        unique (bool): Determines whether sample with rejection. If `unique` is ``True`` ,
+        num_true (int, optional): The number of target classes per training example. Default: ``1`` .
+        num_sampled (int, optional): The number of classes to randomly sample. Default: ``5`` .
+        unique (bool, optional): Determines whether sample with rejection. If `unique` is ``True`` ,
             all sampled classes in a batch are unique. Default: ``True`` .
-        range_max (int): The number of possible classes. When `unique` is ``True`` ,
+        range_max (int, optional): The number of possible classes. When `unique` is ``True`` ,
             `range_max` must be greater than or equal to `num_sampled`. Default: ``5`` .
-        seed (int): Random seed, must be non-negative. Default: ``0`` .
+        seed (int, optional): Random seed, must be non-negative. Default: ``0`` .

     Returns:
         Tuple of 3 Tensors.
@@ -726,22 +712,69 @@ def is_cpu_backend():
  return context.get_context('device_target') == 'CPU'


+ @_function_forbid_reuse
+ def normal_(input, mean=0, std=1, *, generator=None):
+ r"""
+ Update the `input` tensor in place by generating random numbers sampled from the normal
+ distribution which constructed by the parameters `mean` and `std`.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ input (Tensor): The origin input tensor.
+ mean (number, optional): the mean of normal distribution. With float data type.
+ Default: ``0``.
+ std (number, optional): the std of normal distribution. With float data type.
+ Default: ``1``.
+
+ Keyword Args:
+ generator (:class:`mindspore.Generator`, optional): a pseudorandom number generator.
+ Default: ``None``, uses the default pseudorandom number generator.
+
+ Returns:
+ A tensor that is filled with random numbers that follow a normal distribution and
+ that has the same type and shape as the `self` tensor.
+
+ Raises:
+ TypeError: If the dtype of `mean` or `std` is not one of: bool, int, float, complex.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> x = mindspore.Tensor(np.array([[1, 2], [3, 4]]), dtype=mindspore.float32)
+ >>> output = x.normal_()
+ >>> print(output)
+ [[0.2788825 1.3305743]
+ [1.244194 1.16303174]]
+ """
+ if generator is None:
+ generator = default_generator
+ seed, offset = generator._step( # pylint: disable=protected-access
+ generator_step_)
+ return inplace_normal_(input, mean, std, seed, offset)
+
+
  def normal_ext(mean=0.0, std=1.0, size=None, generator=None):
  r"""
+ normal(mean, std, *, generator=None) -> Tensor
+
  Generates random numbers according to the standard Normal (or Gaussian) random number distribution.

  Args:
- mean (Union[float, Tensor]
- should be the same as that of the
- std (Union[float, Tensor]
- should be the same as that of the
-
-
+ mean (Union[float, Tensor]): Mean value of each element, the shape of the `mean` tensor
+ should be the same as that of the `std` tensor.
+ std (Union[float, Tensor]): Standard deviation for each element, the shape of the `std` tensor
+ should be the same as that of the `mean` tensor. The value of `std` should be greater than or equal to 0.
+
+ Keyword Args:
  generator (generator, optional): MindSpore generator. Default: ``None``.

  Returns:
- Outputs a tensor with the same shape as
- or when 'mean' and 'std' are constants and shape is specified as 'size'.
+ Outputs a tensor with the same shape as `mean`.

  Raises:
  TypeError: If `mean` or `std` is not Union[float, Tensor].
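The in-place fill added above can also be driven by an explicit generator instead of the default one. A minimal sketch, assuming `mindspore.Generator` can be constructed directly and exposes a `manual_seed` method (only the `generator` keyword itself is documented in this hunk); values are not shown because they are random:

>>> import mindspore
>>> import numpy as np
>>> g = mindspore.Generator()          # assumed constructor for an explicit generator
>>> _ = g.manual_seed(41)              # assumed seeding method
>>> x = mindspore.Tensor(np.zeros((2, 2)), dtype=mindspore.float32)
>>> _ = x.normal_(mean=3.0, std=0.5, generator=g)   # overwrites x with N(3.0, 0.5) samples
>>> print(x.shape)
(2, 2)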
@@ -759,6 +792,58 @@ def normal_ext(mean=0.0, std=1.0, size=None, generator=None):
  >>> output = ops.function.random_func.normal_ext(mean, std)
  >>> print(output.shape)
  (3,)
+
+ .. function:: normal(mean, std=1.0) -> Tensor
+ :noindex:
+
+ Similar to the function above, but the standard deviations are shared among all drawn elements.
+
+ Args:
+ mean (Tensor): Mean value of each element.
+ std (float, optional): Standard deviation for each element. The value of `std` should be greater
+ than or equal to 0. Default: ``1.0``.
+
+ Returns:
+ Outputs a tensor with the same shape as `mean`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import ops
+ >>> from mindspore import Tensor
+ >>> mean = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
+ >>> output = ops.function.random_func.normal_ext(mean, 1.0)
+ >>> print(output.shape)
+ (3,)
+
+ .. function:: normal(mean, std, size) -> Tensor
+ :noindex:
+
+ Similar to the function above, but the means and standard deviations are shared among all drawn elements. The
+ result tensor has size given by `size`.
+
+ Args:
+ mean (float): Mean value of each element.
+ std (float): Standard deviation for each element.
+ size (tuple): output shape.
+
+ Returns:
+ Outputs a tensor. The shape is specified as `size`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import ops
+ >>> from mindspore import Tensor
+ >>> output = ops.function.random_func.normal_ext(1.0, 2.0, (2, 4))
+ >>> print(output.shape)
+ (2, 4)
  """
  if generator is None:
  generator = default_generator
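The three overloads documented above differ only in how `mean`, `std` and `size` are supplied; a condensed sketch using the same module path as the examples in this hunk (shapes only, since the drawn values are random):

>>> import mindspore
>>> import numpy as np
>>> from mindspore import ops, Tensor
>>> mean = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> std = Tensor(np.array([1.0, 1.0, 1.0]), mindspore.float32)
>>> ops.function.random_func.normal_ext(mean, std).shape        # per-element mean and std
(3,)
>>> ops.function.random_func.normal_ext(mean, 0.5).shape        # shared std
(3,)
>>> ops.function.random_func.normal_ext(0.0, 1.0, (2, 4)).shape # scalar parameters with an explicit size
(2, 4)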
@@ -780,50 +865,44 @@ def normal_ext(mean=0.0, std=1.0, size=None, generator=None):
  @_function_forbid_reuse
  def normal(shape, mean, stddev, seed=None):
  """
-
+ Return a random tensor that conforms to the normal (Gaussian) distribution.

  .. warning::
  The Ascend backend does not support the reproducibility of random numbers, so
  the `seed` parameter has no effect.

  Args:
- shape (tuple): The shape of
-
-
-
- seed (int): Seed is used as entropy source for the Random number engines to generate pseudo-random numbers.
- The value must be non-negative. Default: ``None`` , which will be treated as 0.
+ shape (tuple): The shape of returned tensor.
+ mean (Union[Tensor, int, float]): The mean of the normal distribution for the returned tensor.
+ stddev (Union[Tensor, int, float]): The standard deviation of the normal distribution for the returned tensor.
+ seed (int, optional): Random seed. Default: ``None`` , which is equivalent to 0.

  Returns:
- Tensor
- of `mean` and `stddev`.
- The dtype is [float32, float64].
+ Tensor

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
  >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
  >>> shape = (3, 1, 2)
- >>> mean =
- >>> stddev =
- >>> output = ops.normal(shape, mean, stddev, seed=5)
+ >>> mean = mindspore.tensor([[3, 4], [5, 6]], mindspore.float32)
+ >>> stddev = mindspore.tensor(1.0, mindspore.float32)
+ >>> output = mindspore.ops.normal(shape, mean, stddev, seed=5)
  >>> result = output.shape
  >>> print(result)
  (3, 2, 2)
  >>> shape = (3, 1, 3)
- >>> mean =
- >>> stddev =
- >>> output = ops.normal(shape, mean, stddev, seed=5)
+ >>> mean = mindspore.tensor([[3, 4, 3], [3, 5, 6]], mindspore.float32)
+ >>> stddev = mindspore.tensor(1.0, mindspore.float32)
+ >>> output = mindspore.ops.normal(shape, mean, stddev, seed=5)
  >>> result = output.shape
  >>> print(result)
  (3, 2, 3)
  >>> shape = (3, 1, 3)
- >>> mean =
- >>> stddev =
- >>> output = ops.normal(shape, mean, stddev, seed=5)
+ >>> mean = mindspore.tensor([[1, 2, 3], [3, 4, 3], [3, 5, 6]], mindspore.float32)
+ >>> stddev = mindspore.tensor(1.0, mindspore.float32)
+ >>> output = mindspore.ops.normal(shape, mean, stddev, seed=5)
  >>> result = output.shape
  >>> print(result)
  (3, 3, 3)
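The output shape of `mindspore.ops.normal` is the broadcast of `shape` with the shapes of `mean` and `stddev`, which is what the three cases above illustrate; a shape-only sketch of the two extremes:

>>> import mindspore
>>> # scalar parameters: the result shape is exactly `shape`
>>> mindspore.ops.normal((2, 3), 0.0, 1.0).shape
(2, 3)
>>> # tensor parameters broadcast against `shape`: (3, 1, 2) with (2, 2) gives (3, 2, 2)
>>> mean = mindspore.tensor([[3, 4], [5, 6]], mindspore.float32)
>>> stddev = mindspore.tensor(1.0, mindspore.float32)
>>> mindspore.ops.normal((3, 1, 2), mean, stddev).shape
(3, 2, 2)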
@@ -847,7 +926,8 @@ def normal(shape, mean, stddev, seed=None):
  def laplace(shape, mean, lambda_param, seed=None):
  r"""
  Generates random numbers according to the Laplace random number distribution.
-
+
+ Support broadcasting.

  .. math::
  \text{f}(x;μ,λ) = \frac{1}{2λ}\exp(-\frac{|x-μ|}{λ}),
@@ -857,30 +937,24 @@ def laplace(shape, mean, lambda_param, seed=None):
  the `seed` parameter has no effect.

  Args:
- shape (tuple): The shape
-
-
-
-
- variance of Laplace distribution is equal to twice the square of lambda_param. With float32 data type.
- seed (int, optional): Seed is used as entropy source for Random number engines generating pseudo-random numbers.
- Default: ``None`` , which will be treated as 0.
+ shape (tuple): The shape specified.
+ mean (Tensor): The mean of distribution.
+ lambda_param (Tensor): Control the variance of distribution. The
+ variance of Laplace distribution is equal to twice the square of `lambda_param` .
+ seed (int, optional): Random seed. Default ``None`` represents 0.

  Returns:
- Tensor
- The dtype is float32.
+ Tensor

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
  >>> import mindspore
- >>> from mindspore import Tensor
- >>> from mindspore import ops as ops
  >>> shape = (2, 3)
- >>> mean =
- >>> lambda_param =
- >>> output = ops.laplace(shape, mean, lambda_param, seed=5)
+ >>> mean = mindspore.tensor(1.0, mindspore.float32)
+ >>> lambda_param = mindspore.tensor(1.0, mindspore.float32)
+ >>> output = mindspore.ops.laplace(shape, mean, lambda_param, seed=5)
  >>> print(output.shape)
  (2, 3)
  """
@@ -903,56 +977,47 @@ def gamma(shape, alpha, beta, seed=None):
  r"""
  Generates random numbers according to the Gamma random number distribution.

+ Support broadcasting.
+
  .. warning::
  The Ascend backend does not support the reproducibility of random numbers, so
  the `seed` parameter has no effect.

  Args:
- shape (tuple): The shape
- alpha (Tensor): The
- beta (Tensor): The
- seed (int, optional):
- pseudo-random numbers, must be non-negative. Default: ``None`` .
+ shape (tuple): The shape specified.
+ alpha (Tensor): The shape parameter.
+ beta (Tensor): The inverse scale parameter.
+ seed (int, optional): The random seed, Default ``None`` .

  Returns:
- Tensor
- of `alpha` and `beta`.
- The dtype is float32.
-
- Raises:
- TypeError: If `shape` is not a tuple.
- TypeError: If neither `alpha` nor `beta` is a Tensor.
- TypeError: If `seed` is not an int.
- TypeError: If dtype of `alpha` and `beta` is not float32.
+ Tensor

  Supported Platforms:
  ``Ascend``

  Examples:
  >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops
  >>> # case 1: alpha_shape is (2, 2)
  >>> shape = (3, 1, 2)
- >>> alpha =
- >>> beta =
- >>> output = ops.gamma(shape, alpha, beta, seed=5)
+ >>> alpha = mindspore.tensor([[3, 4], [5, 6]], mindspore.float32)
+ >>> beta = mindspore.tensor([1.0], mindspore.float32)
+ >>> output = mindspore.ops.gamma(shape, alpha, beta, seed=5)
  >>> result = output.shape
  >>> print(result)
  (3, 2, 2)
  >>> # case 2: alpha_shape is (2, 3), so shape is (3, 1, 3)
  >>> shape = (3, 1, 3)
- >>> alpha =
- >>> beta =
- >>> output = ops.gamma(shape, alpha, beta, seed=5)
+ >>> alpha = mindspore.tensor([[1, 3, 4], [2, 5, 6]], mindspore.float32)
+ >>> beta = mindspore.tensor([1.0], mindspore.float32)
+ >>> output = mindspore.ops.gamma(shape, alpha, beta, seed=5)
  >>> result = output.shape
  >>> print(result)
  (3, 2, 3)
  >>> # case 3: beta_shape is (1, 2), the output is different.
  >>> shape = (3, 1, 2)
- >>> alpha =
- >>> beta =
- >>> output = ops.gamma(shape, alpha, beta, seed=5)
+ >>> alpha = mindspore.tensor([[3, 4], [5, 6]], mindspore.float32)
+ >>> beta = mindspore.tensor([1.0, 2], mindspore.float32)
+ >>> output = mindspore.ops.gamma(shape, alpha, beta, seed=5)
  >>> print(output)
  [[[ 2.2132034 5.8855834]
  [ 3.8825176 8.6066265]]
@@ -962,9 +1027,9 @@ def gamma(shape, alpha, beta, seed=None):
  [ 3.786061 5.160872 ]]]
  >>> # case 4: beta_shape is (2, 1), the output is different.
  >>> shape = (3, 1, 2)
- >>> alpha =
- >>> beta =
- >>> output = ops.gamma(shape, alpha, beta, seed=5)
+ >>> alpha = mindspore.tensor([[3, 4], [5, 6]], mindspore.float32)
+ >>> beta = mindspore.tensor([[1.0], [2.0]], mindspore.float32)
+ >>> output = mindspore.ops.gamma(shape, alpha, beta, seed=5)
  >>> print(output)
  [[[ 5.6085486 7.8280783]
  [ 15.97684 16.116285]]
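The same broadcasting rule applies to `mindspore.ops.gamma`: `alpha` is the per-element shape parameter, `beta` the inverse scale, and both broadcast against `shape`. A shape-only check condensed from case 1 above (values differ between runs on backends without reproducible seeding):

>>> import mindspore
>>> alpha = mindspore.tensor([[3.0, 4.0], [5.0, 6.0]], mindspore.float32)   # shape parameter
>>> beta = mindspore.tensor([1.0], mindspore.float32)                       # inverse scale, broadcasts over alpha
>>> mindspore.ops.gamma((3, 1, 2), alpha, beta, seed=5).shape
(3, 2, 2)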
@@ -1007,35 +1072,29 @@ def _generate_shapes(shape):
  @_function_forbid_reuse
  def rand(*size, dtype=None, seed=None):
  r"""
-
- based on the given
+ Return a new tensor that fills numbers from the uniform distribution over an interval :math:`[0, 1)`
+ based on the given `size` and `dtype`.

  .. warning::
  The Ascend backend does not support the reproducibility of random numbers, so
  the `seed` parameter has no effect.

  Args:
- size (Union[int, tuple(int), list(int)]):
+ size (Union[int, tuple(int), list(int)]): The shape of the output tensor.

  Keyword Args:
- dtype (:class:`mindspore.dtype`, optional):
-
- seed (int, optional): Random seed, must be greater or equal to 0. Default: ``None`` , and ``0`` will be used.
+ dtype (:class:`mindspore.dtype`, optional): The data type returned. Default ``None`` .
+ seed (int, optional): Random seed, must be greater or equal to 0. Default ``None`` .

  Returns:
- Tensor
- the interval :math:`[0, 1)`.
-
- Raises:
- TypeError: `seed` is not a non-negative integer.
- ValueError: If `dtype` is not a `mstype.float_type` type.
+ Tensor

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
- >>>
- >>> print(ops.rand((2,3)))
+ >>> import mindspore
+ >>> print(mindspore.ops.rand((2,3)))
  [[4.1702199e-01 9.9718481e-01 7.2032452e-01]
  [9.3255734e-01 1.1438108e-04 1.2812445e-01]]
  """
@@ -1055,37 +1114,31 @@ def rand(*size, dtype=None, seed=None):
  @_function_forbid_reuse
  def rand_like(input, seed=None, *, dtype=None):
  r"""
-
-
+ Return a tensor with the same shape as `input` that is filled with random numbers from a uniform distribution
+ on the interval :math:`[0, 1)`.

  .. warning::
  The Ascend backend does not support the reproducibility of random numbers, so
  the `seed` parameter has no effect.

  Args:
- input (Tensor):
- seed (int, optional): Random seed, must be greater or equal to 0. Default
+ input (Tensor): The input tensor.
+ seed (int, optional): Random seed, must be greater or equal to 0. Default ``None`` .

  Keyword Args:
- dtype (:class:`mindspore.dtype`, optional):
-
+ dtype (:class:`mindspore.dtype`, optional): The data type returned.
+ Default ``None`` .

  Returns:
- Tensor
- the interval :math:`[0, 1)`.
-
- Raises:
- TypeError: If `seed` is not a non-negative integer.
- ValueError: If `dtype` is not a `mstype.float_type` type.
+ Tensor

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
- >>> import mindspore
- >>>
- >>> a =
- >>> print(ops.rand_like(a, dtype=ms.float32))
+ >>> import mindspore
+ >>> a = mindspore.tensor([[2, 3, 4], [1, 2, 3]])
+ >>> print(mindspore.ops.rand_like(a, dtype=mindspore.float32))
  [[4.1702199e-01 9.9718481e-01 7.2032452e-01]
  [9.3255734e-01 1.1438108e-04 1.2812445e-01]]
  """
@@ -1117,7 +1170,7 @@ def rand_ext(*size, generator=None, dtype=None):
  Keyword Args:
  generator (:class:`mindspore.Generator`, optional): a pseudorandom number generator.
  Default: ``None``, uses the default pseudorandom number generator.
- dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype
+ dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype. If ``None``,
  `mindspore.float32` will be applied. Default: ``None`` .

  Returns:
@@ -1125,7 +1178,7 @@ def rand_ext(*size, generator=None, dtype=None):
  the interval :math:`[0, 1)`.

  Raises:
- ValueError: If `
+ ValueError: If `size` contains negative numbers.

  Supported Platforms:
  ``Ascend``
@@ -1139,6 +1192,8 @@ def rand_ext(*size, generator=None, dtype=None):
  generator = default_generator
  seed, offset = generator._step( # pylint: disable=protected-access
  generator_step_)
+ if size and isinstance(size[0], (tuple, list)):
+ size = size[0]
  return rand_ext_(size, seed, offset, dtype)

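The two lines added to `rand_ext` above unwrap a single tuple or list argument, so the varargs and tuple spellings now produce the same result. A small sketch, assuming the function is reachable as `ops.function.random_func.rand_ext`, mirroring the `normal_ext` examples earlier in this file:

>>> from mindspore import ops
>>> f = ops.function.random_func.rand_ext   # assumed module path
>>> f(2, 3).shape                           # varargs form
(2, 3)
>>> f((2, 3)).shape                         # single tuple form, unwrapped by the new isinstance check
(2, 3)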
@@ -1159,9 +1214,6 @@ def rand_like_ext(input, *, dtype=None):
  Tensor, with the designated shape and dtype, filled with random numbers from the uniform distribution on
  the interval :math:`[0, 1)`.

- Raises:
- ValueError: If `dtype` is not a `mstype.float_type` type.
-
  Supported Platforms:
  ``Ascend``

@@ -1192,7 +1244,7 @@ def randn_ext(*size, generator=None, dtype=None):
  Keyword Args:
  generator (:class:`mindspore.Generator`, optional): a pseudorandom number generator.
  Default: ``None``, uses the default pseudorandom number generator.
- dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype
+ dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype. If None,
  `mindspore.float32` will be applied. Default: ``None`` .

  Returns:
@@ -1200,7 +1252,7 @@ def randn_ext(*size, generator=None, dtype=None):
  the interval :math:`[0, 1)`.

  Raises:
- ValueError: If `
+ ValueError: If `size` contains negative numbers.

  Supported Platforms:
  ``Ascend``
@@ -1214,6 +1266,8 @@ def randn_ext(*size, generator=None, dtype=None):
  generator = default_generator
  seed, offset = generator._step( # pylint: disable=protected-access
  generator_step_)
+ if size and isinstance(size[0], (tuple, list)):
+ size = size[0]
  return randn_(size, seed, offset, dtype)

@@ -1230,16 +1284,13 @@ def randn_like_ext(input, *, dtype=None):
  input (Tensor): Input Tensor to specify the output shape and its default dtype.

  Keyword Args:
- dtype (:class:`mindspore.dtype`, optional): Designated
+ dtype (:class:`mindspore.dtype`, optional): Designated Tensor dtype, it must be float type. If ``None``,
  the same dtype of `input` will be applied. Default: ``None`` .

  Returns:
  Tensor, with the designated shape and dtype, filled with random numbers from the normal distribution on
  the interval :math:`[0, 1)`.

- Raises:
- ValueError: If `dtype` is not a `mstype.float_type` type.
-
  Supported Platforms:
  ``Ascend``

@@ -1256,8 +1307,10 @@ def randn_like_ext(input, *, dtype=None):


  @_function_forbid_reuse
- def randint_ext(
+ def randint_ext(*args, generator=None, dtype=None):
  r"""
+ randint(low=0, high, size, *, generator=None, dtype=None) -> Tensor
+
  Returns a new tensor filled with integer numbers from the uniform distribution over an interval :math:`[low, high)`
  based on the given shape and dtype.

@@ -1265,7 +1318,7 @@ def randint_ext(low, high, size, *, generator=None, dtype=None):
  This is an experimental API that is subject to change or deletion.

  Args:
- low (int): the lower bound of the generated random number
+ low (int, optional): the lower bound of the generated random number. Default: ``0``.
  high (int): the upper bound of the generated random number
  size (Union[tuple(int), list(int)]): Shape of the new tensor, e.g. :math:`(2, 3)`.

@@ -1295,12 +1348,18 @@ def randint_ext(low, high, size, *, generator=None, dtype=None):
  generator = default_generator
  seed, offset = generator._step( # pylint: disable=protected-access
  generator_step_)
-
+ args = list(args)
+ if len(args) == 2:
+ args = [0] + args
+ args += [seed, offset]
+ return randint_(*args, dtype=dtype)


  @_function_forbid_reuse
- def randint_like_ext(
+ def randint_like_ext(*args, dtype=None):
  r"""
+ randint_like(input, low=0, high, *, dtype=None) -> Tensor
+
  Returns a new tensor filled with integer numbers from the uniform distribution over an interval :math:`[low, high)`
  based on the given dtype and shape of the input tensor.

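With the `*args` rewrite shown in the hunk above, `randint_ext` now accepts either `(low, high, size)` or just `(high, size)`, in which case the new length-2 branch inserts `low=0`. A sketch under the assumption that the function is reachable as `ops.function.random_func.randint_ext`, like the other `*_ext` helpers in this file:

>>> from mindspore import ops
>>> f = ops.function.random_func.randint_ext   # assumed module path
>>> f(0, 10, (2, 3)).shape                     # explicit low
(2, 3)
>>> f(10, (2, 3)).shape                        # low omitted; treated as low=0 by the new branch
(2, 3)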
@@ -1309,7 +1368,7 @@ def randint_like_ext(input, low, high, *, dtype=None):

  Args:
  input (Tensor): Input Tensor to specify the output shape and its default dtype.
- low (int): the lower bound of the generated random number
+ low (int, optional): the lower bound of the generated random number. Default: ``0``.
  high (int): the upper bound of the generated random number

  Keyword Args:
@@ -1337,13 +1396,62 @@ def randint_like_ext(input, low, high, *, dtype=None):
  """
  seed, offset = default_generator._step( # pylint: disable=protected-access
  generator_step_)
-
+ args = list(args)
+ if len(args) == 2:
+ args = [args[0], 0, args[1]]
+ args += [seed, offset]
+ return randint_like_(*args, dtype=dtype)
+
+
+ @_function_forbid_reuse
+ def random_(input, from_=0, to=None, *, generator=None):
+ r"""
+ Fill the input tensor with numbers sampled from a discrete uniform distribution
+ over an interval :math:`[low, high)`.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ input (Tensor): input tensor.
+ from_ (int, optional): the lower bound of the generated random number. Default: 0.
+ to (int, optional): the upper bound of the generated random number. By default it's the upper limit of
+ the input data type. Default: ``None``.
+
+ Keyword Args:
+ generator (:class:`mindspore.Generator`, optional): a pseudorandom number generator.
+ Default: ``None``, uses the default pseudorandom number generator.
+
+ Returns:
+ The input tensor.
+
+ Raises:
+ TypeError: If `from_` or `to` is not integer.
+ ValueError: If `from_` >= `to`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor, ops
+ >>> a = Tensor([[2, 3, 4], [1, 2, 3]])
+ >>> from_ = 0
+ >>> to = 5
+ >>> print(ops.function.random_func.random_(a, from_, to).shape)
+ (2, 3)
+ """
+ if not generator:
+ generator = default_generator
+ seed, offset = generator._step( # pylint: disable=protected-access
+ generator_step_)
+ return inplace_random_(input, from_, to, seed, offset)


  @_function_forbid_reuse
  def randn(*size, dtype=None, seed=None):
  r"""
-
+ Return a new tensor with given shape and dtype, filled with random numbers
  from the standard normal distribution.

  .. warning::
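The in-place `random_` helper introduced in the hunk above refills an existing tensor with integers drawn uniformly from `[from_, to)`; a sketch built directly on its own example, checking only the shape since the values are random:

>>> import mindspore
>>> from mindspore import Tensor, ops
>>> a = Tensor([[2, 3, 4], [1, 2, 3]])
>>> _ = ops.function.random_func.random_(a, 0, 5)   # overwrites `a` with integers in [0, 5)
>>> print(a.shape)
(2, 3)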
@@ -1351,28 +1459,22 @@ def randn(*size, dtype=None, seed=None):
  the `seed` parameter has no effect.

  Args:
- size (Union[int, tuple(int), list(int)]): Shape of the
+ size (Union[int, tuple(int), list(int)]): Shape of the output tensor.

  Keyword Args:
- dtype (:class:`mindspore.dtype`, optional):
-
- seed (int, optional): Random seed, must be
+ dtype (:class:`mindspore.dtype`, optional): The data type returned.
+ Default ``None`` .
+ seed (int, optional): Random seed, must be non-negative. Default ``None`` .

  Returns:
- Tensor
- "standard normal" distribution.
-
- Raises:
- TypeError: `seed` is not a non-negative integer.
- ValueError: If `dtype` is not a `mstype.float_type`.
- ValueError: If `size` contains invalid number.
+ Tensor

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
- >>>
- >>> print(ops.randn((2, 2)))
+ >>> import mindspore
+ >>> print(mindspore.ops.randn((2, 2)))
  [[ 0.30639967 -0.42438635]
  [-0.4287376 1.3054721 ]]
  """
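`size` is documented above as `Union[int, tuple(int), list(int)]`, so the shape can be given either as separate integers or as a single tuple, if the varargs form is accepted as the annotation suggests; a shape-only sketch:

>>> import mindspore
>>> mindspore.ops.randn(2, 3).shape      # separate int arguments
(2, 3)
>>> mindspore.ops.randn((2, 3)).shape    # single tuple, as in the example above
(2, 3)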
@@ -1392,7 +1494,7 @@ def randn(*size, dtype=None, seed=None):
  @_function_forbid_reuse
  def randn_like(input, seed=None, *, dtype=None):
  r"""
-
+ Return a tensor with the same shape as `input`, filled with random numbers from the standard normal
  distribution.

  .. warning::
@@ -1400,29 +1502,22 @@ def randn_like(input, seed=None, *, dtype=None):
  the `seed` parameter has no effect.

  Args:
- input (Tensor):
- seed (int, optional): Random seed, must be
+ input (Tensor): The input tensor.
+ seed (int, optional): Random seed, must be non-negative. Default ``None`` .

  Keyword Args:
- dtype (:class:`mindspore.dtype`, optional):
- `mindspore.float32` will be used. Default: ``None`` .
+ dtype (:class:`mindspore.dtype`, optional): The data type returned. Default ``None`` .

  Returns:
- Tensor
- "standard normal" distribution.
-
- Raises:
- TypeError: `seed` is not a non-negative integer.
- ValueError: If `dtype` is not a `mstype.float_type`.
+ Tensor

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
- >>> import mindspore
- >>>
- >>> a =
- >>> print(ops.randn_like(a, dtype=ms.float32))
+ >>> import mindspore
+ >>> a = mindspore.tensor([[1, 2, 3], [4, 5, 6]])
+ >>> print(mindspore.ops.randn_like(a, dtype=mindspore.float32))
  [[ 0.30639967 -0.42438635 -0.20454668]
  [-0.4287376 1.3054721 0.64747655]]
  """
@@ -1445,7 +1540,7 @@ def randn_like(input, seed=None, *, dtype=None):
  @_function_forbid_reuse
  def randint(low, high, size, seed=None, *, dtype=None):
  r"""
-
+ Return a tensor whose elements are random integers in the range of [ `low` , `high` ) .

  .. warning::
  The Ascend backend does not support the reproducibility of random numbers, so
@@ -1454,30 +1549,22 @@ def randint(low, high, size, seed=None, *, dtype=None):
  Args:
  low (int): Start value of interval.
  high (int): End value of interval.
- size (tuple): Shape of the
- seed (int, optional): Random seed, must be
+ size (tuple): Shape of the output tensor.
+ seed (int, optional): Random seed, must be non-negative. Default ``None`` .

  Keyword Args:
- dtype (:class:`mindspore.dtype`, optional):
-
+ dtype (:class:`mindspore.dtype`, optional): The data type returned.
+ Default ``None`` .

  Returns:
- Tensor
- to high (exclusive).
-
- Raises:
- TypeError: `seed` is not a non-negative integer.
- TypeError: `size` is not a tuple.
- TypeError: `low` or `high` is not an integer.
- ValueError: If `dtype` is not a `mstype.int_type`.
-
+ Tensor

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
- >>>
- >>> print(ops.randint(1, 10, (2,3)))
+ >>> import mindspore
+ >>> print(mindspore.ops.randint(1, 10, (2,3)))
  [[4 9 7]
  [9 1 2]]
  """
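The `dtype` keyword documented above selects the integer type of the result; a shape-only sketch, assuming `mindspore.int32` is an accepted integer dtype for this keyword:

>>> import mindspore
>>> out = mindspore.ops.randint(1, 10, (2, 3), dtype=mindspore.int32)
>>> out.shape
(2, 3)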
@@ -1507,7 +1594,7 @@ def randint(low, high, size, seed=None, *, dtype=None):
  @_function_forbid_reuse
  def randint_like(input, low, high, seed=None, *, dtype=None):
  r"""
- Returns a tensor with the same shape as
+ Returns a tensor with the same shape as `input` whose elements are random integers in the range
  of [ `low` , `high` ) .

  .. warning::
@@ -1515,31 +1602,25 @@ def randint_like(input, low, high, seed=None, *, dtype=None):
  the `seed` parameter has no effect.

  Args:
- input (Tensor):
+ input (Tensor): The input tensor.
  low(int): Start value of interval.
  high(int): End value of interval.
- seed (int, optional): Random seed, must be
+ seed (int, optional): Random seed, must be non-negative. Default ``None`` .

  Keyword Args:
- dtype (:class:`mindspore.dtype`, optional):
-
+ dtype (:class:`mindspore.dtype`, optional): The data type returned.
+ Default ``None`` .

  Returns:
- Tensor
- to high (exclusive).
-
- Raises:
- TypeError: `seed` is not a non-negative integer.
- TypeError: `low` or `high` is not an integer.
- ValueError: If `dtype` is not a `mstype.int_type`.
+ Tensor

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
- >>>
- >>> a =
- >>> print(ops.randint_like(a, 1, 10))
+ >>> import mindspore
+ >>> a = mindspore.tensor([[1, 2, 3], [3, 2, 1]])
+ >>> print(mindspore.ops.randint_like(a, 1, 10))
  [[4 9 7]
  [9 1 2]]
  """
@@ -1575,10 +1656,11 @@ def randperm_ext(n, *, generator=None, dtype=mstype.int64):
  .. warning::
  - This is an experimental API that is subject to change or deletion.

-
  Args:
  n (Union[Tensor, int]): size of the permutation. int or Tensor with shape: () or (1,) and
  data type int64. The value of `n` must be greater than zero.
+
+ Keyword Args:
  generator (:class:`mindspore.Generator`, optional): a pseudorandom number generator.
  Default: ``None``, uses the default pseudorandom number generator.
  dtype (mindspore.dtype, optional): The type of output. Default: mstype.int64.
@@ -1667,26 +1749,26 @@ def poisson(shape, mean, seed=None):
  @_function_forbid_reuse
  def multinomial(input, num_samples, replacement=True, seed=None):
  r"""
-
- row of the input tensor.
+ Generate a tensor from a multinomial distribution.

  The polynomial distribution is a probability distribution that generalizes the binomial distribution formula to
  multiple states. In the polynomial distribution, each event has a fixed probability, and the sum of these
- probabilities is 1.
+ probabilities is 1.
+
+ The purpose of this interface is to perform `num_samples` sampling
  on the input `input`, and the output tensor is the index of the input tensor for each sampling.
  The values in `input` represent the probability of selecting the corresponding index for each sampling.

  Here is an extreme example for better understanding. Suppose we have an input probability tensor with
- values `
+ values `[90 / 100, 10 / 100, 0]`, which means we can sample three indices,
  namely index 0, index 1, and index 2, with probabilities of 90%, 10%, and 0%, respectively. We perform n samplings,
  and the resulting sequence is the calculation result of the polynomial distribution, with a length equal to the
  number of samplings.

  In case 1 of the sample code, we perform two non-replacement samplings (`replacement` is `False`).
-
-
-
- and the resulting sequence is `[0, 1]`.
+ Since the probability of selecting index 0 is 90% for each sampling, the first result is most likely to be index 0.
+ Since the probability of selecting index 2 is 0, index 2 cannot appear in the sampling result. Therefore, the
+ second result must be index 1, and the resulting sequence is `[0, 1]`.

  In case 2 of the sample code, we perform 10 replacement samplings (`replacement` is `True`).
  As expected, about 90% of the sampling results are index 0.
@@ -1704,59 +1786,42 @@ def multinomial(input, num_samples, replacement=True, seed=None):
  the `seed` parameter has no effect.

  Args:
- input (Tensor): The input tensor containing probabilities
- float32 data type.
+ input (Tensor): The input tensor containing probabilities.
  num_samples (int): Number of samples to draw.
- replacement (bool, optional): Whether to draw with replacement or not. Default
- seed (int, optional):
- pseudo-random numbers, must be non-negative. Default: ``None`` .
+ replacement (bool, optional): Whether to draw with replacement or not. Default ``True`` .
+ seed (int, optional): Random seed. Default ``None`` .

  Returns:
- Tensor
- The dtype is int32.
-
- Raises:
- TypeError: If `input` is not a Tensor whose dtype is not float32.
- TypeError: If `num_samples` is not an int.
- TypeError: If `seed` is neither an int nor None.
+ Tensor

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``

  Examples:
  >>> import mindspore
- >>> from mindspore import Tensor, ops
- >>> from mindspore import dtype as mstype
  >>> # case 1: The output is random, and the length of the output is the same as num_sample.
  >>> # replacement is False.
- >>> input1 =
- >>> input2 =
+ >>> input1 = mindspore.tensor([90 / 100, 10 / 100, 0])
+ >>> input2 = mindspore.tensor([90, 10, 0])
  >>> # input1 and input2 have the same meaning.
- >>>
-
- >>>
-
- >>>
- >>> # [0 1]
- >>> print(len(output1))
- 2
- >>> print(len(output2))
- 2
+ >>> mindspore.ops.multinomial(input1, 2, replacement=False)
+ Tensor(shape=[2], dtype=Int32, value= [0, 1])
+ >>> mindspore.ops.multinomial(input2, 2, replacement=False)
+ Tensor(shape=[2], dtype=Int32, value= [1, 0])
+ >>>
  >>> # case 2: The output is random, and the length of the output is the same as num_sample.
  >>> # replacement is True.
- >>>
-
- >>>
- >>> print(len(output3))
- 10
+ >>> mindspore.ops.multinomial(input1, 10)
+ Tensor(shape=[10], dtype=Int32, value= [0, 0, 1, 0, 0, 0, 0, 0, 0, 0])
+ >>>
  >>> # case 3: The output is random, and the length of the output is the same as num_sample.
  >>> # replacement is True.
  >>> # rank is 2
- >>>
- >>>
- >>>
-
-
+ >>> input3 = mindspore.tensor([[90, 10, 0], [10, 90, 0]], mindspore.float32)
+ >>> output = mindspore.ops.multinomial(input3, 10)
+ >>> print(output)
+ [[0 0 0 0 0 0 0 0 0 0]
+ [1 0 1 1 1 1 1 1 1 1]]
  """
  def _check_valid_dim(dim, name):
  if dim not in (1, 2):
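A condensed version of case 1 above, checking only the sample count since the drawn indices are random (index 2 has probability 0 and can never appear in the result):

>>> import mindspore
>>> probs = mindspore.tensor([90 / 100, 10 / 100, 0])
>>> out = mindspore.ops.multinomial(probs, 2, replacement=False)
>>> out.shape
(2,)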
@@ -1799,7 +1864,8 @@ def multinomial_ext(input, num_samples, replacement=False, *, generator=None):

  The polynomial distribution is a probability distribution that generalizes the binomial distribution formula to
  multiple states. In the polynomial distribution, each event has a fixed probability, and the sum of these
- probabilities is 1. The purpose of the
+ probabilities is 1. The purpose of the :func:`mindspore.mint.multinomial` interface
+ is to perform `num_samples` sampling
  on the input `input`, and the output tensor is the index of the input tensor for each sampling.
  The values in `input` represent the probability of selecting the corresponding index for each sampling.

@@ -1844,7 +1910,6 @@ def multinomial_ext(input, num_samples, replacement=False, *, generator=None):

  Raises:
  TypeError: If `input` is not a Tensor whose dtype is not in float16, float32, float64 or bfloat16.
- , 或是shape为(1, 1)的Tensor
  TypeError: If `num_samples` is not an int, a Scalar of int
  or a Tensor with shape[1,] and only one int element.
  RuntimeError: If :math:`\text{num_samples} <= 0`.