mindspore 2.3.0__cp39-none-any.whl → 2.3.0rc2__cp39-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +0 -1512
- mindspore/__init__.py +1 -2
- mindspore/_c_dataengine.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +25 -5
- mindspore/_extends/graph_kernel/model/graph_parallel.py +1 -1
- mindspore/_extends/parse/__init__.py +2 -2
- mindspore/_extends/parse/compile_config.py +0 -29
- mindspore/_extends/parse/namespace.py +2 -2
- mindspore/_extends/parse/parser.py +5 -21
- mindspore/_extends/parse/resources.py +7 -5
- mindspore/_extends/parse/standard_method.py +59 -40
- mindspore/_mindspore_offline_debug.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/amp.py +5 -26
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/base.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/grad_freeze.py +2 -2
- mindspore/boost/less_batch_normalization.py +6 -9
- mindspore/common/__init__.py +1 -8
- mindspore/common/_register_for_tensor.py +9 -8
- mindspore/common/api.py +65 -275
- mindspore/common/dtype.py +4 -8
- mindspore/common/dump.py +5 -2
- mindspore/common/jit_config.py +1 -1
- mindspore/common/lazy_inline.py +2 -14
- mindspore/common/parameter.py +15 -14
- mindspore/common/recompute.py +5 -20
- mindspore/common/sparse_tensor.py +6 -21
- mindspore/common/tensor.py +52 -100
- mindspore/communication/__init__.py +11 -6
- mindspore/communication/management.py +94 -92
- mindspore/context.py +18 -180
- mindspore/dataset/engine/datasets.py +46 -69
- mindspore/dataset/engine/datasets_user_defined.py +53 -72
- mindspore/dataset/engine/datasets_vision.py +2 -2
- mindspore/dataset/engine/queue.py +38 -56
- mindspore/dataset/engine/validators.py +5 -11
- mindspore/dataset/vision/__init__.py +5 -5
- mindspore/dataset/vision/c_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +1 -1
- mindspore/dataset/vision/transforms.py +46 -591
- mindspore/dataset/vision/utils.py +1 -121
- mindspore/dataset/vision/validators.py +3 -9
- mindspore/hal/__init__.py +1 -7
- mindspore/hal/device.py +1 -1
- mindspore/include/api/model.h +0 -3
- mindspore/include/dataset/vision.h +2 -54
- mindspore/include/mindapi/base/types.h +0 -1
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +0 -35
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +0 -72
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/{aclnn_all_finite.h → aclnn_add_custom.h} +11 -9
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +12 -184
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +15 -7
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +15 -7
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +31 -77
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +31 -77
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +5 -4
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +286 -275
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -3
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_tiling.h +45 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +4 -8
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_tiling.h +4 -11
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/kernel/flash_attention_score_mix_hwsync.h +0 -18
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -6
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +75 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/kernel/matmul.h +5 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +3 -18
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_common_tiling.h +5 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_info.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_data.h +3 -36
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/kernel/matmul_stridedslice_fusion.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +4 -22
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +2 -16
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +3 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +4 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +4 -9
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +2 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +4 -10
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +12 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +1 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +2 -10
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +1 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -17
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +7 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/mindrecord/filewriter.py +2 -2
- mindspore/mint/__init__.py +40 -720
- mindspore/mint/nn/__init__.py +7 -89
- mindspore/mint/nn/functional.py +16 -165
- mindspore/mint/optim/adamw.py +16 -15
- mindspore/nn/__init__.py +2 -0
- mindspore/nn/cell.py +98 -97
- mindspore/nn/extend/basic.py +2 -2
- mindspore/nn/extend/embedding.py +1 -1
- mindspore/nn/extend/layer/normalization.py +5 -7
- mindspore/nn/generator.py +297 -0
- mindspore/nn/layer/activation.py +3 -4
- mindspore/nn/layer/basic.py +16 -79
- mindspore/nn/layer/conv.py +8 -17
- mindspore/nn/layer/embedding.py +4 -1
- mindspore/nn/layer/math.py +1 -1
- mindspore/nn/layer/normalization.py +1 -1
- mindspore/nn/layer/pooling.py +0 -5
- mindspore/nn/layer/rnn_cells.py +2 -2
- mindspore/nn/loss/loss.py +19 -19
- mindspore/nn/optim/adasum.py +1 -1
- mindspore/nn/optim/sgd.py +2 -3
- mindspore/nn/probability/distribution/exponential.py +1 -1
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/logistic.py +1 -1
- mindspore/nn/wrap/cell_wrapper.py +1 -25
- mindspore/nn/wrap/loss_scale.py +1 -24
- mindspore/numpy/array_ops.py +1 -5
- mindspore/numpy/dtypes.py +3 -3
- mindspore/numpy/math_ops.py +8 -8
- mindspore/ops/__init__.py +1 -1
- mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -75
- mindspore/ops/_vmap/vmap_array_ops.py +0 -27
- mindspore/ops/_vmap/vmap_math_ops.py +1 -29
- mindspore/ops/_vmap/vmap_nn_ops.py +18 -19
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +8 -34
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +9 -2
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -26
- mindspore/ops/auto_generate/gen_extend_func.py +27 -603
- mindspore/ops/auto_generate/gen_ops_def.py +203 -993
- mindspore/ops/auto_generate/gen_ops_prim.py +402 -1946
- mindspore/ops/auto_generate/pyboost_inner_prim.py +20 -90
- mindspore/ops/composite/base.py +6 -3
- mindspore/ops/composite/math_ops.py +1 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +17 -24
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/extend/__init__.py +3 -2
- mindspore/ops/extend/array_func.py +51 -10
- mindspore/ops/extend/nn_func.py +78 -2
- mindspore/ops/function/__init__.py +13 -8
- mindspore/ops/function/array_func.py +179 -455
- mindspore/ops/function/clip_func.py +1 -1
- mindspore/ops/function/grad/grad_func.py +3 -3
- mindspore/ops/function/math_func.py +103 -117
- mindspore/ops/function/nn_func.py +163 -275
- mindspore/ops/function/other_func.py +2 -2
- mindspore/ops/function/random_func.py +69 -202
- mindspore/ops/function/sparse_func.py +4 -4
- mindspore/ops/functional.py +327 -332
- mindspore/ops/operations/__init__.py +3 -13
- mindspore/ops/operations/_grad_ops.py +27 -3
- mindspore/ops/operations/_inner_ops.py +356 -53
- mindspore/ops/operations/_rl_inner_ops.py +2 -2
- mindspore/ops/operations/_tensor_array.py +8 -8
- mindspore/ops/operations/array_ops.py +65 -82
- mindspore/ops/operations/comm_ops.py +93 -784
- mindspore/ops/operations/custom_ops.py +28 -51
- mindspore/ops/operations/debug_ops.py +4 -4
- mindspore/ops/operations/inner_ops.py +2 -2
- mindspore/ops/operations/manually_defined/ops_def.py +4 -304
- mindspore/ops/operations/math_ops.py +50 -3
- mindspore/ops/operations/nn_ops.py +247 -14
- mindspore/ops/operations/other_ops.py +3 -3
- mindspore/ops/operations/random_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +1 -1
- mindspore/ops/primitive.py +8 -9
- mindspore/ops/silent_check.py +5 -5
- mindspore/ops_generate/arg_dtype_cast.py +9 -2
- mindspore/ops_generate/arg_handler.py +0 -26
- mindspore/ops_generate/gen_aclnn_implement.py +4 -1
- mindspore/ops_generate/gen_ops.py +4 -26
- mindspore/ops_generate/gen_pyboost_func.py +12 -41
- mindspore/ops_generate/gen_utils.py +0 -21
- mindspore/ops_generate/pyboost_utils.py +2 -7
- mindspore/ops_generate/template.py +0 -1
- mindspore/parallel/_auto_parallel_context.py +1 -21
- mindspore/parallel/_tensor.py +5 -0
- mindspore/parallel/_transformer/transformer.py +1 -1
- mindspore/parallel/_utils.py +1 -15
- mindspore/parallel/algo_parameter_config.py +3 -1
- mindspore/parallel/checkpoint_transform.py +9 -12
- mindspore/parallel/cluster/process_entity/_api.py +29 -28
- mindspore/parallel/cluster/process_entity/_utils.py +3 -13
- mindspore/parallel/cluster/run.py +16 -13
- mindspore/parallel/parameter_broadcast.py +2 -2
- mindspore/parallel/shard.py +17 -31
- mindspore/profiler/__init__.py +2 -3
- mindspore/profiler/common/util.py +2 -107
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +21 -8
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -82
- mindspore/profiler/parser/ascend_analysis/function_event.py +28 -43
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +27 -49
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +10 -15
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +20 -25
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +5 -5
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +1 -10
- mindspore/profiler/parser/ascend_hccl_generator.py +1 -4
- mindspore/profiler/parser/ascend_msprof_exporter.py +22 -43
- mindspore/profiler/parser/ascend_timeline_generator.py +5 -7
- mindspore/profiler/parser/minddata_parser.py +3 -72
- mindspore/profiler/profiling.py +59 -176
- mindspore/rewrite/api/node.py +1 -1
- mindspore/rewrite/common/namespace.py +5 -5
- mindspore/rewrite/parsers/assign_parser.py +0 -2
- mindspore/rewrite/parsers/class_def_parser.py +4 -8
- mindspore/run_check/_check_version.py +1 -1
- mindspore/scipy/fft.py +3 -1
- mindspore/scipy/linalg.py +3 -2
- mindspore/scipy/ops.py +3 -5
- mindspore/scipy/optimize/__init__.py +2 -2
- mindspore/train/__init__.py +4 -4
- mindspore/train/anf_ir_pb2.py +2 -8
- mindspore/train/callback/__init__.py +2 -5
- mindspore/train/callback/_backup_and_restore.py +2 -2
- mindspore/train/callback/_checkpoint.py +16 -104
- mindspore/train/callback/_landscape.py +1 -1
- mindspore/train/callback/_time_monitor.py +1 -1
- mindspore/train/data_sink.py +4 -5
- mindspore/train/dataset_helper.py +20 -45
- mindspore/train/model.py +38 -266
- mindspore/train/serialization.py +105 -256
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/version.py +1 -1
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +2 -2
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +303 -420
- mindspore/_extends/pijit/__init__.py +0 -23
- mindspore/_extends/pijit/pijit_func_white_list.py +0 -343
- mindspore/common/file_system.py +0 -48
- mindspore/common/generator.py +0 -260
- mindspore/common/no_inline.py +0 -54
- mindspore/common/np_dtype.py +0 -25
- mindspore/communication/comm_func.py +0 -1140
- mindspore/hal/memory.py +0 -326
- mindspore/lib/libavcodec.so.59 +0 -0
- mindspore/lib/libavdevice.so.59 +0 -0
- mindspore/lib/libavfilter.so.8 +0 -0
- mindspore/lib/libavformat.so.59 +0 -0
- mindspore/lib/libavutil.so.57 +0 -0
- mindspore/lib/libmindspore_np_dtype.so +0 -0
- mindspore/lib/libswresample.so.4 +0 -0
- mindspore/lib/libswscale.so.6 +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.cpp +0 -326
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.py +0 -180
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +0 -109
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +0 -38
- mindspore/lib/plugin/ascend/custom_compiler/OWNERS +0 -12
- mindspore/lib/plugin/ascend/custom_compiler/setup.py +0 -255
- mindspore/lib/plugin/ascend/custom_compiler/start.sh +0 -26
- mindspore/lib/plugin/ascend/custom_compiler/template.json +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/base_type.h +0 -133
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -32
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/tiling_info.h +0 -60
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -37
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_op.h +0 -34
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_backoff_base.h +0 -62
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -33
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_ops.h +0 -88
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_pa_op.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -95
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -84
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -61
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -224
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_impl.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_tiling.h +0 -187
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul.h +0 -245
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_interface.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_utils.h +0 -111
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/tiling_data.h +0 -54
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -364
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -114
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -98
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/mint/linalg/__init__.py +0 -22
- mindspore/nn/layer/embedding_service.py +0 -531
- mindspore/nn/layer/embedding_service_layer.py +0 -393
- mindspore/ops/function/reshard_func.py +0 -102
- mindspore/ops/operations/_infer_ops.py +0 -19
- mindspore/ops/operations/reshard_ops.py +0 -53
- mindspore/profiler/common/process_pool.py +0 -41
- mindspore/profiler/common/singleton.py +0 -28
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/train/callback/_cluster_monitor.py +0 -201
- mindspore/train/callback/_flops_collector.py +0 -238
- mindspore/train/callback/_mindio_ttp.py +0 -443
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
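For each entry above, the trailing "+N -M" pair is the number of lines added and removed in that file between the two wheels; binary or unchanged files appear as "+0 -0". As a rough illustration only (this is not part of the registry's tooling, and the file names below are hypothetical local downloads), the standard library is enough to compute the same kind of per-file counts:

import difflib
import zipfile

def diff_counts(old_wheel, new_wheel, member):
    """Return (added, removed) line counts for one text member of two wheels."""
    with zipfile.ZipFile(old_wheel) as old, zipfile.ZipFile(new_wheel) as new:
        old_lines = old.read(member).decode("utf-8", "replace").splitlines()
        new_lines = new.read(member).decode("utf-8", "replace").splitlines()
    added = removed = 0
    for line in difflib.unified_diff(old_lines, new_lines, lineterm=""):
        if line.startswith("+") and not line.startswith("+++"):
            added += 1
        elif line.startswith("-") and not line.startswith("---"):
            removed += 1
    return added, removed

# Hypothetical local file names; adjust to wherever the wheels were downloaded.
# print(diff_counts("mindspore-2.3.0-cp39-none-any.whl",
#                   "mindspore-2.3.0rc2-cp39-none-any.whl",
#                   "mindspore/version.py"))

The expanded text hunks from the diff follow. Long lines that the viewer cut off are reproduced as shown, without guessing at the missing text.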
@@ -37,7 +37,7 @@ def partial(func, *args):
 
     Examples:
         >>> from mindspore import Tensor
-        >>>
+        >>> import mindspore.ops as ops
        >>> def show_input(x, y, z):
         ...     return x, y, z
         >>> partial_show_input = ops.partial(show_input, Tensor(1))
@@ -81,7 +81,7 @@ def depend(value, expr):
         >>> import numpy as np
         >>> import mindspore
         >>> import mindspore.nn as nn
-        >>>
+        >>> import mindspore.ops as ops
         >>> from mindspore import Tensor
         >>> class Net(nn.Cell):
         ...     def __init__(self):
@@ -28,14 +28,10 @@ from mindspore.common.tensor import Tensor
 from mindspore.ops.operations.random_ops import RandomShuffle, RandomChoiceWithMask
 from mindspore.common.api import _function_forbid_reuse
 from mindspore.ops.auto_generate import randperm
-from mindspore.
-from mindspore.ops.auto_generate import UniformExt,
-
-
-normal_tensor_tensor_op = NormalTensorTensor()
-normal_tensor_float_op = NormalTensorFloat()
-normal_float_tensor_op = NormalFloatTensor()
-normal_float_float_op = NormalFloatFloat()
+from mindspore.nn.generator import default_generator
+from mindspore.ops.auto_generate import UniformExt, NormalExt
+
+normal_ext_op = NormalExt()
 cast_ = P.Cast()
 log_ = P.Log()
 real_div_ = P.RealDiv()
@@ -43,10 +39,6 @@ reshape_ = P.Reshape()
 shape_ = P.Shape()
 top_k_ = P.TopK()
 uniform_ = UniformExt()
-rand_ext_ = RandExt()
-rand_like_ext_ = RandLikeExt()
-generator_step_ = Tensor(10, mstype.int64)
-
 
 @constexpr
 def _set_prim_op_user_data(prim, key, value):
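Most of the hunks that follow touch the same two idioms visible in this import block: a default generator hands each call an explicit (seed, offset) pair (2.3.0 uses default_generator._step(generator_step_); 2.3.0rc2 calls default_generator() and then generator(1)), and the random primitive is tagged with _set_prim_op_user_data(op, "random_cache", False), presumably so its result is not reused from a cache. The sketch below is a plain-Python stand-in for that handoff only; _ToyGenerator and toy_rand are invented for illustration and are not MindSpore APIs.

# Plain-Python sketch of the seed/offset handoff seen in these hunks.
# _ToyGenerator and toy_rand are illustrative stand-ins, not MindSpore APIs.
class _ToyGenerator:
    def __init__(self, seed=0):
        self.seed = seed
        self.offset = 0

    def step(self, increment=10):
        # Hand out the current (seed, offset) and advance the offset,
        # mirroring the role of default_generator._step(generator_step_).
        state = (self.seed, self.offset)
        self.offset += increment
        return state


_default_toy_generator = _ToyGenerator()


def toy_rand(shape, generator=None):
    generator = generator or _default_toy_generator
    seed, offset = generator.step()
    # A real kernel (e.g. RandExt / UniformExt in the diff) would consume
    # (seed, offset) on device; here we only show what gets passed along.
    return {"shape": shape, "seed": seed, "offset": offset}


print(toy_rand((2, 3)))   # {'shape': (2, 3), 'seed': 0, 'offset': 0}
print(toy_rand((2, 3)))   # {'shape': (2, 3), 'seed': 0, 'offset': 10}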
@@ -95,8 +87,7 @@ def random_gamma(shape, alpha, seed=None):
     """
     seed1, seed2 = _get_seed(seed, "random_gamma")
     random_gamma_op = P.RandomGamma(seed1, seed2)
-    random_gamma_op = _set_prim_op_user_data(
-        random_gamma_op, "random_cache", False)
+    random_gamma_op = _set_prim_op_user_data(random_gamma_op, "random_cache", False)
     output = random_gamma_op(shape, alpha)
     return output
 
@@ -143,8 +134,7 @@ def standard_laplace(shape, seed=None):
     """
     seed1, seed2 = _get_seed(seed, "standard_laplace")
     standard_laplace_op = P.StandardLaplace(seed=seed1, seed2=seed2)
-    standard_laplace_op = _set_prim_op_user_data(
-        standard_laplace_op, "random_cache", False)
+    standard_laplace_op = _set_prim_op_user_data(standard_laplace_op, "random_cache", False)
     return standard_laplace_op(shape)
 
 
@@ -183,8 +173,7 @@ def random_categorical(logits, num_sample, seed=0, dtype=mstype.int64):
         (10, 8)
     """
     random_categorical_ = P.RandomCategorical(dtype)
-    random_categorical_ = _set_prim_op_user_data(
-        random_categorical_, "random_cache", False)
+    random_categorical_ = _set_prim_op_user_data(random_categorical_, "random_cache", False)
     return random_categorical_(logits, num_sample, seed)
 
 
@@ -244,8 +233,7 @@ def multinomial_with_replacement(x, seed, offset, numsamples, replacement=False)
     offset = Tensor(offset, dtype=mstype.int64)
     multinomial_with_replacement_ = P.MultinomialWithReplacement(numsamples=numsamples,
                                                                  replacement=replacement)
-    multinomial_with_replacement_ = _set_prim_op_user_data(
-        multinomial_with_replacement_, "random_cache", False)
+    multinomial_with_replacement_ = _set_prim_op_user_data(multinomial_with_replacement_, "random_cache", False)
     return multinomial_with_replacement_(x, seed, offset)
 
 
@@ -256,8 +244,8 @@ def uniform_ext(tensor, a, b, generator=None):
 
     Args:
         tensor (Tensor): The origin input tensor.
-        a (
-        b (
+        a (float): The lower bound of the interval.
+        b (float): The upper bound of the interval.
         generator (Generator, optional): The random seed. Default: None.
 
     Raises:
@@ -267,18 +255,16 @@ def uniform_ext(tensor, a, b, generator=None):
         Tensor, with the same shape as tensor.
 
     Examples:
+        >>> from mindspore import Tensor, ops
         >>> import mindspore
-        >>>
-        >>> x = ops.ones(
-        >>>
-        >>>
-        >>> result = ops.function.random_func.uniform_ext(x, 1., 2., generator)
-        >>> print(result.shape)
-        (4, 2)
+        >>> import numpy as np
+        >>> x = mindspore.ops.ones(4, 2)
+        >>> output = ops.uniform_ext(x, 1., 2.)
+        >>> print(result)
     """
     if generator is None:
-        generator = default_generator
-    seed, offset = generator
+        generator = default_generator()
+    seed, offset = generator(1)
     return uniform_(tensor, a, b, seed, offset)
 
 
@@ -339,25 +325,21 @@ def uniform(shape, minval, maxval, seed=None, dtype=mstype.float32):
         (3, 2, 2)
     """
     if not isinstance(minval, Tensor) or not isinstance(maxval, Tensor):
-        raise TypeError(
-            f"For functional operator[uniform], the input[minval] and input[maxval] must be a Tensor.")
+        raise TypeError(f"For functional operator[uniform], the input[minval] and input[maxval] must be a Tensor.")
 
     minval_dtype = F.dtype(minval)
     maxval_dtype = F.dtype(maxval)
-    const_utils.check_type_valid(
-        dtype, [mstype.int32, mstype.float32], 'uniform')
+    const_utils.check_type_valid(dtype, [mstype.int32, mstype.float32], 'uniform')
     const_utils.check_tensors_dtype_same(minval_dtype, dtype, "uniform")
     const_utils.check_tensors_dtype_same(maxval_dtype, dtype, "uniform")
     seed1, seed2 = _get_seed(seed, "uniform")
     if const_utils.is_same_type(dtype, mstype.int32):
         random_uniform = P.UniformInt(seed1, seed2)
-        random_uniform = _set_prim_op_user_data(
-            random_uniform, "random_cache", False)
+        random_uniform = _set_prim_op_user_data(random_uniform, "random_cache", False)
         value = random_uniform(shape, minval, maxval)
     else:
         uniform_real = P.UniformReal(seed1, seed2)
-        uniform_real = _set_prim_op_user_data(
-            uniform_real, "random_cache", False)
+        uniform_real = _set_prim_op_user_data(uniform_real, "random_cache", False)
         uniform_real = uniform_real(shape)
         value = uniform_real * (maxval - minval) + minval
     return value
@@ -400,8 +382,7 @@ def standard_normal(shape, seed=None):
     """
     seed1, seed2 = _get_seed(seed, "standard_normal")
     standard_normal_op = P.StandardNormal(seed=seed1, seed2=seed2)
-    standard_normal_op = _set_prim_op_user_data(
-        standard_normal_op, "random_cache", False)
+    standard_normal_op = _set_prim_op_user_data(standard_normal_op, "random_cache", False)
     return standard_normal_op(shape)
 
 
@@ -468,8 +449,7 @@ def uniform_candidate_sampler(true_classes,
                                            seed=seed,
                                            remove_accidental_hits=remove_accidental_hits)
     sampler_op = _set_prim_op_user_data(sampler_op, "random_cache", False)
-    sampled_candidates, true_expected_count, sampled_expected_count = sampler_op(
-        true_classes)
+    sampled_candidates, true_expected_count, sampled_expected_count = sampler_op(true_classes)
     return sampled_candidates, true_expected_count, sampled_expected_count
 
 
@@ -533,8 +513,7 @@ def random_poisson(shape, rate, seed=None, dtype=mstype.float32):
     """
     seed1, seed2 = _get_seed(seed, "random_poisson")
     prim_random_poisson = P.RandomPoisson(seed1, seed2, dtype)
-    prim_random_poisson = _set_prim_op_user_data(
-        prim_random_poisson, "random_cache", False)
+    prim_random_poisson = _set_prim_op_user_data(prim_random_poisson, "random_cache", False)
     value = prim_random_poisson(shape, rate)
     return value
 
@@ -569,8 +548,7 @@ def shuffle(x, seed=None):
     """
     seed, seed2 = _get_seed(seed, "shuffle")
     random_shuffle_ = RandomShuffle(seed=seed, seed2=seed2)
-    random_shuffle_ = _set_prim_op_user_data(
-        random_shuffle_, "random_cache", False)
+    random_shuffle_ = _set_prim_op_user_data(random_shuffle_, "random_cache", False)
     output = random_shuffle_(x)
     return output
 
@@ -624,8 +602,7 @@ def log_uniform_candidate_sampler(true_classes, num_true=1, num_sampled=5, uniqu
 
     """
 
-    sampler = P.LogUniformCandidateSampler(
-        num_true, num_sampled, unique, range_max, seed)
+    sampler = P.LogUniformCandidateSampler(num_true, num_sampled, unique, range_max, seed)
     sampler = _set_prim_op_user_data(sampler, "random_cache", False)
     return sampler(true_classes)
 
@@ -664,7 +641,7 @@ def choice_with_mask(input_x, count=256, seed=None):
     Examples:
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.ones(shape=[240000, 4]).astype(np.
+        >>> input_x = Tensor(np.ones(shape=[240000, 4]).astype(np.bool))
         >>> output_y, output_mask = ops.choice_with_mask(input_x)
         >>> result = output_y.shape
         >>> print(result)
@@ -674,10 +651,8 @@ def choice_with_mask(input_x, count=256, seed=None):
         (256,)
     """
     seed1, seed2 = _get_seed(seed, "choice_with_mask")
-    choice_with_mask_ = RandomChoiceWithMask(
-
-    choice_with_mask_ = _set_prim_op_user_data(
-        choice_with_mask_, "random_cache", False)
+    choice_with_mask_ = RandomChoiceWithMask(count=count, seed=seed1, seed2=seed2)
+    choice_with_mask_ = _set_prim_op_user_data(choice_with_mask_, "random_cache", False)
     output = choice_with_mask_(input_x)
     return output
 
@@ -687,23 +662,18 @@ def is_cpu_backend():
     """Check if the CPU is used"""
     return context.get_context('device_target') == 'CPU'
 
-
-def normal_ext(mean=0.0, std=1.0, size=None, generator=None):
+def normal_ext(mean, std, generator=None):
     r"""
     Generates random numbers according to the standard Normal (or Gaussian) random number distribution.
 
     Args:
-        mean (Union[float, Tensor]
-
-        std (Union[float, Tensor]
-
-            Default: ``1.0``.
-        size (tuple, optional): output size, where 'mean' and 'std' are constants. Default: ``None``.
-        generator (generator, optional): MindSpore generator. Default: ``None``.
+        - **mean** (Union[float, Tensor]) - The mean is a tensor with the mean of each output
+          element's normal distribution.
+        - **std** (Union[float, Tensor]) - The tensor of per-element standard deviations.
+        - **generator** (Generator, optional) - Mindspore generator.
 
     Returns:
-
-        or when 'mean' and 'std' are constants and shape is specified as 'size'.
+        - **output** (Tensor) - With the same type and shape as the 'mean'.
 
     Raises:
         TypeError: If `mean` or `std` is not Union[float, Tensor].
@@ -714,28 +684,18 @@ def normal_ext(mean=0.0, std=1.0, size=None, generator=None):
     Examples:
         >>> import mindspore
         >>> import numpy as np
-        >>>
+        >>> import mindspore.ops as ops
         >>> from mindspore import Tensor
         >>> mean = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
         >>> std = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
-        >>> output = ops.
+        >>> output = ops.normal_ext(mean, std)
         >>> print(output.shape)
         (3,)
     """
     if generator is None:
-        generator = default_generator
-    seed, offset = generator
-
-    is_mean_tensor = isinstance(mean, Tensor)
-    is_std_tensor = isinstance(std, Tensor)
-
-    if is_mean_tensor and is_std_tensor:
-        return normal_tensor_tensor_op(mean, std, seed, offset)
-    if is_mean_tensor and not is_std_tensor:
-        return normal_tensor_float_op(mean, std, seed, offset)
-    if not is_mean_tensor and is_std_tensor:
-        return normal_float_tensor_op(mean, std, seed, offset)
-    return normal_float_float_op(mean, std, size, seed, offset)
+        generator = default_generator()
+    seed, offset = generator(1)
+    return normal_ext_op(mean, std, seed, offset)
 
 
 @_function_forbid_reuse
@@ -840,8 +800,7 @@ def laplace(shape, mean, lambda_param, seed=None):
     mean_dtype = F.dtype(mean)
     lambda_param_dtype = F.dtype(lambda_param)
     const_utils.check_tensors_dtype_same(mean_dtype, mstype.float32, "laplace")
-    const_utils.check_tensors_dtype_same(
-        lambda_param_dtype, mstype.float32, "laplace")
+    const_utils.check_tensors_dtype_same(lambda_param_dtype, mstype.float32, "laplace")
     seed1, seed2 = _get_seed(seed, "laplace")
     stdlaplace = P.StandardLaplace(seed1, seed2)
     stdlaplace = _set_prim_op_user_data(stdlaplace, "random_cache", False)
@@ -979,7 +938,7 @@ def rand(*size, dtype=None, seed=None):
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>>
+        >>> import mindspore.ops as ops
         >>> print(ops.rand((2,3)))
         [[4.1702199e-01 9.9718481e-01 7.2032452e-01]
         [9.3255734e-01 1.1438108e-04 1.2812445e-01]]
@@ -987,8 +946,7 @@ def rand(*size, dtype=None, seed=None):
     if dtype is None:
         dtype = mstype.float32
     elif dtype not in mstype.float_type:
-        raise ValueError(
-            f"For 'rand', the 'dtype' must be a float type, but got {dtype}.")
+        raise ValueError(f"For 'rand', the 'dtype' must be a float type, but got {dtype}.")
     shape = _generate_shapes(size)
     seed1, seed2 = _get_seed(seed, 'rand')
     rand_op = P.UniformReal(seed1, seed2)
@@ -1031,13 +989,11 @@ def rand_like(input, seed=None, *, dtype=None):
         [9.3255734e-01 1.1438108e-04 1.2812445e-01]]
     """
     if not isinstance(input, Tensor):
-        raise TypeError(
-            f"For 'rand_like', the 'input' must be a Tensor, but got {type(input)}")
+        raise TypeError(f"For 'rand_like', the 'input' must be a Tensor, but got {type(input)}")
     if dtype is None:
         dtype = input.dtype
     if dtype not in mstype.float_type:
-        raise ValueError(
-            f"For 'rand_like', the 'dtype' must be a float type, but got {dtype}.")
+        raise ValueError(f"For 'rand_like', the 'dtype' must be a float type, but got {dtype}.")
     shape = input.shape
     seed1, seed2 = _get_seed(seed, 'rand_like')
     rand_op = P.UniformReal(seed1, seed2)
@@ -1046,76 +1002,6 @@ def rand_like(input, seed=None, *, dtype=None):
     return cast_(output, dtype)
 
 
-@_function_forbid_reuse
-def rand_ext(*size, generator=None, dtype=None):
-    r"""
-    Returns a new tensor that fills numbers from the uniform distribution over an interval :math:`[0, 1)`
-    based on the given shape and dtype.
-
-    Args:
-        size (Union[int, tuple(int), list(int)]): Shape of the new tensor, e.g. :math:`(2, 3)` or :math:`2`.
-
-    Keyword Args:
-        generator (:class:`mindspore.Generator`, optional): a pseudorandom number generator.
-            Default: ``None``, uses the default pseudorandom number generator.
-        dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype, it must be float type. If None,
-            `mindspore.float32` will be applied. Default: ``None`` .
-
-    Returns:
-        Tensor, with the designated shape and dtype, filled with random numbers from the uniform distribution on
-        the interval :math:`[0, 1)`.
-
-    Raises:
-        ValueError: If `dtype` is not a `mstype.float_type` type.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> import mindspore.ops as ops
-        >>> print(ops.function.random_func.rand_ext(2, 3).shape)
-        (2, 3)
-    """
-    if not generator:
-        generator = default_generator
-    seed, offset = generator._step(generator_step_)  # pylint: disable=protected-access
-    return rand_ext_(size, seed, offset, dtype)
-
-
-@_function_forbid_reuse
-def rand_like_ext(input, *, dtype=None):
-    r"""
-    Returns a new tensor that fills numbers from the uniform distribution over an interval :math:`[0, 1)`
-    based on the given dtype and shape of the input tensor.
-
-    Args:
-        input (Tensor): Input Tensor to specify the output shape and its default dtype.
-
-    Keyword Args:
-        dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype, it must be float type. If None,
-            the same dtype of `input` will be applied. Default: ``None`` .
-
-    Returns:
-        Tensor, with the designated shape and dtype, filled with random numbers from the uniform distribution on
-        the interval :math:`[0, 1)`.
-
-    Raises:
-        ValueError: If `dtype` is not a `mstype.float_type` type.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> import mindspore as ms
-        >>> from mindspore import Tensor, ops
-        >>> a = Tensor([[2, 3, 4], [1, 2, 3]])
-        >>> print(ops.function.random_func.rand_like_ext(a, dtype=ms.float32).shape)
-        (2, 3)
-    """
-    seed, offset = default_generator._step(generator_step_)  # pylint: disable=protected-access
-    return rand_like_ext_(input, seed, offset, dtype)
-
-
 @_function_forbid_reuse
 def randn(*size, dtype=None, seed=None):
     r"""
@@ -1143,7 +1029,7 @@ def randn(*size, dtype=None, seed=None):
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>>
+        >>> import mindspore.ops as ops
         >>> print(ops.randn((2, 2)))
         [[ 0.30639967 -0.42438635]
         [-0.4287376 1.3054721 ]]
@@ -1151,8 +1037,7 @@ def randn(*size, dtype=None, seed=None):
     if dtype is None:
         dtype = mstype.float32
     elif dtype not in mstype.float_type:
-        raise ValueError(
-            f"For 'randn', the 'dtype' must be a float type, but got {dtype}.")
+        raise ValueError(f"For 'randn', the 'dtype' must be a float type, but got {dtype}.")
     shape = _generate_shapes(size)
     seed1, seed2 = _get_seed(seed, 'randn')
     rand_op = P.StandardNormal(seed1, seed2)
@@ -1195,13 +1080,11 @@ def randn_like(input, seed=None, *, dtype=None):
         [-0.4287376 1.3054721 0.64747655]]
     """
     if not isinstance(input, Tensor):
-        raise TypeError(
-            f"For 'randn_like', the 'input' must be a Tensor, but got {type(input)}")
+        raise TypeError(f"For 'randn_like', the 'input' must be a Tensor, but got {type(input)}")
     if dtype is None:
         dtype = mstype.float32
     if dtype not in mstype.float_type:
-        raise ValueError(
-            f"For 'randn_like', the 'dtype' must be a float type, but got {dtype}.")
+        raise ValueError(f"For 'randn_like', the 'dtype' must be a float type, but got {dtype}.")
     shape = input.shape
     seed1, seed2 = _get_seed(seed, 'randn_like')
     rand_op = P.StandardNormal(seed1, seed2)
@@ -1240,7 +1123,7 @@ def randint(low, high, size, seed=None, *, dtype=None):
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>>
+        >>> import mindspore.ops as ops
         >>> print(ops.randint(1, 10, (2,3)))
         [[4 9 7]
         [9 1 2]]
@@ -1248,17 +1131,13 @@ def randint(low, high, size, seed=None, *, dtype=None):
     if dtype is None:
         dtype = mstype.int64
     elif dtype not in mstype.int_type:
-        raise ValueError(
-            f"For 'randint', the 'dtype' must be an int type, but got {dtype}.")
+        raise ValueError(f"For 'randint', the 'dtype' must be an int type, but got {dtype}.")
     if not isinstance(size, tuple):
-        raise ValueError(
-            f"For 'randint', the input 'size' must be a tuple, but got {size}.")
+        raise ValueError(f"For 'randint', the input 'size' must be a tuple, but got {size}.")
     if not isinstance(low, int) or isinstance(low, bool):
-        raise TypeError(
-            f"For 'randint_like', 'low' must be an int, but got {type(low)}.")
+        raise TypeError(f"For 'randint_like', 'low' must be an int, but got {type(low)}.")
     if not isinstance(high, int) or isinstance(high, bool):
-        raise TypeError(
-            f"For 'randint_like', 'high' must be an int, but got {type(high)}.")
+        raise TypeError(f"For 'randint_like', 'high' must be an int, but got {type(high)}.")
     seed1, seed2 = _get_seed(seed, 'randint')
     rand_op = P.UniformInt(seed1, seed2)
     rand_op = _set_prim_op_user_data(rand_op, "random_cache", False)
@@ -1304,19 +1183,15 @@ def randint_like(input, low, high, seed=None, *, dtype=None):
         [9 1 2]]
     """
     if not isinstance(input, Tensor):
-        raise TypeError(
-            f"For 'randint_like', the 'input' must be a Tensor, but got {type(input)}")
+        raise TypeError(f"For 'randint_like', the 'input' must be a Tensor, but got {type(input)}")
     if dtype is None:
         dtype = input.dtype
     if dtype not in mstype.int_type:
-        raise ValueError(
-            f"For 'randint_like', the 'dtype' must be an int type, but got {dtype}.")
+        raise ValueError(f"For 'randint_like', the 'dtype' must be an int type, but got {dtype}.")
     if not isinstance(low, int) or isinstance(low, bool):
-        raise TypeError(
-            f"For 'randint_like', 'low' must be an int, but got {type(low)}.")
+        raise TypeError(f"For 'randint_like', 'low' must be an int, but got {type(low)}.")
     if not isinstance(high, int) or isinstance(high, bool):
-        raise TypeError(
-            f"For 'randint_like', 'high' must be an int, but got {type(high)}.")
+        raise TypeError(f"For 'randint_like', 'high' must be an int, but got {type(high)}.")
     size = input.shape
     seed1, seed2 = _get_seed(seed, 'randint_like')
     rand_op = P.UniformInt(seed1, seed2)
@@ -1377,8 +1252,7 @@ def poisson(shape, mean, seed=None):
     """
    seed1, seed2 = _get_seed(seed, "poisson")
     random_poisson_op = P.Poisson(seed1, seed2)
-    random_poisson_op = _set_prim_op_user_data(
-        random_poisson_op, "random_cache", False)
+    random_poisson_op = _set_prim_op_user_data(random_poisson_op, "random_cache", False)
     value = random_poisson_op(shape, mean)
     return value
 
@@ -1475,8 +1349,7 @@ def multinomial(input, num_samples, replacement=True, seed=None):
     """
     def _check_valid_dim(dim, name):
         if dim not in (1, 2):
-            raise ValueError(
-                f"For '{name}', the dimension of inputs must be 1d or 2d, but got {dim}.")
+            raise ValueError(f"For '{name}', the dimension of inputs must be 1d or 2d, but got {dim}.")
 
     _check_valid_dim(len(shape_(input)), "multinomial")
     seed1, seed2 = _get_seed(seed, "multinomial")
@@ -1490,34 +1363,29 @@ def multinomial(input, num_samples, replacement=True, seed=None):
     if len(shape_(input)) > 1:
         n_dist = shape_(input)[-2]
         random_uniform_real = P.UniformReal(seed1, seed2)
-        random_cache_op = _set_prim_op_user_data(
-            random_uniform_real, "random_cache", False)
+        random_cache_op = _set_prim_op_user_data(random_uniform_real, "random_cache", False)
         random_uniform = random_cache_op((n_dist * shape_(input)[-1],))
         if n_dist != 1:
-            random_uniform = reshape_(
-
+            random_uniform = reshape_(random_uniform, (n_dist, shape_(input)[-1]))
+
 
         vals = real_div_(log_(random_uniform), input + 1e-6)
         _, indices = top_k_(vals, num_samples)
         return indices
     random_nomial = P.Multinomial(seed1, seed2)
-    random_nomial = _set_prim_op_user_data(
-        random_nomial, "random_cache", False)
+    random_nomial = _set_prim_op_user_data(random_nomial, "random_cache", False)
     return random_nomial(input, num_samples)
 
 
 def _check_shape(input_shape):
     """Check 'shape' value."""
     if not isinstance(input_shape, tuple):
-        const_utils.raise_type_error(
-            f"Type of 'shape' must be tuple, but got: {type(input_shape)}")
+        const_utils.raise_type_error(f"Type of 'shape' must be tuple, but got: {type(input_shape)}")
     for item in input_shape:
         if not isinstance(item, int):
-            const_utils.raise_type_error(
-                f"Elements of 'shape' must be int, but got: {type(item)}")
+            const_utils.raise_type_error(f"Elements of 'shape' must be int, but got: {type(item)}")
         if item < 1:
-            const_utils.raise_value_error(
-                f"Elements of 'shape' must be positive int, but got: {item}")
+            const_utils.raise_value_error(f"Elements of 'shape' must be positive int, but got: {item}")
     return True
 
 
@@ -1530,10 +1398,9 @@ def _check_param(op_name, param_name, param_value):
 
 
 __all__ = [
-    'standard_laplace', 'random_categorical', 'uniform', 'standard_normal', 'random_gamma',
+    'standard_laplace', 'random_categorical', 'uniform', 'uniform_ext', 'standard_normal', 'random_gamma',
     'uniform_candidate_sampler', 'random_poisson', 'log_uniform_candidate_sampler', 'shuffle', 'choice_with_mask',
-    'normal', 'laplace', 'gamma', 'poisson', 'multinomial', 'rand', 'rand_like',
-    'randn', 'randn_like',
+    'normal_ext', 'normal', 'laplace', 'gamma', 'poisson', 'multinomial', 'rand', 'rand_like', 'randn', 'randn_like',
     'randint', 'randint_like', 'multinomial_with_replacement', 'randperm'
 ]
 __all__.sort()
@@ -140,7 +140,7 @@ def coalesce(x_indices: Tensor, x_values: Tensor, x_shape: Tensor) -> Tuple[Tens
 
     Examples:
         >>> import mindspore
-        >>>
+        >>> import mindspore.ops as ops
         >>> from mindspore import Tensor
         >>> x_indices = Tensor([[0, 0, 1], [1, 1, 2]], dtype=ms.int64)
         >>> x_values = Tensor([1, 5, 4], dtype=ms.float32)
@@ -271,7 +271,7 @@ def csr_mm(a: CSRTensor, b: CSRTensor, trans_a: bool = False, trans_b: bool = Fa
     Examples:
         >>> from mindspore import Tensor, CSRTensor
         >>> from mindspore import dtype as mstype
-        >>>
+        >>> import mindspore.ops as ops
         >>> a_shape = (4, 5)
         >>> a_indptr = Tensor([0, 1, 1, 3, 4], dtype=mstype.int32)
         >>> a_indices = Tensor([0, 3, 4, 0],dtype=mstype.int32)
@@ -745,7 +745,7 @@ def csr_softmax(logits: CSRTensor, dtype: mstype):
 
     Examples:
         >>> import mindspore as ms
-        >>>
+        >>> import mindspore.ops as ops
         >>> import mindspore.common.dtype as mstype
         >>> from mindspore import Tensor, CSRTensor
         >>> logits_indptr = Tensor([0, 4, 6], dtype=mstype.int32)
@@ -807,7 +807,7 @@ def csr_add(a: CSRTensor, b: CSRTensor, alpha: Tensor, beta: Tensor) -> CSRTenso
     Examples:
         >>> import mindspore.common.dtype as mstype
         >>> from mindspore import Tensor, CSRTensor
-        >>>
+        >>> import mindspore.ops as ops
         >>> a_indptr = Tensor([0, 1, 2], dtype=mstype.int32)
         >>> a_indices = Tensor([0, 1], dtype=mstype.int32)
         >>> a_values = Tensor([1, 2], dtype=mstype.float32)