mindspore 2.3.0__cp39-none-any.whl → 2.3.0rc2__cp39-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +0 -1512
- mindspore/__init__.py +1 -2
- mindspore/_c_dataengine.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +25 -5
- mindspore/_extends/graph_kernel/model/graph_parallel.py +1 -1
- mindspore/_extends/parse/__init__.py +2 -2
- mindspore/_extends/parse/compile_config.py +0 -29
- mindspore/_extends/parse/namespace.py +2 -2
- mindspore/_extends/parse/parser.py +5 -21
- mindspore/_extends/parse/resources.py +7 -5
- mindspore/_extends/parse/standard_method.py +59 -40
- mindspore/_mindspore_offline_debug.cpython-39-aarch64-linux-gnu.so +0 -0
- mindspore/amp.py +5 -26
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/base.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/grad_freeze.py +2 -2
- mindspore/boost/less_batch_normalization.py +6 -9
- mindspore/common/__init__.py +1 -8
- mindspore/common/_register_for_tensor.py +9 -8
- mindspore/common/api.py +65 -275
- mindspore/common/dtype.py +4 -8
- mindspore/common/dump.py +5 -2
- mindspore/common/jit_config.py +1 -1
- mindspore/common/lazy_inline.py +2 -14
- mindspore/common/parameter.py +15 -14
- mindspore/common/recompute.py +5 -20
- mindspore/common/sparse_tensor.py +6 -21
- mindspore/common/tensor.py +52 -100
- mindspore/communication/__init__.py +11 -6
- mindspore/communication/management.py +94 -92
- mindspore/context.py +18 -180
- mindspore/dataset/engine/datasets.py +46 -69
- mindspore/dataset/engine/datasets_user_defined.py +53 -72
- mindspore/dataset/engine/datasets_vision.py +2 -2
- mindspore/dataset/engine/queue.py +38 -56
- mindspore/dataset/engine/validators.py +5 -11
- mindspore/dataset/vision/__init__.py +5 -5
- mindspore/dataset/vision/c_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +1 -1
- mindspore/dataset/vision/transforms.py +46 -591
- mindspore/dataset/vision/utils.py +1 -121
- mindspore/dataset/vision/validators.py +3 -9
- mindspore/hal/__init__.py +1 -7
- mindspore/hal/device.py +1 -1
- mindspore/include/api/model.h +0 -3
- mindspore/include/dataset/vision.h +2 -54
- mindspore/include/mindapi/base/types.h +0 -1
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +0 -35
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +0 -72
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/{aclnn_all_finite.h → aclnn_add_custom.h} +11 -9
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +1 -1
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +12 -184
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +15 -7
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +15 -7
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +31 -77
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +31 -77
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +5 -4
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +286 -275
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -3
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_tiling.h +45 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +4 -8
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_tiling.h +4 -11
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/kernel/flash_attention_score_mix_hwsync.h +0 -18
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -6
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +75 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/kernel/matmul.h +5 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +3 -18
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_common_tiling.h +5 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_info.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_data.h +3 -36
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/kernel/matmul_stridedslice_fusion.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +4 -22
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +2 -16
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +3 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +4 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +4 -9
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +2 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +4 -10
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +12 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +1 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +2 -10
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +1 -5
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -17
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +7 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/mindrecord/filewriter.py +2 -2
- mindspore/mint/__init__.py +40 -720
- mindspore/mint/nn/__init__.py +7 -89
- mindspore/mint/nn/functional.py +16 -165
- mindspore/mint/optim/adamw.py +16 -15
- mindspore/nn/__init__.py +2 -0
- mindspore/nn/cell.py +98 -97
- mindspore/nn/extend/basic.py +2 -2
- mindspore/nn/extend/embedding.py +1 -1
- mindspore/nn/extend/layer/normalization.py +5 -7
- mindspore/nn/generator.py +297 -0
- mindspore/nn/layer/activation.py +3 -4
- mindspore/nn/layer/basic.py +16 -79
- mindspore/nn/layer/conv.py +8 -17
- mindspore/nn/layer/embedding.py +4 -1
- mindspore/nn/layer/math.py +1 -1
- mindspore/nn/layer/normalization.py +1 -1
- mindspore/nn/layer/pooling.py +0 -5
- mindspore/nn/layer/rnn_cells.py +2 -2
- mindspore/nn/loss/loss.py +19 -19
- mindspore/nn/optim/adasum.py +1 -1
- mindspore/nn/optim/sgd.py +2 -3
- mindspore/nn/probability/distribution/exponential.py +1 -1
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/logistic.py +1 -1
- mindspore/nn/wrap/cell_wrapper.py +1 -25
- mindspore/nn/wrap/loss_scale.py +1 -24
- mindspore/numpy/array_ops.py +1 -5
- mindspore/numpy/dtypes.py +3 -3
- mindspore/numpy/math_ops.py +8 -8
- mindspore/ops/__init__.py +1 -1
- mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -75
- mindspore/ops/_vmap/vmap_array_ops.py +0 -27
- mindspore/ops/_vmap/vmap_math_ops.py +1 -29
- mindspore/ops/_vmap/vmap_nn_ops.py +18 -19
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +8 -34
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +9 -2
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -26
- mindspore/ops/auto_generate/gen_extend_func.py +27 -603
- mindspore/ops/auto_generate/gen_ops_def.py +203 -993
- mindspore/ops/auto_generate/gen_ops_prim.py +402 -1946
- mindspore/ops/auto_generate/pyboost_inner_prim.py +20 -90
- mindspore/ops/composite/base.py +6 -3
- mindspore/ops/composite/math_ops.py +1 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +17 -24
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/extend/__init__.py +3 -2
- mindspore/ops/extend/array_func.py +51 -10
- mindspore/ops/extend/nn_func.py +78 -2
- mindspore/ops/function/__init__.py +13 -8
- mindspore/ops/function/array_func.py +179 -455
- mindspore/ops/function/clip_func.py +1 -1
- mindspore/ops/function/grad/grad_func.py +3 -3
- mindspore/ops/function/math_func.py +103 -117
- mindspore/ops/function/nn_func.py +163 -275
- mindspore/ops/function/other_func.py +2 -2
- mindspore/ops/function/random_func.py +69 -202
- mindspore/ops/function/sparse_func.py +4 -4
- mindspore/ops/functional.py +327 -332
- mindspore/ops/operations/__init__.py +3 -13
- mindspore/ops/operations/_grad_ops.py +27 -3
- mindspore/ops/operations/_inner_ops.py +356 -53
- mindspore/ops/operations/_rl_inner_ops.py +2 -2
- mindspore/ops/operations/_tensor_array.py +8 -8
- mindspore/ops/operations/array_ops.py +65 -82
- mindspore/ops/operations/comm_ops.py +93 -784
- mindspore/ops/operations/custom_ops.py +28 -51
- mindspore/ops/operations/debug_ops.py +4 -4
- mindspore/ops/operations/inner_ops.py +2 -2
- mindspore/ops/operations/manually_defined/ops_def.py +4 -304
- mindspore/ops/operations/math_ops.py +50 -3
- mindspore/ops/operations/nn_ops.py +247 -14
- mindspore/ops/operations/other_ops.py +3 -3
- mindspore/ops/operations/random_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +1 -1
- mindspore/ops/primitive.py +8 -9
- mindspore/ops/silent_check.py +5 -5
- mindspore/ops_generate/arg_dtype_cast.py +9 -2
- mindspore/ops_generate/arg_handler.py +0 -26
- mindspore/ops_generate/gen_aclnn_implement.py +4 -1
- mindspore/ops_generate/gen_ops.py +4 -26
- mindspore/ops_generate/gen_pyboost_func.py +12 -41
- mindspore/ops_generate/gen_utils.py +0 -21
- mindspore/ops_generate/pyboost_utils.py +2 -7
- mindspore/ops_generate/template.py +0 -1
- mindspore/parallel/_auto_parallel_context.py +1 -21
- mindspore/parallel/_tensor.py +5 -0
- mindspore/parallel/_transformer/transformer.py +1 -1
- mindspore/parallel/_utils.py +1 -15
- mindspore/parallel/algo_parameter_config.py +3 -1
- mindspore/parallel/checkpoint_transform.py +9 -12
- mindspore/parallel/cluster/process_entity/_api.py +29 -28
- mindspore/parallel/cluster/process_entity/_utils.py +3 -13
- mindspore/parallel/cluster/run.py +16 -13
- mindspore/parallel/parameter_broadcast.py +2 -2
- mindspore/parallel/shard.py +17 -31
- mindspore/profiler/__init__.py +2 -3
- mindspore/profiler/common/util.py +2 -107
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +21 -8
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -82
- mindspore/profiler/parser/ascend_analysis/function_event.py +28 -43
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +27 -49
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +10 -15
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +20 -25
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +5 -5
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +1 -10
- mindspore/profiler/parser/ascend_hccl_generator.py +1 -4
- mindspore/profiler/parser/ascend_msprof_exporter.py +22 -43
- mindspore/profiler/parser/ascend_timeline_generator.py +5 -7
- mindspore/profiler/parser/minddata_parser.py +3 -72
- mindspore/profiler/profiling.py +59 -176
- mindspore/rewrite/api/node.py +1 -1
- mindspore/rewrite/common/namespace.py +5 -5
- mindspore/rewrite/parsers/assign_parser.py +0 -2
- mindspore/rewrite/parsers/class_def_parser.py +4 -8
- mindspore/run_check/_check_version.py +1 -1
- mindspore/scipy/fft.py +3 -1
- mindspore/scipy/linalg.py +3 -2
- mindspore/scipy/ops.py +3 -5
- mindspore/scipy/optimize/__init__.py +2 -2
- mindspore/train/__init__.py +4 -4
- mindspore/train/anf_ir_pb2.py +2 -8
- mindspore/train/callback/__init__.py +2 -5
- mindspore/train/callback/_backup_and_restore.py +2 -2
- mindspore/train/callback/_checkpoint.py +16 -104
- mindspore/train/callback/_landscape.py +1 -1
- mindspore/train/callback/_time_monitor.py +1 -1
- mindspore/train/data_sink.py +4 -5
- mindspore/train/dataset_helper.py +20 -45
- mindspore/train/model.py +38 -266
- mindspore/train/serialization.py +105 -256
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/version.py +1 -1
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +2 -2
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +303 -420
- mindspore/_extends/pijit/__init__.py +0 -23
- mindspore/_extends/pijit/pijit_func_white_list.py +0 -343
- mindspore/common/file_system.py +0 -48
- mindspore/common/generator.py +0 -260
- mindspore/common/no_inline.py +0 -54
- mindspore/common/np_dtype.py +0 -25
- mindspore/communication/comm_func.py +0 -1140
- mindspore/hal/memory.py +0 -326
- mindspore/lib/libavcodec.so.59 +0 -0
- mindspore/lib/libavdevice.so.59 +0 -0
- mindspore/lib/libavfilter.so.8 +0 -0
- mindspore/lib/libavformat.so.59 +0 -0
- mindspore/lib/libavutil.so.57 +0 -0
- mindspore/lib/libmindspore_np_dtype.so +0 -0
- mindspore/lib/libswresample.so.4 +0 -0
- mindspore/lib/libswscale.so.6 +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.cpp +0 -326
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.py +0 -180
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.json +0 -58
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.o +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +0 -109
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +0 -38
- mindspore/lib/plugin/ascend/custom_compiler/OWNERS +0 -12
- mindspore/lib/plugin/ascend/custom_compiler/setup.py +0 -255
- mindspore/lib/plugin/ascend/custom_compiler/start.sh +0 -26
- mindspore/lib/plugin/ascend/custom_compiler/template.json +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/base_type.h +0 -133
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -32
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/tiling_info.h +0 -60
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -37
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_op.h +0 -34
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_backoff_base.h +0 -62
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -33
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_ops.h +0 -88
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_pa_op.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -95
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -84
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -61
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -224
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_impl.h +0 -45
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_tiling.h +0 -187
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul.h +0 -245
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_interface.h +0 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_utils.h +0 -111
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/tiling_data.h +0 -54
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -364
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -69
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -114
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -98
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.json +0 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aic_0.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aiv_0.o +0 -0
- mindspore/mint/linalg/__init__.py +0 -22
- mindspore/nn/layer/embedding_service.py +0 -531
- mindspore/nn/layer/embedding_service_layer.py +0 -393
- mindspore/ops/function/reshard_func.py +0 -102
- mindspore/ops/operations/_infer_ops.py +0 -19
- mindspore/ops/operations/reshard_ops.py +0 -53
- mindspore/profiler/common/process_pool.py +0 -41
- mindspore/profiler/common/singleton.py +0 -28
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/train/callback/_cluster_monitor.py +0 -201
- mindspore/train/callback/_flops_collector.py +0 -238
- mindspore/train/callback/_mindio_ttp.py +0 -443
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
mindspore/nn/generator.py
ADDED
@@ -0,0 +1,297 @@
+# Copyright 2024 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Generator"""
+import os
+
+import numpy as np
+
+from mindspore import context
+from mindspore.common.parameter import Parameter
+from mindspore.nn.cell import Cell
+from mindspore.ops.operations import Assign, AssignAdd, Depend
+
+
+class Generator(Cell):
+    """
+    A generator that manages the state of random numbers and provides seed and offset for random functions.
+    When the seed and offset are fixed, the random function generates the same random sequence.
+
+    Inputs:
+        - **step** (int) - Set the step size for offset update.
+
+    Outputs:
+        Tuple consisting of the seed and offset of generator.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore as ms
+        >>> from mindspore.nn import Generator
+        >>> import numpy as np
+        >>> np.random.seed(10)
+        >>> ms.set_context(mode=1)
+        >>> generator = Generator()
+        >>> print(generator.get_state())
+        (Tensor(shape=[], dtype=Int32, value= 0), Tensor(shape=[], dtype=Int32, value= 0))
+        >>> print(generator(12))
+        (0, 0)
+        >>> print(generator.get_state())
+        (Tensor(shape=[], dtype=Int32, value= 0), Tensor(shape=[], dtype=Int32, value= 12))
+        >>> generator.manual_seed(20)
+        >>> print(generator.get_state())
+        (Tensor(shape=[], dtype=Int32, value= 20), Tensor(shape=[], dtype=Int32, value= 0))
+        >>> print(generator.seed())
+        1165313289
+        >>> print(generator.initial_seed())
+        1165313289
+    """
+
+    def __init__(self):
+        super(Generator, self).__init__()
+        self._assign = Assign().set_device("CPU")
+        self._assign_add = AssignAdd().set_device("CPU")
+        self._depend = Depend()
+        self._seed = Parameter(0, name="seed", requires_grad=False)
+        self._offset = Parameter(0, name="offset", requires_grad=False)
+        self._seed_val = 0
+        self._offset_val = 0
+
+    def set_state(self, seed, offset=None):  # pylint: disable=redefined-outer-name
+        """
+        Sets the generator state.
+
+        Args:
+            seed (int): Seed of the generator.
+            offset (int, optional): Offset of the generator, default: ``None`` , means ``0``.
+        """
+        self._seed_val = int(seed)
+        self._assign(self._seed, self._seed_val)
+        if offset is None:
+            offset = 0
+        self._offset_val = int(offset)
+        self._assign(self._offset, self._offset_val)
+
+    def get_state(self):
+        """
+        Get the generator state.
+
+        Returns:
+            Tuple consisting of the seed and offset of generator.
+        """
+        return self._seed.value(), self._offset.value()
+
+    def seed(self):  # pylint: disable=redefined-outer-name
+        """
+        Generate random seeds that can be used as seeds for generator.
+
+        Returns:
+            Tensor, randomly generated seeds.
+        """
+        seed_ = np.random.randint(np.iinfo(np.int32).min, np.iinfo(np.int32).max)
+        self.set_state(seed_)
+        return self._seed.value()
+
+    def manual_seed(self, seed):  # pylint: disable=redefined-outer-name
+        """
+        Sets the generator seed.
+
+        Args:
+            seed (int): Sets the generator seed.
+
+        Returns:
+            The generator self.
+        """
+        self.set_state(seed)
+        return self
+
+    def initial_seed(self):
+        """
+        Return the initial seed of generator.
+
+        Returns:
+            The initial seed of generator.
+        """
+        return self._seed.value()
+
+    def construct(self, step):
+        """
+        Update the value of offset, and return the seed and the previous offset.
+
+        Args:
+            step (int): Update offset by step.
+
+        Returns:
+            Seed and offset before update.
+        """
+        offset = self._offset.value()
+        step = self._depend(step, offset)
+        self._assign_add(self._offset, step)
+        return self._seed.value(), offset
+
+    def __call__(self, step):
+        if os.getenv("MS_JIT") != '0' and context.get_context("mode") == context.GRAPH_MODE:
+            return super().__call__(step)
+
+        offset_val = self._offset_val
+        self._offset_val += step
+        self._offset.set_data(self._offset_val)
+        return self._seed_val, offset_val
+
+
+default_generator_ = None
+
+
+def _init_default_generator():
+    global default_generator_
+    default_generator_ = Generator()
+    default_generator_.seed()
+
+
+def default_generator():
+    """
+    Return the default generator object.
+
+    When the user does not specify generator, the random operator invokes default generator to generate random numbers.
+
+    Returns:
+        The default generator.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> from mindspore.nn import default_generator
+        >>> default_gen = default_generator()
+        >>> print(type(default_gen))
+        <class 'mindspore.nn.generator.Generator'>
+    """
+    if default_generator_ is None:
+        _init_default_generator()
+    return default_generator_
+
+
+def seed():  # pylint: disable=redefined-outer-name
+    """
+    Generate random seeds that can be used as seeds for default generator.
+
+    Returns:
+        Randomly generated seeds.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore.nn import seed
+        >>> np.random.seed(20)
+        >>> print(seed())
+        1663920602
+    """
+    if default_generator_ is None:
+        _init_default_generator()
+    return default_generator_.seed()
+
+
+def manual_seed(seed):  # pylint: disable=redefined-outer-name
+    """
+    Sets the default generator seed.
+
+    Args:
+        seed (int): Sets the default generator seed.
+
+    Returns:
+        The default generator self.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> from mindspore.nn import manual_seed, initial_seed
+        >>> manual_seed(13)
+        >>> print(initial_seed())
+        13
+    """
+    if default_generator_ is None:
+        _init_default_generator()
+    default_generator_.manual_seed(seed)
+
+
+def initial_seed():
+    """
+    Return the initial seed of default generator.
+
+    Returns:
+        The initial seed of default generator.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> from mindspore.nn import manual_seed, initial_seed
+        >>> manual_seed(14)
+        >>> print(initial_seed())
+        14
+    """
+    if default_generator_ is None:
+        _init_default_generator()
+    return default_generator_.initial_seed()
+
+
+def get_rng_state():
+    """
+    Get the default generator state.
+
+    Returns:
+        Tuple consisting of the seed and offset of default generator.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore.nn import get_rng_state
+        >>> np.random.seed(20)
+        >>> print(get_rng_state())
+        (Tensor(shape=[], dtype=Int32, value= 378518883), Tensor(shape=[], dtype=Int32, value= 0))
+    """
+    if default_generator_ is None:
+        _init_default_generator()
+    return default_generator_.get_state()
+
+
+def set_rng_state(seed, offset=None):  # pylint: disable=redefined-outer-name
+    """
+    Sets the default generator state.
+
+    Args:
+        seed (int): Seed of the default generator.
+        offset (int, optional): Offset of the default generator, default: ``None`` , means ``0``.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> from mindspore.nn import set_rng_state, get_rng_state
+        >>> set_rng_state(10)
+        >>> print(get_rng_state())
+        (Tensor(shape=[], dtype=Int32, value= 10), Tensor(shape=[], dtype=Int32, value= 0))
+    """
+    if default_generator_ is None:
+        _init_default_generator()
+    default_generator_.set_state(seed, offset)
+
+
+__all__ = ["Generator", "default_generator", "seed", "manual_seed", "initial_seed", "set_rng_state", "get_rng_state"]
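For context: Generator hands out (seed, offset) pairs. Each call advances the offset by `step` and returns the state from before the update, so consecutive random ops consume disjoint slices of the random stream. A minimal pure-Python sketch of that contract (an illustrative stand-in, not the MindSpore class itself):

    # Sketch of the (seed, offset) bookkeeping done by Generator.__call__
    # on the pynative path above; plain Python, no MindSpore required.
    class GeneratorSketch:
        def __init__(self, seed=0):
            self.seed_val = seed      # fixed until re-seeded
            self.offset_val = 0       # advances monotonically

        def __call__(self, step):
            prev = self.offset_val    # state handed to the calling random op
            self.offset_val += step   # reserve `step` states for that op
            return self.seed_val, prev

    gen = GeneratorSketch(seed=20)
    print(gen(12))  # (20, 0): the pre-update state, as in the docstring example
    print(gen(12))  # (20, 12): the next op gets a disjoint slice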
mindspore/nn/layer/activation.py
CHANGED
@@ -351,17 +351,16 @@ class ELU(Cell):
 
     Args:
         alpha (float): The alpha value of ELU, the data type is float. Default: ``1.0`` .
-            Only alpha equal to ``1.0`` is supported currently.
 
     Inputs:
-        - **
+        - **x** (Tensor) - The input of ELU is a Tensor of any dimension with data type of float16 or float32.
 
     Outputs:
-        Tensor, with the same type and shape as the `
+        Tensor, with the same type and shape as the `x`.
 
     Raises:
         TypeError: If `alpha` is not a float.
-        TypeError: If dtype of `
+        TypeError: If dtype of `x` is neither float16 nor float32.
         ValueError: If `alpha` is not equal to 1.0.
 
     Supported Platforms:
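For reference, the standard ELU definition behind these docstrings (general background, not part of the diff): ELU(x) = x for x > 0, and alpha * (exp(x) - 1) for x <= 0. With the only supported value alpha = 1.0, the negative branch reduces to exp(x) - 1.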
mindspore/nn/layer/basic.py
CHANGED
@@ -28,7 +28,6 @@ from mindspore.common.initializer import initializer, HeUniform, Uniform
 from mindspore.ops import operations as P
 from mindspore.ops import functional as F
 from mindspore.ops.function.nn_func import interpolate_ext
-from mindspore.ops.auto_generate import unfold_ext
 from mindspore.ops.operations import _inner_ops as inner
 from mindspore.ops.primitive import constexpr, Primitive, _primexpr
 from mindspore.common.parameter import Parameter
@@ -37,8 +36,8 @@ from mindspore import _checkparam as Validator
 from mindspore.nn.cell import Cell
 from mindspore.nn.layer.activation import get_activation
 from mindspore.common._decorator import deprecated
-from mindspore.ops.auto_generate import dropout_ext_op
-from mindspore.
+from mindspore.ops.auto_generate import dropout_ext_op
+from mindspore.nn.generator import default_generator
 
 __all__ = ['Dropout', 'Flatten', 'Dense', 'ClipByNorm', 'Norm', 'OneHot', 'Pad', 'Unfold', 'Tril', 'Triu',
            'MatrixDiag', 'MatrixDiagPart', 'MatrixSetDiag', 'L1Regularizer', 'Dropout1d',
@@ -222,20 +221,20 @@ class DropoutExt(Cell):
 
     Note:
         - Each channel will be zeroed out independently on every construct call.
-
+          Parameter `p` means the probability of the element of the input tensor to be zeroed.
 
     Args:
-        p (float): The dropout rate
-            Default: ``0.5`` .
+        p (float): The dropout rate, greater than or equal to 0 and less than 1.
+            E.g. rate=0.9, dropping out 90% of input neurons. Default: ``0.5`` .
 
     Inputs:
-        - **x** (Tensor) - The input of Dropout.
+        - **x** (Tensor) - The input of Dropout with data type of float16 or float32.
 
     Outputs:
         Tensor, output tensor with the same shape as the `x`.
 
     Raises:
-
+        ValueError: If `p` is not in range [0, 1).
         ValueError: If length of shape of `x` is less than 1.
 
     Supported Platforms:
@@ -256,15 +255,16 @@ class DropoutExt(Cell):
     def __init__(self, p=0.5):
         """Initialize DropoutExt."""
         super(DropoutExt, self).__init__()
+        self.generator = default_generator()
+        self.dropout = dropout_ext_op
         self.p = p
-        self.generator_step = Tensor(1, mstype.int64)
 
     def construct(self, x):
         if not self.training or self.p == 0:
             return x
 
-        seed, offset =
-        out, _ =
+        seed, offset = self.generator(1)
+        out, _ = self.dropout(x, self.p, seed, offset)
         return out
 
 
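For context: the rewritten DropoutExt pulls a fresh (seed, offset) pair from the shared default generator on every forward pass, instead of keeping a fixed `generator_step` tensor. A hedged sketch of that state handoff, using only the rc2-side names visible in this diff:

    # Sketch: how DropoutExt.construct sources randomness after this change.
    # Assumes the rc2 layout above, where the generator lives in mindspore.nn.
    from mindspore.nn import default_generator, manual_seed

    manual_seed(7)             # make the stream reproducible
    gen = default_generator()
    seed, offset = gen(1)      # reserve one state; returns the pre-update pair
    # dropout_ext_op(x, p, seed, offset) would consume exactly this slice;
    # the next gen(1) yields (seed, offset + 1), so each call masks differently.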
@@ -477,16 +477,16 @@ class Upsample(Cell):
 
 class UpsampleExt(Cell):
     r"""
-    For details, please refer to :func:`mindspore.mint.
+    For details, please refer to :func:`mindspore.mint.interpolate`.
 
     Supported Platforms:
-        ``Ascend``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> import mindspore as ms
-        >>> from mindspore import
+        >>> from mindspore import mint
         >>> x = ms.Tensor([[[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]]])
-        >>> upsample =
+        >>> upsample = mint.Upsample(size=(5, 5))
         >>> out = upsample(x)
         >>> print(x.asnumpy())
         [[[[1. 2. 3. 4.]
@@ -535,8 +535,7 @@ class Flatten(Cell):
         TypeError: If `x` is not a Tensor.
         TypeError: If `start_dim` or `end_dim` is not int.
         ValueError: If `start_dim` is greater than `end_dim` after canonicalized.
-        ValueError: If `start_dim` or `end_dim` is not in range of [-x.dim, x.dim-1].
-            are used for the args and the input is a 0-dimensional or 1-dimensional Tensor.
+        ValueError: If `start_dim` or `end_dim` is not in range of [-x.dim, x.dim-1].
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -1131,68 +1130,6 @@ class Unfold(Cell):
         return result
 
 
-class UnfoldExt(Cell):
-    r"""
-    Extracts sliding local blocks from a batched input tensor.
-
-    For details, please refer to :func:`mindspore.mint.nn.functional.unfold`.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, nn
-        >>> input = Tensor(np.random.rand(4, 4, 32, 32), mindspore.float64)
-        >>> unfold = nn.UnfoldExt(kernel_size=3, dilation=1, stride=1)
-        >>> output = unfold(input)
-        >>> print(output.shape)
-        (4, 36, 900)
-    """
-    def __init__(self, kernel_size, dilation=1, padding=0, stride=1):
-        super(UnfoldExt, self).__init__()
-        self.kernel_size = kernel_size
-        self.dilation = dilation
-        self.padding = padding
-        self.stride = stride
-
-    def construct(self, input):
-        return unfold_ext(input, self.kernel_size, self.dilation, self.padding, self.stride)
-
-
-class Fold(Cell):
-    r"""
-    Combines an array of sliding local blocks into a large containing tensor.
-
-    For details, please refer to :func:`mindspore.mint.nn.functional.fold`.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, nn
-        >>> from mindspore import dtype as mstype
-        >>> fold = nn.Fold([8, 8], [2, 2], [2, 2], [2, 2], [2, 2])
-        >>> input = Tensor(input_data=np.random.rand(16, 64, 25), dtype=mstype.float32)
-        >>> output = fold(input)
-        >>> print(output.shape)
-        (16, 16, 8, 8)
-    """
-    def __init__(self, output_size, kernel_size, dilation=1, padding=0, stride=1):
-        super(Fold, self).__init__()
-        self.output_size = output_size
-        self.kernel_size = kernel_size
-        self.dilation = dilation
-        self.padding = padding
-        self.stride = stride
-
-    def construct(self, input):
-        return fold_ext(input, self.output_size, self.kernel_size,
-                        self.dilation, self.padding, self.stride)
-
-
 @_primexpr
 def tril(x_shape, x_dtype, k):
     Validator.check_int(len(x_shape), 1, Validator.GE, "x rank", "tril")
mindspore/nn/layer/conv.py
CHANGED
@@ -16,7 +16,6 @@
 from __future__ import absolute_import
 
 import math
-import numpy as np
 
 from mindspore import context
 from mindspore.ops import operations as P
@@ -542,14 +541,10 @@ class Conv1d(_Conv):
         stride = (1, stride)
         dilation = (1, dilation)
         get_shape = P.Shape()
-        get_dtype = P.DType()
         if isinstance(weight_init, Tensor):
             weight_init_shape = get_shape(weight_init)
             Validator.check_equal_int(len(weight_init_shape), 3, 'weight_init_shape', self.cls_name)
-
-            weight_init_value = weight_init.asnumpy()
-            weight_init_value = np.expand_dims(weight_init_value, 2)
-            weight_init = Tensor(weight_init_value, weight_init_dtype)
+            weight_init = weight_init.expand_dims(2)
 
         super(Conv1d, self).__init__(
             in_channels,
@@ -708,7 +703,7 @@ class Conv3d(_Conv):
 
     Inputs:
         - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`.
-          Currently
+          Currently input data type only support float16 and float32.
 
     Outputs:
         Tensor of shape is :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`.
@@ -908,11 +903,11 @@ class Conv3dTranspose(_Conv):
         group (int): Splits filter into groups, `in_channels` and `out_channels` must be
             divisible by `group`. Default: ``1`` .
         output_padding (Union(int, tuple[int])): The number of padding on the depth, height and width directions of
-            the output. The data type is an integer or a tuple of
-            then the
-            If `output_padding` is a tuple of
-            `output_padding[0]`, `output_padding[1]
-            The value should be greater than or equal to 0.
+            the output. The data type is an integer or a tuple of six integers. If `output_padding` is an integer,
+            then the head, tail, top, bottom, left, and right padding are all equal to `output_padding`.
+            If `output_padding` is a tuple of six integers, then the head, tail, top, bottom, left, and right padding
+            is equal to `output_padding[0]`, `output_padding[1]`, `output_padding[2]`, `output_padding[3]`,
+            `output_padding[4]` and `output_padding[5]` respectively. The value should be greater than or equal to 0.
             Default: ``0`` .
         has_bias (bool): Whether the Conv3dTranspose layer has a bias parameter. Default: ``False`` .
         weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of weight parameter.
@@ -1437,14 +1432,10 @@ class Conv1dTranspose(_Conv):
         stride = (1, stride)
         dilation = (1, dilation)
         get_shape = P.Shape()
-        get_dtype = P.DType()
         if isinstance(weight_init, Tensor):
             weight_init_shape = get_shape(weight_init)
             Validator.check_equal_int(len(weight_init_shape), 3, 'weight_init_shape', self.cls_name)
-
-            weight_init_value = weight_init.asnumpy()
-            weight_init_value = np.expand_dims(weight_init_value, 2)
-            weight_init = Tensor(weight_init_value, weight_init_dtype)
+            weight_init = weight_init.expand_dims(2)
         # out_channels and in_channels swap.
         # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel,
         # then Conv1dTranspose's out_channel refers to Conv2DBackpropInput's in_channel.
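For context: both Conv1d hunks replace an asnumpy round trip with Tensor.expand_dims, which inserts the dummy axis in place. Conv1d keeps a 3-D weight but computes through 2-D convolution primitives (hence the `stride = (1, stride)` lines above), so a user-supplied weight_init must become 4-D. A quick numpy illustration of the shape change:

    import numpy as np

    # A Conv1d weight (C_out, C_in, W) gains a dummy H=1 axis at position 2,
    # matching the (C_out, C_in, H, W) layout a 2-D convolution expects.
    w = np.random.rand(8, 4, 5)     # stand-in for a user's weight_init
    w4 = np.expand_dims(w, 2)       # same data, shape (8, 4, 1, 5)
    print(w.shape, '->', w4.shape)  # (8, 4, 5) -> (8, 4, 1, 5)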
mindspore/nn/layer/embedding.py
CHANGED
@@ -124,7 +124,10 @@ class Embedding(Cell):
         if isinstance(self.init_tensor, Tensor) and self.init_tensor.init is not None:
             self.init_tensor = self.init_tensor.init_data()
         init_tensor_type = self.init_tensor.dtype
-
+        if init_tensor_type == mstype.bfloat16:
+            self.init_tensor = self.init_tensor.float().asnumpy()
+        else:
+            self.init_tensor = self.init_tensor.asnumpy()
         self.init_tensor[self.padding_idx] = 0
         self.init_tensor = Tensor(self.init_tensor, init_tensor_type)
         self.embedding_table = Parameter(
mindspore/nn/layer/math.py
CHANGED
@@ -136,7 +136,7 @@ class Range(Cell):
         if delta == 0:
             raise ValueError(f"For '{self.cls_name}', the 'delta' can not be zero.")
         data = np.arange(start, limit, delta)
-        if data.dtype == np.
+        if data.dtype == np.float:
             self.ms_dtype = mstype.float32
         else:
             self.ms_dtype = mstype.int32
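For context: Range picks its MindSpore dtype from whatever dtype np.arange infers, which depends on whether any of start/limit/delta is a float. A quick illustration:

    import numpy as np

    # np.arange infers a float dtype when any bound or step is a float,
    # otherwise an integer dtype; Range maps these to float32 / int32.
    print(np.arange(0, 5, 1).dtype)    # platform int dtype -> mstype.int32
    print(np.arange(0, 5, 0.5).dtype)  # float64 -> mstype.float32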
mindspore/nn/layer/normalization.py
CHANGED
@@ -1152,7 +1152,7 @@ class GroupNorm(Cell):
 
     def _cal_output(self, x):
         """calculate groupnorm output"""
-        return group_norm(x, self.num_groups, self.gamma
+        return group_norm(x, self.num_groups, self.gamma, self.beta, self.eps)
 
     @staticmethod
     @_primexpr
mindspore/nn/layer/pooling.py
CHANGED
@@ -27,7 +27,6 @@ from mindspore.common import dtype as mstype
 from mindspore.ops.operations.nn_ops import AdaptiveMaxPool2D
 from mindspore.ops.operations.nn_ops import AdaptiveMaxPool3D, AdaptiveAvgPool3D
 from mindspore.nn.cell import Cell
-from mindspore._c_expression import MSContext
 
 __all__ = ['AvgPool3d', 'MaxPool3d', 'AvgPool2d', 'MaxPool2d', 'AvgPool1d', 'MaxPool1d', 'FractionalMaxPool2d',
            'FractionalMaxPool3d', 'AdaptiveAvgPool1d', 'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d',
@@ -1015,12 +1014,8 @@ class AvgPool2d(_PoolNd):
                  data_format="NCHW"):
         """Initialize AvgPool2d."""
         super(AvgPool2d, self).__init__(kernel_size, stride, pad_mode, data_format)
-        self.ascend_910bc_target = (MSContext.get_instance().get_ascend_soc_version() in ['ascend910b', 'ascend910c'])
         if pad_mode.upper() == 'PAD' or padding != 0 or ceil_mode or not count_include_pad \
                 or divisor_override is not None:
-            if self.ascend_910bc_target:
-                raise ValueError(f"For '{self.cls_name}, the pad_mod 'PAD' is not support in 910B now, "
-                                 f"it will be supported in the future.")
             if self.format == "NHWC":
                 raise ValueError(f"For '{self.cls_name}, the 'NHWC' format are not support when 'pad_mode' is 'pad' or "
                                  f"'padding' is not 0 or 'ceil_mode' is not False or 'count_include_pad' is not True"
mindspore/nn/layer/rnn_cells.py
CHANGED
@@ -178,7 +178,7 @@ class RNNCell(RNNCellBase):
     Args:
         input_size (int): Number of features of input.
         hidden_size (int): Number of features of hidden layer.
-        has_bias (bool): Whether the cell has bias :math:`
+        has_bias (bool): Whether the cell has bias :math:`b_ih` and :math:`b_hh`. Default: ``True`` .
         nonlinearity (str): The non-linearity to use. Can be either ``"tanh"`` or ``"relu"`` .
             Default: ``"tanh"`` .
         dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
@@ -263,7 +263,7 @@ class LSTMCell(RNNCellBase):
     Args:
         input_size (int): Number of features of input.
         hidden_size (int): Number of features of hidden layer.
-        has_bias (bool): Whether the cell has bias `
+        has_bias (bool): Whether the cell has bias `b_ih` and `b_hh`. Default: ``True`` .
         dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
 
     Inputs: