mindspore 2.4.10-cp310-cp310-win_amd64.whl → 2.5.0-cp310-cp310-win_amd64.whl
This diff shows the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +8 -3
- mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +0 -5
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/compile_config.py +64 -0
- mindspore/_extends/parse/deprecated/__init__.py +0 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +375 -0
- mindspore/_extends/parse/parser.py +23 -5
- mindspore/_extends/parse/standard_method.py +123 -27
- mindspore/_extends/pijit/pijit_func_white_list.py +1 -1
- mindspore/amp.py +7 -1
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/boost_cell_wrapper.py +136 -41
- mindspore/common/__init__.py +3 -1
- mindspore/common/_register_for_tensor.py +0 -1
- mindspore/common/_stub_tensor.py +25 -4
- mindspore/common/_tensor_cpp_method.py +17 -0
- mindspore/common/_tensor_docs.py +6132 -0
- mindspore/common/api.py +98 -21
- mindspore/common/dtype.py +34 -34
- mindspore/common/dump.py +2 -1
- mindspore/common/file_system.py +8 -3
- mindspore/common/generator.py +2 -0
- mindspore/common/hook_handle.py +3 -1
- mindspore/common/initializer.py +3 -4
- mindspore/common/lazy_inline.py +8 -2
- mindspore/common/mindir_util.py +10 -2
- mindspore/common/parameter.py +31 -15
- mindspore/common/tensor.py +713 -1337
- mindspore/communication/__init__.py +1 -1
- mindspore/communication/_comm_helper.py +5 -0
- mindspore/communication/comm_func.py +215 -173
- mindspore/communication/management.py +23 -20
- mindspore/context.py +285 -191
- mindspore/dataset/__init__.py +23 -19
- mindspore/dataset/callback/ds_callback.py +2 -1
- mindspore/dataset/core/config.py +84 -3
- mindspore/dataset/engine/cache_admin.py +3 -3
- mindspore/dataset/engine/cache_client.py +5 -4
- mindspore/dataset/engine/datasets.py +192 -149
- mindspore/dataset/engine/datasets_audio.py +14 -0
- mindspore/dataset/engine/datasets_standard_format.py +11 -11
- mindspore/dataset/engine/datasets_text.py +38 -1
- mindspore/dataset/engine/datasets_user_defined.py +100 -66
- mindspore/dataset/engine/datasets_vision.py +81 -8
- mindspore/dataset/engine/iterators.py +281 -63
- mindspore/dataset/engine/obs/util.py +8 -0
- mindspore/dataset/engine/queue.py +40 -0
- mindspore/dataset/engine/samplers.py +26 -2
- mindspore/dataset/engine/serializer_deserializer.py +1 -1
- mindspore/dataset/engine/validators.py +43 -11
- mindspore/dataset/transforms/py_transforms_util.py +17 -0
- mindspore/dataset/transforms/transforms.py +29 -12
- mindspore/dataset/vision/validators.py +1 -2
- mindspore/device_context/__init__.py +21 -0
- mindspore/device_context/ascend/__init__.py +25 -0
- mindspore/device_context/ascend/device.py +72 -0
- mindspore/device_context/ascend/op_debug.py +94 -0
- mindspore/device_context/ascend/op_precision.py +193 -0
- mindspore/device_context/ascend/op_tuning.py +127 -0
- mindspore/device_context/cpu/__init__.py +25 -0
- mindspore/device_context/cpu/device.py +62 -0
- mindspore/device_context/cpu/op_tuning.py +43 -0
- mindspore/device_context/gpu/__init__.py +21 -0
- mindspore/device_context/gpu/device.py +70 -0
- mindspore/device_context/gpu/op_precision.py +67 -0
- mindspore/device_context/gpu/op_tuning.py +175 -0
- mindspore/device_manager.py +134 -0
- mindspore/dnnl.dll +0 -0
- mindspore/experimental/llm_boost/__init__.py +1 -0
- mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
- mindspore/experimental/llm_boost/register.py +1 -0
- mindspore/experimental/optim/adadelta.py +26 -22
- mindspore/experimental/optim/adam.py +3 -0
- mindspore/experimental/optim/lr_scheduler.py +33 -24
- mindspore/experimental/optim/radam.py +33 -30
- mindspore/hal/device.py +28 -0
- mindspore/hal/event.py +17 -0
- mindspore/hal/memory.py +94 -3
- mindspore/hal/stream.py +91 -6
- mindspore/include/api/context.h +0 -1
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +12 -0
- mindspore/mindrecord/__init__.py +1 -1
- mindspore/mindrecord/config.py +17 -316
- mindspore/mindrecord/filereader.py +1 -9
- mindspore/mindrecord/filewriter.py +5 -15
- mindspore/mindrecord/mindpage.py +1 -9
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mint/__init__.py +824 -218
- mindspore/mint/distributed/__init__.py +66 -4
- mindspore/mint/distributed/distributed.py +2594 -44
- mindspore/mint/linalg/__init__.py +6 -0
- mindspore/mint/nn/__init__.py +473 -14
- mindspore/mint/nn/functional.py +486 -11
- mindspore/mint/nn/layer/__init__.py +17 -4
- mindspore/mint/nn/layer/_functions.py +330 -0
- mindspore/mint/nn/layer/activation.py +169 -1
- mindspore/mint/nn/layer/basic.py +123 -0
- mindspore/mint/nn/layer/conv.py +727 -0
- mindspore/mint/nn/layer/normalization.py +215 -19
- mindspore/mint/nn/layer/padding.py +797 -0
- mindspore/mint/nn/layer/pooling.py +170 -0
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/adam.py +223 -0
- mindspore/mint/optim/adamw.py +26 -19
- mindspore/mint/special/__init__.py +2 -1
- mindspore/multiprocessing/__init__.py +5 -0
- mindspore/nn/cell.py +126 -19
- mindspore/nn/dynamic_lr.py +2 -1
- mindspore/nn/layer/activation.py +6 -6
- mindspore/nn/layer/basic.py +35 -25
- mindspore/nn/layer/channel_shuffle.py +3 -3
- mindspore/nn/layer/embedding.py +3 -3
- mindspore/nn/layer/normalization.py +8 -7
- mindspore/nn/layer/padding.py +4 -3
- mindspore/nn/layer/pooling.py +47 -13
- mindspore/nn/layer/rnn_cells.py +1 -1
- mindspore/nn/layer/rnns.py +2 -1
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +48 -26
- mindspore/nn/learning_rate_schedule.py +5 -3
- mindspore/nn/loss/loss.py +31 -36
- mindspore/nn/optim/ada_grad.py +1 -0
- mindspore/nn/optim/adadelta.py +2 -2
- mindspore/nn/optim/adam.py +1 -1
- mindspore/nn/optim/lars.py +1 -4
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/rprop.py +2 -2
- mindspore/nn/optim/thor.py +2 -1
- mindspore/nn/utils/init.py +13 -11
- mindspore/nn/wrap/cell_wrapper.py +4 -6
- mindspore/nn/wrap/loss_scale.py +3 -4
- mindspore/numpy/array_creations.py +60 -62
- mindspore/numpy/array_ops.py +148 -143
- mindspore/numpy/logic_ops.py +41 -42
- mindspore/numpy/math_ops.py +361 -359
- mindspore/numpy/utils.py +16 -16
- mindspore/numpy/utils_const.py +4 -4
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +2 -1
- mindspore/ops/_grad_experimental/grad_comm_ops.py +94 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +6 -1
- mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
- mindspore/ops/_vmap/vmap_array_ops.py +20 -19
- mindspore/ops/_vmap/vmap_base.py +0 -2
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +19 -13
- mindspore/ops/_vmap/vmap_math_ops.py +11 -9
- mindspore/ops/_vmap/vmap_nn_ops.py +20 -34
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +149 -12
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -61
- mindspore/ops/auto_generate/gen_extend_func.py +554 -60
- mindspore/ops/auto_generate/gen_ops_def.py +1621 -115
- mindspore/ops/auto_generate/gen_ops_prim.py +8024 -3409
- mindspore/ops/auto_generate/pyboost_inner_prim.py +183 -79
- mindspore/ops/composite/base.py +1 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +229 -30
- mindspore/ops/composite/multitype_ops/pow_impl.py +0 -29
- mindspore/ops/function/__init__.py +12 -0
- mindspore/ops/function/array_func.py +561 -159
- mindspore/ops/function/clip_func.py +64 -0
- mindspore/ops/function/debug_func.py +28 -20
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +5 -4
- mindspore/ops/function/math_func.py +1659 -290
- mindspore/ops/function/nn_func.py +988 -317
- mindspore/ops/function/parameter_func.py +3 -56
- mindspore/ops/function/random_func.py +243 -33
- mindspore/ops/function/sparse_unary_func.py +1 -1
- mindspore/ops/functional.py +18 -5
- mindspore/ops/functional_overload.py +897 -0
- mindspore/ops/operations/__init__.py +3 -2
- mindspore/ops/operations/_embedding_cache_ops.py +4 -4
- mindspore/ops/operations/_grad_ops.py +2 -34
- mindspore/ops/operations/_infer_ops.py +2 -1
- mindspore/ops/operations/_inner_ops.py +38 -8
- mindspore/ops/operations/array_ops.py +45 -303
- mindspore/ops/operations/comm_ops.py +19 -16
- mindspore/ops/operations/custom_ops.py +11 -55
- mindspore/ops/operations/debug_ops.py +42 -47
- mindspore/ops/operations/inner_ops.py +6 -4
- mindspore/ops/operations/linalg_ops.py +3 -2
- mindspore/ops/operations/manually_defined/ops_def.py +185 -104
- mindspore/ops/operations/math_ops.py +11 -216
- mindspore/ops/operations/nn_ops.py +146 -308
- mindspore/ops/primitive.py +23 -21
- mindspore/ops/tensor_method.py +1669 -0
- mindspore/ops_generate/aclnn_kernel_register_auto_cc_generator.py +110 -0
- mindspore/ops_generate/add_tensor_docs_generator.py +54 -0
- mindspore/ops_generate/arg_handler.py +0 -61
- mindspore/ops_generate/auto_grad_impl_cc_generator.py +135 -0
- mindspore/ops_generate/auto_grad_reg_cc_generator.py +93 -0
- mindspore/ops_generate/base_generator.py +11 -0
- mindspore/ops_generate/cpp_create_prim_instance_helper_generator.py +108 -0
- mindspore/ops_generate/functional_map_cpp_generator.py +491 -0
- mindspore/ops_generate/functional_overload_py_generator.py +110 -0
- mindspore/ops_generate/functions_cc_generator.py +233 -0
- mindspore/ops_generate/gen_aclnn_implement.py +110 -114
- mindspore/ops_generate/gen_constants.py +157 -3
- mindspore/ops_generate/gen_ops.py +245 -990
- mindspore/ops_generate/gen_pyboost_func.py +97 -998
- mindspore/ops_generate/gen_utils.py +119 -33
- mindspore/ops_generate/lite_ops_cpp_generator.py +155 -0
- mindspore/ops_generate/op_api_proto.py +206 -0
- mindspore/ops_generate/op_def_py_generator.py +131 -0
- mindspore/ops_generate/op_prim_py_generator.py +480 -0
- mindspore/ops_generate/op_proto.py +373 -108
- mindspore/ops_generate/op_template_parser.py +436 -0
- mindspore/ops_generate/ops_def_cc_generator.py +288 -0
- mindspore/ops_generate/ops_def_h_generator.py +74 -0
- mindspore/ops_generate/ops_name_h_generator.py +68 -0
- mindspore/ops_generate/ops_primitive_h_generator.py +81 -0
- mindspore/ops_generate/pyboost_functions_cpp_generator.py +370 -0
- mindspore/ops_generate/pyboost_functions_h_generator.py +68 -0
- mindspore/ops_generate/pyboost_functions_py_generator.py +148 -0
- mindspore/ops_generate/pyboost_grad_function_cpp_generator.py +154 -0
- mindspore/ops_generate/pyboost_inner_prim_generator.py +131 -0
- mindspore/ops_generate/pyboost_native_grad_functions_generator.py +268 -0
- mindspore/ops_generate/pyboost_op_cpp_code_generator.py +851 -0
- mindspore/ops_generate/pyboost_overload_functions_cpp_generator.py +344 -0
- mindspore/ops_generate/pyboost_utils.py +92 -33
- mindspore/ops_generate/template.py +294 -44
- mindspore/ops_generate/tensor_func_reg_cpp_generator.py +422 -0
- mindspore/parallel/__init__.py +3 -3
- mindspore/parallel/_auto_parallel_context.py +24 -33
- mindspore/parallel/_parallel_serialization.py +13 -2
- mindspore/parallel/_utils.py +4 -1
- mindspore/parallel/algo_parameter_config.py +1 -1
- mindspore/parallel/checkpoint_transform.py +44 -0
- mindspore/parallel/cluster/process_entity/_api.py +131 -37
- mindspore/parallel/cluster/process_entity/_utils.py +41 -6
- mindspore/parallel/cluster/run.py +20 -3
- mindspore/parallel/parameter_broadcast.py +1 -1
- mindspore/parallel/shard.py +3 -0
- mindspore/parallel/transform_safetensors.py +119 -253
- mindspore/profiler/__init__.py +17 -4
- mindspore/profiler/analysis/__init__.py +0 -0
- mindspore/profiler/analysis/parser/__init__.py +0 -0
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +166 -0
- mindspore/profiler/analysis/parser/base_parser.py +158 -0
- mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
- mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
- mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +261 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +84 -0
- mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
- mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
- mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
- mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +260 -0
- mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
- mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
- mindspore/profiler/analysis/task_manager.py +131 -0
- mindspore/profiler/analysis/time_converter.py +84 -0
- mindspore/profiler/analysis/viewer/__init__.py +0 -0
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +333 -0
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +252 -0
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +313 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +322 -0
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +265 -0
- mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
- mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +97 -0
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
- mindspore/profiler/analysis/work_flow.py +73 -0
- mindspore/profiler/common/ascend_msprof_exporter.py +138 -0
- mindspore/profiler/common/command_executor.py +90 -0
- mindspore/profiler/common/constant.py +174 -3
- mindspore/profiler/common/file_manager.py +208 -0
- mindspore/profiler/common/log.py +130 -0
- mindspore/profiler/common/msprof_cmd_tool.py +202 -0
- mindspore/profiler/common/path_manager.py +371 -0
- mindspore/profiler/common/process_bar.py +168 -0
- mindspore/profiler/common/process_pool.py +9 -3
- mindspore/profiler/common/profiler_context.py +476 -0
- mindspore/profiler/common/profiler_info.py +304 -0
- mindspore/profiler/common/profiler_output_path.py +284 -0
- mindspore/profiler/common/profiler_parameters.py +210 -0
- mindspore/profiler/common/profiler_path_manager.py +120 -0
- mindspore/profiler/common/record_function.py +76 -0
- mindspore/profiler/common/tlv_decoder.py +76 -0
- mindspore/profiler/common/util.py +75 -2
- mindspore/profiler/dynamic_profiler.py +270 -37
- mindspore/profiler/envprofiler.py +138 -0
- mindspore/profiler/mstx.py +199 -0
- mindspore/profiler/platform/__init__.py +21 -0
- mindspore/profiler/platform/base_profiler.py +40 -0
- mindspore/profiler/platform/cpu_profiler.py +124 -0
- mindspore/profiler/platform/gpu_profiler.py +74 -0
- mindspore/profiler/platform/npu_profiler.py +309 -0
- mindspore/profiler/profiler.py +580 -93
- mindspore/profiler/profiler_action_controller.py +187 -0
- mindspore/profiler/profiler_interface.py +114 -0
- mindspore/profiler/schedule.py +208 -0
- mindspore/rewrite/api/symbol_tree.py +1 -2
- mindspore/run_check/_check_version.py +2 -6
- mindspore/runtime/__init__.py +37 -0
- mindspore/runtime/device.py +27 -0
- mindspore/runtime/event.py +209 -0
- mindspore/runtime/executor.py +148 -0
- mindspore/runtime/memory.py +392 -0
- mindspore/runtime/stream.py +460 -0
- mindspore/runtime/thread_bind_core.py +401 -0
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +2 -2
- mindspore/train/_utils.py +53 -18
- mindspore/train/amp.py +8 -4
- mindspore/train/callback/_checkpoint.py +32 -18
- mindspore/train/callback/_early_stop.py +1 -1
- mindspore/train/callback/_flops_collector.py +105 -69
- mindspore/train/callback/_history.py +1 -1
- mindspore/train/callback/_summary_collector.py +44 -6
- mindspore/train/callback/_tft_register.py +31 -10
- mindspore/train/dataset_helper.py +11 -11
- mindspore/train/metrics/precision.py +4 -5
- mindspore/train/mind_ir_pb2.py +167 -46
- mindspore/train/model.py +13 -15
- mindspore/train/serialization.py +462 -76
- mindspore/train/summary/summary_record.py +1 -2
- mindspore/train/train_thor/model_thor.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +4 -2
- mindspore/utils/dryrun.py +138 -0
- mindspore/utils/runtime_execution_order_check.py +550 -0
- mindspore/version.py +1 -1
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/METADATA +2 -3
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/RECORD +362 -238
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/entry_points.txt +1 -1
- mindspore/common/_tensor_overload.py +0 -139
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/profiler/envprofiling.py +0 -254
- mindspore/profiler/profiling.py +0 -1926
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/WHEEL +0 -0
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/top_level.txt +0 -0
mindspore/common/api.py
CHANGED

@@ -41,19 +41,19 @@ from mindspore.common.sparse_tensor import RowTensor as PythonRowTensor
 from mindspore._c_expression.amp import get_curr_amp_strategy
 from mindspore._c_expression import GraphExecutor_, Tensor, CSRTensor, RowTensor, COOTensor, \
     PyNativeExecutor_, verify_inputs_signature, init_exec_dataset, _set_dataset_mode_config, init_pipeline, \
-    _ms_memory_recycle, _bind_device_ctx
+    _ms_memory_recycle, _bind_device_ctx, StubNode
 from mindspore.parallel._ps_context import _is_role_sched
 from mindspore.parallel._utils import _check_full_batch, _get_parameter_broadcast, _is_pynative_parallel, \
     _is_in_auto_parallel_mode, _is_parallel_mode
 from mindspore import _checkparam as Validator
 from mindspore._checkparam import is_stub_tensor
 from mindspore.common._utils import is_shape_unknown
-from mindspore.common.mutable import mutable
+from mindspore.common.mutable import mutable, _check_element_type
 from mindspore.common._register_for_adapter import ms_adapter_registry
 from mindspore.common.auto_dynamic_shape import get_auto_dynamic_shape_args, update_auto_dynamic_shape_phase, \
     get_auto_dynamic_shape_args_with_check_input_signature, update_auto_dynamic_shape_phase_with_check_input_signature
 from mindspore.common._pijit_context import PIJitCaptureContext
-from mindspore.common.parameter import Parameter
+from mindspore.common.parameter import Parameter, set_parameter_hook_updated, parameter_hook_updated

 # Store ms_function class compiled pipeline cache.
 ms_compile_cache = set()

@@ -150,6 +150,8 @@ def _convert_python_data(data):
         return PythonCOOTensor(coo_tensor=data)
     if isinstance(data, RowTensor) and not isinstance(data, PythonRowTensor):
         return PythonRowTensor(row_tensor=data)
+    if isinstance(data, StubNode):
+        return ms.common._stub_tensor._convert_stub(data)
     if data.__class__ is tuple:
         # Handle namedtuple since its type is tuple.
         if hasattr(data, "_fields"):

@@ -273,7 +275,9 @@ def __get_compile_cache_dep_files(file_path, compile_cache_dep_files, pkg):
             else:
                 whole_module = module_name
                 if n.name is not None:
-
+                    if not whole_module.endswith("."):
+                        whole_module += "."
+                    whole_module += n.name
             try:
                 module_spec = importlib.util.find_spec(whole_module, pkg)
             except (ModuleNotFoundError, ValueError):

@@ -305,7 +309,22 @@ def _get_compile_cache_dep_files():
     return compile_cache_dep_files


-def
+def _contains_auto_grad_tensor(obj):
+    """Check object is or contains auto grad tensor element"""
+    if isinstance(obj, PythonTensor):
+        return obj._has_auto_grad()
+    if isinstance(obj, (tuple, list)):
+        for element in obj:
+            if _contains_auto_grad_tensor(element):
+                return True
+    if isinstance(obj, dict):
+        for key in obj:
+            if _contains_auto_grad_tensor(obj[key]):
+                return True
+    return False
+
+
+def _add_mutable_attr(args_list, compile_args, is_grad):
     """Restore the mutable attr for every arg."""
     new_compile_args = ()
     for idx, arg in enumerate(args_list):

@@ -316,7 +335,12 @@ def _restore_mutable_attr(args_list, compile_args):
            else:
                new_compile_args += (mutable(compile_args[idx], False),)
        else:
-
+            if is_grad and _contains_auto_grad_tensor(arg):
+                if not _check_element_type(arg):
+                    raise RuntimeError("Input \"%s\" contains tensor with gradient but can not mutable." % (str(arg)))
+                new_compile_args += (mutable(compile_args[idx], False),)
+            else:
+                new_compile_args += (compile_args[idx],)
    return new_compile_args


@@ -330,6 +354,7 @@ def _get_parameter_layout():

 def _handle_arg(obj, arg, compile_arg):
     """Handle arg for runtime. If need handle the arg, return True"""
+    from mindspore._extends.parse import compile_config
     if isinstance(arg, PythonTensor):
         if arg.has_init:
             arg.init_data()

@@ -342,7 +367,8 @@ def _handle_arg(obj, arg, compile_arg):
        if isinstance(arg, list) and not arg:
            return None
        return arg
-    elif context.get_context("grad_for_scalar")
+    elif (context.get_context("grad_for_scalar") or str(compile_config.GRAD_FOR_SCALAR) == '1') and \
+            isinstance(arg, (int, float)):
        return arg
    elif hasattr(obj, "enable_tuple_broaden") and obj.enable_tuple_broaden and isinstance(arg, tuple) and \
            _check_all_tensor(arg):

@@ -528,6 +554,29 @@ def _get_parameter_ids(args, kwargs):
            parameter_ids += str(id(value))
    return parameter_ids

+def _get_tensor_hook_key(tensor):
+    """Get the hook key of Tensor/Parameter"""
+    return ".".join(map(str, map(id, tensor.hooks())))
+
+
+def _get_hook_key(*args, **kwargs):
+    """Get the hook key of Tensors/Parameters"""
+    hook_key = ""
+    for idx, arg in enumerate(args):
+        if idx != 0:
+            hook_key += "."
+        # Only arg of the type Tensor or Parameter is supported now
+        if isinstance(arg, (Tensor, Parameter)):
+            hook_key += _get_tensor_hook_key(arg)
+
+    for idx, value in enumerate(kwargs.values()):
+        if idx != 0:
+            hook_key += "."
+        # Only kwarg of the type Tensor or Parameter is supported now
+        if isinstance(value, (Tensor, Parameter)):
+            hook_key += _get_tensor_hook_key(value)
+
+    return hook_key
+

class _MindsporeFunctionExecutor:
    """

@@ -581,13 +630,14 @@ class _MindsporeFunctionExecutor:
            _pynative_executor.clear_res()
            raise err

-        if context.get_context("precompile_only"):
+        if context.get_context("precompile_only") or os.getenv('MS_DEV_PRECOMPILE_ONLY') == '1':
            return None

        new_inputs = self._generate_run_args(args_list, kwargs)
-        output = self._graph_executor(tuple(new_inputs), phase)
        if context.get_context("mode") == context.PYNATIVE_MODE:
-            output = _pynative_executor.grad_jit(
+            output = _pynative_executor.grad_jit(*new_inputs)
+        else:
+            output = self._graph_executor(tuple(new_inputs), phase)

        return output

@@ -605,8 +655,10 @@ class _MindsporeFunctionExecutor:
        compile_args = get_auto_dynamic_shape_args_with_check_input_signature(compile_args, key_id,
                                                                              self.input_signature)

-        #
-
+        # Add mutable for compile_args for two scene:
+        # 1) Origin args is mutable.
+        # 2) Args contains sequence with gradient tensor.
+        compile_args = _add_mutable_attr(args, compile_args, _pynative_executor.requires_grad())
        self._compile_args = compile_args
        generate_name, echo_function_name = self._get_generate_name()
        # The full Function name

@@ -645,11 +697,14 @@ class _MindsporeFunctionExecutor:
        parameter_ids = _get_parameter_ids(args, kwargs)
        if parameter_ids != "":
            key = str(key) + '.' + parameter_ids
+
+        key = str(key) + "." + _get_hook_key(*args, **kwargs)
+
        phase = generate_name + '.' + str(key)

        update_auto_dynamic_shape_phase_with_check_input_signature(compile_args, key_id, phase, self.input_signature)

-        if phase in ms_compile_cache:
+        if phase in ms_compile_cache and not parameter_hook_updated():
            # Release resource should be released when CompileInner won't be executed, such as cur_convert_input_
            # generated in generate_arguments_key.
            self._graph_executor.clear_compile_arguments_resource()

@@ -683,6 +738,7 @@ class _MindsporeFunctionExecutor:

        if not is_compile:
            raise RuntimeError("Executor compile failed.")
+        set_parameter_hook_updated(False)
        ms_compile_cache.add(phase)

        return phase

@@ -1351,8 +1407,6 @@ class _no_grad(contextlib.ContextDecorator):
        self.prev_state = False

    def __enter__(self):
-        if context.get_context("mode") == context.GRAPH_MODE:
-            raise RuntimeError("For no_grad feature, currently only support Pynative mode, but got Graph mode.")
        self.prev_state = _pynative_executor.enable_grad()
        _pynative_executor.set_enable_grad(False)

@@ -1501,18 +1555,18 @@ class _PyNativeExecutor:
        """
        self._executor.sync()

-    def grad_jit(self,
+    def grad_jit(self, *args):
        """
        Building grad graph decorated by jit.

        Args:
-            output (tuple): The function or cell decorated by jit output object.
            args (tuple): Function or cell decorated by jit input arguments.

        Return:
-
+            output: The output object of function or cell decorated by jit.
        """
-
+        output = self._executor.grad_jit(*args)
+        return output

    def call_custom_bprop(self, obj, output, *args, **kwargs):
        """

@@ -1803,8 +1857,12 @@ class _CellGraphExecutor:
        self.enable_tuple_broaden = obj.enable_tuple_broaden
        logger.debug(f"Convert the network: {do_convert}.")
        self._graph_executor.set_enable_tuple_broaden(self.enable_tuple_broaden)
+
        key = self._graph_executor.generate_arguments_key(obj, args, kwargs, self.enable_tuple_broaden)
        obj.arguments_key = str(key)
+
+        obj.arguments_key = obj.arguments_key + "." + _get_hook_key(*args, **kwargs)
+
        # When exist parameter in the top graph inputs, need check if the parameter object has changed.
        parameter_ids = _get_parameter_ids(args, kwargs)
        if parameter_ids != "":

@@ -1814,7 +1872,7 @@ class _CellGraphExecutor:
        obj.phase_cache[raw_phase] = phase
        update_auto_dynamic_shape_phase(args, key_id, phase)
        obj.current_phase = phase
-        if phase in obj.compile_cache and self.has_compiled(phase):
+        if phase in obj.compile_cache and self.has_compiled(phase) and not parameter_hook_updated():
            logger.debug("%r graph has existed.", phase)
            # Release resource should be released when CompileInner won't be executed, such as cur_convert_input_
            # generated in generate_arguments_key.

@@ -1840,6 +1898,7 @@ class _CellGraphExecutor:
        obj.compile_cache.add(phase)
        if not result:
            raise RuntimeError("Executor compile failed.")
+        set_parameter_hook_updated(False)
        graph = self._graph_executor.get_func_graph(phase)

        if graph is None:

@@ -1875,7 +1934,7 @@ class _CellGraphExecutor:
        return self._graph_executor.get_allreduce_fusion(real_phase)

    def __call__(self, obj, *args, phase='predict'):
-        if context.get_context("precompile_only") or _is_role_sched():
+        if context.get_context("precompile_only") or os.getenv('MS_DEV_PRECOMPILE_ONLY') == '1' or _is_role_sched():
            return None
        return self.run(obj, *args, phase=phase)

@@ -2012,6 +2071,24 @@ def ms_memory_recycle():
    _ms_memory_recycle()


+def set_recursion_limit(recursion_limit=1000):
+    """
+    Specify the recursion depth limit of function call before compiling graph.
+    It needs to be call when the nested function call is too deep or the number of sub graphs is too large.
+    If recursion_limit is set larger than before, the system max stack depth should be set larger too,
+    otherwise a `core dumped` exception may be raised because of system stack overflow.
+
+    Args:
+        recursion_limit (int, optional): The recursion depth limit. Must be a positive integer. Default: ``1000`` .
+
+    Examples:
+        >>> import mindspore as ms
+        >>> ms.set_recursion_limit(10000)
+    """
+    recursion_limit = Validator.check_positive_int(recursion_limit)
+    GraphExecutor_.get_instance().set_max_call_depth(recursion_limit)
+
+
 def _generate_branch_control_input(obf_random_seed):
     """Generate append network input for dynamic obfuscation in random seed mode."""
     seed_max = 2 ** 32 - 1
mindspore/common/dtype.py
CHANGED

@@ -59,58 +59,58 @@ __all__.extend(__dtype__)
 __all__.extend(__method__)

 # type definition
-bool_ = typing.
+bool_ = typing.kBool

-qint4x2 = typing.
-int8 = typing.
+qint4x2 = typing.kInt4
+int8 = typing.kInt8
 byte = int8
-int16 = typing.
+int16 = typing.kInt16
 short = int16
-int32 = typing.
+int32 = typing.kInt32
 intc = int32
-int64 = typing.
+int64 = typing.kInt64
 intp = int64

-uint8 = typing.
+uint8 = typing.kUInt8
 ubyte = uint8
-uint16 = typing.
+uint16 = typing.kUInt16
 ushort = uint16
-uint32 = typing.
+uint32 = typing.kUInt32
 uintc = uint32
-uint64 = typing.
+uint64 = typing.kUInt64
 uintp = uint64

-float16 = typing.
+float16 = typing.kFloat16
 half = float16
-float32 = typing.
+float32 = typing.kFloat32
 single = float32
-float64 = typing.
+float64 = typing.kFloat64
 double = float64
-bfloat16 = typing.
-complex64 = typing.
-complex128 = typing.
-
-number = typing.
-int_ = typing.
-uint = typing.
-float_ = typing.
-string = typing.
-list_ = typing.
-tuple_ = typing.
-type_none = typing.
-_null = typing.
-
-tensor_type = typing.
-index_slices = typing.
-coo_tensor = typing.
-csr_tensor = typing.
+bfloat16 = typing.kBFloat16
+complex64 = typing.kComplex64
+complex128 = typing.kComplex128
+
+number = typing.kNumber
+int_ = typing.kInt
+uint = typing.kUInt
+float_ = typing.kFloat
+string = typing.kString
+list_ = typing.kList
+tuple_ = typing.kTuple
+type_none = typing.kTypeNone
+_null = typing.kTypeNull
+
+tensor_type = typing.kTensorType
+index_slices = typing.kRowTensorType
+coo_tensor = typing.kCOOTensorType
+csr_tensor = typing.kCSRTensorType
 undetermined = typing.UndeterminedType()

 function = typing.Function()
 symbolic_key = typing.SymbolicKeyType()
-env_type = typing.
-type_type = typing.
-type_refkey = typing.
+env_type = typing.kTypeEnv
+type_type = typing.kTypeType
+type_refkey = typing.kRefKeyType

 Int = typing.Int
 Float = typing.Float
mindspore/common/dump.py
CHANGED

@@ -64,7 +64,8 @@ def set_dump(target, enabled=True):
         >>> import mindspore.nn as nn
         >>> from mindspore import Tensor, set_dump
         >>>
-        >>> ms.set_context(
+        >>> ms.set_context(mode=ms.GRAPH_MODE)
+        >>> ms.set_device(device_target="Ascend")
         >>>
         >>> class MyNet(nn.Cell):
         ...     def __init__(self):
mindspore/common/file_system.py
CHANGED

@@ -13,6 +13,7 @@
 # limitations under the License.
 # ============================================================================
 """File system registration management"""
+from mindspore import log as logger


 class FileSystem:

@@ -22,7 +23,6 @@ class FileSystem:
         self.create_args = ("ab",)
         self.open = open
         self.open_args = ("rb",)
-        self.backend = "basic"


 def _register_basic_file_system(fs: FileSystem):

@@ -40,11 +40,16 @@ def _register_mindio_file_system(fs: FileSystem):
         import mindio
     except ImportError:
         return False
-
+    try:
+        ret = mindio.initialize()
+    except AttributeError as e:
+        logger.warning(f"Failed to initialize MindIO: {e}")
+        return False
+    if ret != 0:
+        logger.warning(f"Failed to initialize MindIO: ret = {ret}")
         return False
     fs.create = mindio.create_file
     fs.create_args = ()
     fs.open = mindio.open_file
     fs.open_args = ()
-    fs.backend = "mindio"
     return True
mindspore/common/generator.py
CHANGED

@@ -80,6 +80,7 @@ class Generator:
             Tensor(0, mstype.int64), name="offset", requires_grad=False)

         self._generator = GeneratorOp().set_device("CPU")
+        self._generator.add_prim_attr("manual_seed", False)
         self._to_scalar = TensorToScalar()

     def set_state(self, state):

@@ -125,6 +126,7 @@ class Generator:
             raise TypeError("Seed must be an integer.")
         seed = Tensor(seed, mstype.int64)
         self._generator(MANUAL_SEED, (self._seed, self._offset, seed))
+        self._generator.add_prim_attr("manual_seed", True)
         return self

     def initial_seed(self):
mindspore/common/hook_handle.py
CHANGED

@@ -29,8 +29,9 @@ class _TensorHookHandle:
         ``Ascend`` ``GPU`` ``CPU``
     """

-    def __init__(self):
+    def __init__(self, tensor):
         self.id = None
+        self.tensor = tensor

     def remove(self):
         """

@@ -66,6 +67,7 @@ class _TensorHookHandle:
         """
         if self.id is not None:
             Tensor_.remove_hook(self.id)
+            self.tensor._remove_hook()  # pylint:disable=protected-access


 class HookHandle:
mindspore/common/initializer.py
CHANGED

@@ -36,9 +36,7 @@ class Initializer:
     Note:
         Initializers are intended to be used for delayed initialization in parallel mode rather than Tensor
         initialization. If you have to use Initializers to create a Tensor, :func:`mindspore.Tensor.init_data` should be
-        followed in most of the cases. For more information, please refer to
-        <https://www.mindspore.cn/docs/en/master/api_python/mindspore/Tensor/mindspore.Tensor.init_data.html#
-        mindspore-tensor-init-data>`_ .
+        followed in most of the cases. For more information, please refer to :func:`mindspore.Tensor.init_data` .

     Args:
         kwargs (dict): Keyword arguments for Initializer.

@@ -841,7 +839,8 @@ def initializer(init, shape=None, dtype=mstype.float32):
         dtype (:class:`mindspore.dtype`): The type of data in initialized tensor. Default: ``mstype.float32`` .

     Returns:
-        Tensor
+        Returns a Tensor with the shape specified by the input `shape`. If `shape` is ``None``,
+        the returned Tensor will have the same shape as ``init``.

     Raises:
         TypeError: The type of the argument 'init' is not correct.
mindspore/common/lazy_inline.py
CHANGED

@@ -27,6 +27,10 @@ def lazy_inline(fn=None, attrs=None, policy=None):
     Registering the decorator of the built-in function `__init__` of a cell, the decorator
     will add the parameters of `__init__` according to the `attrs` as the attributes of this cell.

+    For a detailed description of the function, see `Using the lazy_inline decorator
+    <https://www.mindspore.cn/docs/en/master/model_train/program_form/static_graph_syntax/
+    static_graph_expert_programming.html#using-lazy-inline-decorator>`_ .
+
     .. warning::
         This feature is only supported on Ascend and is not supported on other hardwares.
         The construct parameters must be positional or key word arguments and have not default values.

@@ -47,6 +51,7 @@ def lazy_inline(fn=None, attrs=None, policy=None):
         ``Ascend``

     Examples:
+        >>> import os
         >>> import numpy as np
         >>> from mindspore import Tensor
         >>> import mindspore.nn as nn

@@ -154,8 +159,9 @@ def lazy_inline(fn=None, attrs=None, policy=None):
         ...     inp = Tensor(np.ones([1, 3, 224, 224]).astype(np.float32))
         ...     net(inp)
         ...
-        >>> context.set_context(mode=context.GRAPH_MODE
-
+        >>> context.set_context(mode=context.GRAPH_MODE)
+        >>> os.environ["MS_DEV_SAVE_GRAPHS"] = "2"
+        >>> os.environ["MS_DEV_SAVE_GRAPHS_PATH"] = os.path.realpath("./lazy")
         ...
         >>> test_compile()
     """
mindspore/common/mindir_util.py
CHANGED

@@ -26,6 +26,9 @@ def load_mindir(file_name):
     """
     load protobuf file.

+    Note:
+        The interface is deprecated from version 2.5 and will be removed in a future version.
+
     Args:
         file_name (str): File name.

@@ -42,7 +45,8 @@ def load_mindir(file_name):
         >>> import mindspore as ms
         >>> md = ms.load_mindir("test.mindir")
     """
-
+    logger.warning("The interface 'mindspore.load_mindir' is deprecated from version 2.5 "
+                   "and will be removed in a future version.")
     Validator.check_file_name_by_regular(file_name)
     file_name = os.path.realpath(file_name)
     model = mindir_model()

@@ -65,6 +69,9 @@ def save_mindir(model, file_name):
     """
     save protobuf file.

+    Note:
+        The interface is deprecated from version 2.5 and will be removed in a future version.
+
     Args:
         model (ModelProto): mindir model
         file_name (str): File name.

@@ -84,7 +91,8 @@ def save_mindir(model, file_name):
         >>> md_new = ms.load_mindir("test_new.mindir")
         >>> md_new.user_info
     """
-
+    logger.warning("The interface 'mindspore.save_mindir' is deprecated from version 2.5 "
+                   "and will be removed in a future version.")
     Validator.check_file_name_by_regular(file_name)
     file_name = os.path.realpath(file_name)
     if not file_name.endswith('.mindir'):
mindspore/common/parameter.py
CHANGED

@@ -17,12 +17,14 @@
 from __future__ import absolute_import

 from copy import copy
+
 import time
 import os
 import sys
 import math
 import numbers
 import numpy as np
+
 from mindspore import log as logger
 from mindspore.log import _LogActionOnce
 from mindspore._c_expression import ParamInfo

@@ -53,6 +55,16 @@ PARAMETER_NAME_PREFIX_MAX_LEN = 1024
 # Global variable for parameter unique key.
 _GLOBAL_PARAMETER_KEY = -1

+# Global variable to mark the hook of parameter is updated
+_parameter_hook_updated = True
+def set_parameter_hook_updated(value):
+    global _parameter_hook_updated
+    _parameter_hook_updated = value
+
+def parameter_hook_updated():
+    global _parameter_hook_updated
+    return _parameter_hook_updated
+

 def _is_in_auto_parallel_mode():
     """Get parallel mode."""

@@ -205,7 +217,7 @@ class Parameter(Tensor_):
            broadcast and gradients communication would not be applied to the `Parameter`. Default: ``False`` .
        parallel_optimizer (bool): It is used to filter the weight shard operation in `SEMI_AUTO_PARALLEL` or
            `AUTO_PARALLEL` mode. It works only when enable parallel optimizer in
-
+            :func:`mindspore.set_auto_parallel_context`. Default: ``True`` .
        storage_format (str): Only Ascend device target is supported. It is used to specify the format of the weight
            loaded to the device. By default, the format is not changed. The optional values are ``"FRACTAL_NZ"`` ,
            ``"NC1HWC0"`` , ``"FRACTAL_Z"`` , etc. Default: ``""`` .

@@ -213,7 +225,8 @@ class Parameter(Tensor_):
            stored. By default, the parameter will be stored on NPU while computing. When the device is specified as
            ``"CPU"``, the parameter will be loaded into the device when it needs to be used, and unloaded to the CPU
            after use. It takes effext only when `memory_offload` is ``"ON"``, `jit_level` is not ``"O2"`` and
-            `memory_optimize_level` is ``O0`` in
+            `memory_optimize_level` is ``O0`` in :func:`mindspore.set_context`.
+            Less device memory is needed when device is
            specified as ``"CPU"``.

    Examples:

@@ -251,7 +264,6 @@ class Parameter(Tensor_):
        # it's better to make the Initializer a kind of tensor.
        obj.init_mode = None
        obj.is_default_input_init = init_data_flag
-        obj.from_ckpt = False
        if obj.has_init:
            obj.init_mode = default_input
        else:

@@ -292,7 +304,6 @@ class Parameter(Tensor_):
        self.is_in_shard = False
        self._pipeline_stage_list = []
        self.slice_num = 1
-        self.from_ckpt = False
        if -1 in self.shape:
            raise ValueError(f"All shape elements of the Parameter must be positive. But got None.")
        if isinstance(default_input, (Tensor_, Tensor)):

@@ -456,10 +467,12 @@ class Parameter(Tensor_):

    @property
    def param_info(self):
+        Tensor_.wait_pipeline(self)
        return self._param_info

    @param_info.setter
    def param_info(self, param_info_):
+        Tensor_.wait_pipeline(self)
        param_info_.obj = self
        self._param_info = param_info_
        Tensor_.param_info.fset(self, param_info_)

@@ -681,7 +694,7 @@ class Parameter(Tensor_):
        Get the optimizer parallel status(bool) of the parameter.

        It is used to filter the weight shard operation in `AUTO_PARALLEL` and `SEMI_AUTO_PARALLEL` mode. It works only
-        when enable parallel optimizer in
+        when enable parallel optimizer in :func:`mindspore.set_auto_parallel_context`.

        Examples:
            >>> from mindspore import Tensor, Parameter

@@ -846,16 +859,10 @@ class Parameter(Tensor_):
                                f"Use .set_dtype(xxx) to change the dtype.")

    @staticmethod
-    def _set_data_check_input_valid(
-
-        if not from_ckpt and incoming_tensor_is_init and not current_tensor_is_init:
+    def _set_data_check_input_valid(current_tensor_is_init, incoming_tensor_is_init):
+        if incoming_tensor_is_init and not current_tensor_is_init:
            raise TypeError("The original tensor data is initialized, but the argument 'data' is not initialized."
                            "Please initialize 'data' before call this method.")
-        if tuple(current_shape) != tuple(data_shape):
-            # If Slice create Parameter shape can be change.
-            if not slice_shape and slice_num == 1:
-                raise ValueError(f"Can not change the shape of Parameter which has been initialized."
-                                 f" Current shape is {current_shape}, and incoming is {data_shape}.")

    @staticmethod
    def _from_tensor(tensor, *args, **kwargs):

@@ -902,8 +909,6 @@ class Parameter(Tensor_):
        # both not init.
        incoming_tensor_is_init = isinstance(data, Tensor) and not data.has_init
        current_tensor_is_init = isinstance(self, Tensor) and not self.has_init
-        Parameter._set_data_check_input_valid(self.shape, data.shape, current_tensor_is_init, incoming_tensor_is_init,
-                                              self.from_ckpt, slice_shape, self.slice_num)
        if self.dtype != data.dtype:
            if mstype.implicit_conversion_seq.get(self.dtype) < mstype.implicit_conversion_seq.get(data.dtype):
                self._raise_type_error(data.dtype)

@@ -1003,6 +1008,17 @@ class Parameter(Tensor_):
            _offload_if_config(obj)
        return obj

+    def register_hook(self, hook_fn):
+        """
+        For details, please refer to :func:`mindspore.Tensor.register_hook`.
+        """
+        handle = Tensor.register_hook(self, hook_fn)
+        set_parameter_hook_updated(True)
+        return handle
+
+    def _remove_hook(self):
+        set_parameter_hook_updated(True)
+

class ParameterTuple(tuple):
    """