mindspore-2.6.0rc1-cp39-cp39-win_amd64.whl → mindspore-2.7.0rc1-cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +1 -1
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +40 -9
- mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
- mindspore/_extends/optimize/cell_utils.py +96 -0
- mindspore/_extends/parse/__init__.py +2 -2
- mindspore/_extends/parse/compile_config.py +44 -22
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -1
- mindspore/_extends/parse/parser.py +37 -62
- mindspore/_extends/parse/resources.py +39 -0
- mindspore/_extends/parse/standard_method.py +43 -13
- mindspore/_extends/parse/trope.py +8 -1
- mindspore/_extends/pijit/__init__.py +1 -2
- mindspore/amp.py +4 -4
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +4 -4
- mindspore/common/__init__.py +27 -2
- mindspore/common/_grad_function.py +2 -1
- mindspore/common/_pijit_context.py +28 -7
- mindspore/common/_stub_tensor.py +1 -209
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +77 -16
- mindspore/common/api.py +238 -113
- mindspore/common/dtype.py +21 -11
- mindspore/common/dump.py +10 -15
- mindspore/common/generator.py +5 -3
- mindspore/common/hook_handle.py +11 -2
- mindspore/common/jit_config.py +1 -1
- mindspore/common/jit_trace.py +84 -105
- mindspore/common/parameter.py +26 -12
- mindspore/common/recompute.py +3 -3
- mindspore/common/sparse_tensor.py +0 -3
- mindspore/common/symbol.py +0 -1
- mindspore/common/tensor.py +81 -81
- mindspore/communication/_comm_helper.py +46 -4
- mindspore/communication/management.py +79 -7
- mindspore/context.py +58 -40
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/datasets.py +20 -7
- mindspore/dataset/engine/datasets_user_defined.py +33 -3
- mindspore/dataset/engine/iterators.py +2 -2
- mindspore/dataset/engine/obs/config_loader.py +2 -2
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
- mindspore/dataset/transforms/py_transforms.py +7 -3
- mindspore/dataset/transforms/transforms.py +7 -3
- mindspore/dataset/vision/validators.py +1 -0
- mindspore/device_context/ascend/device.py +1 -1
- mindspore/device_context/gpu/__init__.py +2 -2
- mindspore/device_context/gpu/device.py +1 -1
- mindspore/device_context/gpu/op_precision.py +4 -2
- mindspore/device_context/gpu/op_tuning.py +6 -3
- mindspore/device_manager.py +16 -9
- mindspore/dnnl.dll +0 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -7
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/optim/adadelta.py +13 -20
- mindspore/experimental/optim/adagrad.py +15 -22
- mindspore/experimental/optim/adam.py +17 -24
- mindspore/experimental/optim/adamax.py +14 -22
- mindspore/experimental/optim/adamw.py +28 -34
- mindspore/experimental/optim/asgd.py +15 -25
- mindspore/experimental/optim/lr_scheduler.py +27 -45
- mindspore/experimental/optim/nadam.py +14 -24
- mindspore/experimental/optim/optimizer.py +13 -23
- mindspore/experimental/optim/radam.py +18 -24
- mindspore/experimental/optim/rmsprop.py +14 -25
- mindspore/experimental/optim/rprop.py +15 -26
- mindspore/experimental/optim/sgd.py +9 -19
- mindspore/hal/__init__.py +4 -4
- mindspore/hal/contiguous_tensors_handle.py +2 -2
- mindspore/hal/memory.py +27 -7
- mindspore/include/api/cell.h +37 -1
- mindspore/include/api/delegate.h +10 -0
- mindspore/include/api/model.h +3 -0
- mindspore/include/api/types.h +2 -2
- mindspore/include/c_api/model_c.h +0 -58
- mindspore/include/c_api/tensor_c.h +0 -26
- mindspore/include/dataset/vision_ascend.h +1 -1
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/cifar10.py +60 -11
- mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mindspore_ops_host.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +6 -46
- mindspore/mint/distributed/__init__.py +1 -0
- mindspore/mint/distributed/distributed.py +212 -9
- mindspore/mint/nn/__init__.py +1 -1
- mindspore/mint/nn/functional.py +53 -6
- mindspore/mint/nn/layer/_functions.py +164 -294
- mindspore/mint/nn/layer/activation.py +8 -6
- mindspore/mint/nn/layer/conv.py +137 -101
- mindspore/mint/nn/layer/normalization.py +8 -22
- mindspore/mint/optim/adam.py +19 -18
- mindspore/mint/optim/adamw.py +14 -8
- mindspore/mint/optim/sgd.py +5 -5
- mindspore/nn/cell.py +328 -502
- mindspore/nn/grad/cell_grad.py +11 -12
- mindspore/nn/layer/activation.py +32 -34
- mindspore/nn/layer/basic.py +67 -64
- mindspore/nn/layer/channel_shuffle.py +4 -4
- mindspore/nn/layer/combined.py +4 -2
- mindspore/nn/layer/conv.py +117 -110
- mindspore/nn/layer/dense.py +9 -7
- mindspore/nn/layer/embedding.py +50 -52
- mindspore/nn/layer/image.py +37 -39
- mindspore/nn/layer/math.py +111 -112
- mindspore/nn/layer/normalization.py +56 -44
- mindspore/nn/layer/pooling.py +58 -63
- mindspore/nn/layer/rnn_cells.py +33 -33
- mindspore/nn/layer/rnns.py +56 -56
- mindspore/nn/layer/thor_layer.py +74 -73
- mindspore/nn/layer/transformer.py +11 -1
- mindspore/nn/learning_rate_schedule.py +20 -20
- mindspore/nn/loss/loss.py +79 -81
- mindspore/nn/optim/adam.py +3 -3
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +2 -0
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -1
- mindspore/nn/probability/distribution/poisson.py +2 -1
- mindspore/nn/sparse/sparse.py +3 -3
- mindspore/nn/wrap/cell_wrapper.py +34 -37
- mindspore/nn/wrap/grad_reducer.py +37 -37
- mindspore/nn/wrap/loss_scale.py +72 -74
- mindspore/numpy/array_creations.py +5 -5
- mindspore/numpy/fft.py +1 -1
- mindspore/numpy/math_ops.py +5 -5
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
- mindspore/ops/_vmap/vmap_array_ops.py +31 -13
- mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +42 -11
- mindspore/ops/auto_generate/gen_extend_func.py +23 -141
- mindspore/ops/auto_generate/gen_ops_def.py +727 -321
- mindspore/ops/auto_generate/gen_ops_prim.py +1721 -984
- mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
- mindspore/ops/composite/__init__.py +10 -0
- mindspore/ops/composite/base.py +8 -4
- mindspore/ops/composite/multitype_ops/__init__.py +12 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +133 -109
- mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
- mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
- mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
- mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
- mindspore/ops/function/__init__.py +3 -1
- mindspore/ops/function/_add_attr_func.py +11 -6
- mindspore/ops/function/array_func.py +9 -96
- mindspore/ops/function/debug_func.py +4 -3
- mindspore/ops/function/grad/grad_func.py +1 -1
- mindspore/ops/function/math_func.py +33 -540
- mindspore/ops/function/nn_func.py +28 -74
- mindspore/ops/function/other_func.py +4 -1
- mindspore/ops/function/random_func.py +44 -5
- mindspore/ops/function/vmap_func.py +2 -1
- mindspore/ops/functional.py +2 -3
- mindspore/ops/functional_overload.py +571 -6
- mindspore/ops/op_info_register.py +21 -0
- mindspore/ops/operations/__init__.py +16 -11
- mindspore/ops/operations/_custom_ops_utils.py +689 -34
- mindspore/ops/operations/_inner_ops.py +3 -6
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +2 -2
- mindspore/ops/operations/comm_ops.py +185 -26
- mindspore/ops/operations/custom_ops.py +294 -174
- mindspore/ops/operations/debug_ops.py +59 -4
- mindspore/ops/operations/image_ops.py +13 -13
- mindspore/ops/operations/manually_defined/ops_def.py +15 -16
- mindspore/ops/operations/math_ops.py +3 -4
- mindspore/ops/operations/nn_ops.py +7 -39
- mindspore/ops/primitive.py +6 -10
- mindspore/ops/tensor_method.py +47 -8
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
- mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
- mindspore/ops_generate/api/functions_cc_generator.py +58 -10
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
- mindspore/ops_generate/common/base_generator.py +14 -0
- mindspore/ops_generate/common/gen_constants.py +8 -3
- mindspore/ops_generate/common/gen_utils.py +0 -19
- mindspore/ops_generate/common/op_proto.py +11 -4
- mindspore/ops_generate/common/template.py +88 -11
- mindspore/ops_generate/gen_ops.py +1 -1
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -0
- mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
- mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
- mindspore/parallel/_auto_parallel_context.py +11 -8
- mindspore/parallel/_cell_wrapper.py +113 -45
- mindspore/parallel/_parallel_serialization.py +1 -1
- mindspore/parallel/_ps_context.py +4 -6
- mindspore/parallel/_tensor.py +167 -12
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/transformer.py +13 -8
- mindspore/parallel/auto_parallel.py +14 -7
- mindspore/parallel/checkpoint_convert.py +3 -3
- mindspore/parallel/checkpoint_transform.py +11 -7
- mindspore/parallel/cluster/process_entity/_api.py +84 -48
- mindspore/parallel/cluster/process_entity/_utils.py +95 -7
- mindspore/parallel/cluster/run.py +43 -4
- mindspore/parallel/function/__init__.py +8 -1
- mindspore/parallel/function/reshard_func.py +6 -7
- mindspore/parallel/nn/__init__.py +15 -2
- mindspore/parallel/nn/parallel_cell_wrapper.py +9 -10
- mindspore/parallel/nn/parallel_grad_reducer.py +7 -6
- mindspore/parallel/shard.py +3 -4
- mindspore/parallel/transform_safetensors.py +463 -174
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +12 -6
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
- mindspore/profiler/analysis/task_manager.py +1 -1
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +42 -22
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
- mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
- mindspore/profiler/common/constant.py +16 -0
- mindspore/profiler/common/profiler_context.py +25 -27
- mindspore/profiler/common/profiler_info.py +0 -16
- mindspore/profiler/common/profiler_op_analyse.py +235 -0
- mindspore/profiler/common/profiler_output_path.py +23 -8
- mindspore/profiler/common/profiler_parameters.py +128 -35
- mindspore/profiler/dynamic_profile/__init__.py +0 -0
- mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
- mindspore/profiler/dynamic_profiler.py +305 -314
- mindspore/profiler/envprofiler.py +12 -7
- mindspore/profiler/experimental_config.py +96 -6
- mindspore/profiler/mstx.py +33 -12
- mindspore/profiler/platform/__init__.py +2 -3
- mindspore/profiler/platform/npu_profiler.py +29 -19
- mindspore/profiler/profiler.py +35 -19
- mindspore/profiler/profiler_action_controller.py +64 -76
- mindspore/profiler/schedule.py +10 -4
- mindspore/rewrite/common/config.py +1 -0
- mindspore/rewrite/common/namer.py +1 -0
- mindspore/rewrite/common/namespace.py +1 -0
- mindspore/rewrite/node/node.py +31 -11
- mindspore/rewrite/parsers/assign_parser.py +1 -1
- mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +7 -10
- mindspore/runtime/__init__.py +5 -5
- mindspore/runtime/event.py +10 -4
- mindspore/runtime/executor.py +60 -45
- mindspore/runtime/memory.py +30 -32
- mindspore/runtime/thread_bind_core.py +298 -164
- mindspore/safeguard/rewrite_obfuscation.py +12 -13
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/_utils.py +14 -4
- mindspore/train/amp.py +43 -20
- mindspore/train/callback/__init__.py +5 -5
- mindspore/train/callback/_checkpoint.py +3 -6
- mindspore/train/callback/_flops_collector.py +1 -1
- mindspore/train/callback/_landscape.py +0 -1
- mindspore/train/callback/_train_fault_tolerance.py +97 -16
- mindspore/train/data_sink.py +11 -2
- mindspore/train/dataset_helper.py +9 -0
- mindspore/train/model.py +135 -55
- mindspore/train/serialization.py +133 -111
- mindspore/train/summary/summary_record.py +13 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +0 -6
- mindspore/utils/runtime_execution_order_check.py +163 -77
- mindspore/utils/sdc_detect.py +68 -0
- mindspore/utils/utils.py +6 -9
- mindspore/version.py +1 -1
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +5 -4
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +333 -371
- mindspore/_deprecated/jit.py +0 -198
- mindspore/experimental/es/__init__.py +0 -22
- mindspore/experimental/es/embedding_service.py +0 -891
- mindspore/experimental/es/embedding_service_layer.py +0 -581
- mindspore/profiler/parser/__init__.py +0 -14
- mindspore/profiler/parser/aicpu_data_parser.py +0 -272
- mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
- mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
- mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
- mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
- mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
- mindspore/profiler/parser/ascend_flops_generator.py +0 -116
- mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
- mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
- mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
- mindspore/profiler/parser/ascend_op_generator.py +0 -334
- mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
- mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
- mindspore/profiler/parser/base_timeline_generator.py +0 -483
- mindspore/profiler/parser/container.py +0 -229
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
- mindspore/profiler/parser/flops_parser.py +0 -531
- mindspore/profiler/parser/framework_enum.py +0 -111
- mindspore/profiler/parser/framework_parser.py +0 -464
- mindspore/profiler/parser/framework_struct.py +0 -61
- mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
- mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
- mindspore/profiler/parser/hccl_parser.py +0 -573
- mindspore/profiler/parser/hwts_log_parser.py +0 -122
- mindspore/profiler/parser/integrator.py +0 -526
- mindspore/profiler/parser/memory_usage_parser.py +0 -277
- mindspore/profiler/parser/minddata_analyzer.py +0 -800
- mindspore/profiler/parser/minddata_parser.py +0 -186
- mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
- mindspore/profiler/parser/op_intermediate_parser.py +0 -149
- mindspore/profiler/parser/optime_parser.py +0 -250
- mindspore/profiler/parser/profiler_info.py +0 -213
- mindspore/profiler/parser/step_trace_parser.py +0 -666
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
mindspore/common/dtype.py
CHANGED
@@ -23,6 +23,7 @@ import numpy as np
 from mindspore._c_expression import typing
 from mindspore._c_expression.typing import Type
 from mindspore._c_expression.np_dtypes import np_version_valid
+
 if np_version_valid(False):
     from mindspore._c_expression.np_dtypes import bfloat16 as np_bfloat16
 
@@ -46,7 +47,9 @@ __dtype__ = [
     "TensorType", "_null",
     "Type", "Int",
     "complex64", "complex128",
-    "bfloat16", "qint4x2"
+    "bfloat16", "qint4x2",
+    "float8_e4m3fn", "float8_e5m2",
+    "hifloat8"
 ]
 
 __method__ = [
@@ -86,6 +89,9 @@ float32 = typing.kFloat32
 single = float32
 float64 = typing.kFloat64
 double = float64
+float8_e4m3fn = typing.kFloat8E4M3FN
+float8_e5m2 = typing.kFloat8E5M2
+hifloat8 = typing.kHiFloat8
 bfloat16 = typing.kBFloat16
 complex64 = typing.kComplex64
 complex128 = typing.kComplex128
@@ -145,17 +151,19 @@ number_type = (int8,
                bfloat16,
                complex64,
                complex128,
-               qint4x2,)
+               qint4x2,
+               float8_e4m3fn,
+               float8_e5m2,
+               hifloat8)
 
 int_type = (int8, int16, int32, int64,)
 uint_type = (uint8, uint16, uint32, uint64,)
-float_type = (float16, float32, float64, bfloat16,)
-signed_type = (int8, byte, int16, short, int32, intc, int64,
-               intp, float16, half, float32, single, float64,
-               double, bfloat16, complex64, complex128)
+float_type = (float16, float32, float64, bfloat16, float8_e4m3fn, float8_e5m2, hifloat8)
+signed_type = (int8, byte, int16, short, int32, intc, int64, intp, float16, half, float32, single, float64, double,
+               bfloat16, complex64, complex128, float8_e4m3fn, float8_e5m2, hifloat8)
 complex_type = (complex64, complex128,)
-all_types = (bool_, int8, uint8, int16, int32, int64, float16, float32, float64, bfloat16, complex64, complex128 [...]
-             [...]
+all_types = (bool_, int8, uint8, int16, int32, int64, float16, float32, float64, bfloat16, complex64, complex128,
+             float8_e4m3fn, float8_e5m2, hifloat8)
 
 _simple_types = {
     list: list_,
@@ -281,8 +289,11 @@ def dtype_to_nptype(type_):
     }
     if type_ == bfloat16:
         if not np_version_valid(True):
-            raise TypeError(
-                [...]
+            raise TypeError(
+                "The Numpy bfloat16 data type is not supported now, please ensure that the current "
+                "Numpy version is not less than the version when the mindspore is compiled, "
+                "and the major versions are same."
+            )
         return np_bfloat16
     return _dtype_nptype_dict[type_]
 
@@ -335,7 +346,6 @@ def _issubclass_(type_, dtype):
     return typing.is_subclass(type_, dtype)
 
 
-
 def type_size_in_bytes(dtype):
     """
     Return type size in bytes.
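The float8/HiFloat8 additions above extend every dtype tuple that downstream validation code checks against. A minimal sketch of what that enables, assuming the new aliases ship in 2.7.0rc1 exactly as added in this diff (actual operator and backend support for these narrow float formats may be more limited):

    # Sketch only: aliases as introduced in the hunks above.
    from mindspore.common import dtype as mstype

    print(mstype.float8_e4m3fn)                       # the new Type object
    print(mstype.float8_e5m2 in mstype.float_type)    # True after this change
    print(mstype.hifloat8 in mstype.all_types)        # True after this change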
mindspore/common/dump.py
CHANGED
@@ -25,15 +25,19 @@ def set_dump(target, enabled=True):
     Enable or disable dump for the `target` and its contents.
 
     `target` should be an instance of :class:`mindspore.nn.Cell` or :class:`mindspore.ops.Primitive` .
-    Please note that this API takes effect only when [...]
-    field in [...]
-    [...]
+    Please note that this API takes effect only when the Dump function is enabled, and the `dump_mode`
+    field in the Dump configuration file is set to `"2"` with the `ms_backend` compilation backend
+    (please refer to the backend parameter in
+    `jit <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.jit.html>`_).
+    See the `dump document <https://www.mindspore.cn/tutorials/en/master/debug/dump.html>`_ for details.
     The default enabled status for
     a :class:`mindspore.nn.Cell` or :class:`mindspore.ops.Primitive` is False.
 
     Note:
-        1. This API is only [...]
-        [...]
+        1. This API is only available for JIT compilation, requires 'Ascend' as the device_target and
+           `ms_backend` as the compilation backend (please refer to the backend parameter in
+           `jit <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.jit.html>`_),
+           and does not support fused operators.
         2. This API only supports being called before training starts.
           If you call this API during training, it may not be effective.
        3. After using `set_dump(Cell, True)` , operators in forward and backward
@@ -66,7 +70,6 @@ def set_dump(target, enabled=True):
         >>> import mindspore.nn as nn
         >>> from mindspore import Tensor, set_dump
         >>>
-        >>> ms.set_context(mode=ms.GRAPH_MODE)
         >>> ms.set_device(device_target="Ascend")
         >>>
         >>> class MyNet(nn.Cell):
@@ -75,6 +78,7 @@ def set_dump(target, enabled=True):
         ...         self.conv1 = nn.Conv2d(5, 6, 5, pad_mode='valid')
         ...         self.relu1 = nn.ReLU()
         ...
+        ...     @jit
         ...     def construct(self, x):
         ...         x = self.conv1(x)
         ...         x = self.relu1(x)
@@ -109,15 +113,6 @@ def set_dump(target, enabled=True):
                 "If you have Ascend device, consider set device_target to Ascend "
                 "before calling set_dump.".format(current_target))
 
-    current_mode = context.get_context("mode")
-    if current_mode != context.GRAPH_MODE:
-        # We will not return here in case user changed mode later.
-        warn(
-            "Current mode is PYNATIVE_MODE, which is not supported by set_dump. "
-            "Only GRAPH_MODE is supported currently. "
-            "Consider set mode to GRAPH_MODE "
-            "before calling set_dump.")
-
     # The actual set dump logic.
     if isinstance(target, nn.Cell):
         target.add_flags(dump=enabled)
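The docstring rewrite shifts set_dump's precondition from the global GRAPH_MODE context (whose runtime warning is deleted in the last hunk) to per-function JIT compilation with the ms_backend backend. A hedged sketch of the revised usage, following the updated docstring example:

    # Sketch based on the revised docstring: Ascend device plus @jit on
    # construct, instead of the old GRAPH_MODE context check; the Dump config
    # file must still set dump_mode to "2".
    import mindspore as ms
    import mindspore.nn as nn
    from mindspore import set_dump, jit

    ms.set_device(device_target="Ascend")

    class MyNet(nn.Cell):
        def __init__(self):
            super().__init__()
            self.conv1 = nn.Conv2d(5, 6, 5, pad_mode='valid')
            self.relu1 = nn.ReLU()

        @jit
        def construct(self, x):
            return self.relu1(self.conv1(x))

    net = MyNet()
    set_dump(net.conv1)   # dump only the conv1 subtree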
mindspore/common/generator.py
CHANGED
@@ -56,6 +56,9 @@ class Generator:
     A generator that manages the state of random numbers and provides seed and offset for random functions.
     When the seed and offset are fixed, the random function generates the same random sequence.
 
+    .. note::
+        Graph mode does not support the use of multiple generators at the same time for now.
+
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
 
@@ -74,10 +77,9 @@ class Generator:
     """
 
     def __init__(self):
-        self._seed = Parameter(Tensor(0, mstype.int64),
-                               name="seed", requires_grad=False)
+        self._seed = Parameter(Tensor(0, mstype.int64), requires_grad=False)
         self._offset = Parameter(
-            Tensor(0, mstype.int64), name="offset", requires_grad=False)
+            Tensor(0, mstype.int64), requires_grad=False)
 
         self._generator = GeneratorOp().set_device("CPU")
         self._generator.add_prim_attr("manual_seed", False)
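For context, a short usage sketch of the seed/offset state this constructor manages. It assumes the public mindspore.Generator API (manual_seed/get_state/set_state) and that mint.rand accepts a generator argument, as in recent releases; per the new note, use a single generator under graph mode:

    import mindspore as ms
    from mindspore import mint

    gen = ms.Generator()
    gen.manual_seed(5)                    # fixes the seed Parameter built above
    state = gen.get_state()               # snapshot of (seed, offset)
    a = mint.rand(2, 2, generator=gen)
    gen.set_state(state)                  # rewind the offset
    b = mint.rand(2, 2, generator=gen)    # same values as `a`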
mindspore/common/hook_handle.py
CHANGED
@@ -81,19 +81,23 @@ class HookHandle:
     It is only supported in pynative mode and works when registering or removing hook function for Cell object.
 
     Args:
-        hook_dict (Dict): The hook object with hook function registered on. Default value: None.
+        hook_dict (Dict, optional): The hook object with hook function registered on. Default value: ``None`` .
+        extra_dict (Dict, optional): The extra dict. Default value: ``None`` .
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
     """
     unique_id = 0
 
-    def __init__(self, hook_dict=None):
+    def __init__(self, hook_dict=None, *, extra_dict=None):
         self.hook_dict_ref = None
+        self.extra_dict_ref = None
         if hook_dict is not None:
             self.hook_dict_ref = weakref.ref(hook_dict)
         self.handle_id = HookHandle.unique_id
         HookHandle.unique_id += 1
+        if extra_dict is not None:
+            self.extra_dict_ref = weakref.ref(extra_dict)
 
     def remove(self):
         """
@@ -145,3 +149,8 @@ class HookHandle:
         hook_dict = self.hook_dict_ref()
         if hook_dict is not None and self.handle_id in hook_dict:
             del hook_dict[self.handle_id]
+
+        if self.extra_dict_ref is not None:
+            extra_dict = self.extra_dict_ref()
+            if extra_dict is not None and self.handle_id in extra_dict:
+                del extra_dict[self.handle_id]
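A minimal sketch of the new two-dict bookkeeping; the dicts below are hypothetical stand-ins for the Cell-internal hook dicts the handle normally weak-references. After this change, remove() clears the handle id from the optional extra_dict as well:

    from mindspore.common.hook_handle import HookHandle

    hooks, extra = {}, {}
    handle = HookHandle(hooks, extra_dict=extra)
    hooks[handle.handle_id] = lambda *args: None     # registered hook function
    extra[handle.handle_id] = "per-handle metadata"

    handle.remove()   # drops the id from both dicts, if still present
    assert handle.handle_id not in hooks and handle.handle_id not in extra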
mindspore/common/jit_config.py
CHANGED
@@ -97,7 +97,7 @@ class JitConfig:
         self.jit_config_dict["debug_level"] = debug_level
         self.jit_config_dict["infer_boost"] = infer_boost
         if "backend" not in self.jit_config_dict:
-            if jit_level [...]
+            if jit_level in ["O0", "O1"]:
                 self.jit_config_dict["backend"] = "ms_backend"
             elif jit_level == "O2":
                 self.jit_config_dict["backend"] = "GE"
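The new condition makes the default-backend rule explicit: when no backend is given, jit_level O0/O1 select ms_backend and O2 selects GE. A quick check against the public JitConfig API:

    from mindspore import JitConfig

    assert JitConfig(jit_level="O1").jit_config_dict["backend"] == "ms_backend"
    assert JitConfig(jit_level="O2").jit_config_dict["backend"] == "GE"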
mindspore/common/jit_trace.py
CHANGED
@@ -17,16 +17,15 @@
 
 import inspect
 import re
+import types
 from functools import wraps
 import mindspore as ms
 from mindspore import log as logger
 from mindspore import context
 from mindspore.common.jit_context import JitContext, set_jit_context, jit_context
 from mindspore.common.tensor import Tensor as PythonTensor
-from mindspore._checkparam import is_stub_tensor
 from mindspore._c_expression import TraceRecorder as tr
 from mindspore._c_expression import JitExecutor_
-from mindspore._c_expression import TensorNode
 from mindspore._c_expression import TensorPy as Tensor, CSRTensor, COOTensor
 from mindspore._c_expression import typing
@@ -35,7 +34,7 @@ class TraceJitContext(JitContext):
     """JIT Context for trace JIT."""
 
     def __init__(self):
-        [...]
+        super().__init__()
         self._is_nested = False
 
     def set_is_nested(self, status):
@@ -44,29 +43,34 @@ class TraceJitContext(JitContext):
     def is_nested(self):
         return self._is_nested
 
+    def args_preprocess(self, prim_name, prim_res, *args):
+        args = tuple(_convert_arg_for_operators(arg, prim_name)
+                     for arg in args)
+        file_names, linenos = _get_caller_lines()
+        return prim_res, file_names, linenos, args
+
     def run_op(self, prim, prim_res, *args):
         """Capture op"""
         logger.debug(f'prim: {prim}, args: {args}, prim_res: {prim_res}')
-        [...]
-        prim_res = _sync_stub_tensor(prim_res)
-        args = tuple(_sync_stub_tensor(arg) for arg in args)
-        args = tuple(_convert_arg_for_operators(arg, prim.name) for arg in args)
-        file_names, linenos = _get_caller_lines()
-        tr.get_instance().new_node(prim, prim_res, file_names, linenos, False, *args)
+        prim_res, file_names, linenos, args = self.args_preprocess(prim.name, prim_res, *args)
+        tr.get_instance().new_node(prim, (prim_res, file_names, linenos, False), *args)
         return prim_res
 
+    def prepare_op(self, prim_name, prim_res, *args):
+        """Prepare op"""
+        logger.debug(f'prim: {prim_name}, args: {args}, prim_res: {prim_res}')
+        return self.args_preprocess(prim_name, prim_res, *args)
+
     def run_graph(self, phase, prim_res, *args):
         """Capture func_graph generated from ast"""
         logger.debug(f'phase: {phase}, args: {args}, prim_res: {prim_res}')
-        if isinstance(prim_res, TensorNode):
-            prim_res = prim_res.get_value()
-        prim_res = _sync_stub_tensor(prim_res)
-        args = tuple(_sync_stub_tensor(arg) for arg in args)
         file_names, linenos = _get_caller_lines()
-        tr.get_instance().new_fg_node(([...]
+        tr.get_instance().new_fg_node((prim_res, file_names, linenos, phase, self._is_nested), *args)
         return prim_res
 
+    def default_output(self):
+        return PythonTensor(0)
+
 
 _compile_only = False
 _trace_jit_context = TraceJitContext()
@@ -80,20 +84,6 @@ def _set_compile_only(compile_only=True):
    _compile_only = compile_only
 
 
-def _sync_stub_tensor(stub):
-    """Synchronize stub tensor"""
-    if is_stub_tensor(stub):
-        real_tensor = stub.stub_sync()
-        logger.debug(f'Convert stub tensor, stub: [{type(stub)}] {id(stub)}/{stub}, '
-                     f'tensor: [{type(real_tensor)}] {id(real_tensor)}/{real_tensor}')
-        return real_tensor
-    if isinstance(stub, tuple):
-        return tuple(_sync_stub_tensor(item) for item in stub)
-    if isinstance(stub, list):
-        return list(_sync_stub_tensor(item) for item in stub)
-    return stub
-
-
 def convert_tensorpy(args):
     new_args = []
     for arg in args:
@@ -129,84 +119,73 @@ def nested_run(obj, cell, *args):
     if res is not tuple:
         res = (res,)
     file_names, linenos = _get_caller_lines()
-    res = _sync_stub_tensor(res)
     set_jit_context(None)
     return file_names, linenos, res
 
 
-def _jit_trace(fn):
-    """
-    [...]
-        logger.debug(f'fn: {fn}, fn_res: {fn_res}, line: {line_str}')
-        # Use fn's output to build func graph's output.
-        output = _jit_trace_end(fn_res)
-    else:
-        output = _jit_trace_end(None)  # Run with compilation.
-    logger.debug(f'output: {output}')
-    return output
-
-    jit_trace_wrap.__trace_func__ = True
-    return jit_trace_wrap
+def _jit_trace():
+    """Return the wrapped function for trace mode jit."""
+    def wrap_func(fn):
+        if hasattr(fn, "construct"):
+            if isinstance(fn, ms.nn.Cell):
+                # Bound the cell object to get the self arg.
+                return types.MethodType(_jit_trace()(fn.construct.__func__), fn)
+            if isinstance(fn, type) and issubclass(fn, ms.nn.Cell):
+                fn.construct = _jit_trace()(fn.construct)
+                return fn
+
+        if isinstance(fn, types.MethodType):
+            return types.MethodType(_jit_trace()(fn.__func__), fn.__self__)
+
+        if not isinstance(fn, types.FunctionType):
+            logger.warning(f"The fn should be function, method or cell instance/class, but got {fn}")
+            return fn
+
+        if hasattr(fn, "__wrapped_by_jit__"):
+            logger.warning(f"The fn {fn} should be wrapped by jit only once.")
+
+        @wraps(fn)
+        def jit_trace_wrap(*args, **kwargs):
+            # If a trace graph is already built, keep going without building a new trace graph.
+            if jit_context():
+                return fn(*args, **kwargs)
+            # Start trace process.
+            if kwargs:
+                bound_arguments = inspect.signature(fn).bind(*args, **kwargs)
+                bound_arguments.apply_defaults()
+                args = bound_arguments.args
+                kwargs = bound_arguments.kwargs
+            generate_name = fn.__module__
+            if args:
+                jit_args = args[1:] if hasattr(args[0], fn.__name__) else args
+                obj = args[0]
+                if hasattr(obj, fn.__name__):  # Add class name for Cell.
+                    generate_name = generate_name + "." + obj.__class__.__name__
+            else:
+                jit_args = args
+            generate_name = generate_name + "." + fn.__name__ + "#" + str(id(fn))
+            # Add create time for Cell.
+            if args and hasattr(obj, fn.__name__):
+                generate_name = generate_name + '#created_' + str(args[0].create_time)
+            line_str = fn.__code__.co_filename + ":" + str(fn.__code__.co_firstlineno)
+            generate_name = generate_name + '#[' + line_str + ']'
+
+            new_compile = _jit_trace_begin(generate_name, *jit_args)
+            if new_compile:
+                fn_res = fn(*args, **kwargs)
+                logger.debug(f'fn: {fn}, fn_res: {fn_res}, line: {line_str}')
+                # Use fn's output to build func graph's output.
+                output = _jit_trace_end(fn_res)
+            else:
+                output = _jit_trace_end(None)  # Run with compilation.
+            logger.debug(f'output: {output}')
+            return output
 
+        jit_trace_wrap.__trace_func__ = True
+        setattr(jit_trace_wrap, "__wrapped_by_jit__", True)
+        return jit_trace_wrap
+
+    return wrap_func
@@ -240,6 +219,8 @@ def _get_args_for_run(args):
             new_args.append(arg)
         elif isinstance(arg, dict) and hasattr(arg, "__ms_mutable__"):
             new_args.append(tuple(arg.values()))
+        elif isinstance(arg, (tuple, list)) and hasattr(arg, "__ms_mutable__"):
+            new_args.append(arg)
     return tuple(new_args)
@@ -284,7 +265,6 @@ def _jit_trace_begin(fn_name, *args):
     logger.debug(f'_jit_trace_begin, args: {args}')
     _trace_jit_context.set_is_nested(False)
     set_jit_context(_trace_jit_context)
-    args = tuple(_sync_stub_tensor(arg) for arg in args)
     for arg in args:
         logger.debug(f'_jit_trace_begin, arg: {arg}, {type(arg)}')
@@ -355,7 +335,6 @@ def _jit_trace_end(*output_args):
         logger.debug(f'jit trace result: {output}')
     else:
         logger.debug(f'output_args: {output_args}')
-        output_args = tuple(_sync_stub_tensor(arg) for arg in output_args)
         file_names, linenos = _get_caller_lines()
         tr.get_instance().end_graph(file_names, linenos, *output_args)
         if _compile_only:
mindspore/common/parameter.py
CHANGED
@@ -49,17 +49,37 @@ import mindspore.common._monad as monad
 __all__ = ['Parameter', 'ParameterTuple']
 
 PARAMETER_NAME_DEFAULT = "Parameter"
+_GENERATED_PARAMETER_NAME_PREFIX = PARAMETER_NAME_DEFAULT + '#'
 PARAMETER_NAME_PREFIX_MAX_LEN = 1024
 
+_PARAMETER_NAME_ID = 0
+
+
+def _generate_parameter_name():
+    global _PARAMETER_NAME_ID
+    name = _GENERATED_PARAMETER_NAME_PREFIX + str(_PARAMETER_NAME_ID)
+    _PARAMETER_NAME_ID += 1
+    return name
+
+
+def _is_parameter_generated(param_name):
+    if not param_name or not isinstance(param_name, str):
+        return False
+    return param_name.startswith(_GENERATED_PARAMETER_NAME_PREFIX)
+
+
 # Global variable for parameter unique key.
 _GLOBAL_PARAMETER_KEY = -1
 
 # Global variable to mark the hook of parameter is updated
 _parameter_hook_updated = True
+
+
 def set_parameter_hook_updated(value):
     global _parameter_hook_updated
     _parameter_hook_updated = value
 
+
 def parameter_hook_updated():
     global _parameter_hook_updated
     return _parameter_hook_updated
@@ -496,11 +516,11 @@ class Parameter(Tensor_):
         the default value `PARAMETER_NAME_DEFAULT` is used.
         """
         if name_ is None:
-            name_ = PARAMETER_NAME_DEFAULT
+            name_ = _generate_parameter_name()
         elif isinstance(name_, str):
             name_ = name_.strip()
             if name_ == '':
-                name_ = PARAMETER_NAME_DEFAULT
+                name_ = _generate_parameter_name()
             if len(name_) > PARAMETER_NAME_PREFIX_MAX_LEN:
                 raise ValueError("The length of the '{}' name should be less than {}.".
                                  format(name_, PARAMETER_NAME_PREFIX_MAX_LEN))
@@ -904,13 +924,10 @@ class Parameter(Tensor_):
         incoming_tensor_is_init = isinstance(data, Tensor) and not data.has_init
         current_tensor_is_init = isinstance(self, Tensor) and not self.has_init
         if self.dtype != data.dtype:
-            [...]
-            if isinstance(data, Tensor) and data.init is not None:
-                data.init_data()
-            data = F.cast(data, self.dtype)
+            from mindspore.ops import functional as F
+            if isinstance(data, Tensor) and data.init is not None:
+                data.init_data()
+            data = F.cast(data, self.dtype)
         if isinstance(data, Tensor) and data.has_init:
             # The parameter has been initialized, directly update by the data
             if current_tensor_is_init:
@@ -938,7 +955,6 @@ class Parameter(Tensor_):
             init_data_args += (slice_index, layout[2], layout[5])
         return init_data_args
 
-
     def init_data(self, layout=None, set_sliced=False):
         """
         Initialize the parameter's data.
@@ -1030,7 +1046,6 @@ class Parameter(Tensor_):
         """
         return Tensor_._offload(self, True)
 
-
    def _load(self):
         r"""
         Load parameter to device.
@@ -1160,6 +1175,5 @@ class ParameterTuple(tuple):
             _insert_accumu_init_info(x1.name, init_to_value(init))
         return ParameterTuple(new)
 
-
     def __parameter_tuple__(self):
         """For parse check."""
mindspore/common/recompute.py
CHANGED
@@ -25,7 +25,7 @@ from mindspore.ops.composite import GradOperation
 from mindspore.common._register_for_recompute import recompute_registry
 from mindspore.common.api import _pynative_executor, _no_grad
 from mindspore.common.generator import get_rng_state, set_rng_state
-from mindspore.train.amp import [...]
+from mindspore.train.amp import AmpDecorator
 from mindspore._c_expression.amp import get_curr_amp_strategy
 
 
@@ -104,8 +104,8 @@ class _RecomputeCell(Cell):
         set_rng_state(self.cpu_rng_state)
         _pynative_executor.set_is_run_recompute(True)
         if self.amp_strategy:
-            with [...]
-                [...]
+            with AmpDecorator(self.amp_strategy.get_amp_level(), self.amp_strategy.get_amp_dtype(),
+                              self.amp_strategy.get_white_list(), self.amp_strategy.get_black_list()):
                 grads = self.grad(self.net, self.internal_params)(*input_args, **kwargs)
         else:
             grads = self.grad(self.net, self.internal_params)(*input_args, **kwargs)
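For context, the public feature this internal change serves: a recomputed block is re-executed during the backward pass, and the amp strategy captured at forward time is now re-applied via AmpDecorator when that happens. A public-API sketch of the recompute side:

    import mindspore.nn as nn

    class Block(nn.Cell):
        def __init__(self):
            super().__init__()
            self.dense = nn.Dense(4, 4)
            self.relu = nn.ReLU()

        def construct(self, x):
            return self.relu(self.dense(x))

    block = Block()
    block.recompute()   # drop activations; re-run the block when grads are needed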
mindspore/common/sparse_tensor.py
CHANGED
@@ -98,7 +98,6 @@ class RowTensor(RowTensorInner):
 
     .. warning::
         - This is an experimental API that is subjected to change or deletion.
-        - If use PyNative mode, set "export MS_PYNATIVE_CONFIG_STATIC_SHAPE=1".
 
     Args:
         indices (Tensor): A 1-D integer Tensor of shape :math:`(d_0)` . Default: ``None``.
@@ -232,7 +231,6 @@ class COOTensor(COOTensor_):
 
     .. warning::
         - This is an experimental API that is subject to change or deletion.
-        - If use PyNative mode, set "export MS_PYNATIVE_CONFIG_STATIC_SHAPE=1".
         - Currently, duplicate coordinates in the indices will not be coalesced.
          If the indices contain out-of-bound values, the result will be undefined.
 
@@ -681,7 +679,6 @@ class CSRTensor(CSRTensor_):
 
     .. warning::
         - This is an experimental API that is subjected to change.
-        - If use PyNative mode, set "export MS_PYNATIVE_CONFIG_STATIC_SHAPE=1".
         - If the values given by `indptr` or `indices` are invalid, the results may be undefined. Invalid values include
           when the length of `values` or `indices` exceeds the range indicated by `indptr`, and when the columns
           indicated by `indices` are repeated on the same row.
mindspore/common/symbol.py
CHANGED
@@ -104,7 +104,6 @@ class Symbol:
         if not isinstance(unique, bool):
             raise TypeError(f"For 'Symbol', the argument 'unique' must be bool, but got {type(unique)}")
 
-    # pylint: disable=missing-docstring
     def to_dict(self):
         # Convert the symbolic info to dictionary.
         # This method is not necessary to show in public api document, use comment instead of docstring.