mindspore 2.6.0-cp311-cp311-win_amd64.whl → 2.7.0rc1-cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +1 -1
- mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +40 -9
- mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
- mindspore/_extends/optimize/cell_utils.py +96 -0
- mindspore/_extends/parse/__init__.py +2 -2
- mindspore/_extends/parse/compile_config.py +44 -22
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -1
- mindspore/_extends/parse/parser.py +36 -61
- mindspore/_extends/parse/resources.py +39 -0
- mindspore/_extends/parse/standard_method.py +32 -13
- mindspore/_extends/parse/trope.py +8 -1
- mindspore/_extends/pijit/__init__.py +1 -2
- mindspore/amp.py +4 -4
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +4 -4
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +27 -2
- mindspore/common/_grad_function.py +2 -1
- mindspore/common/_pijit_context.py +28 -7
- mindspore/common/_stub_tensor.py +1 -209
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +76 -15
- mindspore/common/api.py +193 -112
- mindspore/common/dtype.py +21 -11
- mindspore/common/dump.py +10 -15
- mindspore/common/generator.py +2 -3
- mindspore/common/hook_handle.py +11 -2
- mindspore/common/jit_config.py +1 -1
- mindspore/common/jit_trace.py +84 -105
- mindspore/common/parameter.py +26 -12
- mindspore/common/recompute.py +3 -3
- mindspore/common/sparse_tensor.py +0 -3
- mindspore/common/symbol.py +0 -1
- mindspore/common/tensor.py +48 -83
- mindspore/communication/_comm_helper.py +46 -4
- mindspore/communication/management.py +79 -7
- mindspore/context.py +38 -23
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/datasets.py +20 -7
- mindspore/dataset/engine/datasets_user_defined.py +32 -2
- mindspore/dataset/engine/iterators.py +2 -2
- mindspore/dataset/engine/obs/config_loader.py +2 -2
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
- mindspore/dataset/transforms/py_transforms.py +7 -3
- mindspore/dataset/transforms/transforms.py +7 -3
- mindspore/dataset/vision/validators.py +1 -0
- mindspore/device_context/ascend/device.py +1 -1
- mindspore/device_context/gpu/__init__.py +2 -2
- mindspore/device_context/gpu/device.py +1 -1
- mindspore/device_context/gpu/op_precision.py +4 -2
- mindspore/device_context/gpu/op_tuning.py +6 -3
- mindspore/device_manager.py +16 -9
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -5
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/optim/adadelta.py +13 -20
- mindspore/experimental/optim/adagrad.py +15 -22
- mindspore/experimental/optim/adam.py +17 -24
- mindspore/experimental/optim/adamax.py +14 -22
- mindspore/experimental/optim/adamw.py +28 -34
- mindspore/experimental/optim/asgd.py +15 -25
- mindspore/experimental/optim/lr_scheduler.py +27 -45
- mindspore/experimental/optim/nadam.py +14 -24
- mindspore/experimental/optim/optimizer.py +13 -23
- mindspore/experimental/optim/radam.py +18 -24
- mindspore/experimental/optim/rmsprop.py +14 -25
- mindspore/experimental/optim/rprop.py +15 -26
- mindspore/experimental/optim/sgd.py +9 -19
- mindspore/hal/__init__.py +4 -4
- mindspore/hal/contiguous_tensors_handle.py +2 -2
- mindspore/hal/memory.py +1 -0
- mindspore/include/api/cell.h +37 -1
- mindspore/include/api/delegate.h +10 -0
- mindspore/include/api/model.h +3 -0
- mindspore/include/api/types.h +2 -2
- mindspore/include/c_api/model_c.h +0 -58
- mindspore/include/c_api/tensor_c.h +0 -26
- mindspore/include/dataset/vision_ascend.h +1 -1
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/cifar10.py +60 -11
- mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mindspore_ops_host.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +4 -44
- mindspore/mint/distributed/__init__.py +1 -0
- mindspore/mint/distributed/distributed.py +208 -5
- mindspore/mint/nn/__init__.py +1 -1
- mindspore/mint/nn/functional.py +53 -6
- mindspore/mint/nn/layer/_functions.py +164 -294
- mindspore/mint/nn/layer/activation.py +8 -6
- mindspore/mint/nn/layer/conv.py +122 -98
- mindspore/mint/nn/layer/normalization.py +8 -22
- mindspore/mint/optim/adam.py +19 -18
- mindspore/mint/optim/adamw.py +14 -8
- mindspore/mint/optim/sgd.py +5 -5
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/cell.py +325 -499
- mindspore/nn/grad/cell_grad.py +11 -12
- mindspore/nn/layer/activation.py +32 -34
- mindspore/nn/layer/basic.py +67 -64
- mindspore/nn/layer/channel_shuffle.py +4 -4
- mindspore/nn/layer/combined.py +4 -2
- mindspore/nn/layer/conv.py +86 -85
- mindspore/nn/layer/dense.py +9 -7
- mindspore/nn/layer/embedding.py +50 -52
- mindspore/nn/layer/image.py +37 -39
- mindspore/nn/layer/math.py +111 -112
- mindspore/nn/layer/normalization.py +56 -44
- mindspore/nn/layer/pooling.py +58 -63
- mindspore/nn/layer/rnn_cells.py +33 -33
- mindspore/nn/layer/rnns.py +56 -56
- mindspore/nn/layer/thor_layer.py +74 -73
- mindspore/nn/layer/transformer.py +11 -1
- mindspore/nn/learning_rate_schedule.py +20 -20
- mindspore/nn/loss/loss.py +79 -81
- mindspore/nn/optim/adam.py +1 -1
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -1
- mindspore/nn/probability/distribution/poisson.py +2 -1
- mindspore/nn/sparse/sparse.py +3 -3
- mindspore/nn/wrap/cell_wrapper.py +34 -37
- mindspore/nn/wrap/grad_reducer.py +37 -37
- mindspore/nn/wrap/loss_scale.py +72 -74
- mindspore/numpy/array_creations.py +5 -5
- mindspore/numpy/fft.py +1 -1
- mindspore/numpy/math_ops.py +1 -1
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
- mindspore/ops/_vmap/vmap_array_ops.py +6 -13
- mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +17 -8
- mindspore/ops/auto_generate/gen_extend_func.py +1 -51
- mindspore/ops/auto_generate/gen_ops_def.py +463 -257
- mindspore/ops/auto_generate/gen_ops_prim.py +1127 -885
- mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
- mindspore/ops/composite/__init__.py +10 -0
- mindspore/ops/composite/base.py +8 -4
- mindspore/ops/composite/multitype_ops/__init__.py +12 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +132 -108
- mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
- mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
- mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
- mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
- mindspore/ops/function/__init__.py +3 -1
- mindspore/ops/function/_add_attr_func.py +11 -6
- mindspore/ops/function/array_func.py +7 -94
- mindspore/ops/function/debug_func.py +4 -3
- mindspore/ops/function/grad/grad_func.py +1 -1
- mindspore/ops/function/math_func.py +21 -367
- mindspore/ops/function/nn_func.py +26 -41
- mindspore/ops/function/other_func.py +4 -1
- mindspore/ops/function/random_func.py +31 -4
- mindspore/ops/functional.py +0 -2
- mindspore/ops/functional_overload.py +463 -6
- mindspore/ops/op_info_register.py +21 -0
- mindspore/ops/operations/__init__.py +5 -2
- mindspore/ops/operations/_custom_ops_utils.py +675 -8
- mindspore/ops/operations/_inner_ops.py +3 -6
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/comm_ops.py +185 -26
- mindspore/ops/operations/custom_ops.py +235 -172
- mindspore/ops/operations/debug_ops.py +55 -4
- mindspore/ops/operations/image_ops.py +13 -13
- mindspore/ops/operations/manually_defined/ops_def.py +15 -16
- mindspore/ops/operations/math_ops.py +3 -4
- mindspore/ops/operations/nn_ops.py +5 -6
- mindspore/ops/primitive.py +6 -10
- mindspore/ops/tensor_method.py +36 -4
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
- mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
- mindspore/ops_generate/api/functions_cc_generator.py +58 -10
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
- mindspore/ops_generate/common/base_generator.py +14 -0
- mindspore/ops_generate/common/gen_constants.py +7 -2
- mindspore/ops_generate/common/gen_utils.py +0 -19
- mindspore/ops_generate/common/op_proto.py +11 -4
- mindspore/ops_generate/common/template.py +88 -11
- mindspore/ops_generate/gen_ops.py +1 -1
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
- mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -0
- mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
- mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
- mindspore/parallel/_auto_parallel_context.py +4 -2
- mindspore/parallel/_cell_wrapper.py +106 -40
- mindspore/parallel/_parallel_serialization.py +1 -1
- mindspore/parallel/_ps_context.py +4 -6
- mindspore/parallel/_tensor.py +167 -12
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/transformer.py +13 -8
- mindspore/parallel/auto_parallel.py +12 -5
- mindspore/parallel/checkpoint_convert.py +3 -3
- mindspore/parallel/checkpoint_transform.py +3 -1
- mindspore/parallel/cluster/process_entity/_api.py +84 -48
- mindspore/parallel/cluster/process_entity/_utils.py +95 -7
- mindspore/parallel/cluster/run.py +43 -4
- mindspore/parallel/function/__init__.py +8 -1
- mindspore/parallel/function/reshard_func.py +1 -1
- mindspore/parallel/nn/__init__.py +15 -2
- mindspore/parallel/nn/parallel_cell_wrapper.py +9 -10
- mindspore/parallel/nn/parallel_grad_reducer.py +7 -6
- mindspore/parallel/shard.py +2 -2
- mindspore/parallel/transform_safetensors.py +462 -174
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +3 -0
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
- mindspore/profiler/analysis/task_manager.py +1 -1
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +42 -22
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
- mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
- mindspore/profiler/common/constant.py +16 -0
- mindspore/profiler/common/profiler_context.py +25 -27
- mindspore/profiler/common/profiler_info.py +0 -16
- mindspore/profiler/common/profiler_op_analyse.py +235 -0
- mindspore/profiler/common/profiler_output_path.py +23 -8
- mindspore/profiler/common/profiler_parameters.py +128 -35
- mindspore/profiler/dynamic_profile/__init__.py +0 -0
- mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
- mindspore/profiler/dynamic_profiler.py +305 -314
- mindspore/profiler/envprofiler.py +12 -7
- mindspore/profiler/experimental_config.py +96 -6
- mindspore/profiler/mstx.py +33 -12
- mindspore/profiler/platform/__init__.py +2 -3
- mindspore/profiler/platform/npu_profiler.py +29 -19
- mindspore/profiler/profiler.py +35 -19
- mindspore/profiler/profiler_action_controller.py +64 -76
- mindspore/profiler/schedule.py +10 -4
- mindspore/rewrite/common/config.py +1 -0
- mindspore/rewrite/common/namer.py +1 -0
- mindspore/rewrite/common/namespace.py +1 -0
- mindspore/rewrite/node/node.py +31 -11
- mindspore/rewrite/parsers/assign_parser.py +1 -1
- mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +7 -10
- mindspore/runtime/__init__.py +5 -5
- mindspore/runtime/event.py +10 -4
- mindspore/runtime/executor.py +60 -45
- mindspore/runtime/memory.py +21 -30
- mindspore/runtime/thread_bind_core.py +298 -164
- mindspore/safeguard/rewrite_obfuscation.py +12 -13
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/_utils.py +6 -2
- mindspore/train/amp.py +43 -20
- mindspore/train/callback/__init__.py +5 -5
- mindspore/train/callback/_checkpoint.py +3 -6
- mindspore/train/callback/_flops_collector.py +1 -1
- mindspore/train/callback/_landscape.py +0 -1
- mindspore/train/callback/_train_fault_tolerance.py +71 -13
- mindspore/train/data_sink.py +11 -2
- mindspore/train/dataset_helper.py +9 -0
- mindspore/train/model.py +51 -33
- mindspore/train/serialization.py +133 -111
- mindspore/train/summary/summary_record.py +13 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +0 -6
- mindspore/utils/runtime_execution_order_check.py +162 -78
- mindspore/utils/sdc_detect.py +68 -0
- mindspore/utils/utils.py +6 -9
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +5 -4
- {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +352 -390
- mindspore/_deprecated/jit.py +0 -198
- mindspore/experimental/es/__init__.py +0 -22
- mindspore/experimental/es/embedding_service.py +0 -891
- mindspore/experimental/es/embedding_service_layer.py +0 -581
- mindspore/profiler/parser/__init__.py +0 -14
- mindspore/profiler/parser/aicpu_data_parser.py +0 -272
- mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
- mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
- mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
- mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
- mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
- mindspore/profiler/parser/ascend_flops_generator.py +0 -116
- mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
- mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
- mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
- mindspore/profiler/parser/ascend_op_generator.py +0 -334
- mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
- mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
- mindspore/profiler/parser/base_timeline_generator.py +0 -483
- mindspore/profiler/parser/container.py +0 -229
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
- mindspore/profiler/parser/flops_parser.py +0 -531
- mindspore/profiler/parser/framework_enum.py +0 -111
- mindspore/profiler/parser/framework_parser.py +0 -464
- mindspore/profiler/parser/framework_struct.py +0 -61
- mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
- mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
- mindspore/profiler/parser/hccl_parser.py +0 -573
- mindspore/profiler/parser/hwts_log_parser.py +0 -122
- mindspore/profiler/parser/integrator.py +0 -526
- mindspore/profiler/parser/memory_usage_parser.py +0 -277
- mindspore/profiler/parser/minddata_analyzer.py +0 -800
- mindspore/profiler/parser/minddata_parser.py +0 -186
- mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
- mindspore/profiler/parser/op_intermediate_parser.py +0 -149
- mindspore/profiler/parser/optime_parser.py +0 -250
- mindspore/profiler/parser/profiler_info.py +0 -213
- mindspore/profiler/parser/step_trace_parser.py +0 -666
- {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
@@ -18,6 +18,8 @@ import json
 import socket
 import ipaddress
 import mindspore.log as logger
+from mindspore.runtime.thread_bind_core import _get_physical_device_id, _get_cpu_available, \
+    _auto_generate_strategy, _equal_distribution_strategy
 
 CURRENT_IP = None
 
@@ -45,19 +47,19 @@ def _generate_cmd_args_list(cmd, cmd_args):
     return [cmd] + cmd_args
 
 
-def _generate_cmd_args_list_with_core(cmd, cmd_args,
+def _generate_cmd_args_list_with_core(cmd, cmd_args, affinity_cpu_str):
     """
     Generates arguments list for 'Popen'. It consists of a binary file name and subsequential arguments.
     """
     # Bind cpu cores to this process.
-    taskset_args = ['taskset'] + ['-c'] + [
+    taskset_args = ['taskset'] + ['-c'] + [affinity_cpu_str]
     final_cmd = []
     if cmd not in ['python', 'pytest', 'python3']:
         # If user don't set binary file name, defaulty use 'python' to launch the job.
         final_cmd = taskset_args + ['python'] + [cmd] + cmd_args
     else:
         final_cmd = taskset_args + [cmd] + cmd_args
-    logger.
+    logger.warning(f"Launch process with command: {' '.join(final_cmd)}")
     return final_cmd
 
 
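For reference, the reworked helper above splices the affinity string directly into a `taskset -c` prefix. A minimal standalone sketch of that assembly (the function name below is illustrative and the logging call is omitted):

    def cmd_with_core_sketch(cmd, cmd_args, affinity_cpu_str):
        # Same command assembly as _generate_cmd_args_list_with_core above, minus logging.
        taskset_args = ['taskset', '-c', affinity_cpu_str]
        if cmd not in ['python', 'pytest', 'python3']:
            return taskset_args + ['python'] + [cmd] + cmd_args
        return taskset_args + [cmd] + cmd_args

    print(cmd_with_core_sketch('python3', ['train.py', '--epochs', '1'], '0-7'))
    # ['taskset', '-c', '0-7', 'python3', 'train.py', '--epochs', '1']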
@@ -83,8 +85,8 @@ def _get_local_ip(ip_address):
         CURRENT_IP = s.getsockname()[0]
         s.close()
     except Exception as e:
-        raise RuntimeError(
-            "
+        raise RuntimeError("Get local ip has failed. Please verify that the accessible address has been "
+                           "specified in the '--master_address' parameter") from e
     return CURRENT_IP
 
 
@@ -124,8 +126,8 @@ def _convert_addr_to_ip(master_addr):
         logger.info(f"Convert input host name:{master_addr} to ip address:{ip_address}.")
         return ip_address
     except socket.gaierror as e:
-        raise RuntimeError(
-            "
+        raise RuntimeError("DNS resolution has failed. Please verify that the correct hostname has been "
+                           "specified in the '--master_address' parameter") from e
 
 
 def _send_scale_num(url, scale_num):
@@ -134,3 +136,89 @@ def _send_scale_num(url, scale_num):
 
     """
     return ""
+
+
+def _parse_global_device_to_cpu_map(local_rank_id, physical_device_id, device_to_cpu_map):
+    """
+    Parse the global device_to_cpu_map and return a cpu list for assigned local_rank_id.
+
+    """
+    input_device_id = int(list(device_to_cpu_map.keys())[local_rank_id].replace("device", ""))
+    if physical_device_id != input_device_id:
+        return ""
+    affinity_cpu_list = list(device_to_cpu_map.values())[local_rank_id]
+    affinity_cpu_str = ",".join(affinity_cpu_list)
+    return affinity_cpu_str
+
+
+def _generate_auto_bind_core_strategy(local_worker_num):
+    """
+    Get device to core range assigned for the all processes.
+
+    """
+    simulation_level = os.getenv("MS_SIMULATION_LEVEL", "").strip()
+
+    try:
+        available_cpus = _get_cpu_available()
+    except RuntimeError as e:
+        logger.warning(f"Failed to acquire available cpu info, error: {e} Will not launch process with taskset.")
+        return {}
+
+    if not simulation_level:
+        device_to_cpu_map = _auto_generate_strategy(local_worker_num, available_cpus)
+    else:
+        device_to_cpu_map = _equal_distribution_strategy(local_worker_num, available_cpus)
+
+    return device_to_cpu_map
+
+
+def ranges_to_str(num_list):
+    """
+    Convert a num list to a range string.
+
+    """
+    ranges = []
+    start = num_list[0]
+    for i in range(1, len(num_list)):
+        if num_list[i] != num_list[i-1] + 1:
+            ranges.append((start, num_list[i-1]))
+            start = num_list[i]
+    ranges.append((start, num_list[-1]))
+
+    parts = []
+    for start, end in ranges:
+        if start == end:
+            parts.append(str(start))
+        else:
+            parts.append(f"{start}-{end}")
+    return ",".join(parts)
+
+
+def _generate_bind_core_strategy(local_rank_id, device_to_cpu_map, arg_bind_core):
+    """
+    Get device to core range assigned for the all processes.
+
+    """
+    affinity_cpu_str = ""
+    cpu_list_for_device = []
+    simulation_level = os.getenv("MS_SIMULATION_LEVEL", "").strip()
+
+    try:
+        physical_device_id = _get_physical_device_id(local_rank_id, simulation_level)
+    except RuntimeError as e:
+        logger.warning(f"Failed to acquire device id, error: {e} Will not launch process with taskset.")
+        return None
+
+    if isinstance(arg_bind_core, dict):
+        affinity_cpu_str = _parse_global_device_to_cpu_map(local_rank_id, physical_device_id, arg_bind_core)
+        if not affinity_cpu_str:
+            logger.warning(f"Failed to find physical_device_id[{physical_device_id}] for "
+                           f"process[{local_rank_id}]. Will not launch process with taskset.")
+            return None
+    elif arg_bind_core is True:
+        cpu_list_for_device = device_to_cpu_map.get(physical_device_id, [])
+        if not cpu_list_for_device:
+            return None
+        os.environ["MSRUN_CPU_LIST"] = str(cpu_list_for_device)
+        affinity_cpu_str = ranges_to_str(cpu_list_for_device)
+    return affinity_cpu_str
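The new `ranges_to_str` helper above collapses a sorted list of CPU ids into the comma-separated range string that `taskset -c` expects. A condensed standalone restatement of that logic, with an illustrative input:

    def ranges_to_str(num_list):
        # Collapse consecutive ids into "start-end" segments; isolated ids stay as single numbers.
        ranges = []
        start = num_list[0]
        for i in range(1, len(num_list)):
            if num_list[i] != num_list[i - 1] + 1:
                ranges.append((start, num_list[i - 1]))
                start = num_list[i]
        ranges.append((start, num_list[-1]))
        return ",".join(str(s) if s == e else f"{s}-{e}" for s, e in ranges)

    print(ranges_to_str([0, 1, 2, 3, 8, 9, 10, 11, 24]))  # 0-3,8-11,24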
@@ -14,9 +14,47 @@
 # ============================================================================
 """Entrypoint of ms_run"""
 import ast
-
+import re
+import json
+from argparse import REMAINDER, ArgumentParser, ArgumentTypeError
 from .process_entity import _ProcessManager
 
+
+def parse_and_validate_bind_core(value):
+    """
+    Parse input argument of --bind_core.
+
+    """
+    if value.lower() == "true":
+        return True
+    if value.lower() == "false":
+        return False
+
+    try:
+        value_dict = json.loads(value)
+    except json.JSONDecodeError as e:
+        raise ArgumentTypeError("Failed to parse JSON into a dictionary") from e
+
+    if isinstance(value_dict, dict):
+        range_pattern = re.compile(r'^\d+-\d+$')
+        for device_id, affinity_cpu_list in value_dict.items():
+            if not re.fullmatch(r"device\d+", device_id):
+                raise ArgumentTypeError(f"Key '{device_id}' must be in format 'deviceX' (X ≥ 0).")
+            if not isinstance(affinity_cpu_list, list):
+                raise ArgumentTypeError(f"Value for '{device_id}':{affinity_cpu_list} should be a list, "
+                                        f"but got {type(affinity_cpu_list)}.")
+
+            for cpu_range in affinity_cpu_list:
+                if not isinstance(cpu_range, str):
+                    raise ArgumentTypeError(f"CPU range '{cpu_range}' in '{affinity_cpu_list}' should be a string.")
+                if not range_pattern.match(cpu_range):
+                    raise ArgumentTypeError(f"CPU range '{cpu_range}' in '{affinity_cpu_list}' should be "
+                                            "in format 'cpuidX-cpuidY'.")
+        return value_dict
+
+    raise ArgumentTypeError(f"Type of {value} should be bool or dict, but got {type(value)}.")
+
+
 def get_args():
     """
     Parses and retrieves command-line arguments.
@@ -77,9 +115,10 @@ def get_args():
     parser.add_argument(
         "--bind_core",
         default=False,
-        type=
-
-
+        type=parse_and_validate_bind_core,
+        help="specifies whether msrun should bind CPU cores to spawned processes. "
+             "If set to True, msrun will bind core based on the environment automatically, "
+             "and if passed a dict, msrun will bind core based on this dict information."
     )
     parser.add_argument(
         "--sim_level",
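Based on the validator added above, `--bind_core` accepts either a boolean or a JSON object that maps `deviceX` keys to lists of `start-end` CPU range strings. A small sketch that mirrors those checks (the function name and sample values are illustrative, not MindSpore APIs):

    import json
    import re

    def looks_like_valid_bind_core(value):
        # Mirrors the rules enforced by parse_and_validate_bind_core in the hunk above.
        if value.lower() in ("true", "false"):
            return True
        try:
            mapping = json.loads(value)
        except json.JSONDecodeError:
            return False
        if not isinstance(mapping, dict):
            return False
        range_pattern = re.compile(r"^\d+-\d+$")
        return all(
            re.fullmatch(r"device\d+", device_id)
            and isinstance(cpu_ranges, list)
            and all(isinstance(r, str) and range_pattern.match(r) for r in cpu_ranges)
            for device_id, cpu_ranges in mapping.items()
        )

    print(looks_like_valid_bind_core('{"device0": ["0-7"], "device1": ["8-15"]}'))  # True
    print(looks_like_valid_bind_core('{"device0": "0-7"}'))                         # False: value must be a list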
@@ -16,8 +16,15 @@
 """
 Parallel function operator
 """
+from __future__ import absolute_import
 
-from
+from . import (
+    reshard_func
+)
+
+from .reshard_func import (
+    reshard
+)
 
 __all__ = []
 __all__.extend(reshard_func.__all__)
@@ -235,7 +235,7 @@ def _redistribute(tensor, dst_dtensor_info):
     global REDIST_CELL_CACHE
     redist_cache_key = (f"{src_layout_info['device_matrix']}, {src_layout_info['tensor_map']} -> "
                         f"{dst_layout_info['device_matrix']}, {dst_layout_info['tensor_map']}")
-    if redist_cache_key in REDIST_CELL_CACHE
+    if redist_cache_key in REDIST_CELL_CACHE:
         logger.debug(f"redist_cache_key is {redist_cache_key}, match cache")
         redist_func = REDIST_CELL_CACHE[redist_cache_key]
     else:
@@ -17,8 +17,21 @@ Interfaces for parallel-related functionality
 """
 from __future__ import absolute_import
 
-from
-
+from . import (
+    parallel_grad_reducer,
+    parallel_cell_wrapper
+)
+
+from .parallel_grad_reducer import (
+    PipelineGradReducer
+)
+
+from .parallel_cell_wrapper import (
+    PipelineCell,
+    Pipeline,
+    MicroBatchInterleaved,
+    GradAccumulation
+)
 
 __all__ = []
 __all__.extend(parallel_grad_reducer.__all__)
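With the two `__init__.py` hunks above, the pipeline wrappers are re-exported from the `mindspore.parallel.nn` package and `reshard` from `mindspore.parallel.function`, so the symbols listed there can be imported directly (assuming a 2.7.0rc1 install):

    from mindspore.parallel.nn import (
        PipelineCell,
        Pipeline,
        MicroBatchInterleaved,
        GradAccumulation,
        PipelineGradReducer,
    )
    from mindspore.parallel.function import reshard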
@@ -17,6 +17,8 @@
 from __future__ import absolute_import
 from __future__ import division
 
+__all__ = ['PipelineCell', 'Pipeline', 'MicroBatchInterleaved', 'GradAccumulation']
+
 from mindspore import nn
 from mindspore.ops import operations as P
 from mindspore.nn.cell import Cell
@@ -24,9 +26,6 @@ from mindspore.nn.wrap.cell_wrapper import _MicroBatch
 from mindspore import log as logger
 
 
-__all__ = ['PipelineCell', 'Pipeline', 'MicroBatchInterleaved', 'GradAccumulation']
-
-
 class PipelineCell(Cell):
     """
     Slice MiniBatch into finer-grained MicroBatch for use in pipeline-parallel training.
@@ -101,15 +100,15 @@ class PipelineCell(Cell):
                                " config stage num:" + str(config_stage_num))
                 logger.warning("network:" + str(self.network))
                 logger.warning("cell name available:")
-                for cell_name,
+                for cell_name, _ in self.network.cells_and_names():
                     logger.warning(cell_name)
                 raise KeyError("For 'PipelineCell', the argument 'stage_config' : {} is not "
                                "found in 'network' : {}".format(config_dict, network))
 
-    def construct(self, *
+    def construct(self, *args, **kwargs):
         ret = None
         for i in range(self.micro_size):
-            micro_input = self.micro_inputs[i](i, *
+            micro_input = self.micro_inputs[i](i, *args, **kwargs)
             output = self.network(*micro_input)
             if ret is not None:
                 ret = self.add_list[i](ret, output)
@@ -197,10 +196,10 @@ class MicroBatchInterleaved(Cell):
             self.interleave_inputs.append(interleave_data)
         self._get_attr_from_cell(network)
 
-    def construct(self, *
+    def construct(self, *args, **kwargs):
         output = 0.0
         for i in range(self.interleave_num):
-            interleave_input = self.interleave_inputs[i](i, *
+            interleave_input = self.interleave_inputs[i](i, *args, **kwargs)
             output = self.add(output, self.network(*interleave_input))
         return output
 
@@ -251,10 +250,10 @@ class GradAccumulation(Cell):
             self.add_list.append(self.add)
         self._get_attr_from_cell(network)
 
-    def construct(self, *
+    def construct(self, *args, **kwargs):
         ret = None
         for i in range(self.micro_size):
-            micro_input = self.micro_inputs[i](i, *
+            micro_input = self.micro_inputs[i](i, *args, **kwargs)
             output = self.network(*micro_input)
             if ret is not None:
                 ret = self.add_list[i](ret, output)
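The three wrapper hunks above (and `PipelineGradReducer.construct` further below) now take `*args, **kwargs` and forward both to the micro-batch slicers, so keyword arguments flow through to the wrapped network. A minimal illustration of that forwarding pattern, using stand-in classes rather than the real MindSpore cells:

    class FakeMicroBatch:
        # Stand-in slicer: take every micro_size-th element of each input, starting at offset i.
        def __init__(self, micro_size):
            self.micro_size = micro_size

        def __call__(self, i, *args, **kwargs):
            sliced = [a[i::self.micro_size] for a in args]
            sliced += [v[i::self.micro_size] for v in kwargs.values()]
            return tuple(sliced)

    class FakeGradAccumulation:
        # Same loop shape as the hunks above: slice the inputs, run the network, sum the outputs.
        def __init__(self, network, micro_size):
            self.network = network
            self.micro_size = micro_size
            self.micro_inputs = [FakeMicroBatch(micro_size) for _ in range(micro_size)]

        def construct(self, *args, **kwargs):
            ret = None
            for i in range(self.micro_size):
                micro_input = self.micro_inputs[i](i, *args, **kwargs)
                output = self.network(*micro_input)
                ret = output if ret is None else ret + output
            return ret

    net = lambda x, y: sum(x) + sum(y)
    wrapper = FakeGradAccumulation(net, micro_size=2)
    print(wrapper.construct([1, 2, 3, 4], y=[10, 20, 30, 40]))  # 110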
@@ -15,6 +15,8 @@
 """parallel serialization"""
 from __future__ import absolute_import
 
+__all__ = ['PipelineGradReducer']
+
 from mindspore import context
 from mindspore.nn.cell import Cell
 from mindspore.ops import functional as F, composite as C, operations as P
@@ -25,8 +27,6 @@ from mindspore.common.parameter import Parameter
 from mindspore.nn.layer import Identity
 from mindspore.parallel._utils import _get_enable_parallel_optimizer
 
-__all__ = ['PipelineGradReducer']
-
 
 grad_scale = C.MultitypeFuncGraph("grad_scale")
 shard_grad_scale = C.MultitypeFuncGraph("shard_grad_scale")
@@ -81,6 +81,7 @@ class PipelineGradReducer(Cell):
         >>> import mindspore as ms
         >>> from mindspore import nn, ops, Tensor
         >>> from mindspore.communication import init
+        >>> from mindspore.parallel.auto_parallel import AutoParallel
         >>>
         >>> ms.set_context(mode=ms.GRAPH_MODE)
         >>> ms.reset_auto_parallel_context()
@@ -113,7 +114,7 @@ class PipelineGradReducer(Cell):
         >>> net.layer3.pipeline_stage = 1
         >>> loss_fn = nn.CrossEntropyLoss()
         >>> optimizer = nn.SGD(net.trainable_params(), 1e-2)
-        >>> net_with_loss = nn.
+        >>> net_with_loss = nn.PipelineCell(nn.WithLossCell(net, loss_fn), 2)
         >>> net_with_loss.set_train()
         >>> def forward_fn(inputs, target):
         ...     loss = net_with_loss(inputs, target)
@@ -135,7 +136,7 @@ class PipelineGradReducer(Cell):
         >>> label = Tensor(np.ones([size, out_features]).astype(np.float32))
         >>> loss, _ = train_one_step(inputs, label)
         >>> print(loss)
-        46.
+        46.304886
     """
     def __init__(self, parameters, scale_sense=1.0, opt_shard=None):
         super(PipelineGradReducer, self).__init__(auto_prefix=False)
@@ -151,10 +152,10 @@ class PipelineGradReducer(Cell):
         self.opt_shard = opt_shard
 
     @jit
-    def construct(self,
+    def construct(self, *args, **kwargs):
         new_grads = None
         if self.opt_shard:
-            grads = self.grad_reducer(
+            grads = self.grad_reducer(*args, **kwargs)
             new_grads = self.hyper_map(F.partial(shard_grad_scale, self.scale_sense * self.degree),
                                        grads, self.accu_grads)
         else:

mindspore/parallel/shard.py  CHANGED
@@ -120,8 +120,8 @@ class Layout:
         >>> layout = Layout((2, 2, 2), ("dp", "sp", "mp"))
         >>> layout0 = layout("dp", "mp")
         >>> print(layout0.to_dict())
-        {
-        'alias_name': {'dp', 'sp', 'mp'},
+        {"device_matrix": (2, 2, 2), "tensor_map": (2, 0), "interleaved_parallel": False,
+        'alias_name': {'dp', 'sp', 'mp'}, "rank_list": [0, 1, 2, 3, 4, 5, 6, 7]}
         >>> layout = Layout((2, 2, 2), ("dp", "sp", "interleaved_parallel"))
         >>> layout1 = layout(("dp", "interleaved_parallel"), "sp")
     """