mindspore 2.6.0rc1__cp311-cp311-win_amd64.whl → 2.7.0__cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +2 -2
- mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +42 -11
- mindspore/_extends/builtin_operations.py +3 -3
- mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
- mindspore/_extends/optimize/cell_utils.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +3 -3
- mindspore/_extends/parse/compile_config.py +44 -22
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -2
- mindspore/_extends/parse/parser.py +65 -84
- mindspore/_extends/parse/resources.py +39 -0
- mindspore/_extends/parse/standard_method.py +58 -14
- mindspore/_extends/parse/trope.py +8 -1
- mindspore/_extends/pijit/__init__.py +1 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +2 -5
- mindspore/amp.py +4 -22
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +4 -4
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +43 -12
- mindspore/common/_grad_function.py +2 -1
- mindspore/common/_pijit_context.py +28 -7
- mindspore/common/_stub_tensor.py +1 -209
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +178 -53
- mindspore/common/_utils.py +9 -1
- mindspore/common/api.py +377 -203
- mindspore/common/dtype.py +108 -57
- mindspore/common/dump.py +11 -16
- mindspore/common/dynamic_shape/__init__.py +0 -0
- mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +17 -23
- mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
- mindspore/common/file_system.py +59 -9
- mindspore/common/generator.py +5 -3
- mindspore/common/hook_handle.py +33 -5
- mindspore/common/jit_config.py +1 -1
- mindspore/common/jit_trace.py +84 -105
- mindspore/common/np_dtype.py +3 -3
- mindspore/common/parameter.py +27 -29
- mindspore/common/recompute.py +5 -7
- mindspore/common/sparse_tensor.py +0 -3
- mindspore/common/symbol.py +0 -1
- mindspore/common/tensor.py +117 -131
- mindspore/communication/_comm_helper.py +46 -4
- mindspore/communication/management.py +79 -7
- mindspore/context.py +67 -55
- mindspore/dataset/__init__.py +1 -1
- mindspore/dataset/audio/transforms.py +1 -1
- mindspore/dataset/core/config.py +38 -4
- mindspore/dataset/engine/datasets.py +350 -322
- mindspore/dataset/engine/datasets_user_defined.py +70 -24
- mindspore/dataset/engine/iterators.py +2 -2
- mindspore/dataset/engine/obs/config_loader.py +2 -2
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
- mindspore/dataset/transforms/c_transforms.py +2 -2
- mindspore/dataset/transforms/py_transforms.py +7 -3
- mindspore/dataset/transforms/transforms.py +10 -6
- mindspore/dataset/vision/__init__.py +1 -1
- mindspore/dataset/vision/py_transforms.py +8 -8
- mindspore/dataset/vision/transforms.py +17 -5
- mindspore/dataset/vision/utils.py +632 -21
- mindspore/dataset/vision/validators.py +1 -0
- mindspore/device_context/ascend/device.py +1 -1
- mindspore/device_context/ascend/op_tuning.py +35 -1
- mindspore/device_context/gpu/__init__.py +2 -2
- mindspore/device_context/gpu/device.py +1 -1
- mindspore/device_context/gpu/op_precision.py +4 -2
- mindspore/device_context/gpu/op_tuning.py +6 -3
- mindspore/device_manager.py +16 -9
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -4
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/optim/adadelta.py +13 -20
- mindspore/experimental/optim/adagrad.py +15 -22
- mindspore/experimental/optim/adam.py +17 -24
- mindspore/experimental/optim/adamax.py +14 -22
- mindspore/experimental/optim/adamw.py +28 -34
- mindspore/experimental/optim/asgd.py +15 -25
- mindspore/experimental/optim/lr_scheduler.py +27 -45
- mindspore/experimental/optim/nadam.py +14 -24
- mindspore/experimental/optim/optimizer.py +13 -23
- mindspore/experimental/optim/radam.py +18 -24
- mindspore/experimental/optim/rmsprop.py +14 -25
- mindspore/experimental/optim/rprop.py +15 -26
- mindspore/experimental/optim/sgd.py +9 -19
- mindspore/hal/__init__.py +4 -4
- mindspore/hal/contiguous_tensors_handle.py +2 -2
- mindspore/hal/memory.py +27 -7
- mindspore/include/api/cell.h +65 -5
- mindspore/include/api/cfg.h +24 -7
- mindspore/include/api/context.h +1 -0
- mindspore/include/api/delegate.h +10 -2
- mindspore/include/api/dual_abi_helper.h +100 -19
- mindspore/include/api/graph.h +14 -1
- mindspore/include/api/kernel.h +16 -3
- mindspore/include/api/kernel_api.h +9 -1
- mindspore/include/api/metrics/accuracy.h +9 -0
- mindspore/include/api/model.h +8 -1
- mindspore/include/api/model_group.h +4 -0
- mindspore/include/api/model_parallel_runner.h +2 -0
- mindspore/include/api/status.h +48 -10
- mindspore/include/api/types.h +8 -3
- mindspore/include/c_api/model_c.h +0 -58
- mindspore/include/c_api/tensor_c.h +0 -26
- mindspore/include/dataset/constants.h +9 -0
- mindspore/include/dataset/vision_ascend.h +1 -1
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/cifar10.py +61 -11
- mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mindspore_ops_host.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +6 -46
- mindspore/mint/distributed/__init__.py +5 -0
- mindspore/mint/distributed/distributed.py +429 -23
- mindspore/mint/nn/__init__.py +1 -1
- mindspore/mint/nn/functional.py +53 -6
- mindspore/mint/nn/layer/_functions.py +163 -294
- mindspore/mint/nn/layer/activation.py +8 -6
- mindspore/mint/nn/layer/conv.py +140 -104
- mindspore/mint/nn/layer/normalization.py +11 -25
- mindspore/mint/optim/adam.py +19 -18
- mindspore/mint/optim/adamw.py +14 -8
- mindspore/mint/optim/sgd.py +5 -5
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/cell.py +491 -623
- mindspore/nn/grad/cell_grad.py +11 -12
- mindspore/nn/layer/activation.py +36 -36
- mindspore/nn/layer/basic.py +74 -77
- mindspore/nn/layer/channel_shuffle.py +4 -4
- mindspore/nn/layer/combined.py +4 -2
- mindspore/nn/layer/conv.py +117 -110
- mindspore/nn/layer/dense.py +9 -7
- mindspore/nn/layer/embedding.py +50 -52
- mindspore/nn/layer/image.py +38 -40
- mindspore/nn/layer/math.py +111 -112
- mindspore/nn/layer/normalization.py +56 -44
- mindspore/nn/layer/pooling.py +58 -63
- mindspore/nn/layer/rnn_cells.py +33 -33
- mindspore/nn/layer/rnns.py +56 -56
- mindspore/nn/layer/thor_layer.py +74 -73
- mindspore/nn/layer/transformer.py +11 -1
- mindspore/nn/learning_rate_schedule.py +20 -20
- mindspore/nn/loss/loss.py +79 -81
- mindspore/nn/optim/adam.py +4 -6
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +2 -0
- mindspore/nn/optim/lamb.py +1 -3
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/tft_wrapper.py +2 -3
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/utils.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -1
- mindspore/nn/probability/distribution/poisson.py +2 -1
- mindspore/nn/sparse/sparse.py +3 -3
- mindspore/nn/wrap/cell_wrapper.py +73 -42
- mindspore/nn/wrap/grad_reducer.py +37 -52
- mindspore/nn/wrap/loss_scale.py +72 -74
- mindspore/numpy/array_creations.py +7 -7
- mindspore/numpy/fft.py +1 -1
- mindspore/numpy/math_ops.py +5 -5
- mindspore/numpy/utils_const.py +1 -1
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
- mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/{experimental/es/__init__.py → ops/_op_impl/cpu/joinedstr_op.py} +12 -6
- mindspore/ops/_vmap/vmap_array_ops.py +31 -13
- mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +54 -13
- mindspore/ops/auto_generate/gen_extend_func.py +27 -145
- mindspore/ops/auto_generate/gen_ops_def.py +1027 -347
- mindspore/ops/auto_generate/gen_ops_prim.py +2341 -1117
- mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
- mindspore/ops/composite/__init__.py +10 -0
- mindspore/ops/composite/base.py +9 -5
- mindspore/ops/composite/multitype_ops/__init__.py +12 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +133 -109
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
- mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
- mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
- mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
- mindspore/ops/function/__init__.py +4 -1
- mindspore/ops/function/_add_attr_func.py +11 -6
- mindspore/ops/function/array_func.py +19 -102
- mindspore/ops/function/debug_func.py +8 -5
- mindspore/ops/function/grad/grad_func.py +5 -13
- mindspore/ops/function/math_func.py +77 -572
- mindspore/ops/function/nn_func.py +46 -94
- mindspore/ops/function/other_func.py +4 -1
- mindspore/ops/function/random_func.py +44 -5
- mindspore/ops/function/vmap_func.py +2 -1
- mindspore/ops/functional.py +4 -4
- mindspore/ops/functional_overload.py +594 -18
- mindspore/ops/op_info_register.py +21 -0
- mindspore/ops/operations/__init__.py +16 -11
- mindspore/ops/operations/_custom_ops_utils.py +689 -34
- mindspore/ops/operations/_inner_ops.py +14 -18
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +5 -51
- mindspore/ops/operations/comm_ops.py +186 -41
- mindspore/ops/operations/custom_ops.py +303 -177
- mindspore/ops/operations/debug_ops.py +59 -4
- mindspore/ops/operations/image_ops.py +13 -13
- mindspore/ops/operations/manually_defined/ops_def.py +27 -28
- mindspore/ops/operations/math_ops.py +8 -9
- mindspore/ops/operations/nn_ops.py +8 -40
- mindspore/ops/primitive.py +9 -20
- mindspore/ops/tensor_method.py +63 -15
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
- mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
- mindspore/ops_generate/api/functions_cc_generator.py +58 -10
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
- mindspore/ops_generate/common/base_generator.py +14 -0
- mindspore/ops_generate/common/gen_constants.py +8 -3
- mindspore/ops_generate/common/gen_utils.py +0 -19
- mindspore/ops_generate/common/op_proto.py +11 -4
- mindspore/ops_generate/common/template.py +88 -11
- mindspore/ops_generate/gen_ops.py +1 -1
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -16
- mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
- mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
- mindspore/parallel/_auto_parallel_context.py +16 -23
- mindspore/parallel/_cell_wrapper.py +113 -45
- mindspore/parallel/_parallel_serialization.py +4 -3
- mindspore/parallel/_ps_context.py +4 -6
- mindspore/parallel/_tensor.py +167 -12
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/transformer.py +17 -12
- mindspore/parallel/_utils.py +5 -11
- mindspore/parallel/auto_parallel.py +35 -14
- mindspore/parallel/checkpoint_convert.py +3 -3
- mindspore/parallel/checkpoint_transform.py +13 -7
- mindspore/parallel/cluster/process_entity/_api.py +88 -49
- mindspore/parallel/cluster/process_entity/_utils.py +95 -7
- mindspore/parallel/cluster/run.py +48 -7
- mindspore/parallel/function/__init__.py +8 -1
- mindspore/parallel/function/reshard_func.py +12 -12
- mindspore/parallel/nn/__init__.py +15 -2
- mindspore/parallel/nn/parallel_cell_wrapper.py +50 -14
- mindspore/parallel/nn/parallel_grad_reducer.py +7 -14
- mindspore/parallel/shard.py +10 -25
- mindspore/parallel/transform_safetensors.py +469 -174
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +12 -6
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
- mindspore/profiler/analysis/task_manager.py +1 -1
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +10 -9
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +43 -23
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
- mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
- mindspore/profiler/common/constant.py +16 -0
- mindspore/profiler/common/msprof_cmd_tool.py +2 -2
- mindspore/profiler/common/path_manager.py +9 -0
- mindspore/profiler/common/profiler_context.py +50 -29
- mindspore/profiler/common/profiler_info.py +0 -16
- mindspore/profiler/common/profiler_meta_data.py +1 -0
- mindspore/profiler/common/profiler_op_analyse.py +239 -0
- mindspore/profiler/common/profiler_output_path.py +23 -8
- mindspore/profiler/common/profiler_parameters.py +128 -35
- mindspore/profiler/dynamic_profile/__init__.py +0 -0
- mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
- mindspore/profiler/dynamic_profiler.py +374 -338
- mindspore/profiler/envprofiler.py +42 -12
- mindspore/profiler/experimental_config.py +112 -7
- mindspore/profiler/mstx.py +33 -12
- mindspore/profiler/platform/__init__.py +2 -3
- mindspore/profiler/platform/cpu_profiler.py +10 -4
- mindspore/profiler/platform/npu_profiler.py +30 -20
- mindspore/profiler/profiler.py +218 -154
- mindspore/profiler/profiler_action_controller.py +65 -77
- mindspore/profiler/profiler_interface.py +2 -2
- mindspore/profiler/schedule.py +10 -4
- mindspore/rewrite/common/config.py +1 -0
- mindspore/rewrite/common/namer.py +1 -0
- mindspore/rewrite/common/namespace.py +1 -0
- mindspore/rewrite/node/node.py +31 -11
- mindspore/rewrite/parsers/assign_parser.py +1 -1
- mindspore/rewrite/symbol_tree/symbol_tree.py +2 -2
- mindspore/run_check/_check_version.py +7 -10
- mindspore/runtime/__init__.py +8 -6
- mindspore/runtime/event.py +10 -4
- mindspore/runtime/executor.py +87 -45
- mindspore/runtime/memory.py +31 -32
- mindspore/runtime/thread_bind_core.py +299 -165
- mindspore/safeguard/rewrite_obfuscation.py +12 -13
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/_utils.py +17 -7
- mindspore/train/amp.py +43 -23
- mindspore/train/callback/__init__.py +5 -5
- mindspore/train/callback/_callback.py +2 -1
- mindspore/train/callback/_checkpoint.py +4 -14
- mindspore/train/callback/_flops_collector.py +11 -7
- mindspore/train/callback/_landscape.py +0 -1
- mindspore/train/callback/_train_fault_tolerance.py +98 -21
- mindspore/train/data_sink.py +15 -6
- mindspore/train/dataset_helper.py +14 -5
- mindspore/train/model.py +133 -69
- mindspore/train/serialization.py +168 -126
- mindspore/train/summary/summary_record.py +13 -2
- mindspore/train/train_thor/model_thor.py +2 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +0 -6
- mindspore/utils/runtime_execution_order_check.py +163 -77
- mindspore/utils/sdc_detect.py +68 -0
- mindspore/utils/utils.py +14 -17
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/METADATA +5 -4
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/RECORD +403 -442
- mindspore/_deprecated/jit.py +0 -198
- mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
- mindspore/communication/_hccl_management.py +0 -297
- mindspore/experimental/es/embedding_service.py +0 -891
- mindspore/experimental/es/embedding_service_layer.py +0 -581
- mindspore/profiler/common/validator/__init__.py +0 -14
- mindspore/profiler/common/validator/validate_path.py +0 -84
- mindspore/profiler/parser/__init__.py +0 -14
- mindspore/profiler/parser/aicpu_data_parser.py +0 -272
- mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
- mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
- mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
- mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
- mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
- mindspore/profiler/parser/ascend_flops_generator.py +0 -116
- mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
- mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
- mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
- mindspore/profiler/parser/ascend_op_generator.py +0 -334
- mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
- mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
- mindspore/profiler/parser/base_timeline_generator.py +0 -483
- mindspore/profiler/parser/container.py +0 -229
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
- mindspore/profiler/parser/flops_parser.py +0 -531
- mindspore/profiler/parser/framework_enum.py +0 -111
- mindspore/profiler/parser/framework_parser.py +0 -464
- mindspore/profiler/parser/framework_struct.py +0 -61
- mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
- mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
- mindspore/profiler/parser/hccl_parser.py +0 -573
- mindspore/profiler/parser/hwts_log_parser.py +0 -122
- mindspore/profiler/parser/integrator.py +0 -526
- mindspore/profiler/parser/memory_usage_parser.py +0 -277
- mindspore/profiler/parser/minddata_analyzer.py +0 -800
- mindspore/profiler/parser/minddata_parser.py +0 -186
- mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
- mindspore/profiler/parser/op_intermediate_parser.py +0 -149
- mindspore/profiler/parser/optime_parser.py +0 -250
- mindspore/profiler/parser/profiler_info.py +0 -213
- mindspore/profiler/parser/step_trace_parser.py +0 -666
- mindspore/utils/hooks.py +0 -81
- /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/WHEEL +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/entry_points.txt +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/top_level.txt +0 -0
mindspore/parallel/nn/parallel_cell_wrapper.py CHANGED

@@ -17,6 +17,8 @@
 from __future__ import absolute_import
 from __future__ import division
 
+__all__ = ['PipelineCell', 'Pipeline', 'MicroBatchInterleaved', 'GradAccumulation']
+
 from mindspore import nn
 from mindspore.ops import operations as P
 from mindspore.nn.cell import Cell
@@ -24,12 +26,10 @@ from mindspore.nn.wrap.cell_wrapper import _MicroBatch
 from mindspore import log as logger
 
 
-__all__ = ['PipelineCell', 'Pipeline', 'MicroBatchInterleaved', 'GradAccumulation']
-
-
 class PipelineCell(Cell):
     """
-    Slice MiniBatch into finer-grained MicroBatch for use in pipeline-parallel training
+    Slice MiniBatch into finer-grained MicroBatch for use in pipeline-parallel training,
+    and specify the segment info.
 
     Note:
         micro_size must be greater or equal to pipeline stages.
@@ -38,6 +38,8 @@ class PipelineCell(Cell):
         network (Cell): The target network to wrap.
         micro_size (int): MicroBatch size.
         stage_config (dict, optional): The stage configuration for each cell's execution in pipeline parallel.
+        segment_config (dict, optional): The segment configuration for each cell's execution in pipeline parallel.
+            Default ``None``.
 
     Supported Platforms:
         ``Ascend``
@@ -49,7 +51,7 @@ class PipelineCell(Cell):
         >>> net = LeNet5()
         >>> net = nn.PipelineCell(net, 4, stage_config={"cell_name_0": 0, "cell_name_1": 1})
     """
-    def __init__(self, network, micro_size, stage_config=None):
+    def __init__(self, network, micro_size, stage_config=None, segment_config=None):
        super(PipelineCell, self).__init__(auto_prefix=False)
        self.network = network
        self.micro_inputs = nn.CellList()
@@ -101,15 +103,46 @@ class PipelineCell(Cell):
                                " config stage num:" + str(config_stage_num))
                 logger.warning("network:" + str(self.network))
                 logger.warning("cell name available:")
-                for cell_name,
+                for cell_name, _ in self.network.cells_and_names():
                     logger.warning(cell_name)
                 raise KeyError("For 'PipelineCell', the argument 'stage_config' : {} is not "
                                "found in 'network' : {}".format(config_dict, network))
-
-    def construct(self, *
+        if segment_config is None:
+            return
+        self._config_segment(segment_config)
+
+
+    def _config_segment(self, segment_config):
+        """
+        Config segment num for cell.
+        """
+        config_dict = segment_config.copy()
+
+        for cell_name, cell in self.network.cells_and_names():
+            if cell_name in segment_config:
+                setattr(cell, "pipeline_segment", segment_config[cell_name])
+                del config_dict[cell_name]
+        if str(self.network) in segment_config:
+            setattr(self.network, "pipeline_segment", segment_config[str(self.network)])
+            del config_dict[str(self.network)]
+        # if there are any config elements left, print them
+        if config_dict:
+            for config_cell_name, config_segment_num in config_dict.items():
+                logger.error("pipeline_cell segment_config set pipeline_segment fail!")
+                logger.warning("config cell name:" + str(config_cell_name) +
+                               " config segment num:" + str(config_segment_num))
+            logger.warning("network:" + str(self.network))
+            logger.warning("cell name available:")
+            for cell_name, _ in self.network.cells_and_names():
+                logger.warning(cell_name)
+            raise KeyError("For 'PipelineCell', the argument 'segment_config' : {} is not "
+                           "found in 'network' : {}".format(config_dict, self.network))
+
+
+    def construct(self, *args, **kwargs):
         ret = None
         for i in range(self.micro_size):
-            micro_input = self.micro_inputs[i](i, *
+            micro_input = self.micro_inputs[i](i, *args, **kwargs)
             output = self.network(*micro_input)
             if ret is not None:
                 ret = self.add_list[i](ret, output)
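
For orientation, here is a minimal usage sketch of the new `segment_config` argument. The cell names (`layer0`, `layer1`) and the stage/segment values are illustrative; keys must match names reported by `network.cells_and_names()`, otherwise `PipelineCell` raises the `KeyError` shown above.

```python
import mindspore.nn as nn

class Net(nn.Cell):
    def __init__(self):
        super().__init__()
        self.layer0 = nn.Dense(4, 8)
        self.layer1 = nn.Dense(8, 2)

    def construct(self, x):
        return self.layer1(self.layer0(x))

# Assign each sub-cell a pipeline stage and (new in 2.7.0) a segment;
# unmatched keys in either dict raise KeyError, as the diff above shows.
net = nn.PipelineCell(Net(), micro_size=4,
                      stage_config={"layer0": 0, "layer1": 1},
                      segment_config={"layer0": 0, "layer1": 1})
```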
@@ -120,7 +153,8 @@ class PipelineCell(Cell):
 
 class Pipeline(PipelineCell):
     """
-    Specify the number of micro_batch for pipeline parallelism and the division rules for stage
+    Specify the number of micro_batch for pipeline parallelism and the division rules for stage,
+    and specify the segment info.
 
     Note:
         micro_size must be greater or equal to pipeline stages.
@@ -129,6 +163,8 @@ class Pipeline(PipelineCell):
         network (Cell): The target network to wrap.
         micro_size (int): MicroBatch size.
         stage_config (dict, optional): Stage configuration for cell's execution in pipeline parallel. Default ``None``.
+        segment_config (dict, optional): The segment configuration for each cell's execution in pipeline parallel.
+            Default ``None``.
 
     Raises:
         TypeError: The type of `net` is not cell.
@@ -197,10 +233,10 @@ class MicroBatchInterleaved(Cell):
             self.interleave_inputs.append(interleave_data)
         self._get_attr_from_cell(network)
 
-    def construct(self, *
+    def construct(self, *args, **kwargs):
         output = 0.0
         for i in range(self.interleave_num):
-            interleave_input = self.interleave_inputs[i](i, *
+            interleave_input = self.interleave_inputs[i](i, *args, **kwargs)
             output = self.add(output, self.network(*interleave_input))
         return output
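
The restored `construct` above splits each mini-batch into `interleave_num` parts and sums the partial outputs. A hedged usage sketch (the wrapped cell and the `interleave_num` value are illustrative):

```python
import mindspore.nn as nn

# Each forward call slices the inputs into interleave_num micro-batches,
# runs the network on each slice, and accumulates the outputs.
net = nn.MicroBatchInterleaved(nn.Dense(16, 4), interleave_num=2)
```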
@@ -251,10 +287,10 @@ class GradAccumulation(Cell):
             self.add_list.append(self.add)
         self._get_attr_from_cell(network)
 
-    def construct(self, *
+    def construct(self, *args, **kwargs):
         ret = None
         for i in range(self.micro_size):
-            micro_input = self.micro_inputs[i](i, *
+            micro_input = self.micro_inputs[i](i, *args, **kwargs)
             output = self.network(*micro_input)
             if ret is not None:
                 ret = self.add_list[i](ret, output)
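
All three `construct` fixes above restore the same forwarding pattern: slice the mini-batch, run the network per micro-batch, and accumulate. A plain-Python sketch of that loop, with a hypothetical `slice_batch` standing in for the `_MicroBatch` cell:

```python
def slice_batch(i, micro_size, *args):
    # Hypothetical stand-in for _MicroBatch: take the i-th chunk of each
    # input's leading dimension.
    return tuple(a[i * (len(a) // micro_size):(i + 1) * (len(a) // micro_size)]
                 for a in args)

def accumulate(network, micro_size, *args):
    ret = None
    for i in range(micro_size):
        micro_input = slice_batch(i, micro_size, *args)
        output = network(*micro_input)
        ret = output if ret is None else ret + output
    return ret
```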
mindspore/parallel/nn/parallel_grad_reducer.py CHANGED

@@ -15,7 +15,8 @@
 """parallel serialization"""
 from __future__ import absolute_import
 
-
+__all__ = ['PipelineGradReducer']
+
 from mindspore.nn.cell import Cell
 from mindspore.ops import functional as F, composite as C, operations as P
 import mindspore.common.dtype as mstype
@@ -25,8 +26,6 @@ from mindspore.common.parameter import Parameter
 from mindspore.nn.layer import Identity
 from mindspore.parallel._utils import _get_enable_parallel_optimizer
 
-__all__ = ['PipelineGradReducer']
-
 
 grad_scale = C.MultitypeFuncGraph("grad_scale")
 shard_grad_scale = C.MultitypeFuncGraph("shard_grad_scale")
@@ -81,6 +80,7 @@ class PipelineGradReducer(Cell):
         >>> import mindspore as ms
         >>> from mindspore import nn, ops, Tensor
         >>> from mindspore.communication import init
+        >>> from mindspore.parallel.auto_parallel import AutoParallel
         >>>
         >>> ms.set_context(mode=ms.GRAPH_MODE)
         >>> ms.reset_auto_parallel_context()
@@ -113,7 +113,7 @@ class PipelineGradReducer(Cell):
         >>> net.layer3.pipeline_stage = 1
         >>> loss_fn = nn.CrossEntropyLoss()
         >>> optimizer = nn.SGD(net.trainable_params(), 1e-2)
-        >>> net_with_loss = nn.
+        >>> net_with_loss = nn.PipelineCell(nn.WithLossCell(net, loss_fn), 2)
         >>> net_with_loss.set_train()
         >>> def forward_fn(inputs, target):
         ...     loss = net_with_loss(inputs, target)
@@ -135,11 +135,10 @@ class PipelineGradReducer(Cell):
         >>> label = Tensor(np.ones([size, out_features]).astype(np.float32))
         >>> loss, _ = train_one_step(inputs, label)
         >>> print(loss)
-        46.
+        46.304886
     """
     def __init__(self, parameters, scale_sense=1.0, opt_shard=None):
         super(PipelineGradReducer, self).__init__(auto_prefix=False)
-        self._check_mode()
         self.accu_grads = parameters.clone(prefix="accu_grads", init="zeros")
         self.grad_reducer = Identity()
         self.degree = Tensor(1, mstype.float32)
@@ -151,19 +150,13 @@ class PipelineGradReducer(Cell):
         self.opt_shard = opt_shard
 
     @jit
-    def construct(self,
+    def construct(self, *args, **kwargs):
         new_grads = None
         if self.opt_shard:
-            grads = self.grad_reducer(
+            grads = self.grad_reducer(*args, **kwargs)
             new_grads = self.hyper_map(F.partial(shard_grad_scale, self.scale_sense * self.degree),
                                        grads, self.accu_grads)
         else:
             accu_grads = self.grad_reducer(self.accu_grads)
             new_grads = self.hyper_map(F.partial(grad_scale, self.scale_sense * self.degree), grads, accu_grads)
         return new_grads
-
-    def _check_mode(self):
-        """check parallel mode"""
-        mode = context.get_context('mode')
-        if mode != context.GRAPH_MODE:
-            raise RuntimeError(f"PipelineGradReducer only support graph mode, but get {mode}")
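
`grad_scale` and `shard_grad_scale` above follow MindSpore's `MultitypeFuncGraph` + `HyperMap` pattern. A minimal self-contained sketch of that pattern (the rule name and tensors here are illustrative, not the library's own definitions):

```python
from mindspore import Tensor
from mindspore.ops import composite as C, functional as F

# Register one scaling rule, then map it over a tuple of gradients,
# mirroring self.hyper_map(F.partial(grad_scale, scale), grads, ...) above.
scale_rule = C.MultitypeFuncGraph("scale_rule")

@scale_rule.register("Tensor", "Tensor")
def _scale(scale, grad):
    return grad * F.cast(scale, F.dtype(grad))

hyper_map = C.HyperMap()
grads = (Tensor([1.0, 2.0]), Tensor([4.0]))
scaled = hyper_map(F.partial(scale_rule, Tensor(0.5)), grads)
```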
mindspore/parallel/shard.py CHANGED

@@ -121,8 +121,7 @@ class Layout:
         >>> layout0 = layout("dp", "mp")
         >>> print(layout0.to_dict())
         {"device_matrix": (2, 2, 2), "tensor_map": (2, 0), "interleaved_parallel": False,
-        'alias_name': {'dp', 'sp', 'mp'}, "rank_list": [0, 1, 2, 3, 4,
-        >>> # Total device num is 4, but split the tensor in local device into two copies.
+        'alias_name': {'dp', 'sp', 'mp'}, "rank_list": [0, 1, 2, 3, 4, 5, 6, 7]}
         >>> layout = Layout((2, 2, 2), ("dp", "sp", "interleaved_parallel"))
         >>> layout1 = layout(("dp", "interleaved_parallel"), "sp")
     """
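
To make the `to_dict()` output above concrete: with device matrix `(2, 2, 2)` aliased `("dp", "sp", "mp")`, axes are indexed right to left, so `"dp"` is axis 2 and `"mp"` is axis 0, which yields `tensor_map (2, 0)`. A short sketch, assuming `Layout` is importable from the top-level `mindspore` package:

```python
from mindspore import Layout

# 8 devices arranged as a (2, 2, 2) device matrix; aliases name the axes
# right to left: "dp" -> axis 2, "sp" -> axis 1, "mp" -> axis 0.
layout = Layout((2, 2, 2), ("dp", "sp", "mp"))
layout0 = layout("dp", "mp")  # shard tensor dim 0 by "dp", dim 1 by "mp"
print(layout0.to_dict())      # tensor_map (2, 0), rank_list [0..7]
```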
@@ -254,13 +253,6 @@ class Shard(Shard_):
                            "will be overwritten as False.")
             ms.set_algo_parameters(fully_use_devices=False)
 
-        if ms.context.get_auto_parallel_context("full_batch_is_set") is False and \
-            ms.context.get_context("mode") == ms.context.PYNATIVE_MODE:
-            logger.warning("When calling the shard interface, "
-                           "'dataset_strategy' or 'full_batch' is not manually set by the user, "
-                           "and the 'dataset_strategy' will be set to 'full_batch'.")
-            ms.context.set_auto_parallel_context(dataset_strategy="full_batch")
-
         if self._is_attrs_has_been_set(fn, in_strategy, out_strategy, device, level):
             return self.shard_fn
         shard_ = Shard()
@@ -395,11 +387,10 @@ class Shard(Shard_):
                                 f"The tuple strategy for each dimension should be tuple(int).")
 
 
-def shard(fn, in_strategy, out_strategy=None, parameter_plan=None
+def shard(fn, in_strategy, out_strategy=None, parameter_plan=None):
     """
     Specify the input and output slicing strategy for a Cell or function.
-    In
-    execution in graph mode. In Graph mode, use this method to specify distribution strategy for a Cell,
+    In Graph mode, use this method to specify distribution strategy for a Cell,
     strategy for others will be set by sharding propagation.
     in_strategy and out_strategy define the input and output layout respectively.
     in_strategy/out_strategy should be a tuple, each element of which corresponds to the desired layout of
@@ -408,10 +399,12 @@ def shard(fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascen
     The parallel strategies of remaining operators are derived from the strategy specified by the input and output.
 
     Note:
-        -
-
+        - It is valid only in semi auto parallel or auto parallel mode.
+          In other parallel modes, strategies set here will be ignored.
         - If the input contain Parameter, its strategy should be set in `in_strategy`.
-
+
+    .. warning::
+        The method is currently not supported in PyNative mode.
 
     Args:
         fn (Union[Cell, Function]): Function to be executed in parallel.
@@ -433,19 +426,12 @@ def shard(fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascen
             has been set, the parameter setting will be ignored. Supported
             only when `fn` is a Cell with parameters.
             Default: ``None`` .
-        device (str, optional): Select a certain `device` target. It is not in use right now.
-            Support ["CPU", "GPU", "Ascend"]. Default: ``"Ascend"`` .
-        level (int, optional): Option for parallel strategy infer algorithm, namely the object function,
-            maximize computation
-            over communication ratio, maximize speed performance, minimize memory usage etc. It is not in
-            use right now. Support [0, 1, 2]. Default: ``0`` .
 
     Returns:
         Function, return the function that will be executed under auto parallel process.
 
     Raises:
         AssertionError: If parallel mode is not "auto_parallel" nor "semi_auto_parallel".
-        AssertionError: If device_target it not "Ascend" or "GPU".
         TypeError: If `in_strategy` is not a tuple.
         TypeError: If `out_strategy` is not a tuple or None.
         TypeError: If any element in `in_strategy` is not a tuple(int) or tuple(mindspore.parallel.Layout).
@@ -453,8 +439,6 @@ def shard(fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascen
         TypeError: If `parameter_plan` is not a dict or None.
         TypeError: If any key in `parameter_plan` is not a str.
         TypeError: If any value in `parameter_plan` is not a tuple(int) or a tuple(mindspore.parallel.Layout).
-        TypeError: If `device` is not a str.
-        TypeError: If `level` is not an integer.
 
     Supported Platforms:
         ``Ascend``
@@ -557,4 +541,5 @@ def shard(fn, in_strategy, out_strategy=None, parameter_plan=None, device="Ascen
     if not isinstance(fn, (ms.nn.Cell)):
         logger.warning("'fn' is not a mindspore.nn.Cell, and its definition cannot involve Parameter; "
                        "otherwise, the result may be incorrect.")
-
+
+    return Shard()(fn, in_strategy, out_strategy, parameter_plan)
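
Finally, a hedged sketch of calling the trimmed `shard` signature (assuming the function is exposed as `mindspore.parallel.shard`, matching this module path; the strategy assumes 4 devices under semi-auto or auto parallel):

```python
from mindspore import ops
from mindspore.parallel import shard

# Split MatMul's first input along its first axis across 4 devices;
# out_strategy and parameter_plan keep their None defaults.
matmul = ops.MatMul()
parallel_matmul = shard(matmul, in_strategy=((4, 1), (1, 1)))
```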