mindspore-2.6.0-cp310-cp310-win_amd64.whl → mindspore-2.7.0-cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +2 -2
- mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +42 -11
- mindspore/_extends/builtin_operations.py +3 -3
- mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
- mindspore/_extends/optimize/cell_utils.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +3 -3
- mindspore/_extends/parse/compile_config.py +44 -22
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -2
- mindspore/_extends/parse/parser.py +64 -83
- mindspore/_extends/parse/resources.py +39 -0
- mindspore/_extends/parse/standard_method.py +47 -14
- mindspore/_extends/parse/trope.py +8 -1
- mindspore/_extends/pijit/__init__.py +1 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +2 -5
- mindspore/amp.py +4 -22
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +4 -4
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +43 -12
- mindspore/common/_grad_function.py +2 -1
- mindspore/common/_pijit_context.py +28 -7
- mindspore/common/_stub_tensor.py +1 -209
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +177 -52
- mindspore/common/_utils.py +9 -1
- mindspore/common/api.py +338 -208
- mindspore/common/dtype.py +108 -57
- mindspore/common/dump.py +11 -16
- mindspore/common/dynamic_shape/__init__.py +0 -0
- mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +17 -23
- mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
- mindspore/common/file_system.py +59 -9
- mindspore/common/generator.py +2 -3
- mindspore/common/hook_handle.py +33 -5
- mindspore/common/jit_config.py +1 -1
- mindspore/common/jit_trace.py +84 -105
- mindspore/common/np_dtype.py +3 -3
- mindspore/common/parameter.py +27 -29
- mindspore/common/recompute.py +5 -7
- mindspore/common/sparse_tensor.py +0 -3
- mindspore/common/symbol.py +0 -1
- mindspore/common/tensor.py +84 -133
- mindspore/communication/_comm_helper.py +46 -4
- mindspore/communication/management.py +79 -7
- mindspore/context.py +47 -38
- mindspore/dataset/__init__.py +1 -1
- mindspore/dataset/audio/transforms.py +1 -1
- mindspore/dataset/core/config.py +38 -4
- mindspore/dataset/engine/datasets.py +350 -322
- mindspore/dataset/engine/datasets_user_defined.py +69 -23
- mindspore/dataset/engine/iterators.py +2 -2
- mindspore/dataset/engine/obs/config_loader.py +2 -2
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
- mindspore/dataset/transforms/c_transforms.py +2 -2
- mindspore/dataset/transforms/py_transforms.py +7 -3
- mindspore/dataset/transforms/transforms.py +10 -6
- mindspore/dataset/vision/__init__.py +1 -1
- mindspore/dataset/vision/py_transforms.py +8 -8
- mindspore/dataset/vision/transforms.py +17 -5
- mindspore/dataset/vision/utils.py +632 -21
- mindspore/dataset/vision/validators.py +1 -0
- mindspore/device_context/ascend/device.py +1 -1
- mindspore/device_context/ascend/op_tuning.py +35 -1
- mindspore/device_context/gpu/__init__.py +2 -2
- mindspore/device_context/gpu/device.py +1 -1
- mindspore/device_context/gpu/op_precision.py +4 -2
- mindspore/device_context/gpu/op_tuning.py +6 -3
- mindspore/device_manager.py +16 -9
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +5 -4
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/optim/adadelta.py +13 -20
- mindspore/experimental/optim/adagrad.py +15 -22
- mindspore/experimental/optim/adam.py +17 -24
- mindspore/experimental/optim/adamax.py +14 -22
- mindspore/experimental/optim/adamw.py +28 -34
- mindspore/experimental/optim/asgd.py +15 -25
- mindspore/experimental/optim/lr_scheduler.py +27 -45
- mindspore/experimental/optim/nadam.py +14 -24
- mindspore/experimental/optim/optimizer.py +13 -23
- mindspore/experimental/optim/radam.py +18 -24
- mindspore/experimental/optim/rmsprop.py +14 -25
- mindspore/experimental/optim/rprop.py +15 -26
- mindspore/experimental/optim/sgd.py +9 -19
- mindspore/hal/__init__.py +4 -4
- mindspore/hal/contiguous_tensors_handle.py +2 -2
- mindspore/hal/memory.py +1 -0
- mindspore/include/api/cell.h +65 -5
- mindspore/include/api/cfg.h +24 -7
- mindspore/include/api/context.h +1 -0
- mindspore/include/api/delegate.h +10 -2
- mindspore/include/api/dual_abi_helper.h +100 -19
- mindspore/include/api/graph.h +14 -1
- mindspore/include/api/kernel.h +16 -3
- mindspore/include/api/kernel_api.h +9 -1
- mindspore/include/api/metrics/accuracy.h +9 -0
- mindspore/include/api/model.h +8 -1
- mindspore/include/api/model_group.h +4 -0
- mindspore/include/api/model_parallel_runner.h +2 -0
- mindspore/include/api/status.h +48 -10
- mindspore/include/api/types.h +8 -3
- mindspore/include/c_api/model_c.h +0 -58
- mindspore/include/c_api/tensor_c.h +0 -26
- mindspore/include/dataset/constants.h +9 -0
- mindspore/include/dataset/vision_ascend.h +1 -1
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/cifar10.py +61 -11
- mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mindspore_ops_host.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +4 -44
- mindspore/mint/distributed/__init__.py +5 -0
- mindspore/mint/distributed/distributed.py +425 -19
- mindspore/mint/nn/__init__.py +1 -1
- mindspore/mint/nn/functional.py +53 -6
- mindspore/mint/nn/layer/_functions.py +163 -294
- mindspore/mint/nn/layer/activation.py +8 -6
- mindspore/mint/nn/layer/conv.py +125 -101
- mindspore/mint/nn/layer/normalization.py +11 -25
- mindspore/mint/optim/adam.py +19 -18
- mindspore/mint/optim/adamw.py +14 -8
- mindspore/mint/optim/sgd.py +5 -5
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/cell.py +488 -620
- mindspore/nn/grad/cell_grad.py +11 -12
- mindspore/nn/layer/activation.py +36 -36
- mindspore/nn/layer/basic.py +74 -77
- mindspore/nn/layer/channel_shuffle.py +4 -4
- mindspore/nn/layer/combined.py +4 -2
- mindspore/nn/layer/conv.py +86 -85
- mindspore/nn/layer/dense.py +9 -7
- mindspore/nn/layer/embedding.py +50 -52
- mindspore/nn/layer/image.py +38 -40
- mindspore/nn/layer/math.py +111 -112
- mindspore/nn/layer/normalization.py +56 -44
- mindspore/nn/layer/pooling.py +58 -63
- mindspore/nn/layer/rnn_cells.py +33 -33
- mindspore/nn/layer/rnns.py +56 -56
- mindspore/nn/layer/thor_layer.py +74 -73
- mindspore/nn/layer/transformer.py +11 -1
- mindspore/nn/learning_rate_schedule.py +20 -20
- mindspore/nn/loss/loss.py +79 -81
- mindspore/nn/optim/adam.py +2 -4
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/lamb.py +1 -3
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/tft_wrapper.py +2 -3
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/utils.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -1
- mindspore/nn/probability/distribution/poisson.py +2 -1
- mindspore/nn/sparse/sparse.py +3 -3
- mindspore/nn/wrap/cell_wrapper.py +73 -42
- mindspore/nn/wrap/grad_reducer.py +37 -52
- mindspore/nn/wrap/loss_scale.py +72 -74
- mindspore/numpy/array_creations.py +7 -7
- mindspore/numpy/fft.py +1 -1
- mindspore/numpy/math_ops.py +1 -1
- mindspore/numpy/utils_const.py +1 -1
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
- mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/{experimental/es/__init__.py → ops/_op_impl/cpu/joinedstr_op.py} +12 -6
- mindspore/ops/_vmap/vmap_array_ops.py +6 -13
- mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +29 -10
- mindspore/ops/auto_generate/gen_extend_func.py +5 -55
- mindspore/ops/auto_generate/gen_ops_def.py +753 -273
- mindspore/ops/auto_generate/gen_ops_prim.py +1687 -958
- mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
- mindspore/ops/composite/__init__.py +10 -0
- mindspore/ops/composite/base.py +9 -5
- mindspore/ops/composite/multitype_ops/__init__.py +12 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +132 -108
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
- mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
- mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
- mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
- mindspore/ops/function/__init__.py +4 -1
- mindspore/ops/function/_add_attr_func.py +11 -6
- mindspore/ops/function/array_func.py +17 -100
- mindspore/ops/function/debug_func.py +8 -5
- mindspore/ops/function/grad/grad_func.py +5 -13
- mindspore/ops/function/math_func.py +65 -399
- mindspore/ops/function/nn_func.py +44 -61
- mindspore/ops/function/other_func.py +4 -1
- mindspore/ops/function/random_func.py +31 -4
- mindspore/ops/functional.py +2 -3
- mindspore/ops/functional_overload.py +486 -18
- mindspore/ops/op_info_register.py +21 -0
- mindspore/ops/operations/__init__.py +5 -2
- mindspore/ops/operations/_custom_ops_utils.py +675 -8
- mindspore/ops/operations/_inner_ops.py +14 -18
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +4 -50
- mindspore/ops/operations/comm_ops.py +186 -41
- mindspore/ops/operations/custom_ops.py +244 -175
- mindspore/ops/operations/debug_ops.py +55 -4
- mindspore/ops/operations/image_ops.py +13 -13
- mindspore/ops/operations/manually_defined/ops_def.py +27 -28
- mindspore/ops/operations/math_ops.py +8 -9
- mindspore/ops/operations/nn_ops.py +6 -7
- mindspore/ops/primitive.py +9 -20
- mindspore/ops/tensor_method.py +52 -11
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
- mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
- mindspore/ops_generate/api/functions_cc_generator.py +58 -10
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
- mindspore/ops_generate/common/base_generator.py +14 -0
- mindspore/ops_generate/common/gen_constants.py +7 -2
- mindspore/ops_generate/common/gen_utils.py +0 -19
- mindspore/ops_generate/common/op_proto.py +11 -4
- mindspore/ops_generate/common/template.py +88 -11
- mindspore/ops_generate/gen_ops.py +1 -1
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
- mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -16
- mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
- mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
- mindspore/parallel/_auto_parallel_context.py +9 -17
- mindspore/parallel/_cell_wrapper.py +106 -40
- mindspore/parallel/_parallel_serialization.py +4 -3
- mindspore/parallel/_ps_context.py +4 -6
- mindspore/parallel/_tensor.py +167 -12
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/transformer.py +17 -12
- mindspore/parallel/_utils.py +5 -11
- mindspore/parallel/auto_parallel.py +33 -12
- mindspore/parallel/checkpoint_convert.py +3 -3
- mindspore/parallel/checkpoint_transform.py +5 -1
- mindspore/parallel/cluster/process_entity/_api.py +88 -49
- mindspore/parallel/cluster/process_entity/_utils.py +95 -7
- mindspore/parallel/cluster/run.py +48 -7
- mindspore/parallel/function/__init__.py +8 -1
- mindspore/parallel/function/reshard_func.py +7 -6
- mindspore/parallel/nn/__init__.py +15 -2
- mindspore/parallel/nn/parallel_cell_wrapper.py +50 -14
- mindspore/parallel/nn/parallel_grad_reducer.py +7 -14
- mindspore/parallel/shard.py +9 -23
- mindspore/parallel/transform_safetensors.py +468 -174
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +3 -0
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
- mindspore/profiler/analysis/task_manager.py +1 -1
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +10 -9
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +43 -23
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
- mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
- mindspore/profiler/common/constant.py +16 -0
- mindspore/profiler/common/msprof_cmd_tool.py +2 -2
- mindspore/profiler/common/path_manager.py +9 -0
- mindspore/profiler/common/profiler_context.py +50 -29
- mindspore/profiler/common/profiler_info.py +0 -16
- mindspore/profiler/common/profiler_meta_data.py +1 -0
- mindspore/profiler/common/profiler_op_analyse.py +239 -0
- mindspore/profiler/common/profiler_output_path.py +23 -8
- mindspore/profiler/common/profiler_parameters.py +128 -35
- mindspore/profiler/dynamic_profile/__init__.py +0 -0
- mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
- mindspore/profiler/dynamic_profiler.py +374 -338
- mindspore/profiler/envprofiler.py +42 -12
- mindspore/profiler/experimental_config.py +112 -7
- mindspore/profiler/mstx.py +33 -12
- mindspore/profiler/platform/__init__.py +2 -3
- mindspore/profiler/platform/cpu_profiler.py +10 -4
- mindspore/profiler/platform/npu_profiler.py +30 -20
- mindspore/profiler/profiler.py +218 -154
- mindspore/profiler/profiler_action_controller.py +65 -77
- mindspore/profiler/profiler_interface.py +2 -2
- mindspore/profiler/schedule.py +10 -4
- mindspore/rewrite/common/config.py +1 -0
- mindspore/rewrite/common/namer.py +1 -0
- mindspore/rewrite/common/namespace.py +1 -0
- mindspore/rewrite/node/node.py +31 -11
- mindspore/rewrite/parsers/assign_parser.py +1 -1
- mindspore/rewrite/symbol_tree/symbol_tree.py +2 -2
- mindspore/run_check/_check_version.py +7 -10
- mindspore/runtime/__init__.py +8 -6
- mindspore/runtime/event.py +10 -4
- mindspore/runtime/executor.py +87 -45
- mindspore/runtime/memory.py +22 -30
- mindspore/runtime/thread_bind_core.py +299 -165
- mindspore/safeguard/rewrite_obfuscation.py +12 -13
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/_utils.py +9 -5
- mindspore/train/amp.py +43 -23
- mindspore/train/callback/__init__.py +5 -5
- mindspore/train/callback/_callback.py +2 -1
- mindspore/train/callback/_checkpoint.py +4 -14
- mindspore/train/callback/_flops_collector.py +11 -7
- mindspore/train/callback/_landscape.py +0 -1
- mindspore/train/callback/_train_fault_tolerance.py +72 -18
- mindspore/train/data_sink.py +15 -6
- mindspore/train/dataset_helper.py +14 -5
- mindspore/train/model.py +49 -47
- mindspore/train/serialization.py +168 -126
- mindspore/train/summary/summary_record.py +13 -2
- mindspore/train/train_thor/model_thor.py +2 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +0 -6
- mindspore/utils/runtime_execution_order_check.py +162 -78
- mindspore/utils/sdc_detect.py +68 -0
- mindspore/utils/utils.py +14 -17
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/METADATA +5 -4
- {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/RECORD +400 -439
- mindspore/_deprecated/jit.py +0 -198
- mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
- mindspore/communication/_hccl_management.py +0 -297
- mindspore/experimental/es/embedding_service.py +0 -891
- mindspore/experimental/es/embedding_service_layer.py +0 -581
- mindspore/profiler/common/validator/__init__.py +0 -14
- mindspore/profiler/common/validator/validate_path.py +0 -84
- mindspore/profiler/parser/__init__.py +0 -14
- mindspore/profiler/parser/aicpu_data_parser.py +0 -272
- mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
- mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
- mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
- mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
- mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
- mindspore/profiler/parser/ascend_flops_generator.py +0 -116
- mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
- mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
- mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
- mindspore/profiler/parser/ascend_op_generator.py +0 -334
- mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
- mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
- mindspore/profiler/parser/base_timeline_generator.py +0 -483
- mindspore/profiler/parser/container.py +0 -229
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
- mindspore/profiler/parser/flops_parser.py +0 -531
- mindspore/profiler/parser/framework_enum.py +0 -111
- mindspore/profiler/parser/framework_parser.py +0 -464
- mindspore/profiler/parser/framework_struct.py +0 -61
- mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
- mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
- mindspore/profiler/parser/hccl_parser.py +0 -573
- mindspore/profiler/parser/hwts_log_parser.py +0 -122
- mindspore/profiler/parser/integrator.py +0 -526
- mindspore/profiler/parser/memory_usage_parser.py +0 -277
- mindspore/profiler/parser/minddata_analyzer.py +0 -800
- mindspore/profiler/parser/minddata_parser.py +0 -186
- mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
- mindspore/profiler/parser/op_intermediate_parser.py +0 -149
- mindspore/profiler/parser/optime_parser.py +0 -250
- mindspore/profiler/parser/profiler_info.py +0 -213
- mindspore/profiler/parser/step_trace_parser.py +0 -666
- mindspore/utils/hooks.py +0 -81
- /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
- {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/WHEEL +0 -0
- {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/entry_points.txt +0 -0
- {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/top_level.txt +0 -0
mindspore/common/dtype.py
CHANGED
@@ -17,20 +17,25 @@
 """Data type for MindSpore."""
 from __future__ import absolute_import
 
+import builtins
 import enum
 from inspect import isfunction
 import numpy as np
+from mindspore import log as logger
 from mindspore._c_expression import typing
 from mindspore._c_expression.typing import Type
-from mindspore._c_expression.np_dtypes import
-
+from mindspore._c_expression.np_dtypes import np_dtype_valid
+
+if np_dtype_valid(False):
     from mindspore._c_expression.np_dtypes import bfloat16 as np_bfloat16
 
+# bool, int, float are not defined in __all__ to avoid conflict with built-in types.
 __dtype__ = [
+    "bool_",
     "int8", "byte",
     "int16", "short",
     "int32", "intc",
-    "int64", "intp",
+    "int64", "long", "intp",
     "uint8", "ubyte",
     "uint16", "ushort",
     "uint32", "uintc",
@@ -38,15 +43,15 @@ __dtype__ = [
     "float16", "half",
     "float32", "single",
     "float64", "double",
-    "
-    "
-    "
+    "complex64", "cfloat",
+    "complex128", "cdouble",
+    "qint4x2", "bfloat16",
+    "float8_e4m3fn", "float8_e5m2", "hifloat8",
+    "int_", "uint", "float_",
+    "list_", "tuple_", "string",
     "number", "tensor_type",
-    "
-    "TensorType", "
-    "Type", "Int",
-    "complex64", "complex128",
-    "bfloat16", "qint4x2"
+    "type_none", "_null",
+    "TensorType", "Type", "Int",
 ]
 
 __method__ = [
@@ -59,16 +64,18 @@ __all__.extend(__dtype__)
 __all__.extend(__method__)
 
 # type definition
-
+bool = typing.kBool
+bool_ = bool
 
-qint4x2 = typing.kInt4
 int8 = typing.kInt8
 byte = int8
 int16 = typing.kInt16
 short = int16
 int32 = typing.kInt32
+int = int32
 intc = int32
 int64 = typing.kInt64
+long = int64
 intp = int64
 
 uint8 = typing.kUInt8
@@ -83,12 +90,21 @@ uintp = uint64
 float16 = typing.kFloat16
 half = float16
 float32 = typing.kFloat32
+float = float32
 single = float32
 float64 = typing.kFloat64
 double = float64
+
+qint4x2 = typing.kInt4
+float8_e4m3fn = typing.kFloat8E4M3FN
+float8_e5m2 = typing.kFloat8E5M2
+hifloat8 = typing.kHiFloat8
 bfloat16 = typing.kBFloat16
+
 complex64 = typing.kComplex64
+cfloat = complex64
 complex128 = typing.kComplex128
+cdouble = complex128
 
 number = typing.kNumber
 int_ = typing.kInt
@@ -131,39 +147,25 @@ AnythingType = typing.TypeAny
 RefType = typing.RefType
 _NullType = typing.TypeNull
 
-number_type = (int8,
-
-               int32,
-               int64,
-               uint8,
-               uint16,
-               uint32,
-               uint64,
-               float16,
-               float32,
-               float64,
-               bfloat16,
-               complex64,
-               complex128,
-               qint4x2,)
+number_type = (int8, int16, int32, int64, uint8, uint16, uint32, uint64, float16, float32, float64, bfloat16, complex64,
+               complex128, qint4x2, float8_e4m3fn, float8_e5m2, hifloat8)
 
 int_type = (int8, int16, int32, int64,)
 uint_type = (uint8, uint16, uint32, uint64,)
-float_type = (float16, float32, float64, bfloat16,)
-signed_type = (int8,
-
-               double, bfloat16, complex64, complex128)
+float_type = (float16, float32, float64, bfloat16, float8_e4m3fn, float8_e5m2, hifloat8)
+signed_type = (int8, int16, int32, int64, float16, float32, float64, bfloat16, complex64, complex128, qint4x2,
+               float8_e4m3fn, float8_e5m2, hifloat8)
 complex_type = (complex64, complex128,)
-all_types = (
-
+all_types = (bool, int8, int16, int32, int64, uint8, uint16, uint32, uint64, float16, float32, float64, bfloat16,
+             complex64, complex128, qint4x2, float8_e4m3fn, float8_e5m2, hifloat8)
 
 _simple_types = {
     list: list_,
     tuple: tuple_,
     type(None): type_none,
-    bool: bool_,
-    int: int64,
-    float: float64,
+    builtins.bool: bool_,
+    builtins.int: int64,
+    builtins.float: float64,
     complex: complex128,
     str: string,
     np.bool_: bool_,
@@ -186,6 +188,9 @@ def pytype_to_dtype(obj):
     """
     Convert python type to MindSpore type.
 
+    Note:
+        The interface is deprecated from version 2.7 and will be removed in a future version.
+
     Args:
         obj (type): A python type object.
 
@@ -201,6 +206,15 @@ def pytype_to_dtype(obj):
         >>> print(out)
         Bool
     """
+    logger.warning("The interface 'mindspore.pytype_to_dtype' is deprecated from version 2.7 "
+                   "and will be removed in a future version.")
+    return _pytype_to_dtype(obj)
+
+
+def _pytype_to_dtype(obj):
+    """
+    Convert python type to MindSpore type.
+    """
 
     if isinstance(obj, np.dtype):
         obj = obj.type
@@ -218,6 +232,9 @@ def get_py_obj_dtype(obj):
     """
    Get the MindSpore data type, which corresponds to python type or variable.
 
+    Note:
+        The interface is deprecated from version 2.7 and will be removed in a future version.
+
     Args:
         obj (type): An object of python type, or a variable of python type.
 
@@ -229,6 +246,15 @@ def get_py_obj_dtype(obj):
         >>> ms.get_py_obj_dtype(1)
         mindspore.int64
     """
+    logger.warning("The interface 'mindspore.get_py_obj_dtype' is deprecated from version 2.7 "
+                   "and will be removed in a future version.")
+    return _get_py_obj_dtype(obj)
+
+
+def _get_py_obj_dtype(obj):
+    """
+    Get the MindSpore data type, which corresponds to python type or variable.
+    """
     # Tensor
     if hasattr(obj, 'shape') and hasattr(obj, 'dtype') and isinstance(obj.dtype, typing.Type):
         return TensorType(obj.dtype)
@@ -252,6 +278,9 @@ def dtype_to_nptype(type_):
     r"""
     Convert MindSpore dtype to numpy data type.
 
+    Note:
+        The interface is deprecated from version 2.7 and will be removed in a future version.
+
     Args:
         type\_ (:class:`mindspore.dtype`): MindSpore's dtype.
 
@@ -263,6 +292,15 @@ def dtype_to_nptype(type_):
         >>> ms.dtype_to_nptype(ms.int8)
         <class 'numpy.int8'>
     """
+    logger.warning("The interface 'mindspore.dtype_to_nptype' is deprecated from version 2.7 "
+                   "and will be removed in a future version.")
+    return _dtype_to_nptype(type_)
+
+
+def _dtype_to_nptype(type_):
+    """
+    Convert MindSpore dtype to numpy data type.
+    """
     _dtype_nptype_dict = {
         bool_: np.bool_,
         int8: np.int8,
@@ -280,9 +318,12 @@ def dtype_to_nptype(type_):
         complex128: np.complex128,
     }
     if type_ == bfloat16:
-        if not
-            raise TypeError(
-
+        if not np_dtype_valid(True):
+            raise TypeError(
+                "The Numpy bfloat16 data type is not supported now, please ensure that the current "
+                "Numpy version is not less than the version when the mindspore is compiled, "
+                "and the major versions are same."
+            )
         return np_bfloat16
     return _dtype_nptype_dict[type_]
 
@@ -291,6 +332,9 @@ def dtype_to_pytype(type_):
     r"""
     Convert MindSpore dtype to python data type.
 
+    Note:
+        The interface is deprecated from version 2.7 and will be removed in a future version.
+
     Args:
         type\_ (:class:`mindspore.dtype`): MindSpore's dtype.
 
@@ -303,23 +347,31 @@ def dtype_to_pytype(type_):
         >>> print(out)
         <class 'bool'>
     """
+    logger.warning("The interface 'mindspore.dtype_to_pytype' is deprecated from version 2.7 "
+                   "and will be removed in a future version.")
+    return _dtype_to_pytype(type_)
 
+
+def _dtype_to_pytype(type_):
+    """
+    Convert MindSpore dtype to python data type.
+    """
     return {
-        bool_: bool,
-        int_: int,
-        int8: int,
-        int16: int,
-        int32: int,
-        int64: int,
-        uint8: int,
-        uint16: int,
-        uint32: int,
-        uint64: int,
-        float_: float,
-        float16: float,
-        float32: float,
-        float64: float,
-        bfloat16: float,
+        bool_: builtins.bool,
+        int_: builtins.int,
+        int8: builtins.int,
+        int16: builtins.int,
+        int32: builtins.int,
+        int64: builtins.int,
+        uint8: builtins.int,
+        uint16: builtins.int,
+        uint32: builtins.int,
+        uint64: builtins.int,
+        float_: builtins.float,
+        float16: builtins.float,
+        float32: builtins.float,
+        float64: builtins.float,
+        bfloat16: builtins.float,
         list_: list,
         tuple_: tuple,
         string: str,
@@ -335,7 +387,6 @@ def _issubclass_(type_, dtype):
     return typing.is_subclass(type_, dtype)
 
 
-
 def type_size_in_bytes(dtype):
     """
     Return type size in bytes.
@@ -407,7 +458,7 @@ class QuantDtype(enum.Enum):
     def __str__(self):
         return f"{self.name}"
 
-    def value(self) -> int:
+    def value(self) -> builtins.int:
         """
         Return value of `QuantDtype`. This interface is currently used to serialize or deserialize `QuantDtype`
         primarily.
mindspore/common/dump.py
CHANGED
@@ -25,15 +25,19 @@ def set_dump(target, enabled=True):
     Enable or disable dump for the `target` and its contents.
 
     `target` should be an instance of :class:`mindspore.nn.Cell` or :class:`mindspore.ops.Primitive` .
-    Please note that this API takes effect only when
-    field in
-
+    Please note that this API takes effect only when the Dump function is enabled, and the `dump_mode`
+    field in the Dump configuration file is set to `"2"` with the `ms_backend` compilation backend
+    (please refer to the backend parameter in
+    `jit <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.jit.html>`_).
+    See the `dump document <https://www.mindspore.cn/tutorials/en/master/debug/dump.html>`_ for details.
     The default enabled status for
     a :class:`mindspore.nn.Cell` or :class:`mindspore.ops.Primitive` is False.
 
     Note:
-        1. This API is only
-
+        1. This API is only available for JIT compilation, requires 'Ascend' as the device_target and
+           `ms_backend` as the compilation backend (please refer to the backend parameter in
+           `jit <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.jit.html>`_),
+           and does not support fused operators.
        2. This API only supports being called before training starts.
           If you call this API during training, it may not be effective.
        3. After using `set_dump(Cell, True)` , operators in forward and backward
@@ -64,9 +68,8 @@ def set_dump(target, enabled=True):
         >>> import numpy as np
         >>> import mindspore as ms
         >>> import mindspore.nn as nn
-        >>> from mindspore import Tensor, set_dump
+        >>> from mindspore import Tensor, set_dump, jit
         >>>
-        >>> ms.set_context(mode=ms.GRAPH_MODE)
         >>> ms.set_device(device_target="Ascend")
         >>>
         >>> class MyNet(nn.Cell):
@@ -75,6 +78,7 @@ def set_dump(target, enabled=True):
         ...         self.conv1 = nn.Conv2d(5, 6, 5, pad_mode='valid')
         ...         self.relu1 = nn.ReLU()
         ...
+        ...     @jit
         ...     def construct(self, x):
         ...         x = self.conv1(x)
         ...         x = self.relu1(x)
@@ -109,15 +113,6 @@ def set_dump(target, enabled=True):
             "If you have Ascend device, consider set device_target to Ascend "
             "before calling set_dump.".format(current_target))
 
-    current_mode = context.get_context("mode")
-    if current_mode != context.GRAPH_MODE:
-        # We will not return here in case user changed mode later.
-        warn(
-            "Current mode is PYNATIVE_MODE, which is not supported by set_dump. "
-            "Only GRAPH_MODE is supported currently. "
-            "Consider set mode to GRAPH_MODE "
-            "before calling set_dump.")
-
     # The actual set dump logic.
     if isinstance(target, nn.Cell):
         target.add_flags(dump=enabled)
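
The dump.py changes drop the old PYNATIVE_MODE warning and rewrite the docstring around JIT compilation: set_dump is now documented to take effect only when the Dump function is enabled with dump_mode "2" in the Dump configuration file and the graph is compiled with jit (ms_backend) on Ascend. A short sketch following the updated docstring; the Dump config path below is illustrative, not taken from the diff:

# Sketch following the updated set_dump docstring above (assumes an Ascend
# device and a Dump config JSON whose dump_mode is "2"; the path is illustrative).
import os
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor, set_dump, jit

os.environ["MINDSPORE_DUMP_CONFIG"] = "/path/to/dump_config.json"  # hypothetical path
ms.set_device(device_target="Ascend")

class MyNet(nn.Cell):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(5, 6, 5, pad_mode='valid')
        self.relu1 = nn.ReLU()

    @jit  # in 2.7.0 set_dump only takes effect for JIT-compiled graphs
    def construct(self, x):
        return self.relu1(self.conv1(x))

net = MyNet()
set_dump(net)  # enable dump for the cell and its contents before training starts
out = net(Tensor(np.ones([1, 5, 10, 10]), ms.float32))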
mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py}
RENAMED
@@ -1,6 +1,6 @@
 # This is the Python adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
 #
-# Copyright 2020-
+# Copyright 2020-2025 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -261,7 +261,12 @@ class _AutoIdentifyDynamicShape:
                 return False
         return True
 
-
+    @staticmethod
+    def _is_invalid_shape(shape):
+        """Check if input shape is valid"""
+        return is_shape_unknown(shape) or not shape
+
+    def _is_enable_auto_dynamic_shape(self, args_list, is_sink_mode, enable_jit_dynamic=False):
         """is enable auto identify shape"""
         if not is_sink_mode and not args_list:
             return False
@@ -270,10 +275,12 @@ class _AutoIdentifyDynamicShape:
                 continue
             if not isinstance(elem, (list, tuple, Tensor, int, float)):
                 return False
-            if isinstance(elem, Tensor) and
+            if isinstance(elem, Tensor) and \
+                    self._is_invalid_shape(elem.shape) and \
+                    not enable_jit_dynamic:
                 return False
             if not is_sink_mode and isinstance(elem, (list, tuple)):
-                return self._is_enable_auto_dynamic_shape(elem, is_sink_mode)
+                return self._is_enable_auto_dynamic_shape(elem, is_sink_mode, enable_jit_dynamic)
         return True
 
     @staticmethod
@@ -328,10 +335,10 @@ class _AutoIdentifyDynamicShape:
            logger.info((f'generalize with generalize shape cache, compile args shape = {res_shape}'))
        return new_generalize_shape
 
-    def auto_dynamic_generate_compile_args(self, args_list, is_sink_mode):
+    def auto_dynamic_generate_compile_args(self, args_list, is_sink_mode, enable_jit_dynamic=False):
        """generate compile args in auto dynamic shape"""
        if not self.is_enable_auto_dynamic_shape or \
-                not self._is_enable_auto_dynamic_shape(args_list, is_sink_mode) or \
+                not self._is_enable_auto_dynamic_shape(args_list, is_sink_mode, enable_jit_dynamic) or \
                not self._check_input_number_and_type(args_list):
            self.is_enable_auto_dynamic_shape = False
            return args_list
@@ -475,11 +482,13 @@
 _auto_dynamic_shape = _AutoIdentifyDynamicShape()
 
 
-def get_auto_dynamic_shape_args(compile_args, key_id, enable_auto_dynamic=False):
+def get_auto_dynamic_shape_args(compile_args, key_id, enable_auto_dynamic=False, enable_jit_dynamic=False):
     """get auto dynamic shape args."""
     if key_id not in auto_dynamic_shape_dict:
         auto_dynamic_shape_dict[key_id] = _AutoIdentifyDynamicShape(enable_auto_dynamic)
-    compile_args = auto_dynamic_shape_dict[key_id].auto_dynamic_generate_compile_args(
+    compile_args = auto_dynamic_shape_dict[key_id].auto_dynamic_generate_compile_args(
+        compile_args, False, enable_jit_dynamic
+    )
     return compile_args
 
 
@@ -487,18 +496,3 @@ def update_auto_dynamic_shape_phase(compile_args, key_id, phase):
     """update auto dynamic shape phase."""
     if key_id in auto_dynamic_shape_dict:
         auto_dynamic_shape_dict[key_id].update_phase_and_compile_args(compile_args, phase, False)
-
-
-def get_auto_dynamic_shape_args_with_check_input_signature(compile_args, key_id, input_signature,
-                                                           enable_auto_dynamic=False):
-    """get auto dynamic shape args."""
-    if input_signature is None:
-        return get_auto_dynamic_shape_args(compile_args, key_id, enable_auto_dynamic)
-    return compile_args
-
-
-def update_auto_dynamic_shape_phase_with_check_input_signature(compile_args, key_id, phase, input_signature):
-    """update auto dynamic shape phase."""
-    if input_signature is None:
-        if key_id in auto_dynamic_shape_dict:
-            auto_dynamic_shape_dict[key_id].update_phase_and_compile_args(compile_args, phase, False)
mindspore/common/dynamic_shape/enable_dynamic.py
ADDED
@@ -0,0 +1,197 @@
+# This is the Python adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
+#
+# Copyright 2025 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Define enable_dynamic decorator."""
+import types
+import inspect
+from mindspore import log as logger
+from mindspore.common.tensor import Tensor
+from mindspore.common._utils import get_func, is_dim_unknown
+from mindspore.common.dynamic_shape.auto_dynamic_shape import SHAPE_DIM_ANY
+
+
+ENABLE_DYNAMIC = "__enable_dynamic__"
+
+
+def _check_element_valid(item, shape, name):
+    """Check elements in shape."""
+    if item is not SHAPE_DIM_ANY and (isinstance(item, int) and item <= 0):
+        raise TypeError(f"The argument '{name}' has invalid shape '{shape}', only supports None " \
+                        f"or a tuple/list of positive integers and None.")
+    return True
+
+
+def _check_arg_shape_valid(arg, name):
+    """Check if the shape of arg is valid"""
+    #if the shape of arg is None
+    if isinstance(arg, Tensor) and is_dim_unknown(arg.shape):
+        return True
+    if isinstance(arg, Tensor) and \
+            SHAPE_DIM_ANY in arg.shape and \
+            all(_check_element_valid(item, arg.shape, name) for item in arg.shape):
+        return True
+    if isinstance(arg, (tuple, list)) and any(_check_arg_shape_valid(item, name) for item in arg):
+        return True
+    return False
+
+
+def _check_arg_type_valid(arg, name):
+    """Check if the type of arg is valid."""
+    if isinstance(arg, Tensor):
+        return
+    if isinstance(arg, (tuple, list)):
+        for item in arg:
+            _check_arg_type_valid(item, name)
+    else:
+        raise TypeError(f"The decorator enable_dynamic only supports Tensor " \
+                        f"or a tuple/list of Tensor, but the argument : {name} is type of:{type(arg)}.")
+
+
+def _check_input_valid(arg):
+    """Check if real argument is valid."""
+    if isinstance(arg, Tensor):
+        if not all(isinstance(item, int) and item > 0 for item in arg.shape):
+            raise ValueError(f"When using decorator enable_dynamic, the corresponding shape of inputs should be " \
+                             f"a tuple/list of positive integers")
+    elif isinstance(arg, (tuple, list)):
+        for item in arg:
+            _check_input_valid(item)
+    else:
+        raise TypeError(f"When using decorator enable_dynamic, the corresponding inputs only supports Tensor " \
+                        f"or a tuple/list of Tensor.")
+
+
+def _check_arg_type_shape(arg, dyn_arg, name):
+    """Check the type, shape and dtype of real argument."""
+    if isinstance(arg, Tensor) and isinstance(dyn_arg, Tensor):
+        if arg.dtype != dyn_arg.dtype:
+            raise TypeError(f"When using decorator enable_dynamic, input tensor dtype = {arg.dtype}, " \
+                            f"dynamic tensor dtype = {dyn_arg.dtype}, tensor dtypes are not the same.")
+        if is_dim_unknown(dyn_arg.shape):
+            return
+        if len(arg.shape) != len(dyn_arg.shape) or \
+                any(y is not SHAPE_DIM_ANY and x != y for x, y in zip(arg.shape, dyn_arg.shape)):
+            raise ValueError(f"When using decorator enable_dynamic, input tensor shape = {arg.shape}, " \
+                             f"dynamic tensor shape = {dyn_arg.shape}, tensor shapes are not the same.")
+    elif isinstance(arg, (tuple, list)) and isinstance(dyn_arg, (tuple, list)):
+        if len(arg) != len(dyn_arg):
+            raise ValueError("Input sequences must have the same structure and length.")
+        for x, y in zip(arg, dyn_arg):
+            _check_arg_type_shape(x, y, name)
+    else:
+        raise TypeError(f"When using decorator enable_dynamic, the type between argument '{name}' " \
+                        f"and corresponding input are not the same.")
+
+
+def generate_dynamic_sequence_args(args_list, dyn_args_list):
+    """Generate dynamic shapes for input sequence"""
+    if isinstance(args_list, Tensor):
+        return dyn_args_list if args_list.shape != dyn_args_list.shape else args_list
+    result = []
+    for x, y in zip(args_list, dyn_args_list):
+        result.append(generate_dynamic_sequence_args(x, y))
+    return type(args_list)(result)
+
+
+def generate_dynamic_tensor_args(args_list, dynamic_shapes):
+    """Generate compile args with dynamic_shapes"""
+    new_compile_args = list(args_list)
+    for index, arg in enumerate(args_list):
+        if isinstance(arg, (tuple, list)) and not hasattr(arg, "__ms_mutable__"):
+            raise ValueError(f"When using decorator enable_dynamic, the corresponding attribute of input should be " \
+                             f"mutable(tuple/list)")
+        if index not in dynamic_shapes:
+            continue
+        _check_input_valid(arg)
+        name, dyn_arg = dynamic_shapes[index]
+        _check_arg_type_shape(arg, dyn_arg, name)
+        new_compile_args[index] = generate_dynamic_sequence_args(arg, dyn_arg)
+    logger.debug(f"args_list: {args_list}, dynamic_shapes: {dynamic_shapes}, " \
+                 f"new_compile_args: {new_compile_args}")
+    return new_compile_args
+
+
+def enable_dynamic(**kwargs):
+    """
+    Use to specify whether the shape of the parameter is dynamic shape or dynamic rank.
+
+    Note:
+        - It needs to be used in conjunction with the JIT interface. Without using the JIT decorator,
+          the dynamic shape and dynamic rank functions will not be enabled.
+        - In the scenario where both set_context(mode=GRAPH_MODE) and nn.Cell are set simultaneously,
+          use enabled_dynamic to report an error.
+
+    Args:
+        kwargs (dict): The input types are Tensor, tuple[Tensor] and list[Tensor]. If one or
+            more dimensions in the shape of the parameter need to be specified as dynamic shapes,
+            the corresponding dimensions in the shape can be set to None. If the shape that needs
+            to generate specified parameters is dynamic rank, the shape can be set to None.
+
+    Returns:
+        Function, return a function that specifies the dynamic shape information of the parameter.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore as ms
+        >>> from mindspore import Tensor
+        >>> from mindspore import enable_dynamic
+        >>> from mindspore import jit
+        ...
+        >>> x = Tensor(np.random.randn(2, 3), ms.float32)
+        >>> y = Tensor(np.random.randn(2, 3), ms.float32)
+        ...
+        >>> # Specify parameter y as dynamic shape
+        >>> @enable_dynamic(y=Tensor(shape=None, dtype=ms.float32))
+        >>> @jit
+        >>> def func(x, y):
+        ...     return x + 1, y + 1
+        ...
+        >>> out = func(x, y)
+    """
+    # Check inputs at first.
+    if not kwargs:
+        raise ValueError(f"When using decorator enable_dynamic, the input cannot be empty!")
+    for name, arg in kwargs.items():
+        _check_arg_type_valid(arg, name)
+        if not _check_arg_shape_valid(arg, name):
+            raise TypeError(f"When using decorator enable_dynamic, the shape of argument '{name}' " \
+                            f"at least have one None.")
+
+    def decorator(func):
+        if not isinstance(func, (types.FunctionType, types.MethodType)):
+            raise ValueError(f"Decorator enable_dynamic can only be used for function or method " \
+                             f"decrocated by ms.jit, but got {func}.")
+        signature = inspect.signature(func)
+        sigs_name = [sig_name for sig_name in signature.parameters if sig_name != "self"]
+        if len(kwargs) > len(sigs_name):
+            raise ValueError(f"When using decorator enable_dynamic, the number of arguments {len(kwargs)} " \
+                             f"exceeds the number of function arguments {len(sigs_name)}.")
+        # Generate dynamic args.
+        dynamic_args = dict()
+        for key, value in kwargs.items():
+            index = sigs_name.index(key)
+            if index in dynamic_args:
+                raise ValueError(f"keyword argument repeated: {key}")
+            dynamic_args[index] = (key, value)
+        # Set dynamic_tensor_shape to func.
+        inner_func = inspect.unwrap(func, stop=lambda f: not hasattr(f, '__wrapped__'))
+        setattr(get_func(inner_func), ENABLE_DYNAMIC, dynamic_args)
+        logger.info(f"Set enable dynamic: {dynamic_args} to {inner_func}")
+        return func
+    return decorator