mindspore 2.6.0rc1-cp311-cp311-win_amd64.whl → 2.7.0-cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +2 -2
- mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +42 -11
- mindspore/_extends/builtin_operations.py +3 -3
- mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
- mindspore/_extends/optimize/cell_utils.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +3 -3
- mindspore/_extends/parse/compile_config.py +44 -22
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -2
- mindspore/_extends/parse/parser.py +65 -84
- mindspore/_extends/parse/resources.py +39 -0
- mindspore/_extends/parse/standard_method.py +58 -14
- mindspore/_extends/parse/trope.py +8 -1
- mindspore/_extends/pijit/__init__.py +1 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +2 -5
- mindspore/amp.py +4 -22
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +4 -4
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +43 -12
- mindspore/common/_grad_function.py +2 -1
- mindspore/common/_pijit_context.py +28 -7
- mindspore/common/_stub_tensor.py +1 -209
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +178 -53
- mindspore/common/_utils.py +9 -1
- mindspore/common/api.py +377 -203
- mindspore/common/dtype.py +108 -57
- mindspore/common/dump.py +11 -16
- mindspore/common/dynamic_shape/__init__.py +0 -0
- mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +17 -23
- mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
- mindspore/common/file_system.py +59 -9
- mindspore/common/generator.py +5 -3
- mindspore/common/hook_handle.py +33 -5
- mindspore/common/jit_config.py +1 -1
- mindspore/common/jit_trace.py +84 -105
- mindspore/common/np_dtype.py +3 -3
- mindspore/common/parameter.py +27 -29
- mindspore/common/recompute.py +5 -7
- mindspore/common/sparse_tensor.py +0 -3
- mindspore/common/symbol.py +0 -1
- mindspore/common/tensor.py +117 -131
- mindspore/communication/_comm_helper.py +46 -4
- mindspore/communication/management.py +79 -7
- mindspore/context.py +67 -55
- mindspore/dataset/__init__.py +1 -1
- mindspore/dataset/audio/transforms.py +1 -1
- mindspore/dataset/core/config.py +38 -4
- mindspore/dataset/engine/datasets.py +350 -322
- mindspore/dataset/engine/datasets_user_defined.py +70 -24
- mindspore/dataset/engine/iterators.py +2 -2
- mindspore/dataset/engine/obs/config_loader.py +2 -2
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
- mindspore/dataset/transforms/c_transforms.py +2 -2
- mindspore/dataset/transforms/py_transforms.py +7 -3
- mindspore/dataset/transforms/transforms.py +10 -6
- mindspore/dataset/vision/__init__.py +1 -1
- mindspore/dataset/vision/py_transforms.py +8 -8
- mindspore/dataset/vision/transforms.py +17 -5
- mindspore/dataset/vision/utils.py +632 -21
- mindspore/dataset/vision/validators.py +1 -0
- mindspore/device_context/ascend/device.py +1 -1
- mindspore/device_context/ascend/op_tuning.py +35 -1
- mindspore/device_context/gpu/__init__.py +2 -2
- mindspore/device_context/gpu/device.py +1 -1
- mindspore/device_context/gpu/op_precision.py +4 -2
- mindspore/device_context/gpu/op_tuning.py +6 -3
- mindspore/device_manager.py +16 -9
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -4
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/optim/adadelta.py +13 -20
- mindspore/experimental/optim/adagrad.py +15 -22
- mindspore/experimental/optim/adam.py +17 -24
- mindspore/experimental/optim/adamax.py +14 -22
- mindspore/experimental/optim/adamw.py +28 -34
- mindspore/experimental/optim/asgd.py +15 -25
- mindspore/experimental/optim/lr_scheduler.py +27 -45
- mindspore/experimental/optim/nadam.py +14 -24
- mindspore/experimental/optim/optimizer.py +13 -23
- mindspore/experimental/optim/radam.py +18 -24
- mindspore/experimental/optim/rmsprop.py +14 -25
- mindspore/experimental/optim/rprop.py +15 -26
- mindspore/experimental/optim/sgd.py +9 -19
- mindspore/hal/__init__.py +4 -4
- mindspore/hal/contiguous_tensors_handle.py +2 -2
- mindspore/hal/memory.py +27 -7
- mindspore/include/api/cell.h +65 -5
- mindspore/include/api/cfg.h +24 -7
- mindspore/include/api/context.h +1 -0
- mindspore/include/api/delegate.h +10 -2
- mindspore/include/api/dual_abi_helper.h +100 -19
- mindspore/include/api/graph.h +14 -1
- mindspore/include/api/kernel.h +16 -3
- mindspore/include/api/kernel_api.h +9 -1
- mindspore/include/api/metrics/accuracy.h +9 -0
- mindspore/include/api/model.h +8 -1
- mindspore/include/api/model_group.h +4 -0
- mindspore/include/api/model_parallel_runner.h +2 -0
- mindspore/include/api/status.h +48 -10
- mindspore/include/api/types.h +8 -3
- mindspore/include/c_api/model_c.h +0 -58
- mindspore/include/c_api/tensor_c.h +0 -26
- mindspore/include/dataset/constants.h +9 -0
- mindspore/include/dataset/vision_ascend.h +1 -1
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/cifar10.py +61 -11
- mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mindspore_ops_host.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +6 -46
- mindspore/mint/distributed/__init__.py +5 -0
- mindspore/mint/distributed/distributed.py +429 -23
- mindspore/mint/nn/__init__.py +1 -1
- mindspore/mint/nn/functional.py +53 -6
- mindspore/mint/nn/layer/_functions.py +163 -294
- mindspore/mint/nn/layer/activation.py +8 -6
- mindspore/mint/nn/layer/conv.py +140 -104
- mindspore/mint/nn/layer/normalization.py +11 -25
- mindspore/mint/optim/adam.py +19 -18
- mindspore/mint/optim/adamw.py +14 -8
- mindspore/mint/optim/sgd.py +5 -5
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/cell.py +491 -623
- mindspore/nn/grad/cell_grad.py +11 -12
- mindspore/nn/layer/activation.py +36 -36
- mindspore/nn/layer/basic.py +74 -77
- mindspore/nn/layer/channel_shuffle.py +4 -4
- mindspore/nn/layer/combined.py +4 -2
- mindspore/nn/layer/conv.py +117 -110
- mindspore/nn/layer/dense.py +9 -7
- mindspore/nn/layer/embedding.py +50 -52
- mindspore/nn/layer/image.py +38 -40
- mindspore/nn/layer/math.py +111 -112
- mindspore/nn/layer/normalization.py +56 -44
- mindspore/nn/layer/pooling.py +58 -63
- mindspore/nn/layer/rnn_cells.py +33 -33
- mindspore/nn/layer/rnns.py +56 -56
- mindspore/nn/layer/thor_layer.py +74 -73
- mindspore/nn/layer/transformer.py +11 -1
- mindspore/nn/learning_rate_schedule.py +20 -20
- mindspore/nn/loss/loss.py +79 -81
- mindspore/nn/optim/adam.py +4 -6
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +2 -0
- mindspore/nn/optim/lamb.py +1 -3
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/tft_wrapper.py +2 -3
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/utils.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -1
- mindspore/nn/probability/distribution/poisson.py +2 -1
- mindspore/nn/sparse/sparse.py +3 -3
- mindspore/nn/wrap/cell_wrapper.py +73 -42
- mindspore/nn/wrap/grad_reducer.py +37 -52
- mindspore/nn/wrap/loss_scale.py +72 -74
- mindspore/numpy/array_creations.py +7 -7
- mindspore/numpy/fft.py +1 -1
- mindspore/numpy/math_ops.py +5 -5
- mindspore/numpy/utils_const.py +1 -1
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
- mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/{experimental/es/__init__.py → ops/_op_impl/cpu/joinedstr_op.py} +12 -6
- mindspore/ops/_vmap/vmap_array_ops.py +31 -13
- mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +54 -13
- mindspore/ops/auto_generate/gen_extend_func.py +27 -145
- mindspore/ops/auto_generate/gen_ops_def.py +1027 -347
- mindspore/ops/auto_generate/gen_ops_prim.py +2341 -1117
- mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
- mindspore/ops/composite/__init__.py +10 -0
- mindspore/ops/composite/base.py +9 -5
- mindspore/ops/composite/multitype_ops/__init__.py +12 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +133 -109
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
- mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
- mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
- mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
- mindspore/ops/function/__init__.py +4 -1
- mindspore/ops/function/_add_attr_func.py +11 -6
- mindspore/ops/function/array_func.py +19 -102
- mindspore/ops/function/debug_func.py +8 -5
- mindspore/ops/function/grad/grad_func.py +5 -13
- mindspore/ops/function/math_func.py +77 -572
- mindspore/ops/function/nn_func.py +46 -94
- mindspore/ops/function/other_func.py +4 -1
- mindspore/ops/function/random_func.py +44 -5
- mindspore/ops/function/vmap_func.py +2 -1
- mindspore/ops/functional.py +4 -4
- mindspore/ops/functional_overload.py +594 -18
- mindspore/ops/op_info_register.py +21 -0
- mindspore/ops/operations/__init__.py +16 -11
- mindspore/ops/operations/_custom_ops_utils.py +689 -34
- mindspore/ops/operations/_inner_ops.py +14 -18
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +5 -51
- mindspore/ops/operations/comm_ops.py +186 -41
- mindspore/ops/operations/custom_ops.py +303 -177
- mindspore/ops/operations/debug_ops.py +59 -4
- mindspore/ops/operations/image_ops.py +13 -13
- mindspore/ops/operations/manually_defined/ops_def.py +27 -28
- mindspore/ops/operations/math_ops.py +8 -9
- mindspore/ops/operations/nn_ops.py +8 -40
- mindspore/ops/primitive.py +9 -20
- mindspore/ops/tensor_method.py +63 -15
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
- mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
- mindspore/ops_generate/api/functions_cc_generator.py +58 -10
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
- mindspore/ops_generate/common/base_generator.py +14 -0
- mindspore/ops_generate/common/gen_constants.py +8 -3
- mindspore/ops_generate/common/gen_utils.py +0 -19
- mindspore/ops_generate/common/op_proto.py +11 -4
- mindspore/ops_generate/common/template.py +88 -11
- mindspore/ops_generate/gen_ops.py +1 -1
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -16
- mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
- mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
- mindspore/parallel/_auto_parallel_context.py +16 -23
- mindspore/parallel/_cell_wrapper.py +113 -45
- mindspore/parallel/_parallel_serialization.py +4 -3
- mindspore/parallel/_ps_context.py +4 -6
- mindspore/parallel/_tensor.py +167 -12
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/transformer.py +17 -12
- mindspore/parallel/_utils.py +5 -11
- mindspore/parallel/auto_parallel.py +35 -14
- mindspore/parallel/checkpoint_convert.py +3 -3
- mindspore/parallel/checkpoint_transform.py +13 -7
- mindspore/parallel/cluster/process_entity/_api.py +88 -49
- mindspore/parallel/cluster/process_entity/_utils.py +95 -7
- mindspore/parallel/cluster/run.py +48 -7
- mindspore/parallel/function/__init__.py +8 -1
- mindspore/parallel/function/reshard_func.py +12 -12
- mindspore/parallel/nn/__init__.py +15 -2
- mindspore/parallel/nn/parallel_cell_wrapper.py +50 -14
- mindspore/parallel/nn/parallel_grad_reducer.py +7 -14
- mindspore/parallel/shard.py +10 -25
- mindspore/parallel/transform_safetensors.py +469 -174
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +12 -6
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
- mindspore/profiler/analysis/task_manager.py +1 -1
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +10 -9
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +43 -23
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
- mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
- mindspore/profiler/common/constant.py +16 -0
- mindspore/profiler/common/msprof_cmd_tool.py +2 -2
- mindspore/profiler/common/path_manager.py +9 -0
- mindspore/profiler/common/profiler_context.py +50 -29
- mindspore/profiler/common/profiler_info.py +0 -16
- mindspore/profiler/common/profiler_meta_data.py +1 -0
- mindspore/profiler/common/profiler_op_analyse.py +239 -0
- mindspore/profiler/common/profiler_output_path.py +23 -8
- mindspore/profiler/common/profiler_parameters.py +128 -35
- mindspore/profiler/dynamic_profile/__init__.py +0 -0
- mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
- mindspore/profiler/dynamic_profiler.py +374 -338
- mindspore/profiler/envprofiler.py +42 -12
- mindspore/profiler/experimental_config.py +112 -7
- mindspore/profiler/mstx.py +33 -12
- mindspore/profiler/platform/__init__.py +2 -3
- mindspore/profiler/platform/cpu_profiler.py +10 -4
- mindspore/profiler/platform/npu_profiler.py +30 -20
- mindspore/profiler/profiler.py +218 -154
- mindspore/profiler/profiler_action_controller.py +65 -77
- mindspore/profiler/profiler_interface.py +2 -2
- mindspore/profiler/schedule.py +10 -4
- mindspore/rewrite/common/config.py +1 -0
- mindspore/rewrite/common/namer.py +1 -0
- mindspore/rewrite/common/namespace.py +1 -0
- mindspore/rewrite/node/node.py +31 -11
- mindspore/rewrite/parsers/assign_parser.py +1 -1
- mindspore/rewrite/symbol_tree/symbol_tree.py +2 -2
- mindspore/run_check/_check_version.py +7 -10
- mindspore/runtime/__init__.py +8 -6
- mindspore/runtime/event.py +10 -4
- mindspore/runtime/executor.py +87 -45
- mindspore/runtime/memory.py +31 -32
- mindspore/runtime/thread_bind_core.py +299 -165
- mindspore/safeguard/rewrite_obfuscation.py +12 -13
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/_utils.py +17 -7
- mindspore/train/amp.py +43 -23
- mindspore/train/callback/__init__.py +5 -5
- mindspore/train/callback/_callback.py +2 -1
- mindspore/train/callback/_checkpoint.py +4 -14
- mindspore/train/callback/_flops_collector.py +11 -7
- mindspore/train/callback/_landscape.py +0 -1
- mindspore/train/callback/_train_fault_tolerance.py +98 -21
- mindspore/train/data_sink.py +15 -6
- mindspore/train/dataset_helper.py +14 -5
- mindspore/train/model.py +133 -69
- mindspore/train/serialization.py +168 -126
- mindspore/train/summary/summary_record.py +13 -2
- mindspore/train/train_thor/model_thor.py +2 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +0 -6
- mindspore/utils/runtime_execution_order_check.py +163 -77
- mindspore/utils/sdc_detect.py +68 -0
- mindspore/utils/utils.py +14 -17
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/METADATA +5 -4
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/RECORD +403 -442
- mindspore/_deprecated/jit.py +0 -198
- mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
- mindspore/communication/_hccl_management.py +0 -297
- mindspore/experimental/es/embedding_service.py +0 -891
- mindspore/experimental/es/embedding_service_layer.py +0 -581
- mindspore/profiler/common/validator/__init__.py +0 -14
- mindspore/profiler/common/validator/validate_path.py +0 -84
- mindspore/profiler/parser/__init__.py +0 -14
- mindspore/profiler/parser/aicpu_data_parser.py +0 -272
- mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
- mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
- mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
- mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
- mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
- mindspore/profiler/parser/ascend_flops_generator.py +0 -116
- mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
- mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
- mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
- mindspore/profiler/parser/ascend_op_generator.py +0 -334
- mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
- mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
- mindspore/profiler/parser/base_timeline_generator.py +0 -483
- mindspore/profiler/parser/container.py +0 -229
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
- mindspore/profiler/parser/flops_parser.py +0 -531
- mindspore/profiler/parser/framework_enum.py +0 -111
- mindspore/profiler/parser/framework_parser.py +0 -464
- mindspore/profiler/parser/framework_struct.py +0 -61
- mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
- mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
- mindspore/profiler/parser/hccl_parser.py +0 -573
- mindspore/profiler/parser/hwts_log_parser.py +0 -122
- mindspore/profiler/parser/integrator.py +0 -526
- mindspore/profiler/parser/memory_usage_parser.py +0 -277
- mindspore/profiler/parser/minddata_analyzer.py +0 -800
- mindspore/profiler/parser/minddata_parser.py +0 -186
- mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
- mindspore/profiler/parser/op_intermediate_parser.py +0 -149
- mindspore/profiler/parser/optime_parser.py +0 -250
- mindspore/profiler/parser/profiler_info.py +0 -213
- mindspore/profiler/parser/step_trace_parser.py +0 -666
- mindspore/utils/hooks.py +0 -81
- /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/WHEEL +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/entry_points.txt +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/top_level.txt +0 -0
mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py

@@ -81,7 +81,7 @@ def gen_custom_op_files(config_dir, dsl_dir):
         f.write(json.dumps(ops_info, indent=4))
 
     # custom akg op dsl file
-    custom_py = os.path.join(cur_path, "
+    custom_py = os.path.join(cur_path, "custom.py")
     if not os.path.isfile(custom_py):
         raise RuntimeError("custom.py path is invalid: {}".format(custom_py))
     shutil.copy(custom_py, dsl_dir)
mindspore/_extends/parse/__init__.py

@@ -1,4 +1,4 @@
-# Copyright 2020-
+# Copyright 2020-2025 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -30,7 +30,7 @@ from .parser import (Parser, create_instance, is_supported_create_instance_type,
                      get_obj_defined_from_obj_type, is_from_third_party_library, get_const_abs, get_const_round,
                      get_const_len, convert_to_namedtuple, check_attrs, generate_lambda_object,
                      check_is_subclass, check_attr_is_property, get_method_info, can_constant_fold,
-                     convert_to_mutable)
+                     convert_to_mutable, get_ast_augassign_namespace_symbol, hook_wrapper)
 
 __all__ = ['Parser', 'create_instance', 'is_supported_create_instance_type', 'generate_scope', 'get_attr_from_object',
            'get_bprop_method_of_class', 'get_class_instance_type', 'get_class_member_namespace_symbol',
@@ -45,4 +45,4 @@ __all__ = ['Parser', 'create_instance', 'is_supported_create_instance_type', 'ge
            'is_class_member_recursive', 'get_obj_defined_from_obj_type',
            'is_from_third_party_library', 'get_const_abs', 'get_const_round', 'get_const_len', 'get_method_info',
            'convert_to_namedtuple', 'check_attrs', 'generate_lambda_object', 'check_is_subclass', 'check_attr_is_property',
-           'can_constant_fold', 'convert_to_mutable']
+           'can_constant_fold', 'convert_to_mutable', 'get_ast_augassign_namespace_symbol', 'hook_wrapper']
mindspore/_extends/parse/compile_config.py

@@ -1,4 +1,4 @@
-# Copyright 2024 Huawei Technologies Co., Ltd
+# Copyright 2024-2025 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -275,16 +275,6 @@ Value Range:
 """
 STRICT_CHECK_PARENT_CONTEXT = ''
 
-"""
-Name: CELL_PARAMETER_HOOK
-Function: Whether to enable cell parameter hook.
-    Cell parameter hook is an experimental api that may be deleted later.
-Value Range:
-    1: Enable
-    Default: Disable
-"""
-CELL_PARAMETERS_HOOK = ''
-
 """
 Name: CHECK_BPROP
 Function: Whether to check back propagation nodes. The checking ensures that the shape and dtype of
@@ -315,15 +305,6 @@ Value Range:
 """
 DEBUG_LEVEL = ''
 
-"""
-Name: PYNATIVE_JIT_GRAD_MODE
-Function: Which method used for grad jit in pynative mode
-Value Range:
-    1: Replace ValueNode
-    Default: Parametrization
-"""
-PYNATIVE_JIT_GRAD_MODE = ''
-
 """
 Name: PIJIT_SUBGRAPH_BREAK_OPTIMIZE
 Function: Whether to enable subgraph break optimization in PIJit.
@@ -333,6 +314,15 @@ Value Range:
 """
 PIJIT_SUBGRAPH_BREAK_OPTIMIZE = ''
 
+"""
+Name: ENABLE_ELIMINATE_UNUSED_PARAMS
+Function: Whether to enable eliminate unused parameters optimization in PIJit.
+Value Range:
+    1: Enable, Disable if other value.
+    Default: Disable.
+"""
+ENABLE_ELIMINATE_UNUSED_PARAMS = ''
+
 """
 Name: PUT_ALL_CNODE_INTO_ORDER_LIST
 Function: Whether to put all CNode into order list in back prop.
@@ -342,6 +332,36 @@ Value Range:
 """
 PUT_ALL_CNODE_INTO_ORDER_LIST = ''
 
+"""
+Name: CHECK_PASS_NODE_SCOPE
+Function: Whether to check
+Value Range:
+    1: Enable
+    Default: Disable.
+"""
+CHECK_PASS_NODE_SCOPE = ''
+
+"""
+Name: CHECK_INVALID_VIEW_INPLACE_DOUT_LEVEL
+Function: The level of check invalid dout under view+inplace scene
+Value Range:
+    1: Only check scenario 1
+    2: Only check scenario 2
+    Default(""): Check all invalid dout for view inplace scene
+    Others: No invalid dout check for view inplace scene
+"""
+CHECK_INVALID_VIEW_INPLACE_DOUT_LEVEL = ''
+
+"""
+Name: JIT_ENABLE_AUGASSIGN_INPLACE
+Function: Whether enable augassign inplace.
+Value Range:
+    0: Disable
+    1: Enable
+    Default: Disable
+"""
+JIT_ENABLE_AUGASSIGN_INPLACE = '0'
+
 __all__ = [
     "COMPILE_PROFILE",
     "COMPILE_PROFILE_FINISH_ACTION",
@@ -372,11 +392,13 @@ __all__ = [
     "ENABLE_RECOMPUTE_BEFORE_INLINE",
     "STRICT_CHECK_PARENT_CONTEXT",
     "AUTO_PASSES_OPTIMIZE_PATH",
-    "CELL_PARAMETERS_HOOK",
     "CHECK_BPROP",
     "GRAD_FOR_SCALAR",
     "DEBUG_LEVEL",
-    "PYNATIVE_JIT_GRAD_MODE",
     "PIJIT_SUBGRAPH_BREAK_OPTIMIZE",
+    "ENABLE_ELIMINATE_UNUSED_PARAMS",
     "PUT_ALL_CNODE_INTO_ORDER_LIST",
+    "CHECK_PASS_NODE_SCOPE",
+    "CHECK_INVALID_VIEW_INPLACE_DOUT_LEVEL",
+    "JIT_ENABLE_AUGASSIGN_INPLACE"
 ]
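The entries above are plain module-level strings consumed by the graph compiler, so the quickest way to confirm what a given build ships with is to import the module and read the attribute. A minimal sketch follows; the `MS_DEV_*` environment-variable mapping mentioned in the comment is an assumption carried over from how earlier releases exposed these switches, not something this diff states.

```python
# Sketch: inspecting the new compile switches added in 2.7.0.
from mindspore._extends.parse import compile_config

print(compile_config.JIT_ENABLE_AUGASSIGN_INPLACE)    # '0' by default, per the diff above
print(compile_config.ENABLE_ELIMINATE_UNUSED_PARAMS)  # '' (disabled) by default

# Assumption: as in earlier releases, each name is expected to be driven by an
# MS_DEV_<NAME> environment variable (e.g. MS_DEV_JIT_ENABLE_AUGASSIGN_INPLACE=1)
# set before the first graph compilation; verify against the 2.7.0 release notes.
```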
mindspore/_extends/parse/deprecated/deprecated_tensor_method.py

@@ -33,7 +33,7 @@ deprecated_tensor_method_map = {
     # 7 allclose
     "allclose": "tensor_allclose",
     # 8 any
-    "any": "
+    "any": "reduce_tensor_any",
     # 9 arctan2
     "arctan2": "tensor_arctan2",
     # 10 argmax
@@ -338,7 +338,6 @@ deprecated_tensor_method_map = {
     "atan": "deprecated_tensor_atan",
     "arctan": "deprecated_tensor_arctan",
     "dot": "deprecated_tensor_dot",
-    "copy_": "deprecated_tensor_copy_",
 
     # 153
     "logsumexp": "deprecated_tensor_logsumexp",
mindspore/_extends/parse/parser.py

@@ -1,6 +1,6 @@
 # This is the Python adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
 #
-# Copyright 2020-
+# Copyright 2020-2025 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -42,9 +42,9 @@ from mindspore.common.api import _JitExecutor
 from mindspore.common import dtype as mstype
 from mindspore.common.parameter import Parameter
 from mindspore.common import mutable
-from mindspore._checkparam import is_stub_tensor
 from .namespace import Namespace, ModuleNamespace, ClosureNamespace, ClassMemberNamespace
-from .resources import parse_object_map, ops_symbol_map, convert_object_map,
+from .resources import (parse_object_map, parse_augassign_object_map, ops_symbol_map, convert_object_map,
+                        convert_class_to_function_map, trope_ns)
 from .resources import SYMBOL_UNDEFINE, constant_fold_functions
 from .jit_fallback_modules.check_utils import third_party_checker
 from ...common.api import _convert_python_data
@@ -103,17 +103,6 @@ parse_expr_statement_white_list = (
     "append", "insert", "clear", "reverse", "extend", "update", "register_hook",
 )
 
-# Methods that need to reorder after it's caller is used before
-# e.g. We need to reorder `x.register_hook` after x is used in `out = x + 1` when `register_hook` is called.
-# def construct(x):
-#     out = x + 1
-#     x.register_hook(hook_fn)
-#     return out
-# equals to:
-# def construct(x):
-#     x = x.register_hook(hook_fn)  # register_hook will return itself when it is called in the graph (in `GRAPH_MODE`).
-#     out = x + 1
-#     return out
 _need_reorder_methods = (
     "register_hook",
 )
@@ -125,10 +114,6 @@ _unsupported_python_builtin_type = (
     set, dict, slice, complex, reversed, type,
 )
 
-# Unsupported python builtin type in JIT Fallback.
-_fallback_unsupported_python_builtin_type = (
-    compile, eval, exec
-)
 
 _global_params = {}
 
@@ -197,10 +182,7 @@ def get_parse_method_of_class(obj, parse_method=None):
     if parse_method is not None:
         method_name = parse_method
     elif isinstance(obj, nn.Cell):
-
-        method_name = "_backward_hook_construct"
-    else:
-        method_name = "construct"
+        method_name = "construct"
 
     return get_attr_from_object(obj, method_name)
 
@@ -264,7 +246,7 @@ def resolve_symbol(namespace, symbol):
     if isinstance(resolve_, _JitExecutor):
         logger.debug("Resolve class _JitExecutor, resolve fn instead.")
         resolve_ = resolve_.fn
-    logger.debug(
+    logger.debug("Found '%s' in %s, resolved: %s / %s", symbol, namespace, resolve_, type(resolve_))
     return resolve_
 
 
@@ -667,6 +649,14 @@ def get_ast_namespace_symbol(obj):
     return ops_info
 
 
+def get_ast_augassign_namespace_symbol(obj):
+    """Get obj type and namespace and symbol."""
+    # Get symbol from object map.
+    ops_info = parse_augassign_object_map.get(type(obj), SYMBOL_UNDEFINE)
+    logger.debug("ops info: %r", ops_info)
+    return ops_info
+
+
 def get_operation_symbol(obj):
     """Get obj operation symbol."""
     ops_symbol = ops_symbol_map.get(type(obj), SYMBOL_UNDEFINE)
@@ -680,6 +670,7 @@ def get_operation_namespace_symbol(var: str):
     logger.debug("get operation ops info: %r", ops_info)
     return ops_info
 
+
 def get_ast_type(node):
     """Get the ast type."""
     ast_type = AST_SUB_TYPE_UNKNOWN
@@ -784,32 +775,6 @@ def get_arg_spec_and_default_values(func):
     return arg_spec, defaults
 
 
-def _convert_stub_tensor(data):
-    """Convert stub tensor output to tensor"""
-    if is_stub_tensor(data):
-        return data.stub_sync()
-    if isinstance(data, tuple):
-        # Handle namedtuple since its type is tuple.
-        if hasattr(data, "_fields"):
-            type_name = data.__class__.__name__
-            data_dict = data._asdict()
-            fields = data_dict.keys()
-            return namedtuple(type_name, fields)(**_convert_stub_tensor(data_dict))
-        return tuple(_convert_stub_tensor(x) for x in data)
-    if data.__class__ is list:
-        # Keep the list object not change.
-        for i in range(len(data)):
-            data[i] = _convert_stub_tensor(data[i])
-        return data
-    if data.__class__ is dict:
-        # Keep the dict object not change.
-        keys = tuple(data.keys())
-        for key in keys:
-            data[_convert_stub_tensor(key)] = _convert_stub_tensor(data.pop(key))
-        return data
-    return data
-
-
 def eval_script(exp_str, params):
     """Evaluate a python expression."""
     if not isinstance(params, tuple):
@@ -822,8 +787,13 @@ def eval_script(exp_str, params):
     local_params = params[1]
     try:
         local_params = _convert_python_data(local_params)
+        # There are two sources of scripts:
+        # 1. The user's original Python script code, which is directly passed back to Python for execution,
+        #    and its behavior is guaranteed by the user.
+        # 2. Internally provided Python expression code, similar to
+        #    `__iternal_sequence_input__[__internal_sequence_index__]`.
+        # In addition, MindIR load and export do not involve the use of the `eval_script` function.
         res = eval(exp_str, global_params, local_params)
-        res = _convert_stub_tensor(res)
     except Exception as e:
         error_info = f"When eval '{exp_str}' by using JIT Fallback feature, an error occurred: " + str(e)
         logger.debug(error_info)
@@ -852,7 +822,7 @@ def get_script_id_attrs(script):
 
 def generate_lambda_object(script):
     """Generate lambda expression object using script"""
-    return
+    return eval_script(script, ({}, {}))
 
 
 def get_global_params():
@@ -924,25 +894,31 @@ def get_method_info(obj):
     return class_name_and_method_name[0], class_name_and_method_name[1]
 
 
-def is_ms_tensor_method(obj):
-    """Check if the obj is a method of MindSpore Tensor"""
-    if not hasattr(obj, '__name__') or not hasattr(Tensor, obj.__name__):
-        return False
-    fn = inspect.unwrap(obj.__func__ if isinstance(obj, types.MethodType) else obj)
-    tensor_method = getattr(Tensor, obj.__name__)
-    tensor_method = tensor_method.__func__ if hasattr(tensor_method, "__func__") else tensor_method
-    is_builtin_tensor = False
-    if inspect.isbuiltin(obj):
-        class_name_and_method_name = obj.__qualname__.split('.')
-        is_builtin_tensor = obj.__module__ is None and class_name_and_method_name[0] == Tensor.__name__
-    return fn == tensor_method or is_builtin_tensor
-
-
 def can_constant_fold(obj):
     """Check if the obj is the function can be constantly folded."""
     return obj in constant_fold_functions
 
 
+def hook_wrapper(hook_fn):
+    """
+    Decorator wrapper for gradient hook functions.
+    Handles custom logic when the hook returns None to ensure execution dependencies.
+
+    Args:
+        hook_fn (function): The original hook function to be wrapped.
+
+    Returns:
+        function: Wrapped inner hook function with dependency handling logic.
+    """
+    def inner(dout):
+        fdout = hook_fn(dout)
+        if fdout is None:
+            dout = ops.Depend()(dout, fdout)
+            return dout
+        return fdout
+    return inner
+
+
 class Parser:
     """
     Parser python code to ast tree.
@@ -976,8 +952,6 @@ class Parser:
         """To check if not supported for namespace"""
         unsupported = isinstance(value, _builtin_function_or_method_type) and value not in convert_object_map
         logger.debug(f"'{value}' unsupported: {unsupported}.")
-        if unsupported and value in _fallback_unsupported_python_builtin_type:
-            raise TypeError(f"'{value}' is not supported both in JIT Fallback and graph mode.")
         return unsupported
 
     @staticmethod
@@ -1037,31 +1011,38 @@
                 "the code 'def __init__(self, combine_fn=lambda x: x + 1):' rewritten as\n"
                 "'def __init__(self, combine_fn=\nlambda x: x + 1\n):' will solve the problem.")
 
+    def save_source_code(self, attr_name, source_lines):
+        """Save cell and func source code to support run graph mode with pyc or so."""
+        if '/mindspore/' in self.filename or '\\mindspore\\' in self.filename:
+            return
+        if getattr(self.fn, attr_name, None) == source_lines:
+            return
+        if not os.access(self.filename, os.W_OK):
+            raise PermissionError(f"Don't have the write permission on the file {self.filename}.")
+        with open(self.filename, 'a') as f:
+            logger.debug(f"setattr for {self.fn}, attr: {attr_name}, value: {source_lines}")
+            f.write(f"\n# Set source attribute for function {self.function_name} "
+                    f"to support run so or pyc file in Graph Mode."
+                    f"\nsetattr({self.function_name}, '{attr_name}', {source_lines})\n")
+            setattr(self.fn, attr_name, source_lines)
+
     def parse(self):
         """Parse the function or method."""
         logger.debug("fn: %r", self.fn)
         if isinstance(self.fn, (types.FunctionType, types.MethodType)) or \
                 type(self.fn).__name__ == 'cython_function_or_method':
-
+            attr_name = 'source'
             try:
                 source_lines = inspect.getsourcelines(self.fn)
-                if context.get_context('support_binary')
-
-                        (not hasattr(self.fn, attr) or getattr(self.fn, attr) != source_lines):
-                    if not os.access(self.filename, os.W_OK):
-                        raise PermissionError(f"Don't have the write permission on the file {self.filename}.")
-                    with open(self.filename, 'a') as f:
-                        f.write(f"\n# Set source attribute for function {self.function_name} "
-                                f"to support run so or pyc file in Graph Mode."
-                                f"\nsetattr({self.function_name}, '{attr}', {source_lines})\n")
-                        setattr(self.fn, attr, source_lines)
+                if context.get_context('support_binary') or os.getenv('MS_SUPPORT_BINARY', None) == '1':
+                    self.save_source_code(attr_name, source_lines)
             except (OSError, TypeError) as e:
-                if hasattr(self.fn,
-                    source_lines = getattr(self.fn,
+                if hasattr(self.fn, attr_name):
+                    source_lines = getattr(self.fn, attr_name)
+                elif e.__str__() == "could not get source code":
+                    raise OSError(f"Mindspore can not compile temporary source code in terminal. "
+                                  f"Please write source code to a python file and run the file.")
                 else:
-                    if e.__str__() == "could not get source code":
-                        raise OSError(f"Mindspore can not compile temporary source code in terminal. "
-                                      f"Please write source code to a python file and run the file.")
                     raise e
             self.lines, self.line_offset = source_lines
             original_src = ''.join(self.lines)
@@ -1072,7 +1053,7 @@ class Parser:
             self.col_offset = \
                 len(original_src.split('\n')[0]) - len(src.split('\n')[0])
             logger.debug("Get source: %s", src)
-            if not hasattr(self.fn,
+            if not hasattr(self.fn, attr_name):
                 self.check_lambda(src)
             try:
                 ast_tokens = asttokens.ASTTokens(src, parse=True)
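One addition worth calling out is `hook_wrapper`: a gradient hook is allowed to return None (an observe-only hook), and without a data dependency the whole hook call could be dropped from the graph. Wrapping `dout` in `Depend` keeps the call ordered before the gradient is consumed. A small standalone sketch of the same pattern; only the wrapper itself comes from the diff, the usage lines are illustrative.

```python
from mindspore import ops

def hook_wrapper(hook_fn):
    """Mirror of the parser.py addition: keep a dependency on hooks that return None."""
    def inner(dout):
        fdout = hook_fn(dout)
        if fdout is None:
            # Depend returns dout unchanged but forces the hook call to be kept and ordered first.
            return ops.Depend()(dout, fdout)
        return fdout
    return inner

# Observe-only hook: returns None, so the original gradient flows through untouched.
observe = hook_wrapper(lambda g: print("grad:", g))
# Rewriting hook: its return value replaces the gradient.
double = hook_wrapper(lambda g: g * 2)
```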
mindspore/_extends/parse/resources.py

@@ -76,6 +76,45 @@ parse_object_map = {
     SYMBOL_UNDEFINE: (None, 'undefine', ''),
 }
 
+parse_augassign_object_map = {
+    # ast grammar
+    ast.Add: (trope_ns, 'augassign_add', '+='),
+    ast.Sub: (trope_ns, 'augassign_sub', '-='),
+    ast.Mult: (trope_ns, 'augassign_mul', '*='),
+    ast.Div: (trope_ns, 'augassign_div', '/='),
+    ast.FloorDiv: (trope_ns, 'augassign_floordiv', '//='),
+
+    ast.Mod: (trope_ns, 'mod', '%'),
+    ast.Pow: (trope_ns, 'pow', '**'),
+    ast.MatMult: (trope_ns, 'matmul', '@'),
+    ast.LShift: (trope_ns, 'lshift', '<<'),
+    ast.RShift: (trope_ns, 'rshift', '>>'),
+    ast.BitAnd: (trope_ns, 'and_', '&'),
+    ast.BitOr: (trope_ns, 'or_', '|'),
+    ast.BitXor: (trope_ns, 'xor', '^'),
+    ast.UAdd: (trope_ns, 'pos', '+'),
+    ast.USub: (trope_ns, 'neg', '-'),
+    ast.Invert: (trope_ns, 'invert', '~'),
+    ast.Not: (trope_ns, 'not_', 'not'),
+    ast.Eq: (trope_ns, 'eq', '=='),
+    ast.NotEq: (trope_ns, 'ne', '!='),
+    ast.Lt: (trope_ns, 'lt', '<'),
+    ast.Gt: (trope_ns, 'gt', '>'),
+    ast.LtE: (trope_ns, 'le', '<='),
+    ast.GtE: (trope_ns, 'ge', '>='),
+    ast.Is: (trope_ns, 'is_', 'is'),
+    ast.IsNot: (trope_ns, 'is_not', 'is not'),
+    ast.In: (trope_ns, 'contains', 'in'),
+    ast.NotIn: (trope_ns, 'not_contains', 'not in'),
+
+    # operation symbol type
+    'getitem': (composite_ns, 'getitem', ''),
+    'ms_next': (composite_ns, 'ms_next', ''),
+
+    # undefined type
+    SYMBOL_UNDEFINE: (None, 'undefine', ''),
+}
+
 # Operation symbols corresponding to ast grammar
 ops_symbol_map = {
     # ast grammar
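The new map gives augmented assignment its own trope symbols (`augassign_add`, `augassign_sub`, and so on), keyed by the AST operator type, instead of reusing the generic binary entries. A short sketch of the lookup the parser performs for `x += 1`, mirroring `get_ast_augassign_namespace_symbol` from the parser.py hunks above:

```python
import ast

# The operator node the MindSpore parser sees for an augmented assignment.
node = ast.parse("x += 1").body[0]   # an ast.AugAssign node
op_type = type(node.op)              # ast.Add

# In 2.7.0, parse_augassign_object_map[ast.Add] resolves to
# (trope_ns, 'augassign_add', '+=') rather than the plain 'add' entry.
assert op_type is ast.Add
```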
mindspore/_extends/parse/standard_method.py

@@ -27,9 +27,11 @@ from mindspore.common.sparse_tensor import RowTensorInner
 from mindspore.ops.composite.base import _append, _insert, _pop, _list_clear, _reverse, \
     _extend, _dict_setitem, _dict_clear, _haskey, _update, _fromkeys
 from mindspore.ops.operations._sequence_ops import TensorToTuple
-from mindspore.ops.auto_generate import trace_v2_op, inplace_addmm_op, inplace_index_put_op, inplace_normal_op,
+from mindspore.ops.auto_generate import trace_v2_op, inplace_addmm_op, inplace_index_put_op, inplace_normal_op, \
+    inplace_index_add_op
 from mindspore.ops.auto_generate import inplace_copy_op, inplace_uniform_op, inplace_erfinv_op
 from mindspore.ops.auto_generate import inplace_scatter_add as inplace_scatter_add_
+from mindspore.ops.auto_generate import inplace_exponential_op
 
 from ... import _checkparam as validator
 from ..._checkparam import check_is_number, check_reshape_shp, check_axis_in_range, \
@@ -579,7 +581,7 @@ def transpose(x, *axis):
 
     Raises:
         TypeError: If input arguments have types not specified above.
-        ValueError: If the number of `axes` is not
+        ValueError: If the number of `axes` is not equal to a.ndim.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -1112,7 +1114,7 @@ def copy_(self, src, non_blocking=False):
     """
     Copies the elements from src into self tensor and returns self.
    """
-    return inplace_copy_op(self, src)
+    return inplace_copy_op(self, src, non_blocking)
 
 
 def max(input, axis=None, keepdims=False, *, initial=None,  # pylint: disable=redefined-builtin
@@ -1232,7 +1234,7 @@ def pow(x, y):  # pylint: disable=redefined-builtin
     return F.pow(x, y)
 
 
-def put_(x, index, source, accumulate=False):
+def put_(x, index, source, accumulate=False):
     """
     Copies the elements from source into the positions specified by index.
     """
@@ -2131,14 +2133,14 @@ def _check_sum_to_size(size, input_dim, shape_input):
 
 
 @_primexpr
-def _count_axes(size, input_shape, shape_input):
+def _count_axes(size, input_shape, shape_input, pre_len, pre_axis):
     """Count the sum axes for sum_to_size."""
-    axes =
+    axes = pre_axis
     for i in range(len(size)):
         element = size[i]
-        if element != input_shape[i] and element == 1:
-            axes.append(i)
-        elif element != input_shape[i]:
+        if element != input_shape[i + pre_len] and element == 1:
+            axes.append(i + pre_len)
+        elif element != input_shape[i + pre_len]:
             raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_input}.")
     return axes
 
@@ -2151,13 +2153,15 @@ def sum_to_size(input, *size):
         size = size[0]
     shape_input = input.shape
     _check_sum_to_size(size, input.ndim, shape_input)
+    pre_len = 0
+    pre_axis = []
     if len(size) < input.ndim:
-
-
+        pre_len = input.ndim - len(size)
+        pre_axis = [axis for axis in range(pre_len)]
 
-    axes = _count_axes(size, input.shape, shape_input)
+    axes = _count_axes(size, input.shape, shape_input, pre_len, pre_axis)
     if axes:
-        return input.sum(tuple(axes), keepdims=True)
+        return input.sum(tuple(axes), keepdims=True).reshape(size)
     return input
 
 
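The `sum_to_size` rework matters when `size` has fewer dimensions than the input: the extra leading axes are now reduced as well (`pre_len`/`pre_axis`), and the trailing `.reshape(size)` makes the result have exactly the requested rank instead of keeping the summed leading axes as size-1 dims. A worked example of the intended semantics (a sketch of expected behaviour, not a test taken from the package):

```python
import numpy as np
import mindspore as ms

x = ms.Tensor(np.ones((2, 3, 4), np.float32))

# Target (3, 1) aligns to the trailing dims of (2, 3, 4):
#   axis 0 is a leading "pre" axis and is summed away,
#   axis 2 collapses because the target extent is 1,
# so every element of the result is 2 * 4 = 8.
y = x.sum_to_size((3, 1))
print(y.shape)   # (3, 1), rather than the (1, 3, 1) a keepdims-only sum would give
```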
mindspore/_extends/parse/standard_method.py

@@ -3705,6 +3709,13 @@ def bernoulli(input, *, generator=None):
     return F.bernoulli_ext(input, generator=generator)
 
 
+def bernoulli_(input, p=0.5, *, generator=None):
+    """
+    Randomly draws binary numbers from a Bernoulli distribution.
+    """
+    return F.bernoulli_(input, p, generator=generator)
+
+
 def gather_nd(input_x, indices):
     r"""
     Gathers slices from a tensor by indices.
@@ -4004,6 +4015,7 @@ def to_double(input_x):
     """
     return F.cast(input_x, mstype.float64)
 
+
 def to_bfloat16(input_x):
     r"""
     Converts input tensor dtype to bfloat16.
@@ -4486,10 +4498,20 @@ def uniform_(input, from_=0, to=1, *, generator=None):
     """
     if generator is None:
         generator = default_generator
-    seed, offset = generator._step(generator_step_)
+    seed, offset = generator._step(generator_step_)  # pylint: disable=protected-access
     return inplace_uniform_op(input, from_, to, seed, offset)
 
 
+def exponential_(input, lambd=1, *, generator=None):
+    r"""
+    Fills `self` tensor with elements drawn from the exponential distribution:
+    """
+    if generator is None:
+        generator = default_generator
+    seed, offset = generator._step(generator_step_)  # pylint: disable=protected-access
+    return inplace_exponential_op(input, lambd, seed, offset)
+
+
 def amin(input, axis=None, keep_dims=False):
     r"""
     For details, please refer to :func:`mindspore.ops.amin`.
@@ -4520,6 +4542,13 @@ def index_put(input, indices, values, accumulate=False):
     return _index_put(input, values, indices)
 
 
+def move_to(input, to, blocking=True):
+    r"""
+    Copy Tensor to target device synchronously or asynchronously, default synchronously. only support PyNative mode.
+    """
+    raise ValueError(f"The method 'move_to' is not supported in jit.")
+
+
 def index_put_(input, indices, values, accumulate=False):
     r"""
     For details, please refer to :func:`mindspore.Tensor.index_put_`.
@@ -4592,3 +4621,18 @@ def zero_(input):
     Return a tensor filled with zeros.
     """
     return F.zero_(input)
+
+
+def slice_get_start(slice_node):
+    """Using SliceGetItem to get slice_node.start"""
+    return F.SliceGetItem(slice_node, "start")
+
+
+def slice_get_stop(slice_node):
+    """Using SliceGetItem to get slice_node.stop"""
+    return F.SliceGetItem(slice_node, "stop")
+
+
+def slice_get_step(slice_node):
+    """Using SliceGetItem to get slice_node.step"""
+    return F.SliceGetItem(slice_node, "step")
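The shims above are the graph-mode entry points for the in-place random fillers; in PyNative the corresponding `Tensor` methods are expected to carry the same signatures, which is an assumption here since this diff only shows the graph-mode side. A hedged usage sketch:

```python
import numpy as np
import mindspore as ms

x = ms.Tensor(np.zeros((2, 3), np.float32))

# Assumed public counterparts of the graph-mode shims added above:
x.uniform_(0, 1)       # in-place fill from U(0, 1)
x.exponential_(1.0)    # in-place fill from Exp(lambd=1), new in 2.7.0 per this diff
x.bernoulli_(p=0.3)    # in-place Bernoulli draws with success probability 0.3
```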
mindspore/_extends/parse/trope.py

@@ -37,6 +37,12 @@ from functools import ( # noqa
     partial
 )
 
+from mindspore.ops.composite.multitype_ops.add_impl import augassign_add
+from mindspore.ops.composite.multitype_ops.sub_impl import augassign_sub
+from mindspore.ops.composite.multitype_ops.mul_impl import augassign_mul
+from mindspore.ops.composite.multitype_ops.div_impl import augassign_div
+from mindspore.ops.composite.multitype_ops.floordiv_impl import augassign_floordiv
+
 from ...common import mutable
 
 __all__ = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod', 'eq', 'ne', 'lt', 'gt', 'le', 'ge', 'pos', 'neg',
@@ -44,7 +50,8 @@ __all__ = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod', 'eq', 'ne', 'lt',
            'matmul', 'getitem', 'setitem',
           'bool', 'getattr', 'setattr', 'hasattr', 'len', 'iter', 'next', 'pow', 'range', 'map', 'zip',
           'partial', 'print', 'enumerate', 'isinstance', 'filter', 'abs', 'round', 'mutable',
-           'max', 'min', 'sum', 'list', 'tuple'
+           'max', 'min', 'sum', 'list', 'tuple',
+           'augassign_add', 'augassign_sub', 'augassign_mul', 'augassign_div', 'augassign_floordiv']
 
 
 def MakeTuple(*elts):  # pragma: no cover
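With these imports registered in the trope namespace, an augmented assignment inside a jitted function resolves to the dedicated `augassign_*` multitype functions, whose in-place behaviour is gated by the `JIT_ENABLE_AUGASSIGN_INPLACE` switch added to compile_config.py above. A small sketch of the user-visible pattern this machinery serves (illustrative only; numeric behaviour is unchanged for ordinary values):

```python
import mindspore as ms

@ms.jit
def accumulate(x, y):
    x += y   # dispatched through augassign_add in 2.7.0 instead of the plain add trope
    x *= 2   # augassign_mul
    return x

print(accumulate(ms.Tensor(1.0), ms.Tensor(2.0)))   # 6.0
```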
mindspore/_extends/pijit/__init__.py

@@ -16,8 +16,7 @@
 Helper module for pijit analyze
 """
 
+__all__ = ['pijit_func_white_list_map', "get_tensor_method_name"]
 
 from .pijit_func_white_list import _func_map as pijit_func_white_list_map
 from .tensor_func_list import get_tensor_method_name
-
-__all__ = ['pijit_func_white_list_map', "get_tensor_method_name"]