mindspore 2.6.0__cp310-cp310-win_amd64.whl → 2.7.0__cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +2 -2
- mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +42 -11
- mindspore/_extends/builtin_operations.py +3 -3
- mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
- mindspore/_extends/optimize/cell_utils.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +3 -3
- mindspore/_extends/parse/compile_config.py +44 -22
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -2
- mindspore/_extends/parse/parser.py +64 -83
- mindspore/_extends/parse/resources.py +39 -0
- mindspore/_extends/parse/standard_method.py +47 -14
- mindspore/_extends/parse/trope.py +8 -1
- mindspore/_extends/pijit/__init__.py +1 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +2 -5
- mindspore/amp.py +4 -22
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +4 -4
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +43 -12
- mindspore/common/_grad_function.py +2 -1
- mindspore/common/_pijit_context.py +28 -7
- mindspore/common/_stub_tensor.py +1 -209
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +177 -52
- mindspore/common/_utils.py +9 -1
- mindspore/common/api.py +338 -208
- mindspore/common/dtype.py +108 -57
- mindspore/common/dump.py +11 -16
- mindspore/common/dynamic_shape/__init__.py +0 -0
- mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +17 -23
- mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
- mindspore/common/file_system.py +59 -9
- mindspore/common/generator.py +2 -3
- mindspore/common/hook_handle.py +33 -5
- mindspore/common/jit_config.py +1 -1
- mindspore/common/jit_trace.py +84 -105
- mindspore/common/np_dtype.py +3 -3
- mindspore/common/parameter.py +27 -29
- mindspore/common/recompute.py +5 -7
- mindspore/common/sparse_tensor.py +0 -3
- mindspore/common/symbol.py +0 -1
- mindspore/common/tensor.py +84 -133
- mindspore/communication/_comm_helper.py +46 -4
- mindspore/communication/management.py +79 -7
- mindspore/context.py +47 -38
- mindspore/dataset/__init__.py +1 -1
- mindspore/dataset/audio/transforms.py +1 -1
- mindspore/dataset/core/config.py +38 -4
- mindspore/dataset/engine/datasets.py +350 -322
- mindspore/dataset/engine/datasets_user_defined.py +69 -23
- mindspore/dataset/engine/iterators.py +2 -2
- mindspore/dataset/engine/obs/config_loader.py +2 -2
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
- mindspore/dataset/transforms/c_transforms.py +2 -2
- mindspore/dataset/transforms/py_transforms.py +7 -3
- mindspore/dataset/transforms/transforms.py +10 -6
- mindspore/dataset/vision/__init__.py +1 -1
- mindspore/dataset/vision/py_transforms.py +8 -8
- mindspore/dataset/vision/transforms.py +17 -5
- mindspore/dataset/vision/utils.py +632 -21
- mindspore/dataset/vision/validators.py +1 -0
- mindspore/device_context/ascend/device.py +1 -1
- mindspore/device_context/ascend/op_tuning.py +35 -1
- mindspore/device_context/gpu/__init__.py +2 -2
- mindspore/device_context/gpu/device.py +1 -1
- mindspore/device_context/gpu/op_precision.py +4 -2
- mindspore/device_context/gpu/op_tuning.py +6 -3
- mindspore/device_manager.py +16 -9
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +5 -4
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/optim/adadelta.py +13 -20
- mindspore/experimental/optim/adagrad.py +15 -22
- mindspore/experimental/optim/adam.py +17 -24
- mindspore/experimental/optim/adamax.py +14 -22
- mindspore/experimental/optim/adamw.py +28 -34
- mindspore/experimental/optim/asgd.py +15 -25
- mindspore/experimental/optim/lr_scheduler.py +27 -45
- mindspore/experimental/optim/nadam.py +14 -24
- mindspore/experimental/optim/optimizer.py +13 -23
- mindspore/experimental/optim/radam.py +18 -24
- mindspore/experimental/optim/rmsprop.py +14 -25
- mindspore/experimental/optim/rprop.py +15 -26
- mindspore/experimental/optim/sgd.py +9 -19
- mindspore/hal/__init__.py +4 -4
- mindspore/hal/contiguous_tensors_handle.py +2 -2
- mindspore/hal/memory.py +1 -0
- mindspore/include/api/cell.h +65 -5
- mindspore/include/api/cfg.h +24 -7
- mindspore/include/api/context.h +1 -0
- mindspore/include/api/delegate.h +10 -2
- mindspore/include/api/dual_abi_helper.h +100 -19
- mindspore/include/api/graph.h +14 -1
- mindspore/include/api/kernel.h +16 -3
- mindspore/include/api/kernel_api.h +9 -1
- mindspore/include/api/metrics/accuracy.h +9 -0
- mindspore/include/api/model.h +8 -1
- mindspore/include/api/model_group.h +4 -0
- mindspore/include/api/model_parallel_runner.h +2 -0
- mindspore/include/api/status.h +48 -10
- mindspore/include/api/types.h +8 -3
- mindspore/include/c_api/model_c.h +0 -58
- mindspore/include/c_api/tensor_c.h +0 -26
- mindspore/include/dataset/constants.h +9 -0
- mindspore/include/dataset/vision_ascend.h +1 -1
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/cifar10.py +61 -11
- mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mindspore_ops_host.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +4 -44
- mindspore/mint/distributed/__init__.py +5 -0
- mindspore/mint/distributed/distributed.py +425 -19
- mindspore/mint/nn/__init__.py +1 -1
- mindspore/mint/nn/functional.py +53 -6
- mindspore/mint/nn/layer/_functions.py +163 -294
- mindspore/mint/nn/layer/activation.py +8 -6
- mindspore/mint/nn/layer/conv.py +125 -101
- mindspore/mint/nn/layer/normalization.py +11 -25
- mindspore/mint/optim/adam.py +19 -18
- mindspore/mint/optim/adamw.py +14 -8
- mindspore/mint/optim/sgd.py +5 -5
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/cell.py +488 -620
- mindspore/nn/grad/cell_grad.py +11 -12
- mindspore/nn/layer/activation.py +36 -36
- mindspore/nn/layer/basic.py +74 -77
- mindspore/nn/layer/channel_shuffle.py +4 -4
- mindspore/nn/layer/combined.py +4 -2
- mindspore/nn/layer/conv.py +86 -85
- mindspore/nn/layer/dense.py +9 -7
- mindspore/nn/layer/embedding.py +50 -52
- mindspore/nn/layer/image.py +38 -40
- mindspore/nn/layer/math.py +111 -112
- mindspore/nn/layer/normalization.py +56 -44
- mindspore/nn/layer/pooling.py +58 -63
- mindspore/nn/layer/rnn_cells.py +33 -33
- mindspore/nn/layer/rnns.py +56 -56
- mindspore/nn/layer/thor_layer.py +74 -73
- mindspore/nn/layer/transformer.py +11 -1
- mindspore/nn/learning_rate_schedule.py +20 -20
- mindspore/nn/loss/loss.py +79 -81
- mindspore/nn/optim/adam.py +2 -4
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/lamb.py +1 -3
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/tft_wrapper.py +2 -3
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/utils.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -1
- mindspore/nn/probability/distribution/poisson.py +2 -1
- mindspore/nn/sparse/sparse.py +3 -3
- mindspore/nn/wrap/cell_wrapper.py +73 -42
- mindspore/nn/wrap/grad_reducer.py +37 -52
- mindspore/nn/wrap/loss_scale.py +72 -74
- mindspore/numpy/array_creations.py +7 -7
- mindspore/numpy/fft.py +1 -1
- mindspore/numpy/math_ops.py +1 -1
- mindspore/numpy/utils_const.py +1 -1
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
- mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/{experimental/es/__init__.py → ops/_op_impl/cpu/joinedstr_op.py} +12 -6
- mindspore/ops/_vmap/vmap_array_ops.py +6 -13
- mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +29 -10
- mindspore/ops/auto_generate/gen_extend_func.py +5 -55
- mindspore/ops/auto_generate/gen_ops_def.py +753 -273
- mindspore/ops/auto_generate/gen_ops_prim.py +1687 -958
- mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
- mindspore/ops/composite/__init__.py +10 -0
- mindspore/ops/composite/base.py +9 -5
- mindspore/ops/composite/multitype_ops/__init__.py +12 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +132 -108
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
- mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
- mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
- mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
- mindspore/ops/function/__init__.py +4 -1
- mindspore/ops/function/_add_attr_func.py +11 -6
- mindspore/ops/function/array_func.py +17 -100
- mindspore/ops/function/debug_func.py +8 -5
- mindspore/ops/function/grad/grad_func.py +5 -13
- mindspore/ops/function/math_func.py +65 -399
- mindspore/ops/function/nn_func.py +44 -61
- mindspore/ops/function/other_func.py +4 -1
- mindspore/ops/function/random_func.py +31 -4
- mindspore/ops/functional.py +2 -3
- mindspore/ops/functional_overload.py +486 -18
- mindspore/ops/op_info_register.py +21 -0
- mindspore/ops/operations/__init__.py +5 -2
- mindspore/ops/operations/_custom_ops_utils.py +675 -8
- mindspore/ops/operations/_inner_ops.py +14 -18
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +4 -50
- mindspore/ops/operations/comm_ops.py +186 -41
- mindspore/ops/operations/custom_ops.py +244 -175
- mindspore/ops/operations/debug_ops.py +55 -4
- mindspore/ops/operations/image_ops.py +13 -13
- mindspore/ops/operations/manually_defined/ops_def.py +27 -28
- mindspore/ops/operations/math_ops.py +8 -9
- mindspore/ops/operations/nn_ops.py +6 -7
- mindspore/ops/primitive.py +9 -20
- mindspore/ops/tensor_method.py +52 -11
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
- mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
- mindspore/ops_generate/api/functions_cc_generator.py +58 -10
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
- mindspore/ops_generate/common/base_generator.py +14 -0
- mindspore/ops_generate/common/gen_constants.py +7 -2
- mindspore/ops_generate/common/gen_utils.py +0 -19
- mindspore/ops_generate/common/op_proto.py +11 -4
- mindspore/ops_generate/common/template.py +88 -11
- mindspore/ops_generate/gen_ops.py +1 -1
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
- mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -16
- mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
- mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
- mindspore/parallel/_auto_parallel_context.py +9 -17
- mindspore/parallel/_cell_wrapper.py +106 -40
- mindspore/parallel/_parallel_serialization.py +4 -3
- mindspore/parallel/_ps_context.py +4 -6
- mindspore/parallel/_tensor.py +167 -12
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/transformer.py +17 -12
- mindspore/parallel/_utils.py +5 -11
- mindspore/parallel/auto_parallel.py +33 -12
- mindspore/parallel/checkpoint_convert.py +3 -3
- mindspore/parallel/checkpoint_transform.py +5 -1
- mindspore/parallel/cluster/process_entity/_api.py +88 -49
- mindspore/parallel/cluster/process_entity/_utils.py +95 -7
- mindspore/parallel/cluster/run.py +48 -7
- mindspore/parallel/function/__init__.py +8 -1
- mindspore/parallel/function/reshard_func.py +7 -6
- mindspore/parallel/nn/__init__.py +15 -2
- mindspore/parallel/nn/parallel_cell_wrapper.py +50 -14
- mindspore/parallel/nn/parallel_grad_reducer.py +7 -14
- mindspore/parallel/shard.py +9 -23
- mindspore/parallel/transform_safetensors.py +468 -174
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +3 -0
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
- mindspore/profiler/analysis/task_manager.py +1 -1
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +10 -9
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +43 -23
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
- mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
- mindspore/profiler/common/constant.py +16 -0
- mindspore/profiler/common/msprof_cmd_tool.py +2 -2
- mindspore/profiler/common/path_manager.py +9 -0
- mindspore/profiler/common/profiler_context.py +50 -29
- mindspore/profiler/common/profiler_info.py +0 -16
- mindspore/profiler/common/profiler_meta_data.py +1 -0
- mindspore/profiler/common/profiler_op_analyse.py +239 -0
- mindspore/profiler/common/profiler_output_path.py +23 -8
- mindspore/profiler/common/profiler_parameters.py +128 -35
- mindspore/profiler/dynamic_profile/__init__.py +0 -0
- mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
- mindspore/profiler/dynamic_profiler.py +374 -338
- mindspore/profiler/envprofiler.py +42 -12
- mindspore/profiler/experimental_config.py +112 -7
- mindspore/profiler/mstx.py +33 -12
- mindspore/profiler/platform/__init__.py +2 -3
- mindspore/profiler/platform/cpu_profiler.py +10 -4
- mindspore/profiler/platform/npu_profiler.py +30 -20
- mindspore/profiler/profiler.py +218 -154
- mindspore/profiler/profiler_action_controller.py +65 -77
- mindspore/profiler/profiler_interface.py +2 -2
- mindspore/profiler/schedule.py +10 -4
- mindspore/rewrite/common/config.py +1 -0
- mindspore/rewrite/common/namer.py +1 -0
- mindspore/rewrite/common/namespace.py +1 -0
- mindspore/rewrite/node/node.py +31 -11
- mindspore/rewrite/parsers/assign_parser.py +1 -1
- mindspore/rewrite/symbol_tree/symbol_tree.py +2 -2
- mindspore/run_check/_check_version.py +7 -10
- mindspore/runtime/__init__.py +8 -6
- mindspore/runtime/event.py +10 -4
- mindspore/runtime/executor.py +87 -45
- mindspore/runtime/memory.py +22 -30
- mindspore/runtime/thread_bind_core.py +299 -165
- mindspore/safeguard/rewrite_obfuscation.py +12 -13
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/_utils.py +9 -5
- mindspore/train/amp.py +43 -23
- mindspore/train/callback/__init__.py +5 -5
- mindspore/train/callback/_callback.py +2 -1
- mindspore/train/callback/_checkpoint.py +4 -14
- mindspore/train/callback/_flops_collector.py +11 -7
- mindspore/train/callback/_landscape.py +0 -1
- mindspore/train/callback/_train_fault_tolerance.py +72 -18
- mindspore/train/data_sink.py +15 -6
- mindspore/train/dataset_helper.py +14 -5
- mindspore/train/model.py +49 -47
- mindspore/train/serialization.py +168 -126
- mindspore/train/summary/summary_record.py +13 -2
- mindspore/train/train_thor/model_thor.py +2 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +0 -6
- mindspore/utils/runtime_execution_order_check.py +162 -78
- mindspore/utils/sdc_detect.py +68 -0
- mindspore/utils/utils.py +14 -17
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/METADATA +5 -4
- {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/RECORD +400 -439
- mindspore/_deprecated/jit.py +0 -198
- mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
- mindspore/communication/_hccl_management.py +0 -297
- mindspore/experimental/es/embedding_service.py +0 -891
- mindspore/experimental/es/embedding_service_layer.py +0 -581
- mindspore/profiler/common/validator/__init__.py +0 -14
- mindspore/profiler/common/validator/validate_path.py +0 -84
- mindspore/profiler/parser/__init__.py +0 -14
- mindspore/profiler/parser/aicpu_data_parser.py +0 -272
- mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
- mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
- mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
- mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
- mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
- mindspore/profiler/parser/ascend_flops_generator.py +0 -116
- mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
- mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
- mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
- mindspore/profiler/parser/ascend_op_generator.py +0 -334
- mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
- mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
- mindspore/profiler/parser/base_timeline_generator.py +0 -483
- mindspore/profiler/parser/container.py +0 -229
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
- mindspore/profiler/parser/flops_parser.py +0 -531
- mindspore/profiler/parser/framework_enum.py +0 -111
- mindspore/profiler/parser/framework_parser.py +0 -464
- mindspore/profiler/parser/framework_struct.py +0 -61
- mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
- mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
- mindspore/profiler/parser/hccl_parser.py +0 -573
- mindspore/profiler/parser/hwts_log_parser.py +0 -122
- mindspore/profiler/parser/integrator.py +0 -526
- mindspore/profiler/parser/memory_usage_parser.py +0 -277
- mindspore/profiler/parser/minddata_analyzer.py +0 -800
- mindspore/profiler/parser/minddata_parser.py +0 -186
- mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
- mindspore/profiler/parser/op_intermediate_parser.py +0 -149
- mindspore/profiler/parser/optime_parser.py +0 -250
- mindspore/profiler/parser/profiler_info.py +0 -213
- mindspore/profiler/parser/step_trace_parser.py +0 -666
- mindspore/utils/hooks.py +0 -81
- /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
- {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/WHEEL +0 -0
- {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/entry_points.txt +0 -0
- {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/top_level.txt +0 -0
mindspore/amp.py
CHANGED
@@ -167,9 +167,6 @@ def all_finite(inputs):
         >>> x = (Tensor(np.array([np.log(-1), 1, np.log(0)])), Tensor(np.array([1.0])))
         >>> output = amp.all_finite(x)
 
-    Tutorial Examples:
-        - `Automatic Mix Precision - Loss Scaling
-          <https://mindspore.cn/tutorials/en/master/beginner/mixed_precision.html#loss-scaling>`_
     """
     inputs = mutable(inputs)
     _check_overflow_mode = os.environ.get('MS_ASCEND_CHECK_OVERFLOW_MODE')

@@ -185,9 +182,6 @@ class LossScaler(ABC):
     to scale and unscale the loss value and gradients to avoid overflow, `adjust` is used to update the
     loss scale value.
 
-    For more information, refer to the `tutorials <https://mindspore.cn/tutorials/en/master/beginner/
-    mixed_precision.html#loss-scaling>`_.
-
     .. warning::
         This is an experimental API that is subject to change or deletion.
 

@@ -327,10 +321,10 @@ class DynamicLossScaler(LossScaler):
     r"""
     Manager for dynamically adjusting the loss scaling factor.
 
-    Dynamic loss scaling
-
-
-
+    Dynamic loss scaling attempts to determine the largest loss scale `scale_value` while keeping
+    the gradients finite. If the gradients remain finite for `scale_window` consecutive steps,
+    it increases the loss scale `scale_value` by `scale_factor`, otherwise it decreases the loss
+    scale `scale_value` by `1 / scale_factor` and resets the counter.
 
     .. warning::
         This is an experimental API that is subject to change or deletion.

@@ -377,10 +371,6 @@ class DynamicLossScaler(LossScaler):
 
         Returns:
             Union(Tensor, tuple(Tensor)), the scaled value.
-
-        Tutorial Examples:
-            - `Automatic Mix Precision - Loss Scaling
-              <https://mindspore.cn/tutorials/en/master/beginner/mixed_precision.html#loss-scaling>`_
         """
         inputs = mutable(inputs)
         return _grad_scale_map(self.scale_value, inputs)

@@ -394,10 +384,6 @@ class DynamicLossScaler(LossScaler):
 
         Returns:
            Union(Tensor, tuple(Tensor)), the unscaled value.
-
-        Tutorial Examples:
-            - `Automatic Mix Precision - Loss Scaling
-              <https://mindspore.cn/tutorials/en/master/beginner/mixed_precision.html#loss-scaling>`_
         """
         inputs = mutable(inputs)
         return _grad_unscale_map(self.scale_value, inputs)

@@ -408,10 +394,6 @@ class DynamicLossScaler(LossScaler):
 
         Args:
             grads_finite (Tensor): a scalar bool Tensor indicating whether the grads are finite.
-
-        Tutorial Examples:
-            - `Automatic Mix Precision - Loss Scaling
-              <https://mindspore.cn/tutorials/en/master/beginner/mixed_precision.html#loss-scaling>`_
         """
         one = ops.ones((), self.scale_value.dtype)
         scale_mul_factor = self.scale_value * self.scale_factor
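The rewritten DynamicLossScaler docstring above spells out the adjustment rule: grow the loss scale after `scale_window` consecutive steps with finite gradients, shrink it as soon as an overflow is seen. Below is a minimal, framework-free sketch of that rule in plain Python; the class name is made up for illustration and this is not MindSpore's actual Tensor-based implementation.

class DynamicScaleSketch:
    """Toy model of the adjust rule described in the DynamicLossScaler docstring."""

    def __init__(self, scale_value=2.0 ** 16, scale_factor=2.0, scale_window=2000):
        self.scale_value = scale_value
        self.scale_factor = scale_factor
        self.scale_window = scale_window
        self.counter = 0  # consecutive steps with finite gradients

    def adjust(self, grads_finite: bool) -> None:
        if grads_finite:
            self.counter += 1
            if self.counter == self.scale_window:
                # `scale_window` finite steps in a row: enlarge the scale by `scale_factor`
                self.scale_value *= self.scale_factor
                self.counter = 0
        else:
            # overflow detected: decrease the scale by 1 / scale_factor and reset the counter
            self.scale_value *= 1.0 / self.scale_factor
            self.counter = 0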
mindspore/atlprov.dll
CHANGED
Binary file

mindspore/avcodec-59.dll
CHANGED
Binary file

mindspore/avdevice-59.dll
CHANGED
Binary file

mindspore/avfilter-8.dll
CHANGED
Binary file

mindspore/avformat-59.dll
CHANGED
Binary file

mindspore/avutil-57.dll
CHANGED
Binary file
mindspore/boost/adasum.py
CHANGED
@@ -168,7 +168,7 @@ class AdaSum(Cell):
     @staticmethod
     def _hash(step, target, weights_index):
         target = "tag" + str(step) + str(target) + str(weights_index)
-        target_hash = hashlib.
+        target_hash = hashlib.sha256(target.encode()).hexdigest()
         max_num_hash = 2 ** 31
         hash_res = int(int(target_hash, 16) % max_num_hash)
         return hash_res

@@ -507,10 +507,10 @@ class BoostTrainOneStepWithLossScaleCell(BoostTrainOneStepCell):
         self.reduce_sum = P.ReduceSum(keep_dims=False)
         self.less_equal = P.LessEqual()
         self.allreduce = P.AllReduce()
-        self.is_distributed =
-        self.gpu_target =
-        self.ascend_910a_target =
-        self.ascend_910b_target =
+        self.is_distributed = self.parallel_mode != ParallelMode.STAND_ALONE
+        self.gpu_target = context.get_context("device_target") == "GPU"
+        self.ascend_910a_target = MSContext.get_instance().get_ascend_soc_version() == 'ascend910'
+        self.ascend_910b_target = MSContext.get_instance().get_ascend_soc_version() in ['ascend910b', 'ascend910_93']
         self.loss_scaling_manager = None
         self._ascend_check_overflow_mode = os.environ.get('MS_ASCEND_CHECK_OVERFLOW_MODE')
 
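The `_hash` change above moves the tag digest to SHA-256 and then folds the hex digest into a 31-bit integer. A standalone illustration of that folding follows; the helper name and the example arguments are made up for this sketch.

import hashlib

def tag_to_int(step, target, weights_index):
    # Same folding as AdaSum._hash above: sha256 hex digest -> int -> mod 2**31.
    tag = "tag" + str(step) + str(target) + str(weights_index)
    digest_hex = hashlib.sha256(tag.encode()).hexdigest()
    return int(digest_hex, 16) % (2 ** 31)

print(tag_to_int(1, 0, 3))  # deterministic value in [0, 2**31)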
mindspore/c1.dll
CHANGED
Binary file

mindspore/c1xx.dll
CHANGED
Binary file

mindspore/c2.dll
CHANGED
Binary file
mindspore/common/__init__.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright 2020-
+# Copyright 2020-2025 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -17,13 +17,15 @@ from __future__ import absolute_import
 from mindspore.common import dtype
 from mindspore.common.api import ms_memory_recycle, jit, jit_class, _no_grad, \
     flops_collection, set_recursion_limit
-from mindspore.common.dtype import Type, int8, byte, int16, short, int32, intc, int64, intp, \
+from mindspore.common.dtype import Type, int8, byte, int16, short, int, int32, intc, long, int64, intp, \
     uint8, ubyte, uint16, ushort, uint32, uintc, uint64, uintp, float16, half, \
-    float32, single, float64, bfloat16, double, bool_, float_, list_, tuple_, int_, \
+    float, float32, single, float64, bfloat16, double, bool, bool_, float_, list_, tuple_, int_, \
     uint, number, tensor_type, string, type_none, TensorType, Int, \
-    complex64, complex128, dtype_to_nptype, _null, _NullType, \
-    dtype_to_pytype, pytype_to_dtype, get_py_obj_dtype, QuantDtype, qint4x2
+    cfloat, complex64, cdouble, complex128, dtype_to_nptype, _null, _NullType, \
+    dtype_to_pytype, pytype_to_dtype, get_py_obj_dtype, QuantDtype, qint4x2, \
+    float8_e4m3fn, float8_e5m2, hifloat8
 from mindspore.common.dump import set_dump
+from mindspore.common.file_system import set_mindio_server_info, mindio_preload
 from mindspore.common.parameter import Parameter, ParameterTuple
 from mindspore.common.seed import set_seed, get_seed
 from mindspore.common.tensor import Tensor, tensor

@@ -40,13 +42,39 @@ from mindspore.common.generator import (
     Generator, default_generator, seed, manual_seed, initial_seed, get_rng_state, set_rng_state)
 from mindspore.ops.function.array_func import is_tensor, from_numpy
 from mindspore.common._grad_function import _Function
+from mindspore.common.dynamic_shape.enable_dynamic import enable_dynamic
+
+try:
+    import triton
+    if isinstance(getattr(triton.runtime.jit, "type_canonicalisation_dict", None), dict):
+        ms_type_canonicalisation_dict = {
+            "Bool": "i1",
+            "Float16": "fp16",
+            "BFloat16": "bf16",
+            "Float32": "fp32",
+            "Float64": "fp64",
+            "Int8": "i8",
+            "Int16": "i16",
+            "Int32": "i32",
+            "Int64": "i64",
+            "UInt8": "u8",
+            "UInt16": "u16",
+            "UInt32": "u32",
+            "UInt64": "u64",
+        }
+        triton.runtime.jit.type_canonicalisation_dict.update(ms_type_canonicalisation_dict)
+
+except ImportError:
+    pass
 
 # symbols from dtype
+# bool, int, float are not defined in __all__ to avoid conflict with built-in types.
 __all__ = [
+    "bool_",
     "int8", "byte",
     "int16", "short",
     "int32", "intc",
-    "int64", "intp",
+    "int64", "long", "intp",
     "uint8", "ubyte",
     "uint16", "ushort",
     "uint32", "uintc",

@@ -54,19 +82,20 @@ __all__ = [
     "float16", "half",
     "float32", "single",
     "float64", "double",
-    "
-    "list_", "tuple_",
+    "float_", "list_", "tuple_",
     "int_", "uint",
     "number", "tensor_type",
     "string", "type_none",
     "_null",
     "TensorType", "QuantDtype",
     "Type", "Int", "_NullType",
-    "complex64", "
+    "complex64", "cfloat",
+    "complex128", "cdouble",
+    "bfloat16", "qint4x2",
+    "float8_e4m3fn", "float8_e5m2", "hifloat8",
     # __method__ from dtype
     "dtype_to_nptype", "dtype_to_pytype",
-    "pytype_to_dtype", "get_py_obj_dtype"
-    "bfloat16", "qint4x2"
+    "pytype_to_dtype", "get_py_obj_dtype"
 ]
 
 __all__.extend([

@@ -79,11 +108,13 @@ __all__.extend([
     "ms_memory_recycle",
     "set_recursion_limit",
     "mutable", "JitConfig",
+    "enable_dynamic",
     "flops_collection",
     "lazy_inline", "load_mindir", "save_mindir",
     "no_inline",
     "Symbol",
     "recompute",
-    "is_tensor", "from_numpy", "_Function"
+    "is_tensor", "from_numpy", "_Function",
+    "set_mindio_server_info", "mindio_preload"
 ])
 __all__.extend(generator.__all__)
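The guarded block added above registers MindSpore dtype names in Triton's type_canonicalisation_dict when Triton happens to be importable, and silently skips the step otherwise. A small hedged check of that behaviour follows; it assumes Triton is installed and that its runtime exposes type_canonicalisation_dict, which is exactly the condition the guarded code tests.

import mindspore  # importing mindspore.common runs the guarded registration above

try:
    import triton
    canon = getattr(triton.runtime.jit, "type_canonicalisation_dict", {})
    # If the registration ran, MindSpore dtype names resolve to Triton type strings.
    print(canon.get("Float16"), canon.get("BFloat16"), canon.get("Int64"))  # expected: fp16 bf16 i64
except ImportError:
    print("triton not installed; mindspore simply skips the registration")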

mindspore/common/_grad_function.py
CHANGED
@@ -15,11 +15,12 @@
 
 """Defines custom autograd function with functional form."""
 
+__all__ = ['_Function']
+
 from typing import Any
 from mindspore._c_expression import FunctionBase as FunctionBase_
 from mindspore.common.tensor import Tensor
 
-__all__ = ['_Function']
 
 class _Function(FunctionBase_):
     """

mindspore/common/_pijit_context.py
CHANGED
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-
-
+
+"""Define pijit context."""
+
 import inspect
 import types
 import functools

@@ -37,14 +38,22 @@ def _update_graph_executor_config(jit_config):
     GraphExecutor_.get_instance().set_jit_config(JitConfig(**valid_config).jit_config_dict)
 
 
+class Unsupported(RuntimeError):
+    """If using @jit(fullgraph=True), pijit will raise an Unsupported exception when encountering a graph break."""
+
+    # pylint: disable=useless-super-delegation
+    def __init__(self, msg: str):
+        super().__init__(msg)
+
+
 class PIJitCaptureContext:
     """
     Context manager for pijit graph capture
     """
 
-    def __init__(self, jit_config=None, input_signature=None):
+    def __init__(self, fullgraph=False, jit_config=None, input_signature=None):
         _update_graph_executor_config(jit_config)
-        config = {}
+        config = {'fullgraph': fullgraph}
         if isinstance(jit_config, JitConfig):
             config.update(jit_config.jit_config_dict)
         elif jit_config is not None:

@@ -74,6 +83,9 @@ class PIJitCaptureContext:
             or inspect.isasyncgenfunction(fn) or inspect.isawaitable(fn)
 
     def _wrapper(self):
+        """
+        pijit wrapper of fn.
+        """
         def _fn(*args, **kwds):
             PreJit(args, kwds)
             disable_pijit = self.config.get('_disable_pijit', None)

@@ -82,21 +94,27 @@ class PIJitCaptureContext:
             with self:
                 self.ret = self.fn(*args, **kwds)
             return self.ret
+
         return _fn
 
     def __call__(self, fn):
+        """
+        :raises Unsupported: If using @jit(fullgraph=True), will raise exception when encountering a graph break.
+        """
         if isinstance(fn, type) and issubclass(fn, mindspore.nn.Cell):
             fn.construct = self(fn.construct)
             return fn
         if isinstance(fn, mindspore.nn.Cell):
-
-            return fn
+            return types.MethodType(self(fn.construct.__func__), fn)
         if isinstance(fn, types.MethodType):
             return types.MethodType(self(fn.__func__), fn.__self__)
         if not isinstance(fn, types.FunctionType) or self._is_unsupported(fn):
             logger.warning("unsupported function type" + str(fn))
             return fn
 
+        if hasattr(fn, "__wrapped_by_jit__"):
+            logger.warning(f"The fn {fn} should be wrapped by jit only once.")
+
         module = inspect.getmodule(fn.__code__)
         if module is not None and module.__name__.startswith("mindspore"):
             if fn.__code__.co_name != 'after_grad':

@@ -107,7 +125,9 @@ class PIJitCaptureContext:
         if fn.__code__ is _fn.__code__:
             fn = fn.__closure__[0].cell_contents.fn
         self.fn = fn
-
+        wrap_fn = functools.wraps(fn)(_fn)
+        setattr(wrap_fn, "__wrapped_by_jit__", True)
+        return wrap_fn
 
     def __enter__(self):
         pi_jit_set_context(self.fn, *self._init_arg)

@@ -122,6 +142,7 @@ def _get_skip_files():
     """
     Get skip files by SKIP_RULES
     """
+
     def _filter(path: str):
         if path.endswith("__init__.py"):
             return path[0:-11]
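The __call__ changes above mark each wrapped function with a __wrapped_by_jit__ attribute and warn when the same function is passed through the decorator twice. The guard in isolation looks like the following generic sketch in plain Python; the decorator name is made up and this is not the actual pijit wrapper.

import functools
import logging

logger = logging.getLogger(__name__)

def wrap_once(fn):
    # Warn if fn already carries the marker set by a previous wrapping.
    if hasattr(fn, "__wrapped_by_jit__"):
        logger.warning("The fn %s should be wrapped by jit only once.", fn)

    @functools.wraps(fn)
    def _fn(*args, **kwargs):
        return fn(*args, **kwargs)

    # Mark the wrapper so a second decoration can be detected.
    setattr(_fn, "__wrapped_by_jit__", True)
    return _fn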
mindspore/common/_stub_tensor.py
CHANGED
@@ -14,213 +14,5 @@
 # ============================================================================
 """Stub Tensor implementation."""
 
-import inspect
-from functools import reduce
-from mindspore.common.tensor import Tensor
-from mindspore.common.dtype import type_size_in_bytes
-import mindspore._c_expression as c_expression
-from mindspore._c_expression import TensorNode, SequenceNode, NoneTypeNode, AnyTypeNode
-from mindspore._c_expression import TensorPy as Tensor_
-from mindspore.common.api import _convert_python_data
-from mindspore.common._tensor_cpp_method import tensor_cpp_methods
-
-
-def _stub_member(var, init):
-    """handle stub tensor's member, use a member cache to improve performance"""
-    def getx(stub):
-        if stub.tensor is not None:
-            return getattr(stub.tensor, var)
-        if hasattr(stub, "member_cache"):
-            return stub.member_cache.get(var, init)
-        return init
-
-    def setx(stub, value):
-        if stub.tensor is not None:
-            setattr(stub.tensor, var, value)
-        else:
-            if not hasattr(stub, "member_cache"):
-                stub.member_cache = {}
-            stub.member_cache[var] = value
-    return property(getx, setx)
-
-
-def _stub_method(method):
-    def fun(*arg, **kwargs):
-        stub = arg[0]
-        arg = (stub.stub_sync(),) + arg[1:]
-        return method(*arg, **kwargs)
-    return fun
-
-
 class StubTensor:
-    """
-    const_arg = _stub_member("const_arg", None)
-    init = _stub_member("init", None)
-    init_finished = _stub_member("init_finished", False)
-    virtual_flag = _stub_member("virtual_flag", False)
-    parent_tensor_ = _stub_member("parent_tensor_", None)
-    index_of_parent_ = _stub_member("index_of_parent_", None)
-    slice_num_of_persistent_data_ = _stub_member("slice_num_of_persistent_data_", None)
-    slice_shape_of_persistent_data_ = _stub_member("slice_shape_of_persistent_data_", None)
-
-    def __init__(self, stub=None, tensor=None):
-        self.stub = stub
-        self.tensor = tensor
-
-    __str__ = _stub_method(Tensor.__str__)
-    __repr__ = _stub_method(Tensor.__repr__)
-    __setitem__ = _stub_method(Tensor.__setitem__)
-
-    __lt__ = Tensor.__lt__
-    __le__ = Tensor.__le__
-    __gt__ = Tensor.__gt__
-    __ge__ = Tensor.__ge__
-    __eq__ = Tensor.__eq__
-    __ne__ = Tensor.__ne__
-
-    @property
-    def shape(self):
-        """shape stub."""
-        if self.stub:
-            if not hasattr(self, "stub_shape"):
-                self.stub_shape = self.stub.get_shape()
-            return self.stub_shape
-        return self.tensor.shape
-
-    @property
-    def dtype(self):
-        """dtype stub."""
-        if self.stub:
-            if not hasattr(self, "stub_dtype"):
-                self.stub_dtype = self.stub.get_dtype()
-            return self.stub_dtype
-        return self.tensor.dtype
-
-    @property
-    def size(self):
-        """size stub."""
-        shape = self.shape
-        return reduce((lambda x, y: x * y), shape) if shape else 1
-
-    @property
-    def itemsize(self):
-        """itemsize stub."""
-        return type_size_in_bytes(self.dtype)
-
-    @property
-    def nbytes(self):
-        """nbytes stub."""
-        return self.size * self.itemsize
-
-    @property
-    def ndim(self):
-        """ndim stub."""
-        return len(self.shape)
-
-    @property
-    def strides(self):
-        """strides stub."""
-        return self.stub_sync().strides
-
-    @property
-    def has_init(self):
-        """has_init stub."""
-        return False
-
-    def ndimension(self):
-        r"""
-        Alias for :func:`mindspore.Tensor.ndim`.
-        """
-        return self.ndim
-
-    def dim(self):
-        r"""
-        Alias for :func:`mindspore.Tensor.ndim`.
-        """
-        return self.ndim
-
-
-    def is_contiguous(self):
-        if self.stub:
-            return self.stub.get_value().is_contiguous()
-        return self.tensor.is_contiguous()
-
-    def set_cast_dtype(self):
-        if self.stub:
-            return self.stub.get_value().set_cast_dtype()
-        return self.tensor.set_cast_dtype()
-
-    def storage_offset(self):
-        if self.stub:
-            return self.stub.get_value().storage_offset()
-        return self.tensor.storage_offset()
-
-    def _need_contiguous(self):
-        if self.stub:
-            return self.stub.get_value()._need_contiguous()  # pylint: disable=protected-access
-        return self.tensor._need_contiguous()  # pylint: disable=protected-access
-
-    asnumpy = _stub_method(Tensor.asnumpy)
-    is_persistent_data = _stub_method(Tensor.is_persistent_data)
-    asnumpy_of_slice_persistent_data = _stub_method(Tensor.asnumpy_of_slice_persistent_data)
-    slice_num_of_persistent_data = _stub_method(Tensor.slice_num_of_persistent_data)
-    slice_shape_of_persistent_data = _stub_method(Tensor.slice_shape_of_persistent_data)
-    flush_from_cache = _stub_method(Tensor.flush_from_cache)
-    register_hook = _stub_method(Tensor.register_hook)
-
-    def stub_sync(self):
-        """sync real tensor."""
-        if self.stub:
-            val = self.stub.get_value()
-            self.tensor = Tensor(val)
-            if hasattr(self, "member_cache"):
-                for k, v in self.member_cache.items():
-                    setattr(self.tensor, k, v)
-            self.stub = None
-        return self.tensor
-
-    def __getstate__(self):
-        state = {}
-        value = self.stub.get_value() if self.stub else self.tensor.__getstate__()
-        state["value"] = value
-        return state
-
-    def __setstate__(self, state):
-        value = state.pop("value")
-        self.stub = None
-        self.tensor = Tensor(value)
-
-no_stub_sync_cpp_api = ["set_cast_dtype", "storage_offset", "is_contiguous", "_need_contiguous"]
-
-def _init_stub_tensor_api():
-    """adapt to python tensor and cpp tensor api"""
-    need_init_func = set(dir(Tensor)) - set(dir(StubTensor))
-    cpp_tensor_func = dir(Tensor_)
-    for attr in need_init_func:
-        func = inspect.getattr_static(Tensor, attr)
-        if attr in cpp_tensor_func and attr not in tensor_cpp_methods:
-            # for cpp tensor api, we always need to sync for real tensor first
-            setattr(StubTensor, attr, _stub_method(func))
-        elif attr not in no_stub_sync_cpp_api:
-            setattr(StubTensor, attr, func)
-
-
-_init_stub_tensor_api()
-c_expression.addStubTensorMethods()
-
-def _convert_stub(stub):
-    "convert stub to StubNode or Value"
-    if isinstance(stub, TensorNode):
-        return StubTensor(stub)
-    if isinstance(stub, tuple):
-        return tuple(_convert_stub(e) for e in stub)
-    if isinstance(stub, SequenceNode):
-        elements = stub.get_elements()
-        return tuple(_convert_stub(e) for e in elements)
-    if isinstance(stub, NoneTypeNode):
-        val = stub.get_real_value()
-        return _convert_python_data(val)
-    if isinstance(stub, AnyTypeNode):
-        val = stub.get_real_node()
-        return _convert_stub(val)
-    return _convert_python_data(stub)
+    """Remove this class when other modules are adapted."""

mindspore/common/_tensor_cpp_method.py
CHANGED
@@ -14,4 +14,4 @@
 # ============================================================================
 """Add tensor cpp methods for stub tensor"""
 
-tensor_cpp_methods = ['abs', 'absolute', '__abs__', 'acos', 'arccos', 'acosh', 'arccosh', 'add', '__add__', 'addbmm', 'addcdiv', 'addmm', 'addmv', 'add_', '__iadd__', 'all', 'allclose', 'any', 'argmax', 'argmin', 'argsort', 'asin', 'arcsin', 'asinh', 'arcsinh', 'atan', 'arctan', 'atan2', 'arctan2', 'atanh', 'arctanh', 'baddbmm', 'bincount', 'bitwise_and', '__and__', 'bitwise_not', 'bitwise_or', '__or__', 'bitwise_xor', '__xor__', 'ceil', 'chunk', 'clamp', 'clip', 'clone', 'copy_', 'cos', 'cosh', 'count_nonzero', 'cumsum', 'diag', 'div', 'divide', 'div_', '__itruediv__', 'dot', 'eq', 'erf', 'erfc', 'exp', 'expand_as', 'expm1', 'exp_', 'fill_', 'fill_diagonal_', 'flatten', 'floor', 'floor_divide', 'floor_divide_', '__ifloordiv__', 'fmod', 'frac', 'gather', 'gcd', 'greater', 'gt', 'greater_equal', 'ge', 'hardshrink', 'histc', 'index_add', 'index_select', 'inverse', 'isclose', 'isfinite', 'isinf', 'isneginf', 'kthvalue', 'lerp', 'less', 'lt', 'less_equal', 'le', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logsumexp', 'log_', 'masked_fill', 'masked_fill_', 'masked_select', 'matmul', 'max', 'maximum', 'mean', 'median', 'min', 'minimum', 'mm', 'mul', 'mul_', '__imul__', 'nansum', 'nan_to_num', 'narrow', 'neg', 'negative', 'new_ones', 'new_zeros', 'not_equal', 'ne', 'outer', 'pow', '__pow__', 'prod', 'put_', 'reciprocal', 'remainder', 'repeat', 'repeat_interleave', 'reshape', 'roll', 'round', 'rsqrt', 'scatter', 'scatter_', 'scatter_add', 'select', 'sigmoid', 'sin', 'sinc', 'sinh', 'sort', 'split', 'sqrt', 'square', 'std', 'sub', '__sub__', 'subtract', 'sub_', '__isub__', 'sum', 't', 'take', 'tan', 'tanh', 'tile', 'topk', 'transpose', 'tril', 'triu', 'true_divide', 'trunc', 'type_as', 'unbind', 'unique', 'unsqueeze', 'var', 'view_as', 'where', 'xlogy', '_to']
+tensor_cpp_methods = ['abs', 'absolute', '__abs__', 'acos', 'arccos', 'acosh', 'arccosh', 'add', '__add__', 'addbmm', 'addcdiv', 'addmm', 'addmv', 'add_', '__iadd__', 'all', 'allclose', 'any', 'argmax', 'argmin', 'argsort', 'asin', 'arcsin', 'asinh', 'arcsinh', 'atan', 'arctan', 'atan2', 'arctan2', 'atanh', 'arctanh', 'baddbmm', 'bincount', 'bitwise_and', '__and__', 'bitwise_not', 'bitwise_or', '__or__', 'bitwise_xor', '__xor__', 'ceil', 'chunk', 'clamp', 'clip', 'clone', 'copy_', 'cos', 'cosh', 'count_nonzero', 'cumsum', 'diag', 'div', 'divide', 'div_', '__itruediv__', 'dot', 'eq', 'erf', 'erfc', 'exp', 'expand_as', 'expm1', 'exp_', 'fill_', 'fill_diagonal_', 'flatten', 'floor', 'floor_divide', 'floor_divide_', '__ifloordiv__', 'fmod', 'frac', 'gather', 'gcd', 'greater', 'gt', 'greater_equal', 'ge', 'hardshrink', 'histc', 'index_add', 'index_select', 'inverse', 'isclose', 'isfinite', 'isinf', 'isneginf', 'kthvalue', 'lerp', 'less', 'lt', 'less_equal', 'le', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logsumexp', 'log_', 'masked_fill', 'masked_fill_', 'masked_scatter', 'masked_select', 'matmul', 'max', 'maximum', 'mean', 'median', 'min', 'minimum', 'mm', 'mul', 'mul_', '__imul__', 'nansum', 'nan_to_num', 'narrow', 'neg', 'negative', 'new_empty', 'new_full', 'new_ones', 'new_zeros', 'not_equal', 'ne', 'outer', 'pow', '__pow__', 'prod', 'put_', 'reciprocal', 'remainder', 'remainder_', '__imod__', 'repeat', 'repeat_interleave', 'reshape', 'roll', 'round', 'rsqrt', 'scatter', 'scatter_', 'scatter_add', 'select', 'sigmoid', 'sin', 'sinc', 'sinh', 'sort', 'split', 'sqrt', 'square', 'std', 'sub', '__sub__', 'subtract', 'sub_', '__isub__', 'sum', 't', 'take', 'tan', 'tanh', 'tile', 'topk', 'transpose', 'tril', 'triu', 'true_divide', 'trunc', 'type_as', 'unbind', 'unique', 'unsqueeze', 'var', 'view_as', 'where', 'xlogy', '_to', '__mod__']