mindspore 2.6.0rc1__cp39-cp39-win_amd64.whl → 2.7.0rc1__cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +1 -1
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +40 -9
- mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
- mindspore/_extends/optimize/cell_utils.py +96 -0
- mindspore/_extends/parse/__init__.py +2 -2
- mindspore/_extends/parse/compile_config.py +44 -22
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -1
- mindspore/_extends/parse/parser.py +37 -62
- mindspore/_extends/parse/resources.py +39 -0
- mindspore/_extends/parse/standard_method.py +43 -13
- mindspore/_extends/parse/trope.py +8 -1
- mindspore/_extends/pijit/__init__.py +1 -2
- mindspore/amp.py +4 -4
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +4 -4
- mindspore/common/__init__.py +27 -2
- mindspore/common/_grad_function.py +2 -1
- mindspore/common/_pijit_context.py +28 -7
- mindspore/common/_stub_tensor.py +1 -209
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +77 -16
- mindspore/common/api.py +238 -113
- mindspore/common/dtype.py +21 -11
- mindspore/common/dump.py +10 -15
- mindspore/common/generator.py +5 -3
- mindspore/common/hook_handle.py +11 -2
- mindspore/common/jit_config.py +1 -1
- mindspore/common/jit_trace.py +84 -105
- mindspore/common/parameter.py +26 -12
- mindspore/common/recompute.py +3 -3
- mindspore/common/sparse_tensor.py +0 -3
- mindspore/common/symbol.py +0 -1
- mindspore/common/tensor.py +81 -81
- mindspore/communication/_comm_helper.py +46 -4
- mindspore/communication/management.py +79 -7
- mindspore/context.py +58 -40
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/datasets.py +20 -7
- mindspore/dataset/engine/datasets_user_defined.py +33 -3
- mindspore/dataset/engine/iterators.py +2 -2
- mindspore/dataset/engine/obs/config_loader.py +2 -2
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
- mindspore/dataset/transforms/py_transforms.py +7 -3
- mindspore/dataset/transforms/transforms.py +7 -3
- mindspore/dataset/vision/validators.py +1 -0
- mindspore/device_context/ascend/device.py +1 -1
- mindspore/device_context/gpu/__init__.py +2 -2
- mindspore/device_context/gpu/device.py +1 -1
- mindspore/device_context/gpu/op_precision.py +4 -2
- mindspore/device_context/gpu/op_tuning.py +6 -3
- mindspore/device_manager.py +16 -9
- mindspore/dnnl.dll +0 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -7
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/optim/adadelta.py +13 -20
- mindspore/experimental/optim/adagrad.py +15 -22
- mindspore/experimental/optim/adam.py +17 -24
- mindspore/experimental/optim/adamax.py +14 -22
- mindspore/experimental/optim/adamw.py +28 -34
- mindspore/experimental/optim/asgd.py +15 -25
- mindspore/experimental/optim/lr_scheduler.py +27 -45
- mindspore/experimental/optim/nadam.py +14 -24
- mindspore/experimental/optim/optimizer.py +13 -23
- mindspore/experimental/optim/radam.py +18 -24
- mindspore/experimental/optim/rmsprop.py +14 -25
- mindspore/experimental/optim/rprop.py +15 -26
- mindspore/experimental/optim/sgd.py +9 -19
- mindspore/hal/__init__.py +4 -4
- mindspore/hal/contiguous_tensors_handle.py +2 -2
- mindspore/hal/memory.py +27 -7
- mindspore/include/api/cell.h +37 -1
- mindspore/include/api/delegate.h +10 -0
- mindspore/include/api/model.h +3 -0
- mindspore/include/api/types.h +2 -2
- mindspore/include/c_api/model_c.h +0 -58
- mindspore/include/c_api/tensor_c.h +0 -26
- mindspore/include/dataset/vision_ascend.h +1 -1
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/cifar10.py +60 -11
- mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mindspore_ops_host.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +6 -46
- mindspore/mint/distributed/__init__.py +1 -0
- mindspore/mint/distributed/distributed.py +212 -9
- mindspore/mint/nn/__init__.py +1 -1
- mindspore/mint/nn/functional.py +53 -6
- mindspore/mint/nn/layer/_functions.py +164 -294
- mindspore/mint/nn/layer/activation.py +8 -6
- mindspore/mint/nn/layer/conv.py +137 -101
- mindspore/mint/nn/layer/normalization.py +8 -22
- mindspore/mint/optim/adam.py +19 -18
- mindspore/mint/optim/adamw.py +14 -8
- mindspore/mint/optim/sgd.py +5 -5
- mindspore/nn/cell.py +328 -502
- mindspore/nn/grad/cell_grad.py +11 -12
- mindspore/nn/layer/activation.py +32 -34
- mindspore/nn/layer/basic.py +67 -64
- mindspore/nn/layer/channel_shuffle.py +4 -4
- mindspore/nn/layer/combined.py +4 -2
- mindspore/nn/layer/conv.py +117 -110
- mindspore/nn/layer/dense.py +9 -7
- mindspore/nn/layer/embedding.py +50 -52
- mindspore/nn/layer/image.py +37 -39
- mindspore/nn/layer/math.py +111 -112
- mindspore/nn/layer/normalization.py +56 -44
- mindspore/nn/layer/pooling.py +58 -63
- mindspore/nn/layer/rnn_cells.py +33 -33
- mindspore/nn/layer/rnns.py +56 -56
- mindspore/nn/layer/thor_layer.py +74 -73
- mindspore/nn/layer/transformer.py +11 -1
- mindspore/nn/learning_rate_schedule.py +20 -20
- mindspore/nn/loss/loss.py +79 -81
- mindspore/nn/optim/adam.py +3 -3
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +2 -0
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -1
- mindspore/nn/probability/distribution/poisson.py +2 -1
- mindspore/nn/sparse/sparse.py +3 -3
- mindspore/nn/wrap/cell_wrapper.py +34 -37
- mindspore/nn/wrap/grad_reducer.py +37 -37
- mindspore/nn/wrap/loss_scale.py +72 -74
- mindspore/numpy/array_creations.py +5 -5
- mindspore/numpy/fft.py +1 -1
- mindspore/numpy/math_ops.py +5 -5
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
- mindspore/ops/_vmap/vmap_array_ops.py +31 -13
- mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +42 -11
- mindspore/ops/auto_generate/gen_extend_func.py +23 -141
- mindspore/ops/auto_generate/gen_ops_def.py +727 -321
- mindspore/ops/auto_generate/gen_ops_prim.py +1721 -984
- mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
- mindspore/ops/composite/__init__.py +10 -0
- mindspore/ops/composite/base.py +8 -4
- mindspore/ops/composite/multitype_ops/__init__.py +12 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +133 -109
- mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
- mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
- mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
- mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
- mindspore/ops/function/__init__.py +3 -1
- mindspore/ops/function/_add_attr_func.py +11 -6
- mindspore/ops/function/array_func.py +9 -96
- mindspore/ops/function/debug_func.py +4 -3
- mindspore/ops/function/grad/grad_func.py +1 -1
- mindspore/ops/function/math_func.py +33 -540
- mindspore/ops/function/nn_func.py +28 -74
- mindspore/ops/function/other_func.py +4 -1
- mindspore/ops/function/random_func.py +44 -5
- mindspore/ops/function/vmap_func.py +2 -1
- mindspore/ops/functional.py +2 -3
- mindspore/ops/functional_overload.py +571 -6
- mindspore/ops/op_info_register.py +21 -0
- mindspore/ops/operations/__init__.py +16 -11
- mindspore/ops/operations/_custom_ops_utils.py +689 -34
- mindspore/ops/operations/_inner_ops.py +3 -6
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +2 -2
- mindspore/ops/operations/comm_ops.py +185 -26
- mindspore/ops/operations/custom_ops.py +294 -174
- mindspore/ops/operations/debug_ops.py +59 -4
- mindspore/ops/operations/image_ops.py +13 -13
- mindspore/ops/operations/manually_defined/ops_def.py +15 -16
- mindspore/ops/operations/math_ops.py +3 -4
- mindspore/ops/operations/nn_ops.py +7 -39
- mindspore/ops/primitive.py +6 -10
- mindspore/ops/tensor_method.py +47 -8
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
- mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
- mindspore/ops_generate/api/functions_cc_generator.py +58 -10
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
- mindspore/ops_generate/common/base_generator.py +14 -0
- mindspore/ops_generate/common/gen_constants.py +8 -3
- mindspore/ops_generate/common/gen_utils.py +0 -19
- mindspore/ops_generate/common/op_proto.py +11 -4
- mindspore/ops_generate/common/template.py +88 -11
- mindspore/ops_generate/gen_ops.py +1 -1
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -0
- mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
- mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
- mindspore/parallel/_auto_parallel_context.py +11 -8
- mindspore/parallel/_cell_wrapper.py +113 -45
- mindspore/parallel/_parallel_serialization.py +1 -1
- mindspore/parallel/_ps_context.py +4 -6
- mindspore/parallel/_tensor.py +167 -12
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/transformer.py +13 -8
- mindspore/parallel/auto_parallel.py +14 -7
- mindspore/parallel/checkpoint_convert.py +3 -3
- mindspore/parallel/checkpoint_transform.py +11 -7
- mindspore/parallel/cluster/process_entity/_api.py +84 -48
- mindspore/parallel/cluster/process_entity/_utils.py +95 -7
- mindspore/parallel/cluster/run.py +43 -4
- mindspore/parallel/function/__init__.py +8 -1
- mindspore/parallel/function/reshard_func.py +6 -7
- mindspore/parallel/nn/__init__.py +15 -2
- mindspore/parallel/nn/parallel_cell_wrapper.py +9 -10
- mindspore/parallel/nn/parallel_grad_reducer.py +7 -6
- mindspore/parallel/shard.py +3 -4
- mindspore/parallel/transform_safetensors.py +463 -174
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +12 -6
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
- mindspore/profiler/analysis/task_manager.py +1 -1
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +42 -22
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
- mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
- mindspore/profiler/common/constant.py +16 -0
- mindspore/profiler/common/profiler_context.py +25 -27
- mindspore/profiler/common/profiler_info.py +0 -16
- mindspore/profiler/common/profiler_op_analyse.py +235 -0
- mindspore/profiler/common/profiler_output_path.py +23 -8
- mindspore/profiler/common/profiler_parameters.py +128 -35
- mindspore/profiler/dynamic_profile/__init__.py +0 -0
- mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
- mindspore/profiler/dynamic_profiler.py +305 -314
- mindspore/profiler/envprofiler.py +12 -7
- mindspore/profiler/experimental_config.py +96 -6
- mindspore/profiler/mstx.py +33 -12
- mindspore/profiler/platform/__init__.py +2 -3
- mindspore/profiler/platform/npu_profiler.py +29 -19
- mindspore/profiler/profiler.py +35 -19
- mindspore/profiler/profiler_action_controller.py +64 -76
- mindspore/profiler/schedule.py +10 -4
- mindspore/rewrite/common/config.py +1 -0
- mindspore/rewrite/common/namer.py +1 -0
- mindspore/rewrite/common/namespace.py +1 -0
- mindspore/rewrite/node/node.py +31 -11
- mindspore/rewrite/parsers/assign_parser.py +1 -1
- mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +7 -10
- mindspore/runtime/__init__.py +5 -5
- mindspore/runtime/event.py +10 -4
- mindspore/runtime/executor.py +60 -45
- mindspore/runtime/memory.py +30 -32
- mindspore/runtime/thread_bind_core.py +298 -164
- mindspore/safeguard/rewrite_obfuscation.py +12 -13
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/_utils.py +14 -4
- mindspore/train/amp.py +43 -20
- mindspore/train/callback/__init__.py +5 -5
- mindspore/train/callback/_checkpoint.py +3 -6
- mindspore/train/callback/_flops_collector.py +1 -1
- mindspore/train/callback/_landscape.py +0 -1
- mindspore/train/callback/_train_fault_tolerance.py +97 -16
- mindspore/train/data_sink.py +11 -2
- mindspore/train/dataset_helper.py +9 -0
- mindspore/train/model.py +135 -55
- mindspore/train/serialization.py +133 -111
- mindspore/train/summary/summary_record.py +13 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +0 -6
- mindspore/utils/runtime_execution_order_check.py +163 -77
- mindspore/utils/sdc_detect.py +68 -0
- mindspore/utils/utils.py +6 -9
- mindspore/version.py +1 -1
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +5 -4
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +333 -371
- mindspore/_deprecated/jit.py +0 -198
- mindspore/experimental/es/__init__.py +0 -22
- mindspore/experimental/es/embedding_service.py +0 -891
- mindspore/experimental/es/embedding_service_layer.py +0 -581
- mindspore/profiler/parser/__init__.py +0 -14
- mindspore/profiler/parser/aicpu_data_parser.py +0 -272
- mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
- mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
- mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
- mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
- mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
- mindspore/profiler/parser/ascend_flops_generator.py +0 -116
- mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
- mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
- mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
- mindspore/profiler/parser/ascend_op_generator.py +0 -334
- mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
- mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
- mindspore/profiler/parser/base_timeline_generator.py +0 -483
- mindspore/profiler/parser/container.py +0 -229
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
- mindspore/profiler/parser/flops_parser.py +0 -531
- mindspore/profiler/parser/framework_enum.py +0 -111
- mindspore/profiler/parser/framework_parser.py +0 -464
- mindspore/profiler/parser/framework_struct.py +0 -61
- mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
- mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
- mindspore/profiler/parser/hccl_parser.py +0 -573
- mindspore/profiler/parser/hwts_log_parser.py +0 -122
- mindspore/profiler/parser/integrator.py +0 -526
- mindspore/profiler/parser/memory_usage_parser.py +0 -277
- mindspore/profiler/parser/minddata_analyzer.py +0 -800
- mindspore/profiler/parser/minddata_parser.py +0 -186
- mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
- mindspore/profiler/parser/op_intermediate_parser.py +0 -149
- mindspore/profiler/parser/optime_parser.py +0 -250
- mindspore/profiler/parser/profiler_info.py +0 -213
- mindspore/profiler/parser/step_trace_parser.py +0 -666
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
mindspore/_extends/parse/standard_method.py CHANGED

@@ -27,9 +27,11 @@ from mindspore.common.sparse_tensor import RowTensorInner
 from mindspore.ops.composite.base import _append, _insert, _pop, _list_clear, _reverse, \
     _extend, _dict_setitem, _dict_clear, _haskey, _update, _fromkeys
 from mindspore.ops.operations._sequence_ops import TensorToTuple
-from mindspore.ops.auto_generate import trace_v2_op, inplace_addmm_op, inplace_index_put_op, inplace_normal_op
+from mindspore.ops.auto_generate import trace_v2_op, inplace_addmm_op, inplace_index_put_op, inplace_normal_op, \
+    inplace_index_add_op
 from mindspore.ops.auto_generate import inplace_copy_op, inplace_uniform_op, inplace_erfinv_op
 from mindspore.ops.auto_generate import inplace_scatter_add as inplace_scatter_add_
+from mindspore.ops.auto_generate import inplace_exponential_op
 
 from ... import _checkparam as validator
 from ..._checkparam import check_is_number, check_reshape_shp, check_axis_in_range, \
@@ -579,7 +581,7 @@ def transpose(x, *axis):
 
     Raises:
         TypeError: If input arguments have types not specified above.
-        ValueError: If the number of `axes` is not
+        ValueError: If the number of `axes` is not equal to a.ndim.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -1232,7 +1234,7 @@ def pow(x, y):  # pylint: disable=redefined-builtin
     return F.pow(x, y)
 
 
-def put_(x, index, source, accumulate=False):
+def put_(x, index, source, accumulate=False):
     """
     Copies the elements from source into the positions specified by index.
    """
@@ -2131,14 +2133,14 @@ def _check_sum_to_size(size, input_dim, shape_input):
 
 
 @_primexpr
-def _count_axes(size, input_shape, shape_input):
+def _count_axes(size, input_shape, shape_input, pre_len, pre_axis):
     """Count the sum axes for sum_to_size."""
-    axes = []
+    axes = pre_axis
     for i in range(len(size)):
         element = size[i]
-        if element != input_shape[i] and element == 1:
-            axes.append(i)
-        elif element != input_shape[i]:
+        if element != input_shape[i + pre_len] and element == 1:
+            axes.append(i + pre_len)
+        elif element != input_shape[i + pre_len]:
             raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_input}.")
     return axes
 
@@ -2151,13 +2153,15 @@ def sum_to_size(input, *size):
     size = size[0]
     shape_input = input.shape
     _check_sum_to_size(size, input.ndim, shape_input)
+    pre_len = 0
+    pre_axis = []
     if len(size) < input.ndim:
-
-
+        pre_len = input.ndim - len(size)
+        pre_axis = [axis for axis in range(pre_len)]
 
-    axes = _count_axes(size, input.shape, shape_input)
+    axes = _count_axes(size, input.shape, shape_input, pre_len, pre_axis)
     if axes:
-        return input.sum(tuple(axes), keepdims=True)
+        return input.sum(tuple(axes), keepdims=True).reshape(size)
     return input
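
The `sum_to_size` change above pads the axes that `size` does not cover (`pre_len`/`pre_axis`) before counting reduction axes, then reshapes the summed result to the requested size. For illustration, a NumPy sketch of the same semantics; `sum_to_size_ref` is a hypothetical name, not part of the diff:

    import numpy as np

    def sum_to_size_ref(arr, size):
        # Leading axes of `arr` that `size` does not cover are always summed out.
        pre_len = arr.ndim - len(size)
        axes = list(range(pre_len))
        for i, element in enumerate(size):
            if element != arr.shape[i + pre_len] and element == 1:
                axes.append(i + pre_len)       # broadcast axis: sum it away
            elif element != arr.shape[i + pre_len]:
                raise ValueError(f"size {size} is not expandable to {arr.shape}")
        if axes:
            return arr.sum(axis=tuple(axes), keepdims=True).reshape(size)
        return arr

    x = np.ones((2, 3, 4))
    print(sum_to_size_ref(x, (3, 1)).shape)    # (3, 1): axes 0 and 2 summed away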
@@ -4004,6 +4008,7 @@ def to_double(input_x):
     """
     return F.cast(input_x, mstype.float64)
 
+
 def to_bfloat16(input_x):
     r"""
     Converts input tensor dtype to bfloat16.
@@ -4486,10 +4491,20 @@ def uniform_(input, from_=0, to=1, *, generator=None):
     """
     if generator is None:
         generator = default_generator
-    seed, offset = generator._step(generator_step_)
+    seed, offset = generator._step(generator_step_)  # pylint: disable=protected-access
     return inplace_uniform_op(input, from_, to, seed, offset)
 
 
+def exponential_(input, lambd=1, *, generator=None):
+    r"""
+    Fills `self` tensor with elements drawn from the exponential distribution:
+    """
+    if generator is None:
+        generator = default_generator
+    seed, offset = generator._step(generator_step_)  # pylint: disable=protected-access
+    return inplace_exponential_op(input, lambd, seed, offset)
+
+
 def amin(input, axis=None, keep_dims=False):
     r"""
     For details, please refer to :func:`mindspore.ops.amin`.
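
The new `exponential_` standard method mirrors `uniform_`: it pulls a (seed, offset) pair from the generator and dispatches to the in-place op. Assuming it is surfaced as `Tensor.exponential_` like its siblings (an assumption, not confirmed by this diff), usage would look roughly like:

    import mindspore as ms

    x = ms.ops.zeros((2, 3), ms.float32)
    gen = ms.Generator()
    gen.manual_seed(0)
    # In-place fill with samples from Exp(lambd), pdf f(t) = lambd * exp(-lambd * t)
    x.exponential_(2.0, generator=gen)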
@@ -4592,3 +4607,18 @@ def zero_(input):
     Return a tensor filled with zeros.
     """
     return F.zero_(input)
+
+
+def slice_get_start(slice_node):
+    """Using SliceGetItem to get slice_node.start"""
+    return F.SliceGetItem(slice_node, "start")
+
+
+def slice_get_stop(slice_node):
+    """Using SliceGetItem to get slice_node.stop"""
+    return F.SliceGetItem(slice_node, "stop")
+
+
+def slice_get_step(slice_node):
+    """Using SliceGetItem to get slice_node.step"""
+    return F.SliceGetItem(slice_node, "step")
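
Each of the three new helpers reads one attribute of a slice node through the `F.SliceGetItem` primitive during graph compilation. In eager Python the equivalent is plain attribute access on a slice object:

    s = slice(1, 10, 2)
    # What SliceGetItem(s, "start") / (s, "stop") / (s, "step") resolve to:
    print(s.start, s.stop, s.step)   # 1 10 2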
mindspore/_extends/parse/trope.py CHANGED

@@ -37,6 +37,12 @@ from functools import (  # noqa
     partial
 )
 
+from mindspore.ops.composite.multitype_ops.add_impl import augassign_add
+from mindspore.ops.composite.multitype_ops.sub_impl import augassign_sub
+from mindspore.ops.composite.multitype_ops.mul_impl import augassign_mul
+from mindspore.ops.composite.multitype_ops.div_impl import augassign_div
+from mindspore.ops.composite.multitype_ops.floordiv_impl import augassign_floordiv
+
 from ...common import mutable
 
 __all__ = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod', 'eq', 'ne', 'lt', 'gt', 'le', 'ge', 'pos', 'neg',
@@ -44,7 +50,8 @@ __all__ = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod', 'eq', 'ne', 'lt',
            'matmul', 'getitem', 'setitem',
           'bool', 'getattr', 'setattr', 'hasattr', 'len', 'iter', 'next', 'pow', 'range', 'map', 'zip',
           'partial', 'print', 'enumerate', 'isinstance', 'filter', 'abs', 'round', 'mutable',
-           'max', 'min', 'sum', 'list', 'tuple']
+           'max', 'min', 'sum', 'list', 'tuple',
+           'augassign_add', 'augassign_sub', 'augassign_mul', 'augassign_div', 'augassign_floordiv']
 
 
 def MakeTuple(*elts):  # pragma: no cover
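
The trope.py change registers dedicated multitype ops for Python's augmented assignments, so statements like `x += y` can resolve through `augassign_add` and friends rather than the plain binary ops. A rough usage sketch; the exact lowering is an assumption here:

    import mindspore as ms

    @ms.jit
    def f(x, y):
        x += y   # presumably resolved via the augassign_add multitype op
        x *= y   # presumably resolved via augassign_mul
        return x

    print(f(ms.Tensor([1.0, 2.0]), ms.Tensor([3.0, 4.0])))   # [12. 24.]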
mindspore/_extends/pijit/__init__.py CHANGED

@@ -16,8 +16,7 @@
 Helper module for pijit analyze
 """
 
+__all__ = ['pijit_func_white_list_map', "get_tensor_method_name"]
 
 from .pijit_func_white_list import _func_map as pijit_func_white_list_map
 from .tensor_func_list import get_tensor_method_name
-
-__all__ = ['pijit_func_white_list_map', "get_tensor_method_name"]
mindspore/amp.py CHANGED

@@ -327,10 +327,10 @@ class DynamicLossScaler(LossScaler):
     r"""
     Manager for dynamically adjusting the loss scaling factor.
 
-    Dynamic loss scaling
-
-
-
+    Dynamic loss scaling attempts to determine the largest loss scale `scale_value` while keeping
+    the gradients finite. If the gradients remain finite for `scale_window` consecutive steps,
+    it increases the loss scale `scale_value` by `scale_factor`, otherwise it decreases the loss
+    scale `scale_value` by `1 / scale_factor` and resets the counter.
 
     .. warning::
         This is an experimental API that is subject to change or deletion.
mindspore/avcodec-59.dll CHANGED (binary file)
mindspore/avdevice-59.dll CHANGED (binary file)
mindspore/avfilter-8.dll CHANGED (binary file)
mindspore/avformat-59.dll CHANGED (binary file)
mindspore/avutil-57.dll CHANGED (binary file)
mindspore/boost/adasum.py CHANGED

@@ -168,7 +168,7 @@ class AdaSum(Cell):
     @staticmethod
     def _hash(step, target, weights_index):
         target = "tag" + str(step) + str(target) + str(weights_index)
-        target_hash = hashlib.
+        target_hash = hashlib.sha256(target.encode()).hexdigest()
         max_num_hash = 2 ** 31
         hash_res = int(int(target_hash, 16) % max_num_hash)
         return hash_res
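
The `_hash` fix swaps the digest algorithm while keeping the fold of the hex digest into a 31-bit tag; restated on its own:

    import hashlib

    def tag_hash(step, target, weights_index):
        # sha256 hex digest, interpreted as an integer and reduced mod 2**31
        tag = "tag" + str(step) + str(target) + str(weights_index)
        digest = hashlib.sha256(tag.encode()).hexdigest()
        return int(digest, 16) % 2 ** 31

    print(tag_hash(0, "weights", 3))   # deterministic value in [0, 2**31)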
mindspore/boost/boost_cell_wrapper.py CHANGED

@@ -507,10 +507,10 @@ class BoostTrainOneStepWithLossScaleCell(BoostTrainOneStepCell):
         self.reduce_sum = P.ReduceSum(keep_dims=False)
         self.less_equal = P.LessEqual()
         self.allreduce = P.AllReduce()
-        self.is_distributed =
-        self.gpu_target =
-        self.ascend_910a_target =
-        self.ascend_910b_target =
+        self.is_distributed = self.parallel_mode != ParallelMode.STAND_ALONE
+        self.gpu_target = context.get_context("device_target") == "GPU"
+        self.ascend_910a_target = MSContext.get_instance().get_ascend_soc_version() == 'ascend910'
+        self.ascend_910b_target = MSContext.get_instance().get_ascend_soc_version() in ['ascend910b', 'ascend910_93']
         self.loss_scaling_manager = None
         self._ascend_check_overflow_mode = os.environ.get('MS_ASCEND_CHECK_OVERFLOW_MODE')
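
The rebuilt assignments resolve the execution target once at construction time. A hedged sketch of the same detection logic; `MSContext` is an internal binding used by the patch itself, imported here only for illustration:

    from mindspore import context
    from mindspore._c_expression import MSContext

    def detect_target():
        # Same probes as the patched wrapper, folded into one helper.
        if context.get_context("device_target") == "GPU":
            return "gpu"
        soc = MSContext.get_instance().get_ascend_soc_version()
        if soc == 'ascend910':
            return "ascend910a"
        if soc in ('ascend910b', 'ascend910_93'):
            return "ascend910b"
        return "other"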
mindspore/common/__init__.py CHANGED

@@ -22,7 +22,8 @@ from mindspore.common.dtype import Type, int8, byte, int16, short, int32, intc,
     float32, single, float64, bfloat16, double, bool_, float_, list_, tuple_, int_, \
     uint, number, tensor_type, string, type_none, TensorType, Int, \
     complex64, complex128, dtype_to_nptype, _null, _NullType, \
-    dtype_to_pytype, pytype_to_dtype, get_py_obj_dtype, QuantDtype, qint4x2
+    dtype_to_pytype, pytype_to_dtype, get_py_obj_dtype, QuantDtype, qint4x2, \
+    float8_e4m3fn, float8_e5m2, hifloat8
 from mindspore.common.dump import set_dump
 from mindspore.common.parameter import Parameter, ParameterTuple
 from mindspore.common.seed import set_seed, get_seed
@@ -41,6 +42,29 @@ from mindspore.common.generator import (
 from mindspore.ops.function.array_func import is_tensor, from_numpy
 from mindspore.common._grad_function import _Function
 
+try:
+    import triton
+    if isinstance(getattr(triton.runtime.jit, "type_canonicalisation_dict", None), dict):
+        ms_type_canonicalisation_dict = {
+            "Bool": "i1",
+            "Float16": "fp16",
+            "BFloat16": "bf16",
+            "Float32": "fp32",
+            "Float64": "fp64",
+            "Int8": "i8",
+            "Int16": "i16",
+            "Int32": "i32",
+            "Int64": "i64",
+            "UInt8": "u8",
+            "UInt16": "u16",
+            "UInt32": "u32",
+            "UInt64": "u64",
+        }
+        triton.runtime.jit.type_canonicalisation_dict.update(ms_type_canonicalisation_dict)
+
+except ImportError:
+    pass
+
 # symbols from dtype
 __all__ = [
     "int8", "byte",
@@ -66,7 +90,8 @@ __all__ = [
     # __method__ from dtype
     "dtype_to_nptype", "dtype_to_pytype",
     "pytype_to_dtype", "get_py_obj_dtype",
-    "bfloat16", "qint4x2"
+    "bfloat16", "qint4x2",
+    "float8_e4m3fn", "float8_e5m2", "hifloat8"
 ]
 
 __all__.extend([
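
The guarded triton import teaches triton's JIT to canonicalise MindSpore dtype names ("BFloat16" -> "bf16", and so on) when triton is installed; the `getattr` check keeps the block a no-op on triton versions without that table. A quick probe of the effect, sketched under the same guard:

    try:
        import triton
        table = getattr(triton.runtime.jit, "type_canonicalisation_dict", None)
        if isinstance(table, dict):
            print(table.get("BFloat16"))   # 'bf16' once the MindSpore entries are merged
    except ImportError:
        print("triton not installed; the patched block is a no-op")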
mindspore/common/_grad_function.py CHANGED

@@ -15,11 +15,12 @@
 
 """Defines custom autograd function with functional form."""
 
+__all__ = ['_Function']
+
 from typing import Any
 from mindspore._c_expression import FunctionBase as FunctionBase_
 from mindspore.common.tensor import Tensor
 
-__all__ = ['_Function']
 
 class _Function(FunctionBase_):
     """
mindspore/common/_pijit_context.py CHANGED

@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-
-
+
+"""Define pijit context."""
+
 import inspect
 import types
 import functools
@@ -37,14 +38,22 @@ def _update_graph_executor_config(jit_config):
     GraphExecutor_.get_instance().set_jit_config(JitConfig(**valid_config).jit_config_dict)
 
 
+class Unsupported(RuntimeError):
+    """If using @jit(fullgraph=True), pijit will raise an Unsupported exception when encountering a graph break."""
+
+    # pylint: disable=useless-super-delegation
+    def __init__(self, msg: str):
+        super().__init__(msg)
+
+
 class PIJitCaptureContext:
     """
     Context manager for pijit graph capture
     """
 
-    def __init__(self, jit_config=None, input_signature=None):
+    def __init__(self, fullgraph=False, jit_config=None, input_signature=None):
         _update_graph_executor_config(jit_config)
-        config = {}
+        config = {'fullgraph': fullgraph}
         if isinstance(jit_config, JitConfig):
             config.update(jit_config.jit_config_dict)
         elif jit_config is not None:
@@ -74,6 +83,9 @@ class PIJitCaptureContext:
             or inspect.isasyncgenfunction(fn) or inspect.isawaitable(fn)
 
     def _wrapper(self):
+        """
+        pijit wrapper of fn.
+        """
         def _fn(*args, **kwds):
             PreJit(args, kwds)
             disable_pijit = self.config.get('_disable_pijit', None)
@@ -82,21 +94,27 @@ class PIJitCaptureContext:
             with self:
                 self.ret = self.fn(*args, **kwds)
             return self.ret
+
         return _fn
 
     def __call__(self, fn):
+        """
+        :raises Unsupported: If using @jit(fullgraph=True), will raise exception when encountering a graph break.
+        """
         if isinstance(fn, type) and issubclass(fn, mindspore.nn.Cell):
             fn.construct = self(fn.construct)
             return fn
         if isinstance(fn, mindspore.nn.Cell):
-
-            return fn
+            return types.MethodType(self(fn.construct.__func__), fn)
         if isinstance(fn, types.MethodType):
             return types.MethodType(self(fn.__func__), fn.__self__)
         if not isinstance(fn, types.FunctionType) or self._is_unsupported(fn):
             logger.warning("unsupported function type" + str(fn))
             return fn
 
+        if hasattr(fn, "__wrapped_by_jit__"):
+            logger.warning(f"The fn {fn} should be wrapped by jit only once.")
+
         module = inspect.getmodule(fn.__code__)
         if module is not None and module.__name__.startswith("mindspore"):
             if fn.__code__.co_name != 'after_grad':
@@ -107,7 +125,9 @@ class PIJitCaptureContext:
         if fn.__code__ is _fn.__code__:
             fn = fn.__closure__[0].cell_contents.fn
         self.fn = fn
-
+        wrap_fn = functools.wraps(fn)(_fn)
+        setattr(wrap_fn, "__wrapped_by_jit__", True)
+        return wrap_fn
 
     def __enter__(self):
         pi_jit_set_context(self.fn, *self._init_arg)
@@ -122,6 +142,7 @@ def _get_skip_files():
     """
     Get skip files by SKIP_RULES
     """
+
     def _filter(path: str):
         if path.endswith("__init__.py"):
             return path[0:-11]
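
Taken together, the _pijit_context changes thread a `fullgraph` flag into the capture config, tag wrapped functions with `__wrapped_by_jit__` to warn on double wrapping, and introduce `Unsupported` for hard graph breaks. A hedged usage sketch, assuming the flag is reachable through `ms.jit` with bytecode capture:

    import mindspore as ms

    @ms.jit(capture_mode="bytecode", fullgraph=True)
    def f(x):
        # anything pijit cannot capture here becomes a hard error
        # instead of a silent fallback to eager execution
        return x + 1

    try:
        f(ms.Tensor(1))
    except RuntimeError as err:   # Unsupported subclasses RuntimeError
        print("graph break:", err)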
mindspore/common/_stub_tensor.py CHANGED

@@ -14,213 +14,5 @@
 # ============================================================================
 """Stub Tensor implementation."""
 
-import inspect
-from functools import reduce
-from mindspore.common.tensor import Tensor
-from mindspore.common.dtype import type_size_in_bytes
-import mindspore._c_expression as c_expression
-from mindspore._c_expression import TensorNode, SequenceNode, NoneTypeNode, AnyTypeNode
-from mindspore._c_expression import TensorPy as Tensor_
-from mindspore.common.api import _convert_python_data
-from mindspore.common._tensor_cpp_method import tensor_cpp_methods
-
-
-def _stub_member(var, init):
-    """handle stub tensor's member, use a member cache to improve performance"""
-    def getx(stub):
-        if stub.tensor is not None:
-            return getattr(stub.tensor, var)
-        if hasattr(stub, "member_cache"):
-            return stub.member_cache.get(var, init)
-        return init
-
-    def setx(stub, value):
-        if stub.tensor is not None:
-            setattr(stub.tensor, var, value)
-        else:
-            if not hasattr(stub, "member_cache"):
-                stub.member_cache = {}
-            stub.member_cache[var] = value
-    return property(getx, setx)
-
-
-def _stub_method(method):
-    def fun(*arg, **kwargs):
-        stub = arg[0]
-        arg = (stub.stub_sync(),) + arg[1:]
-        return method(*arg, **kwargs)
-    return fun
-
-
 class StubTensor:
-    """
-    const_arg = _stub_member("const_arg", None)
-    init = _stub_member("init", None)
-    init_finished = _stub_member("init_finished", False)
-    virtual_flag = _stub_member("virtual_flag", False)
-    parent_tensor_ = _stub_member("parent_tensor_", None)
-    index_of_parent_ = _stub_member("index_of_parent_", None)
-    slice_num_of_persistent_data_ = _stub_member("slice_num_of_persistent_data_", None)
-    slice_shape_of_persistent_data_ = _stub_member("slice_shape_of_persistent_data_", None)
-
-    def __init__(self, stub=None, tensor=None):
-        self.stub = stub
-        self.tensor = tensor
-
-    __str__ = _stub_method(Tensor.__str__)
-    __repr__ = _stub_method(Tensor.__repr__)
-    __setitem__ = _stub_method(Tensor.__setitem__)
-
-    __lt__ = Tensor.__lt__
-    __le__ = Tensor.__le__
-    __gt__ = Tensor.__gt__
-    __ge__ = Tensor.__ge__
-    __eq__ = Tensor.__eq__
-    __ne__ = Tensor.__ne__
-
-    @property
-    def shape(self):
-        """shape stub."""
-        if self.stub:
-            if not hasattr(self, "stub_shape"):
-                self.stub_shape = self.stub.get_shape()
-            return self.stub_shape
-        return self.tensor.shape
-
-    @property
-    def dtype(self):
-        """dtype stub."""
-        if self.stub:
-            if not hasattr(self, "stub_dtype"):
-                self.stub_dtype = self.stub.get_dtype()
-            return self.stub_dtype
-        return self.tensor.dtype
-
-    @property
-    def size(self):
-        """size stub."""
-        shape = self.shape
-        return reduce((lambda x, y: x * y), shape) if shape else 1
-
-    @property
-    def itemsize(self):
-        """itemsize stub."""
-        return type_size_in_bytes(self.dtype)
-
-    @property
-    def nbytes(self):
-        """nbytes stub."""
-        return self.size * self.itemsize
-
-    @property
-    def ndim(self):
-        """ndim stub."""
-        return len(self.shape)
-
-    @property
-    def strides(self):
-        """strides stub."""
-        return self.stub_sync().strides
-
-    @property
-    def has_init(self):
-        """has_init stub."""
-        return False
-
-    def ndimension(self):
-        r"""
-        Alias for :func:`mindspore.Tensor.ndim`.
-        """
-        return self.ndim
-
-    def dim(self):
-        r"""
-        Alias for :func:`mindspore.Tensor.ndim`.
-        """
-        return self.ndim
-
-
-    def is_contiguous(self):
-        if self.stub:
-            return self.stub.get_value().is_contiguous()
-        return self.tensor.is_contiguous()
-
-    def set_cast_dtype(self):
-        if self.stub:
-            return self.stub.get_value().set_cast_dtype()
-        return self.tensor.set_cast_dtype()
-
-    def storage_offset(self):
-        if self.stub:
-            return self.stub.get_value().storage_offset()
-        return self.tensor.storage_offset()
-
-    def _need_contiguous(self):
-        if self.stub:
-            return self.stub.get_value()._need_contiguous()  # pylint: disable=protected-access
-        return self.tensor._need_contiguous()  # pylint: disable=protected-access
-
-    asnumpy = _stub_method(Tensor.asnumpy)
-    is_persistent_data = _stub_method(Tensor.is_persistent_data)
-    asnumpy_of_slice_persistent_data = _stub_method(Tensor.asnumpy_of_slice_persistent_data)
-    slice_num_of_persistent_data = _stub_method(Tensor.slice_num_of_persistent_data)
-    slice_shape_of_persistent_data = _stub_method(Tensor.slice_shape_of_persistent_data)
-    flush_from_cache = _stub_method(Tensor.flush_from_cache)
-    register_hook = _stub_method(Tensor.register_hook)
-
-    def stub_sync(self):
-        """sync real tensor."""
-        if self.stub:
-            val = self.stub.get_value()
-            self.tensor = Tensor(val)
-            if hasattr(self, "member_cache"):
-                for k, v in self.member_cache.items():
-                    setattr(self.tensor, k, v)
-            self.stub = None
-        return self.tensor
-
-    def __getstate__(self):
-        state = {}
-        value = self.stub.get_value() if self.stub else self.tensor.__getstate__()
-        state["value"] = value
-        return state
-
-    def __setstate__(self, state):
-        value = state.pop("value")
-        self.stub = None
-        self.tensor = Tensor(value)
-
-no_stub_sync_cpp_api = ["set_cast_dtype", "storage_offset", "is_contiguous", "_need_contiguous"]
-
-def _init_stub_tensor_api():
-    """adapt to python tensor and cpp tensor api"""
-    need_init_func = set(dir(Tensor)) - set(dir(StubTensor))
-    cpp_tensor_func = dir(Tensor_)
-    for attr in need_init_func:
-        func = inspect.getattr_static(Tensor, attr)
-        if attr in cpp_tensor_func and attr not in tensor_cpp_methods:
-            # for cpp tensor api, we always need to sync for real tensor first
-            setattr(StubTensor, attr, _stub_method(func))
-        elif attr not in no_stub_sync_cpp_api:
-            setattr(StubTensor, attr, func)
-
-
-_init_stub_tensor_api()
-c_expression.addStubTensorMethods()
-
-def _convert_stub(stub):
-    "convert stub to StubNode or Value"
-    if isinstance(stub, TensorNode):
-        return StubTensor(stub)
-    if isinstance(stub, tuple):
-        return tuple(_convert_stub(e) for e in stub)
-    if isinstance(stub, SequenceNode):
-        elements = stub.get_elements()
-        return tuple(_convert_stub(e) for e in elements)
-    if isinstance(stub, NoneTypeNode):
-        val = stub.get_real_value()
-        return _convert_python_data(val)
-    if isinstance(stub, AnyTypeNode):
-        val = stub.get_real_node()
-        return _convert_stub(val)
-    return _convert_python_data(stub)
+    """Remove this class when other modules are adapted."""
mindspore/common/_tensor_cpp_method.py CHANGED

@@ -14,4 +14,4 @@
 # ============================================================================
 """Add tensor cpp methods for stub tensor"""
 
-tensor_cpp_methods = ['abs', 'absolute', '__abs__', 'acos', 'arccos', 'acosh', 'arccosh', 'add', '__add__', 'addbmm', 'addcdiv', 'addmm', 'addmv', 'add_', '__iadd__', 'all', 'allclose', 'any', 'argmax', 'argmin', 'argsort', 'asin', 'arcsin', 'asinh', 'arcsinh', 'atan', 'arctan', 'atan2', 'arctan2', 'atanh', 'arctanh', 'baddbmm', 'bincount', 'bitwise_and', '__and__', 'bitwise_not', 'bitwise_or', '__or__', 'bitwise_xor', '__xor__', 'ceil', 'chunk', 'clamp', 'clip', 'clone', 'copy_', 'cos', 'cosh', 'count_nonzero', 'cumsum', 'diag', 'div', 'divide', 'div_', '__itruediv__', 'dot', 'eq', 'erf', 'erfc', 'exp', 'expand_as', 'expm1', 'exp_', 'fill_', 'fill_diagonal_', 'flatten', 'floor', 'floor_divide', 'floor_divide_', '__ifloordiv__', 'fmod', 'frac', 'gather', 'gcd', 'greater', 'gt', 'greater_equal', 'ge', 'hardshrink', 'histc', 'index_add', 'index_select', 'inverse', 'isclose', 'isfinite', 'isinf', 'isneginf', 'kthvalue', 'lerp', 'less', 'lt', 'less_equal', 'le', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logsumexp', 'log_', 'masked_fill', 'masked_fill_', 'masked_select', 'matmul', 'max', 'maximum', 'mean', 'median', 'min', 'minimum', 'mm', 'mul', 'mul_', '__imul__', 'nansum', 'nan_to_num', 'narrow', 'neg', 'negative', 'new_ones', 'new_zeros', 'not_equal', 'ne', 'outer', 'pow', '__pow__', 'prod', 'put_', 'reciprocal', 'remainder', 'repeat', 'repeat_interleave', 'reshape', 'roll', 'round', 'rsqrt', 'scatter', 'scatter_', 'scatter_add', 'select', 'sigmoid', 'sin', 'sinc', 'sinh', 'sort', 'split', 'sqrt', 'square', 'std', 'sub', '__sub__', 'subtract', 'sub_', '__isub__', 'sum', 't', 'take', 'tan', 'tanh', 'tile', 'topk', 'transpose', 'tril', 'triu', 'true_divide', 'trunc', 'type_as', 'unbind', 'unique', 'unsqueeze', 'var', 'view_as', 'where', 'xlogy', '_to']
+tensor_cpp_methods = ['abs', 'absolute', '__abs__', 'acos', 'arccos', 'acosh', 'arccosh', 'add', '__add__', 'addbmm', 'addcdiv', 'addmm', 'addmv', 'add_', '__iadd__', 'all', 'allclose', 'any', 'argmax', 'argmin', 'argsort', 'asin', 'arcsin', 'asinh', 'arcsinh', 'atan', 'arctan', 'atan2', 'arctan2', 'atanh', 'arctanh', 'baddbmm', 'bincount', 'bitwise_and', '__and__', 'bitwise_not', 'bitwise_or', '__or__', 'bitwise_xor', '__xor__', 'ceil', 'chunk', 'clamp', 'clip', 'clone', 'copy_', 'cos', 'cosh', 'count_nonzero', 'cumsum', 'diag', 'div', 'divide', 'div_', '__itruediv__', 'dot', 'eq', 'erf', 'erfc', 'exp', 'expand_as', 'expm1', 'exp_', 'fill_', 'fill_diagonal_', 'flatten', 'floor', 'floor_divide', 'floor_divide_', '__ifloordiv__', 'fmod', 'frac', 'gather', 'gcd', 'greater', 'gt', 'greater_equal', 'ge', 'hardshrink', 'histc', 'index_add', 'index_select', 'inverse', 'isclose', 'isfinite', 'isinf', 'isneginf', 'kthvalue', 'lerp', 'less', 'lt', 'less_equal', 'le', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logsumexp', 'log_', 'masked_fill', 'masked_fill_', 'masked_select', 'matmul', 'max', 'maximum', 'mean', 'median', 'min', 'minimum', 'mm', 'mul', 'mul_', '__imul__', 'nansum', 'nan_to_num', 'narrow', 'neg', 'negative', 'new_empty', 'new_full', 'new_ones', 'new_zeros', 'not_equal', 'ne', 'outer', 'pow', '__pow__', 'prod', 'put_', 'reciprocal', 'remainder', 'repeat', 'repeat_interleave', 'reshape', 'roll', 'round', 'rsqrt', 'scatter', 'scatter_', 'scatter_add', 'select', 'sigmoid', 'sin', 'sinc', 'sinh', 'sort', 'split', 'sqrt', 'square', 'std', 'sub', '__sub__', 'subtract', 'sub_', '__isub__', 'sum', 't', 'take', 'tan', 'tanh', 'tile', 'topk', 'transpose', 'tril', 'triu', 'true_divide', 'trunc', 'type_as', 'unbind', 'unique', 'unsqueeze', 'var', 'view_as', 'where', 'xlogy', '_to', '__mod__']
|