mindspore 2.6.0rc1-cp39-cp39-win_amd64.whl → 2.7.0rc1-cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +1 -1
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +40 -9
- mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
- mindspore/_extends/optimize/cell_utils.py +96 -0
- mindspore/_extends/parse/__init__.py +2 -2
- mindspore/_extends/parse/compile_config.py +44 -22
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -1
- mindspore/_extends/parse/parser.py +37 -62
- mindspore/_extends/parse/resources.py +39 -0
- mindspore/_extends/parse/standard_method.py +43 -13
- mindspore/_extends/parse/trope.py +8 -1
- mindspore/_extends/pijit/__init__.py +1 -2
- mindspore/amp.py +4 -4
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +4 -4
- mindspore/common/__init__.py +27 -2
- mindspore/common/_grad_function.py +2 -1
- mindspore/common/_pijit_context.py +28 -7
- mindspore/common/_stub_tensor.py +1 -209
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +77 -16
- mindspore/common/api.py +238 -113
- mindspore/common/dtype.py +21 -11
- mindspore/common/dump.py +10 -15
- mindspore/common/generator.py +5 -3
- mindspore/common/hook_handle.py +11 -2
- mindspore/common/jit_config.py +1 -1
- mindspore/common/jit_trace.py +84 -105
- mindspore/common/parameter.py +26 -12
- mindspore/common/recompute.py +3 -3
- mindspore/common/sparse_tensor.py +0 -3
- mindspore/common/symbol.py +0 -1
- mindspore/common/tensor.py +81 -81
- mindspore/communication/_comm_helper.py +46 -4
- mindspore/communication/management.py +79 -7
- mindspore/context.py +58 -40
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/datasets.py +20 -7
- mindspore/dataset/engine/datasets_user_defined.py +33 -3
- mindspore/dataset/engine/iterators.py +2 -2
- mindspore/dataset/engine/obs/config_loader.py +2 -2
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
- mindspore/dataset/transforms/py_transforms.py +7 -3
- mindspore/dataset/transforms/transforms.py +7 -3
- mindspore/dataset/vision/validators.py +1 -0
- mindspore/device_context/ascend/device.py +1 -1
- mindspore/device_context/gpu/__init__.py +2 -2
- mindspore/device_context/gpu/device.py +1 -1
- mindspore/device_context/gpu/op_precision.py +4 -2
- mindspore/device_context/gpu/op_tuning.py +6 -3
- mindspore/device_manager.py +16 -9
- mindspore/dnnl.dll +0 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -7
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/optim/adadelta.py +13 -20
- mindspore/experimental/optim/adagrad.py +15 -22
- mindspore/experimental/optim/adam.py +17 -24
- mindspore/experimental/optim/adamax.py +14 -22
- mindspore/experimental/optim/adamw.py +28 -34
- mindspore/experimental/optim/asgd.py +15 -25
- mindspore/experimental/optim/lr_scheduler.py +27 -45
- mindspore/experimental/optim/nadam.py +14 -24
- mindspore/experimental/optim/optimizer.py +13 -23
- mindspore/experimental/optim/radam.py +18 -24
- mindspore/experimental/optim/rmsprop.py +14 -25
- mindspore/experimental/optim/rprop.py +15 -26
- mindspore/experimental/optim/sgd.py +9 -19
- mindspore/hal/__init__.py +4 -4
- mindspore/hal/contiguous_tensors_handle.py +2 -2
- mindspore/hal/memory.py +27 -7
- mindspore/include/api/cell.h +37 -1
- mindspore/include/api/delegate.h +10 -0
- mindspore/include/api/model.h +3 -0
- mindspore/include/api/types.h +2 -2
- mindspore/include/c_api/model_c.h +0 -58
- mindspore/include/c_api/tensor_c.h +0 -26
- mindspore/include/dataset/vision_ascend.h +1 -1
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/cifar10.py +60 -11
- mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mindspore_ops_host.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +6 -46
- mindspore/mint/distributed/__init__.py +1 -0
- mindspore/mint/distributed/distributed.py +212 -9
- mindspore/mint/nn/__init__.py +1 -1
- mindspore/mint/nn/functional.py +53 -6
- mindspore/mint/nn/layer/_functions.py +164 -294
- mindspore/mint/nn/layer/activation.py +8 -6
- mindspore/mint/nn/layer/conv.py +137 -101
- mindspore/mint/nn/layer/normalization.py +8 -22
- mindspore/mint/optim/adam.py +19 -18
- mindspore/mint/optim/adamw.py +14 -8
- mindspore/mint/optim/sgd.py +5 -5
- mindspore/nn/cell.py +328 -502
- mindspore/nn/grad/cell_grad.py +11 -12
- mindspore/nn/layer/activation.py +32 -34
- mindspore/nn/layer/basic.py +67 -64
- mindspore/nn/layer/channel_shuffle.py +4 -4
- mindspore/nn/layer/combined.py +4 -2
- mindspore/nn/layer/conv.py +117 -110
- mindspore/nn/layer/dense.py +9 -7
- mindspore/nn/layer/embedding.py +50 -52
- mindspore/nn/layer/image.py +37 -39
- mindspore/nn/layer/math.py +111 -112
- mindspore/nn/layer/normalization.py +56 -44
- mindspore/nn/layer/pooling.py +58 -63
- mindspore/nn/layer/rnn_cells.py +33 -33
- mindspore/nn/layer/rnns.py +56 -56
- mindspore/nn/layer/thor_layer.py +74 -73
- mindspore/nn/layer/transformer.py +11 -1
- mindspore/nn/learning_rate_schedule.py +20 -20
- mindspore/nn/loss/loss.py +79 -81
- mindspore/nn/optim/adam.py +3 -3
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +2 -0
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -1
- mindspore/nn/probability/distribution/poisson.py +2 -1
- mindspore/nn/sparse/sparse.py +3 -3
- mindspore/nn/wrap/cell_wrapper.py +34 -37
- mindspore/nn/wrap/grad_reducer.py +37 -37
- mindspore/nn/wrap/loss_scale.py +72 -74
- mindspore/numpy/array_creations.py +5 -5
- mindspore/numpy/fft.py +1 -1
- mindspore/numpy/math_ops.py +5 -5
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
- mindspore/ops/_vmap/vmap_array_ops.py +31 -13
- mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +42 -11
- mindspore/ops/auto_generate/gen_extend_func.py +23 -141
- mindspore/ops/auto_generate/gen_ops_def.py +727 -321
- mindspore/ops/auto_generate/gen_ops_prim.py +1721 -984
- mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
- mindspore/ops/composite/__init__.py +10 -0
- mindspore/ops/composite/base.py +8 -4
- mindspore/ops/composite/multitype_ops/__init__.py +12 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +133 -109
- mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
- mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
- mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
- mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
- mindspore/ops/function/__init__.py +3 -1
- mindspore/ops/function/_add_attr_func.py +11 -6
- mindspore/ops/function/array_func.py +9 -96
- mindspore/ops/function/debug_func.py +4 -3
- mindspore/ops/function/grad/grad_func.py +1 -1
- mindspore/ops/function/math_func.py +33 -540
- mindspore/ops/function/nn_func.py +28 -74
- mindspore/ops/function/other_func.py +4 -1
- mindspore/ops/function/random_func.py +44 -5
- mindspore/ops/function/vmap_func.py +2 -1
- mindspore/ops/functional.py +2 -3
- mindspore/ops/functional_overload.py +571 -6
- mindspore/ops/op_info_register.py +21 -0
- mindspore/ops/operations/__init__.py +16 -11
- mindspore/ops/operations/_custom_ops_utils.py +689 -34
- mindspore/ops/operations/_inner_ops.py +3 -6
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +2 -2
- mindspore/ops/operations/comm_ops.py +185 -26
- mindspore/ops/operations/custom_ops.py +294 -174
- mindspore/ops/operations/debug_ops.py +59 -4
- mindspore/ops/operations/image_ops.py +13 -13
- mindspore/ops/operations/manually_defined/ops_def.py +15 -16
- mindspore/ops/operations/math_ops.py +3 -4
- mindspore/ops/operations/nn_ops.py +7 -39
- mindspore/ops/primitive.py +6 -10
- mindspore/ops/tensor_method.py +47 -8
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
- mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
- mindspore/ops_generate/api/functions_cc_generator.py +58 -10
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
- mindspore/ops_generate/common/base_generator.py +14 -0
- mindspore/ops_generate/common/gen_constants.py +8 -3
- mindspore/ops_generate/common/gen_utils.py +0 -19
- mindspore/ops_generate/common/op_proto.py +11 -4
- mindspore/ops_generate/common/template.py +88 -11
- mindspore/ops_generate/gen_ops.py +1 -1
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -0
- mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
- mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
- mindspore/parallel/_auto_parallel_context.py +11 -8
- mindspore/parallel/_cell_wrapper.py +113 -45
- mindspore/parallel/_parallel_serialization.py +1 -1
- mindspore/parallel/_ps_context.py +4 -6
- mindspore/parallel/_tensor.py +167 -12
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/transformer.py +13 -8
- mindspore/parallel/auto_parallel.py +14 -7
- mindspore/parallel/checkpoint_convert.py +3 -3
- mindspore/parallel/checkpoint_transform.py +11 -7
- mindspore/parallel/cluster/process_entity/_api.py +84 -48
- mindspore/parallel/cluster/process_entity/_utils.py +95 -7
- mindspore/parallel/cluster/run.py +43 -4
- mindspore/parallel/function/__init__.py +8 -1
- mindspore/parallel/function/reshard_func.py +6 -7
- mindspore/parallel/nn/__init__.py +15 -2
- mindspore/parallel/nn/parallel_cell_wrapper.py +9 -10
- mindspore/parallel/nn/parallel_grad_reducer.py +7 -6
- mindspore/parallel/shard.py +3 -4
- mindspore/parallel/transform_safetensors.py +463 -174
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +12 -6
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
- mindspore/profiler/analysis/task_manager.py +1 -1
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +42 -22
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
- mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
- mindspore/profiler/common/constant.py +16 -0
- mindspore/profiler/common/profiler_context.py +25 -27
- mindspore/profiler/common/profiler_info.py +0 -16
- mindspore/profiler/common/profiler_op_analyse.py +235 -0
- mindspore/profiler/common/profiler_output_path.py +23 -8
- mindspore/profiler/common/profiler_parameters.py +128 -35
- mindspore/profiler/dynamic_profile/__init__.py +0 -0
- mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
- mindspore/profiler/dynamic_profiler.py +305 -314
- mindspore/profiler/envprofiler.py +12 -7
- mindspore/profiler/experimental_config.py +96 -6
- mindspore/profiler/mstx.py +33 -12
- mindspore/profiler/platform/__init__.py +2 -3
- mindspore/profiler/platform/npu_profiler.py +29 -19
- mindspore/profiler/profiler.py +35 -19
- mindspore/profiler/profiler_action_controller.py +64 -76
- mindspore/profiler/schedule.py +10 -4
- mindspore/rewrite/common/config.py +1 -0
- mindspore/rewrite/common/namer.py +1 -0
- mindspore/rewrite/common/namespace.py +1 -0
- mindspore/rewrite/node/node.py +31 -11
- mindspore/rewrite/parsers/assign_parser.py +1 -1
- mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +7 -10
- mindspore/runtime/__init__.py +5 -5
- mindspore/runtime/event.py +10 -4
- mindspore/runtime/executor.py +60 -45
- mindspore/runtime/memory.py +30 -32
- mindspore/runtime/thread_bind_core.py +298 -164
- mindspore/safeguard/rewrite_obfuscation.py +12 -13
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/_utils.py +14 -4
- mindspore/train/amp.py +43 -20
- mindspore/train/callback/__init__.py +5 -5
- mindspore/train/callback/_checkpoint.py +3 -6
- mindspore/train/callback/_flops_collector.py +1 -1
- mindspore/train/callback/_landscape.py +0 -1
- mindspore/train/callback/_train_fault_tolerance.py +97 -16
- mindspore/train/data_sink.py +11 -2
- mindspore/train/dataset_helper.py +9 -0
- mindspore/train/model.py +135 -55
- mindspore/train/serialization.py +133 -111
- mindspore/train/summary/summary_record.py +13 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +0 -6
- mindspore/utils/runtime_execution_order_check.py +163 -77
- mindspore/utils/sdc_detect.py +68 -0
- mindspore/utils/utils.py +6 -9
- mindspore/version.py +1 -1
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +5 -4
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +333 -371
- mindspore/_deprecated/jit.py +0 -198
- mindspore/experimental/es/__init__.py +0 -22
- mindspore/experimental/es/embedding_service.py +0 -891
- mindspore/experimental/es/embedding_service_layer.py +0 -581
- mindspore/profiler/parser/__init__.py +0 -14
- mindspore/profiler/parser/aicpu_data_parser.py +0 -272
- mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
- mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
- mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
- mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
- mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
- mindspore/profiler/parser/ascend_flops_generator.py +0 -116
- mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
- mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
- mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
- mindspore/profiler/parser/ascend_op_generator.py +0 -334
- mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
- mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
- mindspore/profiler/parser/base_timeline_generator.py +0 -483
- mindspore/profiler/parser/container.py +0 -229
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
- mindspore/profiler/parser/flops_parser.py +0 -531
- mindspore/profiler/parser/framework_enum.py +0 -111
- mindspore/profiler/parser/framework_parser.py +0 -464
- mindspore/profiler/parser/framework_struct.py +0 -61
- mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
- mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
- mindspore/profiler/parser/hccl_parser.py +0 -573
- mindspore/profiler/parser/hwts_log_parser.py +0 -122
- mindspore/profiler/parser/integrator.py +0 -526
- mindspore/profiler/parser/memory_usage_parser.py +0 -277
- mindspore/profiler/parser/minddata_analyzer.py +0 -800
- mindspore/profiler/parser/minddata_parser.py +0 -186
- mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
- mindspore/profiler/parser/op_intermediate_parser.py +0 -149
- mindspore/profiler/parser/optime_parser.py +0 -250
- mindspore/profiler/parser/profiler_info.py +0 -213
- mindspore/profiler/parser/step_trace_parser.py +0 -666
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
@@ -30,7 +30,6 @@ from mindspore.ops.primitive import prim_attr_register, Primitive, PrimitiveWith
 from mindspore._checkparam import check_hook_fn
 from mindspore.ops import operations as P
 
-
 SUMMARY_TENSOR_CACHE = []
 
 
@@ -396,6 +395,10 @@ class InsertGradientOf(Primitive):
     """
     Attaches callback to the graph node that will be invoked on the node's gradient.
 
+    .. warning::
+        In the callback, exercise caution when using side-effect operators,
+        such as the TensorDump operator, as current support is incomplete.
+
     Args:
         f (Function): MindSpore's Function. Callback function.
 
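For context on how an `InsertGradientOf` callback is used, a minimal usage sketch (not taken from the diff; it assumes a MindSpore install running in PyNative mode):

# Minimal sketch: an InsertGradientOf callback that clips the gradient flowing into x.
import mindspore as ms
from mindspore import Tensor, ops

def clip_grad(dout):
    # Called with the gradient arriving at x; the returned value replaces it.
    return ops.clip_by_value(dout, -1.0, 1.0)

def fn(x):
    x = ops.InsertGradientOf(clip_grad)(x)
    return x * x

print(ms.grad(fn)(Tensor(3.0, ms.float32)))  # d(x*x)/dx = 6.0, clipped to 1.0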
@@ -459,15 +462,67 @@ class InsertGradientOf(Primitive):
         self.f = f
 
 
+class DumpGradient(Primitive):
+    """
+    The `DumpGradient` Primitive is a hook, used to dump dout which pass to `x`.
+
+    Inputs:
+        - **path** (str) - The path of the file to be saved.
+        - **x** (Tensor) - Input Tensor of any dimension.
+        - **input_output** (str) - support value should be one of ['in', 'out'].
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore as ms
+        >>> from mindspore import ops
+        >>> from mindspore import Tensor
+        >>> ms.set_context(mode=ms.PYNATIVE_MODE)
+        >>> ms.set_device(device_target="Ascend")
+        >>> dg = ops.DumpGradient()
+        >>> def dout_dump_test(x, y):
+        ...     x = dg("x_dout.npy", x, 'out')
+        ...     print(f"x value is {x}")
+        ...     z = x * y
+        ...     return z
+        >>> ms_grad = ms.grad(dout_dump_test, grad_position=(0,1))
+        >>> x_grad, y_grad = ms_grad(Tensor(1, ms.float32), Tensor(2, ms.float32))
+        >>> print(f"x grad is {x_grad}, y_grad is {y_grad}")
+        >>> x_grad_npy = np.load("x_dout.npy")
+        >>> print(f"load x_grad from npy, x_grad is {x_grad_npy}")
+        x value is 1.0
+        x grad is 2.0, y grad is 1.0
+        load x_grad from npy, x_grad is array(2., dtype=float32)
+    """
+
+    @prim_attr_register
+    def __init__(self):
+        pass
+
+    def _dump_hook(self, dout):
+        P.TensorDump()(self.bwd_dump_path, dout)
+        return dout
+
+    def __call__(self, path, x, input_output):
+        self.bwd_dump_path = path
+        x = P.InsertGradientOf(self._dump_hook)(x)
+        return x
+
+
 class Morph(PrimitiveWithInfer):
     """
     The `Morph` Primitive is used to encapsulate a user-defined function `fn`, allowing it to be used as a custom
     Primitive.
-
-
-
+
+    The `Morph` Primitive is primarily designed for custom graph optimization in GRAPH mode. For example, it supports
+    encapsulation of irregular collective communications (such as :func:`mindspore.ops.AlltoAllV`) in distributed
+    auto-parallel training scenarios.
+
     When the `Morph` Primitive is applied to inputs, it is actually the encapsulated user-defined function `fn` that is
     applied to the inputs.
+
     The main difference between the `Morph` Primitive and :func:`mindspore.ops.Custom` is that the former is expanded
     and replaced by the user-defined `fn` before automatic differentiation, so there is no need to implement a backward
     function.
@@ -102,19 +102,19 @@ class AdjustContrastv2(Primitive):
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-
-
-
-
-
-
-
-
-
-
-
-
-
+        >>> images = Tensor([[[1.0, 2.0, 3.0],
+        ...                   [4.0, 5.0, 6.0]],
+        ...                  [[7.0, 8.0, 9.0],
+        ...                   [10.0, 11.0, 12.0]]], mstype.float32)
+        >>> contrast_factor = Tensor(2., mstype.float32)
+        >>> adjustcontrastv2 = AdjustContrastv2()
+        >>> output = adjustcontrastv2(images, contrast_factor)
+        >>> print(output)
+        [[[-3.5 -2.5 -1.5]
+          [ 2.5  3.5  4.5]]
+        <BLANKLINE>
+         [[ 8.5  9.5 10.5]
+          [14.5 15.5 16.5]]]
     """
 
     @prim_attr_register
@@ -26,7 +26,6 @@ from mindspore.ops._primitive_cache import _get_cache_prim
 from mindspore.ops._utils import arg_handler as handler
 from mindspore.ops._utils.arg_dtype_cast import DtypeToEnum
 from mindspore.common import Tensor, CSRTensor, COOTensor
-from mindspore.common._stub_tensor import _convert_stub
 from mindspore._c_expression import typing
 from mindspore._c_expression import TensorPy as Tensor_
 from mindspore._c_expression import pyboost_cast, pyboost_tile, pyboost_zeros, pyboost_ones, pyboost_type_as
@@ -1057,8 +1056,8 @@ class Tile(Primitive):
     def __call__(self, input, dims):
         # Add for jit context.
         if jit_context() and jit_context().compiled:
-            return
-        res =
+            return jit_context().default_output()
+        res = pyboost_tile(self, [input, dims])
         # Add for jit context.
         if jit_context():
             if validator.is_stub_tensor(res):
@@ -1066,7 +1065,6 @@ class Tile(Primitive):
                 return jit_context().run_op(self, res, input, dims)
         return res
 
-    # pylint: disable=missing-docstring
     def check_elim(self, *args):
         base_tensor, dims = args
         if not isinstance(base_tensor, Tensor):
@@ -1230,11 +1228,11 @@ class Cast(Primitive):
     def __call__(self, input_x, dtype):
         # Add for jit context.
         if jit_context() and jit_context().compiled:
-            return
+            return jit_context().default_output()
         should_elim, output = self.check_elim(input_x, dtype)
         if should_elim:
             return output
-        res =
+        res = pyboost_cast(self, [input_x, dtype_to_type_id('Cast', 'dtype', dtype)])
         # Add for jit context.
         if jit_context():
             if validator.is_stub_tensor(res):
@@ -1293,7 +1291,7 @@ class TypeAs(Primitive):
     def __call__(self, input, other):
         if input.dtype == other.dtype:
             return input
-        return
+        return pyboost_type_as(self, [input, other])
 
 
 def to_sequence(val):
@@ -2070,9 +2068,9 @@ class Ones(Primitive):
     def __call__(self, size, type=None):
         # Add for jit context.
         if jit_context() and jit_context().compiled:
-            return
-        res =
-            else handler.dtype_to_type_id('Ones', 'type', type)])
+            return jit_context().default_output()
+        res = pyboost_ones(self, [size, type if type is None \
+            else handler.dtype_to_type_id('Ones', 'type', type)])
         # Add for jit context.
         if jit_context():
             if validator.is_stub_tensor(res):
@@ -2130,9 +2128,9 @@ class Zeros(Primitive):
     def __call__(self, size, type=None):
         # Add for jit context.
         if jit_context() and jit_context().compiled:
-            return
-        res =
-            handler.dtype_to_type_id('Zeros', 'type', type)])
+            return jit_context().default_output()
+        res = pyboost_zeros(self, [size, type if type is None else \
+            handler.dtype_to_type_id('Zeros', 'type', type)])
         # Add for jit context.
         if jit_context():
             if validator.is_stub_tensor(res):
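The Tile, Cast, TypeAs, Ones and Zeros hunks above all fill in previously truncated bodies with the same pattern: when a jit context is already compiled, `__call__` now returns `jit_context().default_output()` instead of `None`, and otherwise dispatches to the matching pyboost binding before handing the result back to the jit context. A self-contained sketch of that control flow follows; `FakeJitContext` and `pyboost_call` are illustrative stand-ins, not MindSpore internals.

# Illustrative stand-ins; the real code uses mindspore's jit_context() and pyboost_* bindings.
class FakeJitContext:
    compiled = False

    def default_output(self):
        return "default-output"   # placeholder returned once the graph is compiled

    def run_op(self, prim, res, *args):
        return res                # record the op, then pass the eager result through

_CTX = FakeJitContext()

def jit_context():
    return _CTX

def pyboost_call(prim_name, args):
    return f"{prim_name}({', '.join(map(str, args))})"

def tile_call(input_, dims):
    # Early exit when the jit context has already been compiled.
    if jit_context() and jit_context().compiled:
        return jit_context().default_output()
    res = pyboost_call("Tile", [input_, dims])
    # Hand the eager result back to the jit context so it can be traced.
    if jit_context():
        return jit_context().run_op("Tile", res, input_, dims)
    return res

print(tile_call([1, 2, 3], (2,)))  # Tile([1, 2, 3], (2,))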
@@ -2217,7 +2215,8 @@ def flash_attention_score(query, key, value, head_num, real_shift=None, drop_mas
         keep_prob (double, optional): The keep probability of dropout. Value range is (0.0, 1.0]. When `keep_prob`
             is 1.0, `drop_mask` should be None.
             Default: ``1.0``.
-        scalar_value (double, optional): The scale
+        scalar_value (double, optional): The scale value indicating the scale coefficient, which is used as the
+            scalar of Muls in the calculation. Generally, the value is 1.0 / (D ** 0.5).
             Default: ``1.0``.
         pre_tokens (int, optional): Parameter for sparse computation, represents how many tokens are counted forward.
             When `sparse_mode` is set to 1, 2, 3, or 5, this parameter does not take effect.
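The clarified `scalar_value` description matches the usual attention-scaling convention; a tiny helper illustrating it (plain Python, not part of the API):

def default_scalar_value(head_dim: int) -> float:
    # scalar_value is typically 1 / sqrt(D), where D is the per-head dimension.
    return 1.0 / (head_dim ** 0.5)

print(default_scalar_value(128))  # ~0.0884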
@@ -2599,8 +2598,8 @@ def fused_infer_attention_score(query, key, value, *, pse_shift=None, atten_mask
           taking exp, and then the sum is computed to obtain softmax_sum. Finally, the log of softmax_sum is taken,
           and softmax_max is added to obtain softmax_lse. The softmax_lse is only calculated when softmax_lse_flag
           is True, and the shape would be :math:`(B, N, Q\_S, 1)`. If softmax_lse_flag is False, then a tensor with
-          shape :math:`(1)` filled with zeros would be returned. In
-
+          shape :math:`(1)` filled with zeros would be returned. In GE backend, please ensure that the softmax_lse_flag
+          is enabled before using softmax_lse; otherwise, an exception will occur.
 
     Constraints:
         - Full Inference Scenario (Q_S > 1):
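The softmax_lse computation described above (subtract the row max, exponentiate, sum, take the log, add the max back) is an ordinary numerically stable log-sum-exp; a NumPy sketch of the same arithmetic, independent of the MindSpore kernel:

import numpy as np

def softmax_lse(scores, axis=-1):
    # softmax_max -> exp -> softmax_sum -> log -> add the max back, as described above.
    softmax_max = scores.max(axis=axis, keepdims=True)
    softmax_sum = np.exp(scores - softmax_max).sum(axis=axis, keepdims=True)
    return np.log(softmax_sum) + softmax_max

print(softmax_lse(np.array([[1.0, 2.0, 3.0]])))  # [[3.40760596]]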
@@ -1231,7 +1231,7 @@ class Heaviside(Primitive):
             0, & \text { if x }<0 \\
             \text { values, } & \text { if x }==0 \\
             1, & \text { if x }>0
-            \end{array}\right
+            \end{array}\right.
 
     .. warning::
         This is an experimental API that is subject to change or deletion.
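With the `\right.` delimiter restored, the formula is the usual step function with a caller-supplied value at zero; a NumPy sketch of those semantics (not the MindSpore kernel):

import numpy as np

def heaviside(x, values):
    # 0 where x < 0, `values` where x == 0, 1 where x > 0 -- the formula above.
    x = np.asarray(x, dtype=np.float32)
    values = np.broadcast_to(np.asarray(values, dtype=np.float32), x.shape)
    return np.where(x < 0, 0.0, np.where(x > 0, 1.0, values))

print(heaviside([-1.5, 0.0, 2.0], 0.5))  # [0.  0.5 1. ]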
@@ -3354,9 +3354,8 @@ class ComplexAbs(Primitive):
 
     Examples:
         >>> import mindspore
-        >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>> x = Tensor(
+        >>> x = Tensor(3+4j, mindspore.complex64)
         >>> complex_abs = ops.ComplexAbs()
         >>> output = complex_abs(x)
         >>> print(output)
@@ -3630,7 +3629,7 @@ class MatrixSolveLs(Primitive):
         TypeError: If `l2_regularizer` is not float64.
         TypeError: If `fast` is not bool.
         ValueError: If dimensions of `matrix` or `rhs` is less than 2.
-        ValueError: If shape of `matrix`
+        ValueError: If shape of `matrix` does not match the shape of `rhs`.
 
     Supported Platforms:
         ``CPU``
@@ -32,7 +32,6 @@ from mindspore.ops.primitive import PrimitiveWithCheck
 from mindspore.ops.primitive import prim_attr_register
 from mindspore.run_check._check_version import AscendEnvChecker
 from mindspore._c_expression import pyboost_all_finite
-from mindspore.common._stub_tensor import _convert_stub
 from ..auto_generate import (CeLU, Flatten, LogSoftmax, LogSoftmaxExt, GLU, ReLU, ReLU6, Dense, Tanh,
                              Elu, Sigmoid, Softmax, SoftplusExt, HSwish, HSigmoid, AvgPool, BiasAdd,
                              NLLLoss, OneHot, GeLU, FastGeLU, PReLU, RmsNorm, IncreFlashAttention, MSELossExt,
@@ -6434,39 +6433,8 @@ class Conv3D(Primitive):
 
     Outputs:
         Tensor, the value that applied 3D convolution. The shape is :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`.
-
-        `
-
-        .. math::
-            \begin{array}{ll} \\
-                D_{out} = \left \lceil{\frac{D_{in}}{\text{stride[0]}}} \right \rceil \\
-                H_{out} = \left \lceil{\frac{H_{in}}{\text{stride[1]}}} \right \rceil \\
-                W_{out} = \left \lceil{\frac{W_{in}}{\text{stride[2]}}} \right \rceil \\
-            \end{array}
-
-        `pad_mode` is ``"valid"``:
-
-        .. math::
-            \begin{array}{ll} \\
-                D_{out} = \left \lfloor{\frac{D_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) }
-                {\text{stride[0]}} + 1} \right \rfloor \\
-                H_{out} = \left \lfloor{\frac{H_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) }
-                {\text{stride[1]}} + 1} \right \rfloor \\
-                W_{out} = \left \lfloor{\frac{W_{in} - \text{dilation[2]} \times (\text{kernel_size[2]} - 1) }
-                {\text{stride[2]}} + 1} \right \rfloor \\
-            \end{array}
-
-        `pad_mode` is ``"pad"``:
-
-        .. math::
-            \begin{array}{ll} \\
-                D_{out} = \left \lfloor{\frac{D_{in} + pad[0] + pad[1] - (\text{dilation[0]} - 1) \times
-                \text{kernel_size[0]} - 1 }{\text{stride[0]}} + 1} \right \rfloor \\
-                H_{out} = \left \lfloor{\frac{H_{in} + pad[2] + pad[3] - (\text{dilation[1]} - 1) \times
-                \text{kernel_size[1]} - 1 }{\text{stride[1]}} + 1} \right \rfloor \\
-                W_{out} = \left \lfloor{\frac{W_{in} + pad[4] + pad[5] - (\text{dilation[2]} - 1) \times
-                \text{kernel_size[2]} - 1 }{\text{stride[2]}} + 1} \right \rfloor \\
-            \end{array}
+        To see how different pad modes affect the output shape, please refer to
+        :class:`mindspore.nn.Conv3d` for more details.
 
     Raises:
         TypeError: If `out_channel` or `group` is not an int.
@@ -7151,8 +7119,8 @@ class Conv3DTranspose(Primitive):
         self.format = validator.check_string(data_format, ['NCDHW'], 'format', self.name)
         self.add_prim_attr('data_format', self.format)
 
-        self.output_padding = _check_3d_int_or_tuple('output_padding', output_padding, self.name,
-
+        self.output_padding = _check_3d_int_or_tuple('output_padding', output_padding, self.name, allow_five=False,
+                                                     ret_five=True, greater_zero=False, pad_value=0)
         output_padding_ = (self.output_padding[2], self.output_padding[3], self.output_padding[4])
         if self.pad_mode != 'pad' and output_padding_ != (0, 0, 0):
             raise ValueError(f"For '{self.name}', the 'output_padding' must be zero or (0, 0, 0) "
@@ -7402,8 +7370,8 @@ class ApplyAdagradDA(Primitive):
         >>> global_step = Tensor(2, mstype.int32)
         >>> output = net(grad, lr, l1, l2, global_step)
         >>> print(output)
-        [[-0.00073906
-        [-0.00059699
+        [[-0.00073906 -0.00136889]
+         [-0.00059699 -0.00142478]]
     """
 
     __mindspore_signature__ = (
@@ -9289,4 +9257,4 @@ class AllFinite(Primitive):
                                "in the current environment does not support AllFinite.")
 
     def __call__(self, *args):
-        return
+        return pyboost_all_finite(self, args)
mindspore/ops/primitive.py
CHANGED
@@ -170,10 +170,13 @@ class Primitive(Primitive_):
                 raise TypeError(f'The element of strategy must be tuple/Layout type, but got:{type(in_ele)}')
             if isinstance(in_ele, tuple):
                 for in_value in in_ele:
-                    if not isinstance(in_value, int) and self.name not in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY
+                    if not isinstance(in_value, int) and self.name not in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY \
+                            and not self.attrs.get("self_define_shard", False):
                         raise TypeError(f'The {log_info}: {strategy} of {self.name} is not valid,'
                                         f' the value of strategy must be int type, but got:{type(in_value)}')
-                    if isinstance(in_value, Layout) and (
+                    if isinstance(in_value, Layout) and (
+                            self.name in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY or self.attrs.get("self_define_shard",
+                                                                                             False)):
                         is_layout.append(True)
                         continue
                     is_layout.append(False)
@@ -315,7 +318,7 @@ class Primitive(Primitive_):
         out_is_layout = self._check_shard_strategy(out_strategy, "out_strategy")
         is_layout = in_is_layout if in_is_layout is not None else out_is_layout
         if out_is_layout is not None and is_layout != out_is_layout and \
-                self.name not in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY:
+                self.name not in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY and not self.attrs.get("self_define_shard", False):
             raise ValueError(f'The in_strategy type must equal to the out_strategy type, '
                              f'one using tuple(tuple) and the other using tuple(Layout) is not allowed.')
 
@@ -409,12 +412,6 @@ class Primitive(Primitive_):
             return output
         return _run_op(self, self.name, args)
 
-    def __getstate__(self):
-        return self.__dict__
-
-    def __setstate__(self, d):
-        self.__dict__.update(d)
-
     def __deepcopy__(self, memo):
         return type(self)(**self.init_attrs)
 
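The deleted `__getstate__`/`__setstate__` pair only mirrored Python's default pickling protocol (expose `__dict__`, then update it on load), so removing them should not change serialization behaviour; a standalone sketch of that equivalence:

import pickle

class WithExplicitState:
    def __init__(self):
        self.init_attrs = {"name": "demo"}

    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, d):
        self.__dict__.update(d)

class WithDefaultState:
    def __init__(self):
        self.init_attrs = {"name": "demo"}

# Both classes round-trip through pickle with identical instance state.
a = pickle.loads(pickle.dumps(WithExplicitState()))
b = pickle.loads(pickle.dumps(WithDefaultState()))
print(a.__dict__ == b.__dict__)  # True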
@@ -1032,7 +1029,6 @@ def _run_op(obj, op_name, args):
     res = _pynative_executor.run_op_async(obj, op_name, args)
     # Add for jit context.
     if jit_context():
-        # todo support TensorPy
        return jit_context().run_op(obj, res, *args)
     return res
 
mindspore/ops/tensor_method.py
CHANGED
@@ -21,7 +21,7 @@ from mindspore.ops import operations as P
 from mindspore.ops import functional as F
 from mindspore.ops.composite.multitype_ops import _compile_utils as utils
 from mindspore.ops.composite.multitype_ops._compile_utils import (
-    sequence_to_tensor, _tensor_sub, _tensor_pow, _tensor_div, _tensor_floordiv
+    sequence_to_tensor, _tensor_sub, _tensor_pow, _tensor_div, _tensor_floordiv, _tensor_mod
 )
 from mindspore.ops.auto_generate.gen_ops_prim import (
     inplace_scatter_src_op, inplace_scatter_src_reduce_op, inplace_scatter_value_op, inplace_scatter_value_reduce_op,
@@ -237,7 +237,7 @@ from mindspore.ops.function.array_func import scatter
 # 92 scatter_add
 from mindspore.ops.function.array_func import tensor_scatter_add
 # 93 select
-from mindspore.ops.auto_generate import select,
+from mindspore.ops.auto_generate import select, select_ext_view
 # 94 sigmoid
 from mindspore.ops.auto_generate import sigmoid
 # 95 sin
@@ -273,7 +273,7 @@ from mindspore.ops.operations.manually_defined import tile
 # 110 topk
 from mindspore.ops.function.array_func import topk
 # 111 transpose
-from mindspore.ops.auto_generate import transpose,
+from mindspore.ops.auto_generate import transpose, transpose_ext_view
 # 112 tril
 from mindspore.ops.function.array_func import tril
 # 113 trunc
@@ -503,16 +503,16 @@ def tensor_allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False):
 
 
 # 8 any
-def
+def reduce_tensor_any(x, axis=None, keep_dims=False):
     if axis is None:
         axis = ()
     return any(x, axis, keep_dims)
 
 
-def
+def tensor_any(input, dim=None, keepdim=False):
     if dim is None:
         dim = ()
-    return any(
+    return any(input, dim, keepdim)
 
 
 # 9 arctan2
@@ -948,6 +948,10 @@ def deprecated_tensor_remainder(input, divisor):
     return remainder(input, divisor)
 
 
+def deprecated_tensor_mod(input, other):
+    return _tensor_mod(input, other)
+
+
 # 86 repeat
 def tensor_repeat(input, *repeats):
     raise RuntimeError("'repeat' is not supported on this device.")
@@ -998,7 +1002,7 @@ def deprecated_tensor_scatter_add(input, indices, updates):
 
 # 93 select
 def tensor_select_ext(input, dim, index):
-    return
+    return select_ext_view(input, dim, index)
 
 
 def deprecated_tensor_select(input, condition, y):
@@ -1170,7 +1174,7 @@ def deprecated_tensor_topk(input, k, dim=None, largest=True, sorted=True):
 
 # 111 transpose
 def tensor_transpose_ext(input, dim0, dim1):
-    return
+    return transpose_ext_view(input, dim0, dim1)
 
 
 def deprecated_tensor_transpose(input, *axes):
@@ -1514,6 +1518,16 @@ def tensor_empty(*size, dtype=None, device=None):
         "This is a function for empty not should be called. Please check the implementation.")
 
 
+def tensor_empty_like(input, *, dtype=None, device=None):
+    raise NotImplementedError(
+        "This is a function for empty_like should not be called. Please check the implementation.")
+
+
+def tensor_new_empty(input, size, *, dtype=None, device=None):
+    raise NotImplementedError(
+        "This is a function for new_empty should not be called. Please check the implementation.")
+
+
 def deprecated_tensor_logaddexp(input, other):
     return F.logaddexp(input, other)
 
@@ -1744,6 +1758,10 @@ def deprecated_tensor_diag(input):
     return F.diag(input)
 
 
+def deprecated_einsum(equation, operands):
+    raise NotImplementedError('einsum only supports Ascend.')
+
+
 # 916 index_add
 @constexpr
 def _check_index_add_alpha(alpha):
@@ -1808,6 +1826,8 @@ def tensor_inplace_sub(input, other, *, alpha=1):
         return sub(input, other)
     return sub_ext(input, other, alpha=alpha)
 
+def tensor_new_full(input, size, fill_value, *, dtype=None):
+    raise NotImplementedError("new_full method support Ascend only")
 
 def tensor_div_empty_(input, other, rounding_mode=None):
     raise ValueError("should not come here for div_ method.")
@@ -1840,6 +1860,10 @@ def all_gather_matmul(
     raise NotImplementedError('all_gather_matmul only supports Ascend.')
 
 
+def conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
+    raise NotImplementedError('conv3d only supports Ascend.')
+
+
 def tensor_floor_divide_(input, other):
     return _tensor_floordiv(input, other)
 
@@ -1886,3 +1910,18 @@ def tensor_gelu(input, *, approximate):
 
 def deprecated_pixel_shuffle(input, upscale_factor):
     return F.pixel_shuffle(input, upscale_factor)
+
+
+def tensor_quant_matmul(x1, x2, scale, *, offset=None, pertoken_scale=None, bias=None, output_dtype=None,
+                        x1_dtype=None, x2_dtype=None, pertoken_scale_dtype=None, scale_dtype=None, group_sizes=None):
+    r"""
+    For details, please refer to :func:`mindspore.ops.auto_generate.quant_matmul`.
+    """
+    raise NotImplementedError('quant_matmul only supports Ascend.')
+
+
+def tensor_gmm(x, weight, *, bias=None, group_list=None, group_type=0, group_list_type=0):
+    raise NotImplementedError("gmm has not been implemented by python.")
+
+def raise_func(*args, **kwargs):
+    raise NotImplementedError("this func has not been implemented.")
@@ -53,7 +53,7 @@ class CppCreatePrimInstanceHelperGenerator(BaseGenerator):
         """
         py_arg_default = self.generate_op_arg_default_value(op_protos)
         py_labels = self.generate_op_labels(op_protos)
-        res_str =
+        res_str = template.PY_LICENSE_STR + py_arg_default + py_labels
 
         save_path = os.path.join(work_path, K.PY_AUTO_GEN_PATH)
         file_name = "cpp_create_prim_instance_helper.py"
@@ -159,14 +159,6 @@ class FunctionalMapCppGenerator(BaseGenerator):
             sig_str += '}\n},'
         return sig_str
 
-    def _is_input_arg(self, arg_name, op_name):
-        res = False
-        if op_name in K.INPUT_NAME_MAP and arg_name == K.INPUT_NAME_MAP[op_name]:
-            res = True
-        elif op_name not in K.INPUT_NAME_MAP and arg_name in K.INPUT_ARGS_NAME:
-            res = True
-        return res
-
     def _generate_single_signature_str(self, func_api_name, tensor_proto, is_tensor_method) -> str:
         """
         Generates a single function signature string for the given operation prototype.
@@ -186,7 +178,7 @@ class FunctionalMapCppGenerator(BaseGenerator):
         arg_valid_types = []
         for _, arg in enumerate(op_proto.op_args):
             arg_name = arg.arg_name
-            if is_tensor_method and
+            if is_tensor_method and _is_input_arg(arg_name, op_name):
                 continue
 
             arg_valid_types = self._handle_arg_valid_types(arg, arg_name, arg_valid_types, func_api_name)
@@ -502,3 +494,12 @@ class FunctionalMapCppGenerator(BaseGenerator):
         self._get_and_append_single_op_varargs_list(func_protos,
                                                     mint_varargs_list)
         return mint_varargs_list
+
+
+def _is_input_arg(arg_name, op_name):
+    res = False
+    if op_name in K.INPUT_NAME_MAP and arg_name == K.INPUT_NAME_MAP[op_name]:
+        res = True
+    elif op_name not in K.INPUT_NAME_MAP and arg_name in K.INPUT_ARGS_NAME:
+        res = True
+    return res
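`_is_input_arg` moves from a method to a module-level helper with unchanged logic; a standalone sketch of the lookup it performs, using hypothetical stand-ins for `K.INPUT_NAME_MAP` and `K.INPUT_ARGS_NAME`:

# Hypothetical stand-ins for the constants in gen_constants (K).
INPUT_NAME_MAP = {"transpose_ext_view": "input"}   # per-op override of the tensor-input name
INPUT_ARGS_NAME = {"input", "x"}                   # default names treated as the tensor input

def _is_input_arg(arg_name, op_name):
    res = False
    if op_name in INPUT_NAME_MAP and arg_name == INPUT_NAME_MAP[op_name]:
        res = True
    elif op_name not in INPUT_NAME_MAP and arg_name in INPUT_ARGS_NAME:
        res = True
    return res

print(_is_input_arg("input", "transpose_ext_view"))  # True (per-op mapping)
print(_is_input_arg("x", "some_other_op"))           # True (default name set)
print(_is_input_arg("dim", "some_other_op"))         # False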