mindspore 2.6.0rc1__cp39-cp39-win_amd64.whl → 2.7.0rc1__cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +1 -1
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +40 -9
- mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
- mindspore/_extends/optimize/cell_utils.py +96 -0
- mindspore/_extends/parse/__init__.py +2 -2
- mindspore/_extends/parse/compile_config.py +44 -22
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -1
- mindspore/_extends/parse/parser.py +37 -62
- mindspore/_extends/parse/resources.py +39 -0
- mindspore/_extends/parse/standard_method.py +43 -13
- mindspore/_extends/parse/trope.py +8 -1
- mindspore/_extends/pijit/__init__.py +1 -2
- mindspore/amp.py +4 -4
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +4 -4
- mindspore/common/__init__.py +27 -2
- mindspore/common/_grad_function.py +2 -1
- mindspore/common/_pijit_context.py +28 -7
- mindspore/common/_stub_tensor.py +1 -209
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +77 -16
- mindspore/common/api.py +238 -113
- mindspore/common/dtype.py +21 -11
- mindspore/common/dump.py +10 -15
- mindspore/common/generator.py +5 -3
- mindspore/common/hook_handle.py +11 -2
- mindspore/common/jit_config.py +1 -1
- mindspore/common/jit_trace.py +84 -105
- mindspore/common/parameter.py +26 -12
- mindspore/common/recompute.py +3 -3
- mindspore/common/sparse_tensor.py +0 -3
- mindspore/common/symbol.py +0 -1
- mindspore/common/tensor.py +81 -81
- mindspore/communication/_comm_helper.py +46 -4
- mindspore/communication/management.py +79 -7
- mindspore/context.py +58 -40
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/datasets.py +20 -7
- mindspore/dataset/engine/datasets_user_defined.py +33 -3
- mindspore/dataset/engine/iterators.py +2 -2
- mindspore/dataset/engine/obs/config_loader.py +2 -2
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
- mindspore/dataset/transforms/py_transforms.py +7 -3
- mindspore/dataset/transforms/transforms.py +7 -3
- mindspore/dataset/vision/validators.py +1 -0
- mindspore/device_context/ascend/device.py +1 -1
- mindspore/device_context/gpu/__init__.py +2 -2
- mindspore/device_context/gpu/device.py +1 -1
- mindspore/device_context/gpu/op_precision.py +4 -2
- mindspore/device_context/gpu/op_tuning.py +6 -3
- mindspore/device_manager.py +16 -9
- mindspore/dnnl.dll +0 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -7
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/optim/adadelta.py +13 -20
- mindspore/experimental/optim/adagrad.py +15 -22
- mindspore/experimental/optim/adam.py +17 -24
- mindspore/experimental/optim/adamax.py +14 -22
- mindspore/experimental/optim/adamw.py +28 -34
- mindspore/experimental/optim/asgd.py +15 -25
- mindspore/experimental/optim/lr_scheduler.py +27 -45
- mindspore/experimental/optim/nadam.py +14 -24
- mindspore/experimental/optim/optimizer.py +13 -23
- mindspore/experimental/optim/radam.py +18 -24
- mindspore/experimental/optim/rmsprop.py +14 -25
- mindspore/experimental/optim/rprop.py +15 -26
- mindspore/experimental/optim/sgd.py +9 -19
- mindspore/hal/__init__.py +4 -4
- mindspore/hal/contiguous_tensors_handle.py +2 -2
- mindspore/hal/memory.py +27 -7
- mindspore/include/api/cell.h +37 -1
- mindspore/include/api/delegate.h +10 -0
- mindspore/include/api/model.h +3 -0
- mindspore/include/api/types.h +2 -2
- mindspore/include/c_api/model_c.h +0 -58
- mindspore/include/c_api/tensor_c.h +0 -26
- mindspore/include/dataset/vision_ascend.h +1 -1
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/cifar10.py +60 -11
- mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mindspore_ops_host.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +6 -46
- mindspore/mint/distributed/__init__.py +1 -0
- mindspore/mint/distributed/distributed.py +212 -9
- mindspore/mint/nn/__init__.py +1 -1
- mindspore/mint/nn/functional.py +53 -6
- mindspore/mint/nn/layer/_functions.py +164 -294
- mindspore/mint/nn/layer/activation.py +8 -6
- mindspore/mint/nn/layer/conv.py +137 -101
- mindspore/mint/nn/layer/normalization.py +8 -22
- mindspore/mint/optim/adam.py +19 -18
- mindspore/mint/optim/adamw.py +14 -8
- mindspore/mint/optim/sgd.py +5 -5
- mindspore/nn/cell.py +328 -502
- mindspore/nn/grad/cell_grad.py +11 -12
- mindspore/nn/layer/activation.py +32 -34
- mindspore/nn/layer/basic.py +67 -64
- mindspore/nn/layer/channel_shuffle.py +4 -4
- mindspore/nn/layer/combined.py +4 -2
- mindspore/nn/layer/conv.py +117 -110
- mindspore/nn/layer/dense.py +9 -7
- mindspore/nn/layer/embedding.py +50 -52
- mindspore/nn/layer/image.py +37 -39
- mindspore/nn/layer/math.py +111 -112
- mindspore/nn/layer/normalization.py +56 -44
- mindspore/nn/layer/pooling.py +58 -63
- mindspore/nn/layer/rnn_cells.py +33 -33
- mindspore/nn/layer/rnns.py +56 -56
- mindspore/nn/layer/thor_layer.py +74 -73
- mindspore/nn/layer/transformer.py +11 -1
- mindspore/nn/learning_rate_schedule.py +20 -20
- mindspore/nn/loss/loss.py +79 -81
- mindspore/nn/optim/adam.py +3 -3
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +2 -0
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -1
- mindspore/nn/probability/distribution/poisson.py +2 -1
- mindspore/nn/sparse/sparse.py +3 -3
- mindspore/nn/wrap/cell_wrapper.py +34 -37
- mindspore/nn/wrap/grad_reducer.py +37 -37
- mindspore/nn/wrap/loss_scale.py +72 -74
- mindspore/numpy/array_creations.py +5 -5
- mindspore/numpy/fft.py +1 -1
- mindspore/numpy/math_ops.py +5 -5
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
- mindspore/ops/_vmap/vmap_array_ops.py +31 -13
- mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +42 -11
- mindspore/ops/auto_generate/gen_extend_func.py +23 -141
- mindspore/ops/auto_generate/gen_ops_def.py +727 -321
- mindspore/ops/auto_generate/gen_ops_prim.py +1721 -984
- mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
- mindspore/ops/composite/__init__.py +10 -0
- mindspore/ops/composite/base.py +8 -4
- mindspore/ops/composite/multitype_ops/__init__.py +12 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +133 -109
- mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
- mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
- mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
- mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
- mindspore/ops/function/__init__.py +3 -1
- mindspore/ops/function/_add_attr_func.py +11 -6
- mindspore/ops/function/array_func.py +9 -96
- mindspore/ops/function/debug_func.py +4 -3
- mindspore/ops/function/grad/grad_func.py +1 -1
- mindspore/ops/function/math_func.py +33 -540
- mindspore/ops/function/nn_func.py +28 -74
- mindspore/ops/function/other_func.py +4 -1
- mindspore/ops/function/random_func.py +44 -5
- mindspore/ops/function/vmap_func.py +2 -1
- mindspore/ops/functional.py +2 -3
- mindspore/ops/functional_overload.py +571 -6
- mindspore/ops/op_info_register.py +21 -0
- mindspore/ops/operations/__init__.py +16 -11
- mindspore/ops/operations/_custom_ops_utils.py +689 -34
- mindspore/ops/operations/_inner_ops.py +3 -6
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +2 -2
- mindspore/ops/operations/comm_ops.py +185 -26
- mindspore/ops/operations/custom_ops.py +294 -174
- mindspore/ops/operations/debug_ops.py +59 -4
- mindspore/ops/operations/image_ops.py +13 -13
- mindspore/ops/operations/manually_defined/ops_def.py +15 -16
- mindspore/ops/operations/math_ops.py +3 -4
- mindspore/ops/operations/nn_ops.py +7 -39
- mindspore/ops/primitive.py +6 -10
- mindspore/ops/tensor_method.py +47 -8
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
- mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
- mindspore/ops_generate/api/functions_cc_generator.py +58 -10
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
- mindspore/ops_generate/common/base_generator.py +14 -0
- mindspore/ops_generate/common/gen_constants.py +8 -3
- mindspore/ops_generate/common/gen_utils.py +0 -19
- mindspore/ops_generate/common/op_proto.py +11 -4
- mindspore/ops_generate/common/template.py +88 -11
- mindspore/ops_generate/gen_ops.py +1 -1
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -0
- mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
- mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
- mindspore/parallel/_auto_parallel_context.py +11 -8
- mindspore/parallel/_cell_wrapper.py +113 -45
- mindspore/parallel/_parallel_serialization.py +1 -1
- mindspore/parallel/_ps_context.py +4 -6
- mindspore/parallel/_tensor.py +167 -12
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/transformer.py +13 -8
- mindspore/parallel/auto_parallel.py +14 -7
- mindspore/parallel/checkpoint_convert.py +3 -3
- mindspore/parallel/checkpoint_transform.py +11 -7
- mindspore/parallel/cluster/process_entity/_api.py +84 -48
- mindspore/parallel/cluster/process_entity/_utils.py +95 -7
- mindspore/parallel/cluster/run.py +43 -4
- mindspore/parallel/function/__init__.py +8 -1
- mindspore/parallel/function/reshard_func.py +6 -7
- mindspore/parallel/nn/__init__.py +15 -2
- mindspore/parallel/nn/parallel_cell_wrapper.py +9 -10
- mindspore/parallel/nn/parallel_grad_reducer.py +7 -6
- mindspore/parallel/shard.py +3 -4
- mindspore/parallel/transform_safetensors.py +463 -174
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +12 -6
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
- mindspore/profiler/analysis/task_manager.py +1 -1
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +42 -22
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
- mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
- mindspore/profiler/common/constant.py +16 -0
- mindspore/profiler/common/profiler_context.py +25 -27
- mindspore/profiler/common/profiler_info.py +0 -16
- mindspore/profiler/common/profiler_op_analyse.py +235 -0
- mindspore/profiler/common/profiler_output_path.py +23 -8
- mindspore/profiler/common/profiler_parameters.py +128 -35
- mindspore/profiler/dynamic_profile/__init__.py +0 -0
- mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
- mindspore/profiler/dynamic_profiler.py +305 -314
- mindspore/profiler/envprofiler.py +12 -7
- mindspore/profiler/experimental_config.py +96 -6
- mindspore/profiler/mstx.py +33 -12
- mindspore/profiler/platform/__init__.py +2 -3
- mindspore/profiler/platform/npu_profiler.py +29 -19
- mindspore/profiler/profiler.py +35 -19
- mindspore/profiler/profiler_action_controller.py +64 -76
- mindspore/profiler/schedule.py +10 -4
- mindspore/rewrite/common/config.py +1 -0
- mindspore/rewrite/common/namer.py +1 -0
- mindspore/rewrite/common/namespace.py +1 -0
- mindspore/rewrite/node/node.py +31 -11
- mindspore/rewrite/parsers/assign_parser.py +1 -1
- mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +7 -10
- mindspore/runtime/__init__.py +5 -5
- mindspore/runtime/event.py +10 -4
- mindspore/runtime/executor.py +60 -45
- mindspore/runtime/memory.py +30 -32
- mindspore/runtime/thread_bind_core.py +298 -164
- mindspore/safeguard/rewrite_obfuscation.py +12 -13
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/_utils.py +14 -4
- mindspore/train/amp.py +43 -20
- mindspore/train/callback/__init__.py +5 -5
- mindspore/train/callback/_checkpoint.py +3 -6
- mindspore/train/callback/_flops_collector.py +1 -1
- mindspore/train/callback/_landscape.py +0 -1
- mindspore/train/callback/_train_fault_tolerance.py +97 -16
- mindspore/train/data_sink.py +11 -2
- mindspore/train/dataset_helper.py +9 -0
- mindspore/train/model.py +135 -55
- mindspore/train/serialization.py +133 -111
- mindspore/train/summary/summary_record.py +13 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +0 -6
- mindspore/utils/runtime_execution_order_check.py +163 -77
- mindspore/utils/sdc_detect.py +68 -0
- mindspore/utils/utils.py +6 -9
- mindspore/version.py +1 -1
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +5 -4
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +333 -371
- mindspore/_deprecated/jit.py +0 -198
- mindspore/experimental/es/__init__.py +0 -22
- mindspore/experimental/es/embedding_service.py +0 -891
- mindspore/experimental/es/embedding_service_layer.py +0 -581
- mindspore/profiler/parser/__init__.py +0 -14
- mindspore/profiler/parser/aicpu_data_parser.py +0 -272
- mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
- mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
- mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
- mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
- mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
- mindspore/profiler/parser/ascend_flops_generator.py +0 -116
- mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
- mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
- mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
- mindspore/profiler/parser/ascend_op_generator.py +0 -334
- mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
- mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
- mindspore/profiler/parser/base_timeline_generator.py +0 -483
- mindspore/profiler/parser/container.py +0 -229
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
- mindspore/profiler/parser/flops_parser.py +0 -531
- mindspore/profiler/parser/framework_enum.py +0 -111
- mindspore/profiler/parser/framework_parser.py +0 -464
- mindspore/profiler/parser/framework_struct.py +0 -61
- mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
- mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
- mindspore/profiler/parser/hccl_parser.py +0 -573
- mindspore/profiler/parser/hwts_log_parser.py +0 -122
- mindspore/profiler/parser/integrator.py +0 -526
- mindspore/profiler/parser/memory_usage_parser.py +0 -277
- mindspore/profiler/parser/minddata_analyzer.py +0 -800
- mindspore/profiler/parser/minddata_parser.py +0 -186
- mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
- mindspore/profiler/parser/op_intermediate_parser.py +0 -149
- mindspore/profiler/parser/optime_parser.py +0 -250
- mindspore/profiler/parser/profiler_info.py +0 -213
- mindspore/profiler/parser/step_trace_parser.py +0 -666
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0

mindspore/ops/function/math_func.py +33 -540

@@ -56,12 +56,11 @@ from mindspore.ops.auto_generate import (minimum, maximum, mul, muls, sin, sinc,
 sum_ext_op, prod_ext_op, all, matrix_inverse_ext, atan2_ext, sign, acos_ext,
 acosh_ext, asin_ext, asinh_ext, atan_ext, tan, median_ext_op, median_dim_op,
 xlogy_op, xlogy_scalar_other_op, xlogy_scalar_self_op, trunc, histc_ext, roll,
-bincount_ext, rotated_iou_op, cat, narrow, var_op, pow,
+bincount_ext, rotated_iou_op, cat, narrow, var_op, pow, inplace_erfinv_op,
 frac_ext, pow_tensor_scalar_op, not_equal_op, isinf, addmv_op, cdist,
-addbmm_op, addmm_op,
-inplace_erfinv_op)
+addbmm_op, addmm_op, pow_scalar_tensor_op)
 # 2
-
+from mindspore.ops.functional_overload import gmm
 # 3
 
 # 4

@@ -3219,7 +3218,7 @@ def approximate_equal(x, y, tolerance=1e-5):
         >>> import mindspore
         >>> mindspore.ops.approximate_equal(mindspore.tensor([1e6, 2e6, float("inf"), float("-inf"), float("nan")]),
         ...                                 mindspore.tensor([1e6, 2e7, float("inf"), float("-inf"), float("nan")]))
-        Tensor(shape=[
+        Tensor(shape=[5], dtype=Bool, value= [ True, False, False, False, False])
         >>>
         >>> mindspore.ops.approximate_equal(mindspore.tensor([1e6, 2e6, 3e6]),
         ...                                 mindspore.tensor([1.00001e6, 2.00002e6, 3.00009e6]), tolerance=1e3)
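
The corrected doctest output follows directly from the elementwise rule |x - y| < tolerance: inf - inf and any arithmetic on nan produce nan, and nan compares False against the tolerance. A minimal NumPy sketch (illustrative, not the MindSpore implementation) reproducing the fixed line:

    import numpy as np

    # |x - y| < tolerance elementwise; the inf and nan positions yield nan,
    # and nan < tolerance is False, matching the corrected doctest output.
    x = np.array([1e6, 2e6, np.inf, -np.inf, np.nan])
    y = np.array([1e6, 2e7, np.inf, -np.inf, np.nan])
    print(np.abs(x - y) < 1e-5)  # [ True False False False False]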

@@ -4860,7 +4859,7 @@ def addmv(input, mat, vec, *, beta=1, alpha=1):
 
 
 def addmv_ext(input, mat, vec, *, beta=1, alpha=1):
-    """
+    r"""
     Performs a matrix-vector product of `mat` and `vec`, and add the input vector `input` to the final result.
 
     If `mat` is a tensor of size :math:`(N, M)` , `vec` is a 1-D tensor of size :math:`M` , then `input` must be

@@ -5622,7 +5621,7 @@ def dstack(tensors):
     1-D or 2-D tensors must have the same shape.
 
     Args:
-        tensors (Union(List[Tensor],
+        tensors (Union(List[Tensor], tuple[Tensor])): The list of tensors or tuple of tensors.
 
     Returns:
         Tensor

@@ -5783,7 +5782,7 @@ def _diff_helper(input, n, dim):
     is_bool = (input.dtype == mstype.bool_)
     result = input
 
-    for
+    for _ in range(n):  # pylint: disable=unused-variable
         if is_bool:
             result = logical_xor(narrow(result, dim, 1, out_len), narrow(result, dim, 0, out_len))
         else:
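
For boolean tensors the helper substitutes logical_xor for subtraction, so each pass marks where adjacent elements along `dim` differ. A one-pass NumPy sketch of the same idea (illustrative only):

    import numpy as np

    a = np.array([True, True, False, False, True])
    # Mirrors logical_xor(narrow(result, dim, 1, out_len),
    #                     narrow(result, dim, 0, out_len)) for one pass.
    print(np.logical_xor(a[1:], a[:-1]))  # [False  True False  True]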

@@ -8441,7 +8440,7 @@ def matmul(input, other):
         >>> input = mindspore.ops.arange(24, dtype=mindspore.float32).reshape(2, 3, 4)
         >>> other = mindspore.ops.arange(20, dtype=mindspore.float32).reshape(4, 5)
         >>> output = mindspore.ops.matmul(input, other)
-        >>> print(
+        >>> print(output)
         [[[ 70, 76, 82, 88, 94],
          [ 190, 212, 234, 256, 278],
          [ 310, 348, 386, 424, 462]],

@@ -9631,12 +9630,10 @@ def _einsum_convert_num_to_char(num):
     """For einsum, convert number into char."""
     if [num] == [Ellipsis]:
         return '...'
-
-
-
-
-    if num >= 26 and num < 52:
-        return chr(num - 26 + ord('a'))
+    if 0 <= num < 26:
+        return chr(num + 65)
+    if 26 <= num < 52:
+        return chr(num + 71)
     raise ValueError(f"For Einsum, the number in sublist should be in range [0, 52), but got {num}")
 
 
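
The rewritten branches keep the old mapping but fold the ord() arithmetic into constants: 0-25 map to 'A'-'Z' via chr(num + 65) (since ord('A') == 65), and 26-51 map to 'a'-'z' via chr(num + 71) (since ord('a') - 26 == 71). A quick sanity check:

    # Both endpoints of each range in _einsum_convert_num_to_char.
    assert chr(0 + 65) == 'A' and chr(25 + 65) == 'Z'    # 0..25  -> 'A'..'Z'
    assert chr(26 + 71) == 'a' and chr(51 + 71) == 'z'   # 26..51 -> 'a'..'z'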

@@ -9738,33 +9735,19 @@ def einsum(equation, *operands):
     return _get_cache_prim(P.Einsum)(equation)(operands)
 
 
-def _einsum_convert_sublist_to_label(num, ell_num=False):
-    """Convert sublist to label."""
-    if num == Ellipsis or ell_num and num == 52:
-        return '...'
-    if 0 <= num < 26:
-        return chr(num + ord('A'))
-    if 26 <= num < 52:
-        return chr(num + ord('a') - 26)
-    raise ValueError(
-        f'For einsum, the number in sublist must be in range [0, 52), but got {num}')
-
-
-def _einsum_convert_label_to_index(label):
-    """Convert label to index."""
-    label_num = ord(label)
-    if ord('A') <= label_num <= ord('Z'):
-        return label_num - ord('A')
-    if ord('a') <= label_num <= ord('z'):
-        return label_num - ord('a') + 26
-    if label_num == ord('.'):
-        return 52
-    raise ValueError(
-        f'For einsum, the label in equation must be in [a-zA-Z] or ., but got {label}')
-
-
-def _einsum_convert_sublist(equation, *operands):
+def _einsum_convert_sublist(equation, operands):
     """Convert the sublist to an equation operand if the received input is a sublist format."""
+    def _einsum_convert_sublist_to_label(num, ell_num=False):
+        """Convert sublist to label."""
+        if num == Ellipsis or ell_num and num == 52:
+            return '...'
+        if 0 <= num < 26:
+            return chr(num + ord('A'))
+        if 26 <= num < 52:
+            return chr(num + ord('a') - 26)
+        raise ValueError(
+            f'For einsum, the number in sublist must be in range [0, 52), but got {num}')
+
     if isinstance(equation, Tensor):
         equation_tmp = ''
         for i, lst in enumerate(operands):
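
`_einsum_convert_sublist` handles the sublist calling convention, in which each operand is followed by a list of integer axis labels instead of an equation string; the helper rewrites that form into an ordinary equation before dispatch. The convention itself mirrors NumPy's, as this hedged NumPy illustration shows:

    import numpy as np

    a = np.arange(6).reshape(2, 3)
    b = np.arange(12).reshape(3, 4)
    # Sublist form: operand, its axis labels, ..., output axes.
    # Equivalent to the equation form 'ij,jk->ik'.
    assert np.array_equal(np.einsum(a, [0, 1], b, [1, 2], [0, 2]),
                          np.einsum('ij,jk->ik', a, b))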

@@ -9789,331 +9772,6 @@ def _einsum_convert_sublist(equation, *operands):
     return equation, operands
 
 
-def _einsum_check_inputargs(equation, operands):
-    """Check equation and operands."""
-    if not isinstance(equation, str):
-        raise TypeError(
-            f"For einsum, 'equation' must be a str, but got {type(equation)}.")
-    for operand in operands:
-        if not isinstance(operand, Tensor):
-            raise TypeError(
-                f"For einsum, members of 'operands' must be Tensor, but got {type(operand)}.")
-
-
-@constexpr
-def _einsum_parse_equation(equation):
-    """Parse equation."""
-    l_equation = ''
-    r_equation = ''
-    equation = equation.replace(' ', '')
-
-    if '->' in equation:
-        l_equation, r_equation = equation.split('->', 1)
-        if l_equation == '':
-            raise ValueError(
-                'For einsum, equation must contain characters to the left fo the arrow.')
-    else:
-        l_equation = equation
-
-    if ',' in l_equation:
-        l_equationlst = l_equation.split(",")
-    else:
-        l_equationlst = [l_equation]
-
-    l_equationlst = []
-
-    for subequation in l_equation.split(','):
-        if '.' in subequation and ('...' not in subequation or subequation.count('.') != 3):
-            raise ValueError(f"For einsum, an ellipsis in the equation must include three continuous \'.\', "
-                             f"and can only be found once.")
-        subequation_lst = [_einsum_convert_label_to_index(label) for label in subequation.replace('...', '.')]
-        l_equationlst.append(subequation_lst)
-
-    if "." in r_equation and ('...' not in r_equation or r_equation.count('.') != 3):
-        raise ValueError(f"For einsum, an ellipsis in the equation must include three continuous \'.\', "
-                         f"and can only be found once.")
-    r_equationlst = [_einsum_convert_label_to_index(label) for label in r_equation.replace('...', '.')]
-
-    return l_equationlst, r_equationlst, ('->' in equation)
-
-
-def _einsum_parse_labels(l_equationlst, operands):
-    """Parse left script of equation."""
-    align_rank = 0
-    max_labels = 53
-    ellipsis_dimnum = 0
-    labels_count = [0] * max_labels
-
-    if len(operands) != len(l_equationlst):
-        raise ValueError(f"For einsum, 'operands' is not equal to specified in the 'equation', "
-                         f"but got {len(operands)} and {len(l_equationlst)}.")
-
-    for idx, sub_equ in enumerate(l_equationlst):
-        start_dim = 0
-        label_num = 0
-        operand_shape = list(operands[idx].shape)
-        for label in sub_equ:
-            dim_num = 1
-            label_num += 1
-            end_dim = start_dim + 1
-
-            # Label is ellipsis
-            if label == 52:
-                end_dim = len(operand_shape) - len(sub_equ) + label_num
-                dim_num = end_dim - start_dim
-                if ellipsis_dimnum != 0 and ellipsis_dimnum != dim_num:
-                    raise ValueError(f"For einsum, an ellipsis in 'equation' can only represent the same numbers of "
-                                     f"dimensions in 'operands'.")
-                ellipsis_dimnum = dim_num
-            if labels_count[label] == 0:
-                align_rank += dim_num
-            labels_count[label] += 1
-            start_dim += dim_num
-        if label_num != len(sub_equ) or start_dim != len(operand_shape):
-            raise ValueError(f"For einsum, the numbers of labels specified in the 'equation' does not match "
-                             f"'operands[{idx}]'.")
-    return ellipsis_dimnum, labels_count, align_rank
-
-
-def _einsum_infer_output(r_equationlst, arrow_exist, ellipsis_dimnum, labels_count):
-    """Parse right script of equation and infer output shape."""
-    idx = 0
-    idle_idx = -1
-    output_rank = 0
-    labels_perm_idx = [idle_idx] * 53
-
-    if arrow_exist:
-        for label in r_equationlst:
-            if labels_count[label] != 0:
-                if labels_perm_idx[label] != idle_idx:
-                    raise ValueError(f"For einsum, '{_einsum_convert_sublist_to_label(label, True)}' or {label} in "
-                                     f"sublist format has appears more than once in output subscript.")
-                dimnum = 1
-                if label == 52:
-                    dimnum = ellipsis_dimnum
-                labels_perm_idx[label] = idx
-                output_rank += dimnum
-                idx += dimnum
-            else:
-                raise ValueError(f"For einsum, the label to the right of arrow in the 'equation' must appear on "
-                                 f"left, but '{_einsum_convert_sublist_to_label(label, True)}' does not.")
-    else:
-        if labels_count[52] != 0:
-            output_rank += ellipsis_dimnum
-            labels_perm_idx[52] = idx
-            idx += ellipsis_dimnum
-        for label, count in enumerate(labels_count):
-            if count == 1:
-                output_rank += 1
-                labels_perm_idx[label] = idx
-                idx += 1
-
-    for label, count in enumerate(labels_count):
-        if count != 0 and labels_perm_idx[label] == idle_idx:
-            labels_perm_idx[label] = idx
-            idx += 1
-
-    return output_rank, labels_perm_idx
-
-
-def _einsum_adjust_operands(operands, l_equationlst, ellipsis_dimnum, labels_perm_idx, align_rank):
-    """Align operands to output as possible."""
-    # Unsqueeze miss dimensions to make all operands has same rank, compute diagonal if operand has same label.
-    # Then use _labels_perm_idx to transpose all operands to align dimensions with output.
-    adjust_operands = []
-    for idx, operand in enumerate(operands):
-        idle_dim = -1
-        align_axis = [idle_dim] * align_rank
-        label_dims = [idle_dim] * 53
-        dim = 0
-
-        for label in l_equationlst[idx]:
-            if label_dims[label] != idle_dim:
-                operand = ops.diagonal(operand, 0, label_dims[label], dim)
-                diag_perm = []
-                diag_dim = 0
-                for i in range(len(operand.shape)):
-                    if i == label_dims[label]:
-                        diag_perm.append(len(operand.shape) - 1)
-                    else:
-                        diag_perm.append(diag_dim)
-                        diag_dim += 1
-                operand = permute(operand, tuple(diag_perm))
-            else:
-                label_dims[label] = dim
-                if label == 52:
-                    for ell_idx in range(ellipsis_dimnum):
-                        align_axis[labels_perm_idx[label] + ell_idx] = dim
-                        dim += 1
-                else:
-                    align_axis[labels_perm_idx[label]] = dim
-                    dim += 1
-        if len(operand.shape) < align_rank:
-            for i, axis in enumerate(align_axis):
-                if axis == idle_dim:
-                    align_axis[i] = dim
-                    dim += 1
-            missing_dims = [1] * (align_rank - len(operand.shape))
-            operand_shape = list(operand.shape) + missing_dims
-            operand = ops.reshape(operand, operand_shape)
-        operand = permute(operand, tuple(align_axis))
-        adjust_operands.append(operand)
-    return adjust_operands
-
-
-def _einsum_find_dimlastop(align_rank, operands, adjust_operands):
-    """Find dim last operand."""
-    dim_last_op = [0] * align_rank
-    has_zero_dim = False
-    for dim in range(align_rank):
-        broadcast_dim = adjust_operands[0].shape[dim]
-        for idx in range(1, len(adjust_operands)):
-            other_dim = adjust_operands[idx].shape[dim]
-            if broadcast_dim != other_dim and broadcast_dim != 1 and other_dim != 1:
-                err_msg = "For einsum, operands do not broadcast after align to output [shapes :origin -> adjust]:"
-                for i in range(len(operands)):
-                    err_msg += f" {operands[i].shape} -> {adjust_operands[i].shape}"
-                raise ValueError(err_msg)
-            if other_dim != 1:
-                dim_last_op[dim] = idx
-                broadcast_dim = other_dim
-        has_zero_dim = has_zero_dim or broadcast_dim == 0
-    return dim_last_op, has_zero_dim
-
-
-def _einsum_multiplication(sum_dims, l_tensor, r_tensor):
-    """Compute bmm for einsum."""
-    batch_dims = []
-    lonly_dims = []
-    ronly_dims = []
-    batch_size = 1
-    lonly_size = 1
-    ronly_size = 1
-    sum_size = 1
-
-    l_shape = l_tensor.shape
-    r_shape = r_tensor.shape
-
-    # Compute sum if dim is in sum_dims and get shapes for bmm
-    for i in range(len(l_shape)):
-        sum_l = l_shape[i] > 1
-        sum_r = r_shape[i] > 1
-        if i in sum_dims:
-            if sum_l and sum_r:
-                sum_size *= l_shape[i]
-            elif sum_l:
-                l_tensor = ops.auto_generate.sum_ext(l_tensor, i, True)
-            elif sum_r:
-                r_tensor = ops.auto_generate.sum_ext(r_tensor, i, True)
-        elif sum_l and sum_r:
-            batch_dims.append(i)
-            batch_size *= l_shape[i]
-        elif sum_l:
-            lonly_dims.append(i)
-            lonly_size *= l_shape[i]
-        else:
-            ronly_dims.append(i)
-            ronly_size *= r_shape[i]
-
-    # Compute the einsum bmm operators pipeline.
-    # The whole operators pipeline is transpose(in) -> reshape(in) -> bmm(in) -> reshape(out) -> transpose(out).
-    l_reshape_shape = (batch_size, lonly_size, sum_size)
-    r_reshape_shape = (batch_size, sum_size, ronly_size)
-
-    out_reshape_shape = [l_shape[dim] for dim in batch_dims]
-    out_reshape_shape += [l_shape[dim] for dim in lonly_dims]
-    out_reshape_shape += [1 for _ in sum_dims]
-    out_reshape_shape += [r_shape[dim] for dim in ronly_dims]
-
-    l_perm_axis = batch_dims + lonly_dims + sum_dims + ronly_dims
-    r_perm_axis = batch_dims + sum_dims + ronly_dims + lonly_dims
-    out_perm_axis = [-1] * len(out_reshape_shape)
-
-    out_dim = 0
-    for idx in range(len(l_perm_axis)):
-        out_perm_axis[l_perm_axis[idx]] = out_dim
-        out_dim += 1
-
-    l_tensor = permute(l_tensor, tuple(l_perm_axis))
-    l_tensor = ops.reshape(l_tensor, l_reshape_shape)
-
-    r_tensor = permute(r_tensor, tuple(r_perm_axis))
-    r_tensor = ops.reshape(r_tensor, r_reshape_shape)
-
-    output = bmm_ext(l_tensor, r_tensor)
-    output = ops.reshape(output, out_reshape_shape)
-    output = permute(output, tuple(out_perm_axis))
-
-    output_origin_shape = output.shape
-    output_squeeze_shape = []
-    for dim in range(len(output_origin_shape)):
-        if dim not in sum_dims:
-            output_squeeze_shape.append(output_origin_shape[dim])
-
-    return ops.reshape(output, output_squeeze_shape)
-
-
-def _einsum(equation, operands):
-    '''Einsum main process'''
-    _l_equationlst, _r_equationlst, _arrow_exist = _einsum_parse_equation(
-        equation)
-    _ellipsis_dimnum, _labels_count, _align_rank = _einsum_parse_labels(
-        _l_equationlst, operands)
-    _output_rank, _labels_perm_idx = _einsum_infer_output(
-        _r_equationlst, _arrow_exist, _ellipsis_dimnum, _labels_count)
-    _adjust_operands = _einsum_adjust_operands(operands, _l_equationlst, _ellipsis_dimnum, _labels_perm_idx,
-                                               _align_rank)
-    _dim_last_op, _has_zero_dim = _einsum_find_dimlastop(
-        _align_rank, operands, _adjust_operands)
-    _result = _adjust_operands[0]
-
-    # Fast path if operands has zero dim.
-    if _has_zero_dim:
-        output_shape = []
-        for dim in range(_output_rank):
-            output_shape.append(_adjust_operands[_dim_last_op[dim]].shape[dim])
-        return ops.auto_generate.zeros(output_shape, dtype=_result.dtype)
-
-    # Sum or squeeze dimensions that is 1 for all rest operands.
-    _reduce_dim = _output_rank
-    for dim in range(_output_rank, _align_rank):
-        if _dim_last_op[dim] == 0:
-            if _result.shape[_reduce_dim] == 1:
-                _result = ops.auto_generate.pyboost_inner_prim.squeeze_impl(_result, _reduce_dim)
-            else:
-                _result = ops.auto_generate.sum_ext(_result, _reduce_dim)
-        else:
-            _reduce_dim += 1
-
-    # Compute multiplication if operands are more than two.
-    for i in range(1, len(_adjust_operands)):
-        operand = _adjust_operands[i]
-        dim = _output_rank
-        sum_dims = []
-        for j in range(_output_rank, _align_rank):
-            if _dim_last_op[j] < i:
-                operand = ops.auto_generate.pyboost_inner_prim.squeeze_impl(operand, dim)
-            elif _dim_last_op[j] == i:
-                if _result.shape[dim] == 1:
-                    operand = ops.auto_generate.sum_ext(operand, dim)
-                    _result = ops.auto_generate.pyboost_inner_prim.squeeze_impl(_result, dim)
-                else:
-                    sum_dims.append(dim)
-                    dim += 1
-            else:
-                dim += 1
-
-        if sum_dims == []:
-            _result = mul_ext(_result, operand)
-        elif len(sum_dims) == len(_result.shape):
-            _result = ops.auto_generate.dot(ops.auto_generate.flatten_ext(_result),
-                                            ops.auto_generate.flatten_ext(operand))
-        else:
-            _result = _einsum_multiplication(sum_dims, _result, operand)
-
-    return _result
-
-
 def einsum_ext(equation, *operands):
     r"""
     According to the Einstein summation Convention (Einsum),
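
The deleted Python frontend lowered every pairwise contraction to a single batched matmul through the transpose(in) -> reshape(in) -> bmm(in) -> reshape(out) -> transpose(out) pipeline described in its own comments; after this change that lowering happens behind ops.functional_overload.einsum instead. A minimal NumPy sketch of the core reduction (illustrative only):

    import numpy as np

    # With axes grouped as (batch, left-only, summed) and (batch, summed,
    # right-only), the contraction is exactly one batched matmul.
    l = np.random.rand(2, 3, 4)
    r = np.random.rand(2, 4, 5)
    assert np.allclose(np.einsum('bms,bsn->bmn', l, r), np.matmul(l, r))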

@@ -10208,14 +9866,9 @@ def einsum_ext(equation, *operands):
         [3. 6.]
         [4. 8.]]
     """
-    _equation, _operands = _einsum_convert_sublist(equation,
-    _einsum_check_inputargs(_equation, _operands)
-
-    for operand in _operands:
-        if ops.is_sequence_shape_unknown(operand.shape) or ops.is_sequence_value_unknown(operand.shape):
-            raise ValueError(f"For einsum, the element of 'operands' can't be dynamic shape or dynamic rank.")
+    _equation, _operands = _einsum_convert_sublist(equation, operands)
 
-    return
+    return ops.functional_overload.einsum(_equation, _operands)
 
 
 def cumprod(input, dim, dtype=None):
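
After this change einsum_ext is a thin wrapper: it normalizes sublist inputs through _einsum_convert_sublist and delegates to the overloaded backend, dropping the Python-side type and dynamic-shape checks along with the old frontend. A hedged usage sketch of the public entry point (assuming a working MindSpore install):

    import numpy as np
    import mindspore

    a = mindspore.tensor(np.arange(6, dtype=np.float32).reshape(2, 3))
    b = mindspore.tensor(np.arange(12, dtype=np.float32).reshape(3, 4))
    out = mindspore.ops.einsum('ij,jk->ik', a, b)
    print(out.shape)  # (2, 4)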

@@ -12466,184 +12119,24 @@ def mul_ext(input, other):
     return mul(input, other)
 
 
-def
-    inputs_t = []
-    for input_i in inputs:
-        input_i_t = transpose_ext(input_i, -1, -2)
-        inputs_t.append(input_i_t)
-    return inputs_t
-
-
-def _is_transposed(input_tensor):
-    dim = input_tensor.dim()
-    if dim < 2 or dim > 3:
-        raise ValueError("input tensor of _is_transposed should be either 2- or 3-dimensional.")
-    input_shape = input_tensor.shape
-    input_strides = input_tensor.stride()
-    if input_strides[-2] == 1 and input_strides[-1] == input_shape[-2]:
-        return True
-    return False
-
-
-def gmm(x, weight, *, bias=None, group_list=None, group_type=0):
+def gmm_backward(grad, x, weight, group_list=None, group_list_type=0):
     r"""
-
-
-    .. warning::
-        - This is an experimental API that is subject to change or deletion.
-        - `group_type` must be constant.
-
-    .. note::
-        - When `group_type` is 2, `weight` must be a non-continuous tensor after transpose.
-        - Only when `group_type` is 0 and `bias` is None, the reverse derivative is supported,
-          which is implemented by the function gmm_backward.
-
-    Args:
-        x (tuple[Tensor]): The first tensors to be multiplied.
-        weight (tuple[Tensor]): The second tensors to be multiplied.
-
-    Keyword Args:
-        bias (tuple[Tensor], optional): Biases added to outputs. In the training scenario,
-            the bias only supoorts None. Default: ``None`` .
-
-        group_list (Union[list[int], tuple(int)], optional): Represents the index of
-            the different groups on the grouping axis. It must be a non-negative ascending
-            sequence . Default: ``None`` .
-
-            If `group_type` is 0, the last element in `group_list` should be equal to the
-            first dimension of the tensor in `x` .
-
-            If `group_type` is 2, the last element in `group_list` should be equal to the
-            second dimension of the tensor in `x` .
-
-        group_type (int, optional): Represents the dim that need to be grouped. Default: ``0`` .
-            For example, :math: `C[m,n] = A[m,k] \times B[k,n]`.
-
-            If `group_type` is 0, it means that the m-axis is grouped, where tensors in `x`
-            should be 2-D, tensors in `weight` should be 3-D, and the tensors of result would
-            be 2-D.
-
-            If `group_type` is 2, it means that the k-axis is grouped, where each tensor in `x`
-            and `weight` should be 2-D, and the tensors of result would be 3-D.
-
-    Returns:
-        tuple[Tensor], the results of grouping matrix multiplication.
-
-    Raises:
-        TypeError: If `group_type` is not a int.
-        ValueError: If `group_type` is invalid.
-        ValueError: If the length of `x` or `weight` is not 1.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.random.uniform(0,1, (10, 20)).astype(np.float32))
-        >>> weight = Tensor(np.random.uniform(0,1, (4, 20, 8)).astype(np.float32))
-        >>> group_list = [2, 6, 8, 10]
-        >>> y = ops.function.math_func.gmm([x,], [weight,], group_list=group_list)
-        >>> print(y[0].shape)
-        >>> [10, 8]
+    the grad of ops.function.math_func.gmm
     """
-
-        split_item=3, group_type=group_type)
-
-
-def gmm_backward(grad, x, weight, *, group_list=None):
-    r"""
-    the grad of gmm
-    """
-    gradients = ops.auto_generate.gmm_backward(grad, x, weight, group_list)
+    gradients = ops.functional_overload.gmm_backward(grad, x, weight, group_list, group_list_type)
     dx = gradients[:len(x)]
     dw = gradients[-len(weight):]
     db = []
     return dx, dw, db
 
 
-def gmm_v2(x, weight, *, bias=None, group_list=None, group_type=0, group_list_type=0):
-    r"""
-    Grouping matrix multiplication.
-
-    .. warning::
-        - This is an experimental API that is subject to change or deletion.
-        - `group_type` must be constant.
-
-    .. note::
-        - When `group_type` is 2, the tensors in `weight` must be non-continuous tensors after
-          transpose.
-        - Only when `group_type` is 0 and `bias` is None, the reverse derivative is supported,
-          which is implemented by the function gmm_v2_backward.
-
-    Args:
-        x (tuple[Tensor]): The first tensors to be multiplied.
-        weight (tuple[Tensor]): The second tensors to be multiplied.
-
-    Keyword Args:
-        bias (tuple[Tensor], optional): Biases added to outputs. In the training scenario,
-            the bias only supoorts None. Default: ``None`` .
-
-        group_list (Tensor, optional): Represents the index of the different groups on
-            the grouping axis. Supported dtypes: int64. Default: ``None`` .
-
-            If `group_list_type` is 0, it must be a non-negative ascending sequence.
-            And when `group_type` is 0, the last element in `group_list` should be equal to
-            the first dimension of the tensor in `x` . When `group_type` is 2, the last element
-            in `group_list` should be equal to the second dimension of the tensor in `x` .
-
-            If `group_list_type` is 1, the value in `group_list` are the size of each group.
-
-        group_type (int, optional): Represents the axes that need to be grouped. For example,
-            :math: `C[m,n] = A[m,k] \times B[k,n]`. Default: ``0`` .
-
-            If `group_type` is 0, it means that the m-axis is grouped, where tensors in `x`
-            should be 2-D, tensors in `weight` should be 3-D, and the tensors of result would be
-            2-D.
-
-            If `group_type` is 2, it means that the k-axis is grouped, where each tensor in `x`
-            and `weight` should be 2-D, and the tensors of result would be 3-D.
-
-        group_list_type (int, optional): If it's 0, the value in `group_list` are the cumsum
-            result of the size of each group. If it's 1, the value in `group_list` are the size
-            of each group.
-
-    Returns:
-        tuple[Tensor], the results of grouping matrix multiplication.
-
-    Raises:
-        TypeError: If `group_type` is not a int.
-        ValueError: If `group_type` is invalid.
-        ValueError: If the length of `x` or `weight` is not 1.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.random.uniform(0,1, (10, 20)).astype(np.float32))
-        >>> weight = Tensor(np.random.uniform(0,1, (4, 20, 8)).astype(np.float32))
-        >>> group_list = Tensor([2, 4, 2, 2])
-        >>> y = ops.function.math_func.gmm_v2([x,], [weight,], group_list=group_list, group_list_type=1)
-        >>> print(y[0].shape)
-        >>> [10, 8]
-    """
-    return grouped_matmul_v4(x, weight, bias=bias, group_list=group_list, split_item=3,
-                             group_type=group_type, group_list_type=group_list_type, act_type=0)
-
-
-def gmm_v2_backward(grad, x, weight, *, group_list=None, group_list_type=0):
+def gmm_backward_fusion(grad, weight, group_list=None, group_list_type=0):
     r"""
-    the grad of
+    the grad of ops.function.math_func.gmm, only dx
     """
-
-
-    dw = gradients[-len(weight):]
+    dx = ops.functional_overload.gmm_backward_fusion(grad, weight, group_list, group_list_type)
+    dw = []
     db = []
-
     return dx, dw, db
 
 
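
Both backward wrappers keep the (dx, dw, db) return contract: gmm_backward slices the flat gradients list into x-gradients and weight-gradients, while gmm_backward_fusion computes only dx and returns empty dw and db. A plain-Python sketch of the slicing logic (illustrative; the helper name is hypothetical):

    def split_gradients(gradients, num_x, num_w):
        # The fused op returns x-gradients first and weight-gradients last.
        dx = gradients[:num_x]
        dw = gradients[-num_w:]
        db = []  # bias must be None in training, so there is no db
        return dx, dw, db

    print(split_gradients(['gx0', 'gw0'], num_x=1, num_w=1))
    # (['gx0'], ['gw0'], [])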