mindspore-2.6.0-cp39-cp39-win_amd64.whl → mindspore-2.7.0rc1-cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore has been flagged as potentially problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +1 -1
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +40 -9
- mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
- mindspore/_extends/optimize/cell_utils.py +96 -0
- mindspore/_extends/parse/__init__.py +2 -2
- mindspore/_extends/parse/compile_config.py +44 -22
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -1
- mindspore/_extends/parse/parser.py +36 -61
- mindspore/_extends/parse/resources.py +39 -0
- mindspore/_extends/parse/standard_method.py +32 -13
- mindspore/_extends/parse/trope.py +8 -1
- mindspore/_extends/pijit/__init__.py +1 -2
- mindspore/amp.py +4 -4
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +4 -4
- mindspore/common/__init__.py +27 -2
- mindspore/common/_grad_function.py +2 -1
- mindspore/common/_pijit_context.py +28 -7
- mindspore/common/_stub_tensor.py +1 -209
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +76 -15
- mindspore/common/api.py +193 -112
- mindspore/common/dtype.py +21 -11
- mindspore/common/dump.py +10 -15
- mindspore/common/generator.py +2 -3
- mindspore/common/hook_handle.py +11 -2
- mindspore/common/jit_config.py +1 -1
- mindspore/common/jit_trace.py +84 -105
- mindspore/common/parameter.py +26 -12
- mindspore/common/recompute.py +3 -3
- mindspore/common/sparse_tensor.py +0 -3
- mindspore/common/symbol.py +0 -1
- mindspore/common/tensor.py +48 -83
- mindspore/communication/_comm_helper.py +46 -4
- mindspore/communication/management.py +79 -7
- mindspore/context.py +38 -23
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/datasets.py +20 -7
- mindspore/dataset/engine/datasets_user_defined.py +32 -2
- mindspore/dataset/engine/iterators.py +2 -2
- mindspore/dataset/engine/obs/config_loader.py +2 -2
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
- mindspore/dataset/transforms/py_transforms.py +7 -3
- mindspore/dataset/transforms/transforms.py +7 -3
- mindspore/dataset/vision/validators.py +1 -0
- mindspore/device_context/ascend/device.py +1 -1
- mindspore/device_context/gpu/__init__.py +2 -2
- mindspore/device_context/gpu/device.py +1 -1
- mindspore/device_context/gpu/op_precision.py +4 -2
- mindspore/device_context/gpu/op_tuning.py +6 -3
- mindspore/device_manager.py +16 -9
- mindspore/dnnl.dll +0 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -5
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/optim/adadelta.py +13 -20
- mindspore/experimental/optim/adagrad.py +15 -22
- mindspore/experimental/optim/adam.py +17 -24
- mindspore/experimental/optim/adamax.py +14 -22
- mindspore/experimental/optim/adamw.py +28 -34
- mindspore/experimental/optim/asgd.py +15 -25
- mindspore/experimental/optim/lr_scheduler.py +27 -45
- mindspore/experimental/optim/nadam.py +14 -24
- mindspore/experimental/optim/optimizer.py +13 -23
- mindspore/experimental/optim/radam.py +18 -24
- mindspore/experimental/optim/rmsprop.py +14 -25
- mindspore/experimental/optim/rprop.py +15 -26
- mindspore/experimental/optim/sgd.py +9 -19
- mindspore/hal/__init__.py +4 -4
- mindspore/hal/contiguous_tensors_handle.py +2 -2
- mindspore/hal/memory.py +1 -0
- mindspore/include/api/cell.h +37 -1
- mindspore/include/api/delegate.h +10 -0
- mindspore/include/api/model.h +3 -0
- mindspore/include/api/types.h +2 -2
- mindspore/include/c_api/model_c.h +0 -58
- mindspore/include/c_api/tensor_c.h +0 -26
- mindspore/include/dataset/vision_ascend.h +1 -1
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/cifar10.py +60 -11
- mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mindspore_ops_host.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +4 -44
- mindspore/mint/distributed/__init__.py +1 -0
- mindspore/mint/distributed/distributed.py +208 -5
- mindspore/mint/nn/__init__.py +1 -1
- mindspore/mint/nn/functional.py +53 -6
- mindspore/mint/nn/layer/_functions.py +164 -294
- mindspore/mint/nn/layer/activation.py +8 -6
- mindspore/mint/nn/layer/conv.py +122 -98
- mindspore/mint/nn/layer/normalization.py +8 -22
- mindspore/mint/optim/adam.py +19 -18
- mindspore/mint/optim/adamw.py +14 -8
- mindspore/mint/optim/sgd.py +5 -5
- mindspore/nn/cell.py +325 -499
- mindspore/nn/grad/cell_grad.py +11 -12
- mindspore/nn/layer/activation.py +32 -34
- mindspore/nn/layer/basic.py +67 -64
- mindspore/nn/layer/channel_shuffle.py +4 -4
- mindspore/nn/layer/combined.py +4 -2
- mindspore/nn/layer/conv.py +86 -85
- mindspore/nn/layer/dense.py +9 -7
- mindspore/nn/layer/embedding.py +50 -52
- mindspore/nn/layer/image.py +37 -39
- mindspore/nn/layer/math.py +111 -112
- mindspore/nn/layer/normalization.py +56 -44
- mindspore/nn/layer/pooling.py +58 -63
- mindspore/nn/layer/rnn_cells.py +33 -33
- mindspore/nn/layer/rnns.py +56 -56
- mindspore/nn/layer/thor_layer.py +74 -73
- mindspore/nn/layer/transformer.py +11 -1
- mindspore/nn/learning_rate_schedule.py +20 -20
- mindspore/nn/loss/loss.py +79 -81
- mindspore/nn/optim/adam.py +1 -1
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -1
- mindspore/nn/probability/distribution/poisson.py +2 -1
- mindspore/nn/sparse/sparse.py +3 -3
- mindspore/nn/wrap/cell_wrapper.py +34 -37
- mindspore/nn/wrap/grad_reducer.py +37 -37
- mindspore/nn/wrap/loss_scale.py +72 -74
- mindspore/numpy/array_creations.py +5 -5
- mindspore/numpy/fft.py +1 -1
- mindspore/numpy/math_ops.py +1 -1
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
- mindspore/ops/_vmap/vmap_array_ops.py +6 -13
- mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +17 -8
- mindspore/ops/auto_generate/gen_extend_func.py +1 -51
- mindspore/ops/auto_generate/gen_ops_def.py +463 -257
- mindspore/ops/auto_generate/gen_ops_prim.py +1127 -885
- mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
- mindspore/ops/composite/__init__.py +10 -0
- mindspore/ops/composite/base.py +8 -4
- mindspore/ops/composite/multitype_ops/__init__.py +12 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +132 -108
- mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
- mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
- mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
- mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
- mindspore/ops/function/__init__.py +3 -1
- mindspore/ops/function/_add_attr_func.py +11 -6
- mindspore/ops/function/array_func.py +7 -94
- mindspore/ops/function/debug_func.py +4 -3
- mindspore/ops/function/grad/grad_func.py +1 -1
- mindspore/ops/function/math_func.py +21 -367
- mindspore/ops/function/nn_func.py +26 -41
- mindspore/ops/function/other_func.py +4 -1
- mindspore/ops/function/random_func.py +31 -4
- mindspore/ops/functional.py +0 -2
- mindspore/ops/functional_overload.py +463 -6
- mindspore/ops/op_info_register.py +21 -0
- mindspore/ops/operations/__init__.py +5 -2
- mindspore/ops/operations/_custom_ops_utils.py +675 -8
- mindspore/ops/operations/_inner_ops.py +3 -6
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/comm_ops.py +185 -26
- mindspore/ops/operations/custom_ops.py +235 -172
- mindspore/ops/operations/debug_ops.py +55 -4
- mindspore/ops/operations/image_ops.py +13 -13
- mindspore/ops/operations/manually_defined/ops_def.py +15 -16
- mindspore/ops/operations/math_ops.py +3 -4
- mindspore/ops/operations/nn_ops.py +5 -6
- mindspore/ops/primitive.py +6 -10
- mindspore/ops/tensor_method.py +36 -4
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
- mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
- mindspore/ops_generate/api/functions_cc_generator.py +58 -10
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
- mindspore/ops_generate/common/base_generator.py +14 -0
- mindspore/ops_generate/common/gen_constants.py +7 -2
- mindspore/ops_generate/common/gen_utils.py +0 -19
- mindspore/ops_generate/common/op_proto.py +11 -4
- mindspore/ops_generate/common/template.py +88 -11
- mindspore/ops_generate/gen_ops.py +1 -1
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
- mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -0
- mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
- mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
- mindspore/parallel/_auto_parallel_context.py +4 -2
- mindspore/parallel/_cell_wrapper.py +106 -40
- mindspore/parallel/_parallel_serialization.py +1 -1
- mindspore/parallel/_ps_context.py +4 -6
- mindspore/parallel/_tensor.py +167 -12
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/transformer.py +13 -8
- mindspore/parallel/auto_parallel.py +12 -5
- mindspore/parallel/checkpoint_convert.py +3 -3
- mindspore/parallel/checkpoint_transform.py +3 -1
- mindspore/parallel/cluster/process_entity/_api.py +84 -48
- mindspore/parallel/cluster/process_entity/_utils.py +95 -7
- mindspore/parallel/cluster/run.py +43 -4
- mindspore/parallel/function/__init__.py +8 -1
- mindspore/parallel/function/reshard_func.py +1 -1
- mindspore/parallel/nn/__init__.py +15 -2
- mindspore/parallel/nn/parallel_cell_wrapper.py +9 -10
- mindspore/parallel/nn/parallel_grad_reducer.py +7 -6
- mindspore/parallel/shard.py +2 -2
- mindspore/parallel/transform_safetensors.py +462 -174
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +3 -0
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
- mindspore/profiler/analysis/task_manager.py +1 -1
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +42 -22
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
- mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
- mindspore/profiler/common/constant.py +16 -0
- mindspore/profiler/common/profiler_context.py +25 -27
- mindspore/profiler/common/profiler_info.py +0 -16
- mindspore/profiler/common/profiler_op_analyse.py +235 -0
- mindspore/profiler/common/profiler_output_path.py +23 -8
- mindspore/profiler/common/profiler_parameters.py +128 -35
- mindspore/profiler/dynamic_profile/__init__.py +0 -0
- mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
- mindspore/profiler/dynamic_profiler.py +305 -314
- mindspore/profiler/envprofiler.py +12 -7
- mindspore/profiler/experimental_config.py +96 -6
- mindspore/profiler/mstx.py +33 -12
- mindspore/profiler/platform/__init__.py +2 -3
- mindspore/profiler/platform/npu_profiler.py +29 -19
- mindspore/profiler/profiler.py +35 -19
- mindspore/profiler/profiler_action_controller.py +64 -76
- mindspore/profiler/schedule.py +10 -4
- mindspore/rewrite/common/config.py +1 -0
- mindspore/rewrite/common/namer.py +1 -0
- mindspore/rewrite/common/namespace.py +1 -0
- mindspore/rewrite/node/node.py +31 -11
- mindspore/rewrite/parsers/assign_parser.py +1 -1
- mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +7 -10
- mindspore/runtime/__init__.py +5 -5
- mindspore/runtime/event.py +10 -4
- mindspore/runtime/executor.py +60 -45
- mindspore/runtime/memory.py +21 -30
- mindspore/runtime/thread_bind_core.py +298 -164
- mindspore/safeguard/rewrite_obfuscation.py +12 -13
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/_utils.py +6 -2
- mindspore/train/amp.py +43 -20
- mindspore/train/callback/__init__.py +5 -5
- mindspore/train/callback/_checkpoint.py +3 -6
- mindspore/train/callback/_flops_collector.py +1 -1
- mindspore/train/callback/_landscape.py +0 -1
- mindspore/train/callback/_train_fault_tolerance.py +71 -13
- mindspore/train/data_sink.py +11 -2
- mindspore/train/dataset_helper.py +9 -0
- mindspore/train/model.py +51 -33
- mindspore/train/serialization.py +133 -111
- mindspore/train/summary/summary_record.py +13 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +0 -6
- mindspore/utils/runtime_execution_order_check.py +162 -78
- mindspore/utils/sdc_detect.py +68 -0
- mindspore/utils/utils.py +6 -9
- mindspore/version.py +1 -1
- {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +5 -4
- {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +329 -367
- mindspore/_deprecated/jit.py +0 -198
- mindspore/experimental/es/__init__.py +0 -22
- mindspore/experimental/es/embedding_service.py +0 -891
- mindspore/experimental/es/embedding_service_layer.py +0 -581
- mindspore/profiler/parser/__init__.py +0 -14
- mindspore/profiler/parser/aicpu_data_parser.py +0 -272
- mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
- mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
- mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
- mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
- mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
- mindspore/profiler/parser/ascend_flops_generator.py +0 -116
- mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
- mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
- mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
- mindspore/profiler/parser/ascend_op_generator.py +0 -334
- mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
- mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
- mindspore/profiler/parser/base_timeline_generator.py +0 -483
- mindspore/profiler/parser/container.py +0 -229
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
- mindspore/profiler/parser/flops_parser.py +0 -531
- mindspore/profiler/parser/framework_enum.py +0 -111
- mindspore/profiler/parser/framework_parser.py +0 -464
- mindspore/profiler/parser/framework_struct.py +0 -61
- mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
- mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
- mindspore/profiler/parser/hccl_parser.py +0 -573
- mindspore/profiler/parser/hwts_log_parser.py +0 -122
- mindspore/profiler/parser/integrator.py +0 -526
- mindspore/profiler/parser/memory_usage_parser.py +0 -277
- mindspore/profiler/parser/minddata_analyzer.py +0 -800
- mindspore/profiler/parser/minddata_parser.py +0 -186
- mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
- mindspore/profiler/parser/op_intermediate_parser.py +0 -149
- mindspore/profiler/parser/optime_parser.py +0 -250
- mindspore/profiler/parser/profiler_info.py +0 -213
- mindspore/profiler/parser/step_trace_parser.py +0 -666
- {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.6.0.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
@@ -3218,7 +3218,7 @@ def approximate_equal(x, y, tolerance=1e-5):
         >>> import mindspore
         >>> mindspore.ops.approximate_equal(mindspore.tensor([1e6, 2e6, float("inf"), float("-inf"), float("nan")]),
         ...                                 mindspore.tensor([1e6, 2e7, float("inf"), float("-inf"), float("nan")]))
-        Tensor(shape=[
+        Tensor(shape=[5], dtype=Bool, value= [ True, False, False, False, False])
         >>>
         >>> mindspore.ops.approximate_equal(mindspore.tensor([1e6, 2e6, 3e6]),
         ...                                 mindspore.tensor([1.00001e6, 2.00002e6, 3.00009e6]), tolerance=1e3)
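Note: `approximate_equal` is elementwise `|x - y| < tolerance`, so the corrected doctest output follows directly: `inf - inf` and any comparison involving NaN yield NaN, which is not less than any tolerance. A minimal NumPy sketch of that semantics (reference only, not the MindSpore kernel):

import numpy as np

def approximate_equal_ref(x, y, tolerance=1e-5):
    # Elementwise |x - y| < tolerance; inf - inf is NaN, so matching
    # infinities (and NaNs) compare as False, as in the doctest above.
    return np.abs(x - y) < tolerance

x = np.array([1e6, 2e6, np.inf, -np.inf, np.nan])
y = np.array([1e6, 2e7, np.inf, -np.inf, np.nan])
print(approximate_equal_ref(x, y))  # [ True False False False False]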
@@ -5621,7 +5621,7 @@ def dstack(tensors):
     1-D or 2-D tensors must have the same shape.
 
     Args:
-        tensors (Union(List[Tensor],
+        tensors (Union(List[Tensor], tuple[Tensor])): The list of tensors or tuple of tensors.
 
     Returns:
         Tensor
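Note: the corrected Args entry says dstack accepts either a list or a tuple of tensors. NumPy's np.dstack has the same contract and serves as a quick reference for the stacking behavior:

import numpy as np

a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
# 1-D inputs are promoted to (1, N, 1) and concatenated along the third axis.
print(np.dstack([a, b]).shape)   # (1, 3, 2)
print(np.dstack((a, b)).shape)   # a tuple of tensors works the same way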
@@ -5782,7 +5782,7 @@ def _diff_helper(input, n, dim):
     is_bool = (input.dtype == mstype.bool_)
     result = input
 
-    for
+    for _ in range(n):  # pylint: disable=unused-variable
         if is_bool:
             result = logical_xor(narrow(result, dim, 1, out_len), narrow(result, dim, 0, out_len))
         else:
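Note: the repaired loop applies the first difference n times; booleans use XOR instead of subtraction. A minimal NumPy sketch of the same computation (reference only):

import numpy as np

def diff_ref(a, n=1, axis=-1):
    # n-th discrete difference: apply the first difference n times,
    # mirroring the `for _ in range(n)` loop in _diff_helper above.
    result = np.asarray(a)
    for _ in range(n):
        out_len = result.shape[axis] - 1
        upper = np.take(result, range(1, out_len + 1), axis=axis)
        lower = np.take(result, range(0, out_len), axis=axis)
        result = (upper != lower) if result.dtype == bool else upper - lower
    return result

print(diff_ref([1, 3, 6, 10], n=2))  # [1 1]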
@@ -9630,12 +9630,10 @@ def _einsum_convert_num_to_char(num):
     """For einsum, convert number into char."""
     if [num] == [Ellipsis]:
         return '...'
-
-
-
-
-    if num >= 26 and num < 52:
-        return chr(num - 26 + ord('a'))
+    if 0 <= num < 26:
+        return chr(num + 65)
+    if 26 <= num < 52:
+        return chr(num + 71)
     raise ValueError(f"For Einsum, the number in sublist should be in range [0, 52), but got {num}")
 
 
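Note: the new branches are the old ord-based arithmetic with the constants folded in: ord('A') == 65, and ord('a') - 26 == 97 - 26 == 71. A quick equivalence check:

# 0-25 map to 'A'-'Z', 26-51 map to 'a'-'z'; both spellings agree.
for num in range(52):
    old = chr(num + ord('A')) if num < 26 else chr(num - 26 + ord('a'))
    new = chr(num + 65) if num < 26 else chr(num + 71)
    assert old == new
print(''.join(chr(n + 65) if n < 26 else chr(n + 71) for n in range(52)))
# ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz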
@@ -9737,33 +9735,19 @@ def einsum(equation, *operands):
     return _get_cache_prim(P.Einsum)(equation)(operands)
 
 
-def _einsum_convert_sublist_to_label(num, ell_num=False):
-    """Convert sublist to label."""
-    if num == Ellipsis or ell_num and num == 52:
-        return '...'
-    if 0 <= num < 26:
-        return chr(num + ord('A'))
-    if 26 <= num < 52:
-        return chr(num + ord('a') - 26)
-    raise ValueError(
-        f'For einsum, the number in sublist must be in range [0, 52), but got {num}')
-
-
-def _einsum_convert_label_to_index(label):
-    """Convert label to index."""
-    label_num = ord(label)
-    if ord('A') <= label_num <= ord('Z'):
-        return label_num - ord('A')
-    if ord('a') <= label_num <= ord('z'):
-        return label_num - ord('a') + 26
-    if label_num == ord('.'):
-        return 52
-    raise ValueError(
-        f'For einsum, the label in equation must be in [a-zA-Z] or ., but got {label}')
-
-
-def _einsum_convert_sublist(equation, *operands):
+def _einsum_convert_sublist(equation, operands):
     """Convert the sublist to an equation operand if the received input is a sublist format."""
+    def _einsum_convert_sublist_to_label(num, ell_num=False):
+        """Convert sublist to label."""
+        if num == Ellipsis or ell_num and num == 52:
+            return '...'
+        if 0 <= num < 26:
+            return chr(num + ord('A'))
+        if 26 <= num < 52:
+            return chr(num + ord('a') - 26)
+        raise ValueError(
+            f'For einsum, the number in sublist must be in range [0, 52), but got {num}')
+
     if isinstance(equation, Tensor):
         equation_tmp = ''
         for i, lst in enumerate(operands):
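Note: the helper is now nested and takes `operands` as a single sequence instead of `*operands`. For orientation, the sublist calling convention it converts looks like this; NumPy supports the same interleaved form, so it serves as a neutral reference:

import numpy as np

x = np.arange(6.0).reshape(2, 3)
y = np.arange(12.0).reshape(3, 4)

# Each operand is followed by a list of integer labels (0-25 -> 'A'-'Z',
# 26-51 -> 'a'-'z', Ellipsis -> '...'); an optional trailing list names the
# output axes. The converter turns the sublist call into an equation string.
a = np.einsum(x, [0, 1], y, [1, 2], [0, 2])   # sublist form
b = np.einsum('AB,BC->AC', x, y)              # equivalent equation form
assert np.allclose(a, b)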
@@ -9788,331 +9772,6 @@ def _einsum_convert_sublist(equation, *operands):
     return equation, operands
 
 
-def _einsum_check_inputargs(equation, operands):
-    """Check equation and operands."""
-    if not isinstance(equation, str):
-        raise TypeError(
-            f"For einsum, 'equation' must be a str, but got {type(equation)}.")
-    for operand in operands:
-        if not isinstance(operand, Tensor):
-            raise TypeError(
-                f"For einsum, members of 'operands' must be Tensor, but got {type(operand)}.")
-
-
-@constexpr
-def _einsum_parse_equation(equation):
-    """Parse equation."""
-    l_equation = ''
-    r_equation = ''
-    equation = equation.replace(' ', '')
-
-    if '->' in equation:
-        l_equation, r_equation = equation.split('->', 1)
-        if l_equation == '':
-            raise ValueError(
-                'For einsum, equation must contain characters to the left fo the arrow.')
-    else:
-        l_equation = equation
-
-    if ',' in l_equation:
-        l_equationlst = l_equation.split(",")
-    else:
-        l_equationlst = [l_equation]
-
-    l_equationlst = []
-
-    for subequation in l_equation.split(','):
-        if '.' in subequation and ('...' not in subequation or subequation.count('.') != 3):
-            raise ValueError(f"For einsum, an ellipsis in the equation must include three continuous \'.\', "
-                             f"and can only be found once.")
-        subequation_lst = [_einsum_convert_label_to_index(label) for label in subequation.replace('...', '.')]
-        l_equationlst.append(subequation_lst)
-
-    if "." in r_equation and ('...' not in r_equation or r_equation.count('.') != 3):
-        raise ValueError(f"For einsum, an ellipsis in the equation must include three continuous \'.\', "
-                         f"and can only be found once.")
-    r_equationlst = [_einsum_convert_label_to_index(label) for label in r_equation.replace('...', '.')]
-
-    return l_equationlst, r_equationlst, ('->' in equation)
-
-
-def _einsum_parse_labels(l_equationlst, operands):
-    """Parse left script of equation."""
-    align_rank = 0
-    max_labels = 53
-    ellipsis_dimnum = 0
-    labels_count = [0] * max_labels
-
-    if len(operands) != len(l_equationlst):
-        raise ValueError(f"For einsum, 'operands' is not equal to specified in the 'equation', "
-                         f"but got {len(operands)} and {len(l_equationlst)}.")
-
-    for idx, sub_equ in enumerate(l_equationlst):
-        start_dim = 0
-        label_num = 0
-        operand_shape = list(operands[idx].shape)
-        for label in sub_equ:
-            dim_num = 1
-            label_num += 1
-            end_dim = start_dim + 1
-
-            # Label is ellipsis
-            if label == 52:
-                end_dim = len(operand_shape) - len(sub_equ) + label_num
-                dim_num = end_dim - start_dim
-                if ellipsis_dimnum != 0 and ellipsis_dimnum != dim_num:
-                    raise ValueError(f"For einsum, an ellipsis in 'equation' can only represent the same numbers of "
-                                     f"dimensions in 'operands'.")
-                ellipsis_dimnum = dim_num
-            if labels_count[label] == 0:
-                align_rank += dim_num
-            labels_count[label] += 1
-            start_dim += dim_num
-        if label_num != len(sub_equ) or start_dim != len(operand_shape):
-            raise ValueError(f"For einsum, the numbers of labels specified in the 'equation' does not match "
-                             f"'operands[{idx}]'.")
-    return ellipsis_dimnum, labels_count, align_rank
-
-
-def _einsum_infer_output(r_equationlst, arrow_exist, ellipsis_dimnum, labels_count):
-    """Parse right script of equation and infer output shape."""
-    idx = 0
-    idle_idx = -1
-    output_rank = 0
-    labels_perm_idx = [idle_idx] * 53
-
-    if arrow_exist:
-        for label in r_equationlst:
-            if labels_count[label] != 0:
-                if labels_perm_idx[label] != idle_idx:
-                    raise ValueError(f"For einsum, '{_einsum_convert_sublist_to_label(label, True)}' or {label} in "
-                                     f"sublist format has appears more than once in output subscript.")
-                dimnum = 1
-                if label == 52:
-                    dimnum = ellipsis_dimnum
-                labels_perm_idx[label] = idx
-                output_rank += dimnum
-                idx += dimnum
-            else:
-                raise ValueError(f"For einsum, the label to the right of arrow in the 'equation' must appear on "
-                                 f"left, but '{_einsum_convert_sublist_to_label(label, True)}' does not.")
-    else:
-        if labels_count[52] != 0:
-            output_rank += ellipsis_dimnum
-            labels_perm_idx[52] = idx
-            idx += ellipsis_dimnum
-        for label, count in enumerate(labels_count):
-            if count == 1:
-                output_rank += 1
-                labels_perm_idx[label] = idx
-                idx += 1
-
-    for label, count in enumerate(labels_count):
-        if count != 0 and labels_perm_idx[label] == idle_idx:
-            labels_perm_idx[label] = idx
-            idx += 1
-
-    return output_rank, labels_perm_idx
-
-
-def _einsum_adjust_operands(operands, l_equationlst, ellipsis_dimnum, labels_perm_idx, align_rank):
-    """Align operands to output as possible."""
-    # Unsqueeze miss dimensions to make all operands has same rank, compute diagonal if operand has same label.
-    # Then use _labels_perm_idx to transpose all operands to align dimensions with output.
-    adjust_operands = []
-    for idx, operand in enumerate(operands):
-        idle_dim = -1
-        align_axis = [idle_dim] * align_rank
-        label_dims = [idle_dim] * 53
-        dim = 0
-
-        for label in l_equationlst[idx]:
-            if label_dims[label] != idle_dim:
-                operand = ops.diagonal(operand, 0, label_dims[label], dim)
-                diag_perm = []
-                diag_dim = 0
-                for i in range(len(operand.shape)):
-                    if i == label_dims[label]:
-                        diag_perm.append(len(operand.shape) - 1)
-                    else:
-                        diag_perm.append(diag_dim)
-                        diag_dim += 1
-                operand = permute(operand, tuple(diag_perm))
-            else:
-                label_dims[label] = dim
-                if label == 52:
-                    for ell_idx in range(ellipsis_dimnum):
-                        align_axis[labels_perm_idx[label] + ell_idx] = dim
-                        dim += 1
-                else:
-                    align_axis[labels_perm_idx[label]] = dim
-                    dim += 1
-        if len(operand.shape) < align_rank:
-            for i, axis in enumerate(align_axis):
-                if axis == idle_dim:
-                    align_axis[i] = dim
-                    dim += 1
-            missing_dims = [1] * (align_rank - len(operand.shape))
-            operand_shape = list(operand.shape) + missing_dims
-            operand = ops.reshape(operand, operand_shape)
-        operand = permute(operand, tuple(align_axis))
-        adjust_operands.append(operand)
-    return adjust_operands
-
-
-def _einsum_find_dimlastop(align_rank, operands, adjust_operands):
-    """Find dim last operand."""
-    dim_last_op = [0] * align_rank
-    has_zero_dim = False
-    for dim in range(align_rank):
-        broadcast_dim = adjust_operands[0].shape[dim]
-        for idx in range(1, len(adjust_operands)):
-            other_dim = adjust_operands[idx].shape[dim]
-            if broadcast_dim != other_dim and broadcast_dim != 1 and other_dim != 1:
-                err_msg = "For einsum, operands do not broadcast after align to output [shapes :origin -> adjust]:"
-                for i in range(len(operands)):
-                    err_msg += f" {operands[i].shape} -> {adjust_operands[i].shape}"
-                raise ValueError(err_msg)
-            if other_dim != 1:
-                dim_last_op[dim] = idx
-                broadcast_dim = other_dim
-        has_zero_dim = has_zero_dim or broadcast_dim == 0
-    return dim_last_op, has_zero_dim
-
-
-def _einsum_multiplication(sum_dims, l_tensor, r_tensor):
-    """Compute bmm for einsum."""
-    batch_dims = []
-    lonly_dims = []
-    ronly_dims = []
-    batch_size = 1
-    lonly_size = 1
-    ronly_size = 1
-    sum_size = 1
-
-    l_shape = l_tensor.shape
-    r_shape = r_tensor.shape
-
-    # Compute sum if dim is in sum_dims and get shapes for bmm
-    for i in range(len(l_shape)):
-        sum_l = l_shape[i] > 1
-        sum_r = r_shape[i] > 1
-        if i in sum_dims:
-            if sum_l and sum_r:
-                sum_size *= l_shape[i]
-            elif sum_l:
-                l_tensor = ops.auto_generate.sum_ext(l_tensor, i, True)
-            elif sum_r:
-                r_tensor = ops.auto_generate.sum_ext(r_tensor, i, True)
-        elif sum_l and sum_r:
-            batch_dims.append(i)
-            batch_size *= l_shape[i]
-        elif sum_l:
-            lonly_dims.append(i)
-            lonly_size *= l_shape[i]
-        else:
-            ronly_dims.append(i)
-            ronly_size *= r_shape[i]
-
-    # Compute the einsum bmm operators pipeline.
-    # The whole operators pipeline is transpose(in) -> reshape(in) -> bmm(in) -> reshape(out) -> transpose(out).
-    l_reshape_shape = (batch_size, lonly_size, sum_size)
-    r_reshape_shape = (batch_size, sum_size, ronly_size)
-
-    out_reshape_shape = [l_shape[dim] for dim in batch_dims]
-    out_reshape_shape += [l_shape[dim] for dim in lonly_dims]
-    out_reshape_shape += [1 for _ in sum_dims]
-    out_reshape_shape += [r_shape[dim] for dim in ronly_dims]
-
-    l_perm_axis = batch_dims + lonly_dims + sum_dims + ronly_dims
-    r_perm_axis = batch_dims + sum_dims + ronly_dims + lonly_dims
-    out_perm_axis = [-1] * len(out_reshape_shape)
-
-    out_dim = 0
-    for idx in range(len(l_perm_axis)):
-        out_perm_axis[l_perm_axis[idx]] = out_dim
-        out_dim += 1
-
-    l_tensor = permute(l_tensor, tuple(l_perm_axis))
-    l_tensor = ops.reshape(l_tensor, l_reshape_shape)
-
-    r_tensor = permute(r_tensor, tuple(r_perm_axis))
-    r_tensor = ops.reshape(r_tensor, r_reshape_shape)
-
-    output = bmm_ext(l_tensor, r_tensor)
-    output = ops.reshape(output, out_reshape_shape)
-    output = permute(output, tuple(out_perm_axis))
-
-    output_origin_shape = output.shape
-    output_squeeze_shape = []
-    for dim in range(len(output_origin_shape)):
-        if dim not in sum_dims:
-            output_squeeze_shape.append(output_origin_shape[dim])
-
-    return ops.reshape(output, output_squeeze_shape)
-
-
-def _einsum(equation, operands):
-    '''Einsum main process'''
-    _l_equationlst, _r_equationlst, _arrow_exist = _einsum_parse_equation(
-        equation)
-    _ellipsis_dimnum, _labels_count, _align_rank = _einsum_parse_labels(
-        _l_equationlst, operands)
-    _output_rank, _labels_perm_idx = _einsum_infer_output(
-        _r_equationlst, _arrow_exist, _ellipsis_dimnum, _labels_count)
-    _adjust_operands = _einsum_adjust_operands(operands, _l_equationlst, _ellipsis_dimnum, _labels_perm_idx,
-                                               _align_rank)
-    _dim_last_op, _has_zero_dim = _einsum_find_dimlastop(
-        _align_rank, operands, _adjust_operands)
-    _result = _adjust_operands[0]
-
-    # Fast path if operands has zero dim.
-    if _has_zero_dim:
-        output_shape = []
-        for dim in range(_output_rank):
-            output_shape.append(_adjust_operands[_dim_last_op[dim]].shape[dim])
-        return ops.auto_generate.zeros(output_shape, dtype=_result.dtype)
-
-    # Sum or squeeze dimensions that is 1 for all rest operands.
-    _reduce_dim = _output_rank
-    for dim in range(_output_rank, _align_rank):
-        if _dim_last_op[dim] == 0:
-            if _result.shape[_reduce_dim] == 1:
-                _result = ops.auto_generate.pyboost_inner_prim.squeeze_impl(_result, _reduce_dim)
-            else:
-                _result = ops.auto_generate.sum_ext(_result, _reduce_dim)
-        else:
-            _reduce_dim += 1
-
-    # Compute multiplication if operands are more than two.
-    for i in range(1, len(_adjust_operands)):
-        operand = _adjust_operands[i]
-        dim = _output_rank
-        sum_dims = []
-        for j in range(_output_rank, _align_rank):
-            if _dim_last_op[j] < i:
-                operand = ops.auto_generate.pyboost_inner_prim.squeeze_impl(operand, dim)
-            elif _dim_last_op[j] == i:
-                if _result.shape[dim] == 1:
-                    operand = ops.auto_generate.sum_ext(operand, dim)
-                    _result = ops.auto_generate.pyboost_inner_prim.squeeze_impl(_result, dim)
-                else:
-                    sum_dims.append(dim)
-                    dim += 1
-            else:
-                dim += 1
-
-        if sum_dims == []:
-            _result = mul_ext(_result, operand)
-        elif len(sum_dims) == len(_result.shape):
-            _result = ops.auto_generate.dot(ops.auto_generate.flatten_ext(_result),
-                                            ops.auto_generate.flatten_ext(operand))
-        else:
-            _result = _einsum_multiplication(sum_dims, _result, operand)
-
-    return _result
-
-
 def einsum_ext(equation, *operands):
     r"""
     According to the Einstein summation Convention (Einsum),
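Note: the roughly 330 lines removed above were the pure-Python einsum fallback (equation parsing, operand alignment, and pairwise contraction); 2.7.0rc1 delegates to the compiled `ops.functional_overload.einsum` instead (see the next hunk). For intuition, the heart of the removed `_einsum_multiplication` reduced each pairwise contraction to a batched matmul. A minimal NumPy sketch of that transpose -> reshape -> bmm -> reshape pipeline (illustrative only, not MindSpore code; it assumes the summed axes have equal sizes on both operands and omits the original's final output transpose):

import numpy as np

def pair_contract(l, r, batch, lonly, ronly, sums):
    # Operands are pre-aligned to one common rank with size-1 axes where a
    # label does not occur (what _einsum_adjust_operands produced); the
    # contraction is transpose -> reshape -> batched matmul -> reshape.
    prod = lambda t, dims: int(np.prod([t.shape[d] for d in dims], dtype=int))
    lm = l.transpose(batch + lonly + sums + ronly).reshape(
        prod(l, batch), prod(l, lonly), prod(l, sums))
    rm = r.transpose(batch + sums + ronly + lonly).reshape(
        prod(r, batch), prod(r, sums), prod(r, ronly))
    out = lm @ rm  # the batched matmul performs the summation
    return out.reshape([l.shape[d] for d in batch]
                       + [l.shape[d] for d in lonly]
                       + [r.shape[d] for d in ronly])

# 'bij,bjk->bik' with both operands aligned to rank 4 as (b, i, k, j):
l = np.random.rand(4, 2, 1, 3)   # the k axis is a size-1 placeholder
r = np.random.rand(4, 1, 5, 3)   # the i axis is a size-1 placeholder
out = pair_contract(l, r, batch=[0], lonly=[1], ronly=[2], sums=[3])
assert np.allclose(out, np.einsum('bij,bkj->bik', l[:, :, 0, :], r[:, 0, :, :]))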
@@ -10207,14 +9866,9 @@ def einsum_ext(equation, *operands):
          [3. 6.]
          [4. 8.]]
     """
-    _equation, _operands = _einsum_convert_sublist(equation, *operands)
-    _einsum_check_inputargs(_equation, _operands)
-
-    for operand in _operands:
-        if ops.is_sequence_shape_unknown(operand.shape) or ops.is_sequence_value_unknown(operand.shape):
-            raise ValueError(f"For einsum, the element of 'operands' can't be dynamic shape or dynamic rank.")
+    _equation, _operands = _einsum_convert_sublist(equation, operands)
 
-    return _einsum(_equation, _operands)
+    return ops.functional_overload.einsum(_equation, _operands)
 
 
 def cumprod(input, dim, dtype=None):
@@ -53,9 +53,10 @@ from mindspore.ops.auto_generate import (reflection_pad_1d_op, reflection_pad_2d
     upsample_nearest1d_op, upsample_nearest2d_op, upsample_nearest3d_op,
     upsample_linear1d_op, upsample_bilinear2d_op, upsample_bicubic2d_op,
     upsample_trilinear3d_impl, fill_scalar_op, floor_op, nllloss_2d_op,
-    masked_fill_op, masked_select, ones, flatten_ext, conv_transpose2d)
+    masked_fill_op, masked_select, ones, flatten_ext, conv_transpose2d,
+    func_max_pool2d_op)
 # 2
-
+from mindspore.ops.auto_generate.pyboost_inner_prim import grid_sampler_2d_impl, grid_sampler_3d_impl
 # 3
 
 # 4
@@ -91,10 +92,10 @@ from mindspore.ops.auto_generate import avg_pool3d_ext_op
 # 19
 
 # 20
-
+from mindspore.ops.functional_overload import conv3d as conv3d_op
 from mindspore.ops.auto_generate.gen_ops_prim import embedding_op, MaxPoolWithIndices, \
     PromptFlashAttention, MaxPoolWithMask
-from mindspore.ops.auto_generate.gen_ops_prim import
+from mindspore.ops.auto_generate.gen_ops_prim import conv2d_ext_op, \
     conv2d_padding_op, conv1d_ext_op, conv1d_padding_op, speed_fusion_attention_op
 from mindspore.common.generator import default_generator
 from mindspore.ops.auto_generate import hardshrink, hardsigmoid, hardswish
@@ -4420,7 +4421,7 @@ def nll_loss_ext(input, target, weight=None, ignore_index=-100, reduction='mean'
     :math:`N` is the batch size, :math:`c` belonging to :math:`[0, C-1]` is class index,
     where :math:`C` is the number of classes.
 
-    If `reduction` is not ``'
+    If `reduction` is not ``'none'`` (default ``'mean'``), then
 
     .. math::
 
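Note: the ``.. math::`` block this sentence introduces is the usual weighted NLL reduction; for reference, stated here under the assumption that MindSpore follows the standard definition:

\ell_n = -\, w_{y_n} x_{n, y_n}, \qquad
\ell(x, y) =
\begin{cases}
\sum_{n=1}^{N} \ell_n \Big/ \sum_{n=1}^{N} w_{y_n}, & \text{if reduction = 'mean'} \\
\sum_{n=1}^{N} \ell_n, & \text{if reduction = 'sum'}
\end{cases}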
@@ -4444,7 +4445,7 @@ def nll_loss_ext(input, target, weight=None, ignore_index=-100, reduction='mean'
         weight (Tensor, optional): A rescaling weight applied to the loss of each batch element.
             If not None, the shape is :math:`(C,)`.
             The data type must be float16 or float32 or bfloat16(only supported by Atlas A2 training series products).
-            It should have the same data type as `input` . Default: ``
+            It should have the same data type as `input` . Default: ``None`` .
         ignore_index (int, optional): Specifies a target value that is ignored
             and does not contribute to the input gradient. Default: ``-100`` .
         reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
@@ -4481,10 +4482,10 @@ def _nllloss_nd(input, target, weight=None, ignore_index=-100, reduction='mean')
         weight = ones(n_classes, input.dtype)
     if input_dim < 1:
         raise ValueError(f"input dim should be less than 1, but got {input_dim}")
-    if input_dim != 1 and input.shape[0] != target.shape[0]:
+    if F.isconstant(input_dim) and F.isconstant(target.ndim) and input_dim != 1 and input.shape[0] != target.shape[0]:
         raise ValueError(f"input bacth_size should be equal to target batch_size, but got {input.shape[0]} and "
                          f"{target.shape[0]}")
-    if input_dim
+    if input_dim in [1, 2]:
         return nllloss_impl(input, target, weight, reduction, ignore_index)[0]
     if input_dim == 4:
         return nllloss_2d_op(input, target, weight, reduction, ignore_index)[0]
@@ -5365,7 +5366,7 @@ def max_pool3d(x, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=Fal
     return out
 
 
-def grid_sample(input, grid, mode='bilinear', padding_mode='zeros', align_corners=False):
+def grid_sample(input, grid, mode='bilinear', padding_mode='zeros', align_corners=None):
     """
     Given an `input` and a flow-field `grid`, computes the `output` using `input` values and pixel locations from
     `grid`. Only spatial (4-D) and volumetric (5-D) `input` is supported.
@@ -5411,10 +5412,10 @@ def grid_sample(input, grid, mode='bilinear', padding_mode='zeros', align_corner
         padding_mode (str, optional): An optional string specifying the pad method.
             The optional values are "zeros", "border" or
             "reflection". Default: ``'zeros'`` .
-        align_corners (bool, optional): If set to
+        align_corners (bool, optional): If set to ``True``, the extrema (-1 and 1) are considered as referring to
             the center points of the input's corner pixels. If set to `False`, they are instead considered as referring
             to the corner points of the input's corner pixels, making the sampling more resolution agnostic. Default:
-            ``False`` .
+            ``None``, which is the same as ``False`` .
 
     Returns:
         Tensor, dtype is the same as `input` and whose shape is :math:`(N, C, H_{out}, W_{out})` (4-D) and
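Note: the two conventions map normalized grid coordinates in [-1, 1] to pixel indices differently. A small sketch of the standard mapping (assuming the convention shared by other frameworks' grid_sample, which the docstring text above describes):

def unnormalize(coord, size, align_corners):
    # coord in [-1, 1] -> pixel index in [0, size - 1].
    if align_corners:
        # -1 and 1 hit the centers of the corner pixels.
        return (coord + 1) / 2 * (size - 1)
    # -1 and 1 hit the outer edges of the corner pixels.
    return ((coord + 1) * size - 1) / 2

W = 4
for c in (-1.0, 0.0, 1.0):
    print(c, unnormalize(c, W, True), unnormalize(c, W, False))
# -1.0 -> 0.0 vs -0.5;  0.0 -> 1.5 vs 1.5;  1.0 -> 3.0 vs 3.5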
@@ -5451,11 +5452,10 @@ def grid_sample(input, grid, mode='bilinear', padding_mode='zeros', align_corner
     [[14.5 ]
       [14.8 ]]]]
     """
+    align_corners = False if align_corners is None else align_corners
     if input.ndim == 4:
-        _grid_sampler_2d = _get_cache_prim(NN_OPS.GridSampler2D)(mode, padding_mode, align_corners)
-        return _grid_sampler_2d(input, grid)
-    _grid_sampler_3d = _get_cache_prim(NN_OPS.GridSampler3D)(mode, padding_mode, align_corners)
-    return _grid_sampler_3d(input, grid)
+        return grid_sampler_2d_impl(input, grid, mode, padding_mode, align_corners)
+    return grid_sampler_3d_impl(input, grid, mode, padding_mode, align_corners)
 
 
 @constexpr
@@ -6307,7 +6307,7 @@ def conv1d_ext(input, weight, bias=None, stride=1, padding=0, dilation=1, groups
             this 1D convolution layer also can be called 1D depthwise convolution layer. Default: ``1`` .
 
             - :math:`(C_{in} \text{ % } \text{groups} == 0)` , :math:`(C_{out} \text{ % } \text{groups} == 0)` ,
-              :math:`(C_{out} >= \text{groups})` , :math:`(\text{
+              :math:`(C_{out} >= \text{groups})` , :math:`(\text{weight[1]} = C_{in} / \text{groups})`。
 
     Returns:
         Tensor, the value that applied 1D convolution. The shape is :math:`(N, C_{out}, L_{out})`.
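Note: the restored constraint reads weight.shape[1] == C_in / groups, alongside C_in % groups == 0, C_out % groups == 0, and C_out >= groups. A tiny checker makes the relationships concrete (hypothetical helper, not a MindSpore API):

def check_group_conv_shapes(c_in, c_out, groups, weight_shape):
    # Grouped-conv weight is laid out as (C_out, C_in // groups, *kernel_dims).
    assert c_in % groups == 0 and c_out % groups == 0
    assert c_out >= groups
    assert weight_shape[0] == c_out and weight_shape[1] == c_in // groups

check_group_conv_shapes(c_in=8, c_out=4, groups=2, weight_shape=(4, 4, 3))  # ok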
@@ -6372,9 +6372,9 @@ def _get_pad_info(dilation, weight):
     for i in range(2):
         d = dilation[i]
         weight_size = weight.shape[i + 2]
-
-        pad_l += (int(
-        pad_r += (int(
+        pad_item = d * (weight_size - 1)
+        pad_l += (int(pad_item / 2),)
+        pad_r += (int(pad_item - pad_l[i]),)
         if pad_l[i] != pad_r[i]:
             need_pad_nd = True
     return need_pad_nd, pad_l, pad_r
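Note: the restored lines compute 'same'-style padding: the total pad per spatial dim is dilation * (kernel_size - 1), split floor/remainder between left and right; when the halves differ, need_pad_nd triggers an extra explicit pad pass. Worked through by hand:

# Total padding per dim: dilation * (kernel_size - 1), split as
# floor(total / 2) on the left and the remainder on the right.
for d, k in [(1, 3), (1, 4), (2, 3)]:
    total = d * (k - 1)
    pad_l = total // 2
    pad_r = total - pad_l
    print((d, k), (pad_l, pad_r), "asymmetric" if pad_l != pad_r else "symmetric")
# (1, 3) (1, 1) symmetric
# (1, 4) (1, 2) asymmetric -> needs the extra N-D pad pass
# (2, 3) (2, 2) symmetric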
@@ -6477,7 +6477,7 @@ def conv2d_ext(input, weight, bias=None, stride=1, padding=0, dilation=1, groups
             groups (int, optional): Splits `input` into groups. Default: ``1`` .
 
             - :math:`(C_{in} \text{ % } \text{groups} == 0)` , :math:`(C_{out} \text{ % } \text{groups} == 0)` ,
-              :math:`(C_{out} >= \text{groups})` , :math:`(\text{
+              :math:`(C_{out} >= \text{groups})` , :math:`(\text{weight[1]} = C_{in} / \text{groups})`
 
     Returns:
         Tensor, the value that applied 2D convolution. The shape is :math:`(N, C_{out}, H_{out}, W_{out})`.
@@ -6981,10 +6981,6 @@ def batch_norm_ext(input, running_mean, running_var, weight=None, bias=None, tra
         [[ 2.1621194  1.2360122]
          [14.810596  10.180061 ]]
     """
-    if weight is None:
-        weight = ops.ones([input.shape[1]], dtype=input.dtype)
-    if bias is None:
-        bias = ops.zeros([input.shape[1]], dtype=input.dtype)
     output = batch_norm_ext_op(input, weight, bias, running_mean, running_var, training, momentum, eps)
     return output[0]
 
@@ -7382,8 +7378,8 @@ def conv3d_ext(input, weight, bias=None, stride=1, padding=0, dilation=1, groups
         - :math:`H_{out} = (H_{in} + PadUp + PadDown - ((kh - 1) * DilationH + 1)) / StrideH + 1` .
         - :math:`W_{out} = (W_{in} + PadLeft + PadRight - ((kw - 1) * DilationW + 1)) / StrideW + 1` .
         - :math:`D_{out} = (D_{in} + PadFront + PadBack - ((kd - 1) * DilationD + 1)) / StrideD + 1` .
-        - :math:`(D_{in}+PadFront+PadBack - ((kd-1)*DilationD+1))
-        - :math:`(H_{in}+PadUp+PadDown - ((kh-1)*Dilationh+1))
+        - :math:`(D_{in}+PadFront+PadBack - ((kd-1)*DilationD+1)) \% StrideD <= PadBack` .
+        - :math:`(H_{in}+PadUp+PadDown - ((kh-1)*Dilationh+1)) \% StrideH <= PadDown` .
         - :math:`stride_d <= kernel_d` .
         - :math:`PadUp < kh` and :math:`PadDown < kh` . When `padding` = ``'valid'``, both PadUp and PadDown are zeros.
           When `padding` = ``'same'``, pad can be calculated by
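Note: the repaired constraints are divisibility conditions on the standard output-size formula listed just above them. A small helper to evaluate that formula by hand (hypothetical, for shape checking only):

def conv_out_size(n_in, pad_total, kernel, dilation, stride):
    # n_out = (n_in + pad_total - ((kernel - 1) * dilation + 1)) // stride + 1
    return (n_in + pad_total - ((kernel - 1) * dilation + 1)) // stride + 1

# e.g. D_in = 16, 3x3x3 kernel, no padding, stride 2, dilation 1:
print(conv_out_size(16, 0, 3, 1, 2))  # 7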
@@ -7450,12 +7446,7 @@ def conv3d_ext(input, weight, bias=None, stride=1, padding=0, dilation=1, groups
         (12, 26, 59, 47, 5)
     """
 
-
-        return conv3d_ext_op(input, weight, bias, stride, padding, dilation, groups)
-    if isinstance(padding, str):
-        return conv3d_padding_op(input, weight, bias, stride, padding, dilation, groups)
-    raise TypeError(f"For conv3d, the parameter 'padding' must be a tuple/list " \
-                    f"or a string, but got {type(padding)}")
+    return conv3d_op(input, weight, bias, stride, padding, dilation, groups)
 
 
 @_primexpr
@@ -7523,7 +7514,7 @@ def pixel_shuffle(input, upscale_factor):
     c, h, w = idx[-3:]
     _check_pxiel_shuffle_valid(c, upscale_factor)
     c = c // upscale_factor ** 2
-    input_perm =
+    input_perm = pre + (c, upscale_factor, upscale_factor, h, w)
     input = reshape_(input, input_perm)
     input_perm = [i for i in range(length - 2)]
     input_perm = input_perm + [length, length - 2, length + 1, length - 1]
@@ -7587,7 +7578,7 @@ def pixel_unshuffle(input, downscale_factor):
     _check_pxiel_unshuffle_valid(h, w, downscale_factor)
     h = h // downscale_factor
     w = w // downscale_factor
-    input_perm =
+    input_perm = pre + (c, h, downscale_factor, w, downscale_factor)
     input = reshape_(input, input_perm)
     input_perm = [i for i in range(length - 2)]
    input_perm = input_perm + [length - 1, length + 1, length - 2, length]
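Note: both repaired lines rebuild the reshape target: pixel_shuffle splits C into (C / r**2, r, r) and interleaves the two r factors into H and W; pixel_unshuffle is the exact inverse. A NumPy sketch of the 4-D case (illustrative, mirroring the reshape/permute trick above):

import numpy as np

def pixel_shuffle_ref(x, r):
    n, c, h, w = x.shape
    x = x.reshape(n, c // (r * r), r, r, h, w)   # split the channel dim
    x = x.transpose(0, 1, 4, 2, 5, 3)            # interleave r with h and w
    return x.reshape(n, c // (r * r), h * r, w * r)

def pixel_unshuffle_ref(x, r):
    n, c, h, w = x.shape
    x = x.reshape(n, c, h // r, r, w // r, r)    # split the spatial dims
    x = x.transpose(0, 1, 3, 5, 2, 4)            # fold both r factors into C
    return x.reshape(n, c * r * r, h // r, w // r)

x = np.arange(2 * 8 * 3 * 3.0).reshape(2, 8, 3, 3)
assert np.array_equal(pixel_unshuffle_ref(pixel_shuffle_ref(x, 2), 2), x)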
@@ -8912,13 +8903,7 @@ def max_pool2d_ext(input, kernel_size, stride=None, padding=0, dilation=1, ceil_
     >>> print(argmax.shape)
     (20, 16, 24, 31)
     """
-
-    if return_indices:
-        max_pool_func_ = _get_cache_prim(MaxPoolWithIndices)(kernel_size, strides, padding, dilation, ceil_mode)
-        out, indices = max_pool_func_(input)
-    else:
-        max_pool_func_ = _get_cache_prim(MaxPoolWithMask)(kernel_size, strides, padding, dilation, ceil_mode)
-        out, indices = max_pool_func_(input)
+    out, indices = func_max_pool2d_op(input, kernel_size, stride, padding, dilation, ceil_mode, return_indices)
     if return_indices:
         return out, indices
     return out
@@ -15,6 +15,7 @@
 """Defines other operators with functional form."""
 from mindspore.ops import operations as P
 from mindspore.ops.auto_generate import rotary_position_embedding
+from mindspore.ops.auto_generate import moe_distribute_dispatch, moe_distribute_combine
 from mindspore.ops.auto_generate.gen_ops_prim import moe_init_routing_v2_op
 
 partial_ = P.Partial()
@@ -268,6 +269,8 @@ __all__ = [
     'partial',
     'rotary_position_embedding',
     'move_to',
-    'moe_init_routing_v2'
+    'moe_init_routing_v2',
+    'moe_distribute_dispatch',
+    'moe_distribute_combine'
 ]
 __all__.sort()