mindspore 2.6.0rc1__cp311-cp311-win_amd64.whl → 2.7.0__cp311-cp311-win_amd64.whl
This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of mindspore has been flagged as possibly problematic.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +2 -2
- mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +42 -11
- mindspore/_extends/builtin_operations.py +3 -3
- mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
- mindspore/_extends/optimize/cell_utils.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +3 -3
- mindspore/_extends/parse/compile_config.py +44 -22
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -2
- mindspore/_extends/parse/parser.py +65 -84
- mindspore/_extends/parse/resources.py +39 -0
- mindspore/_extends/parse/standard_method.py +58 -14
- mindspore/_extends/parse/trope.py +8 -1
- mindspore/_extends/pijit/__init__.py +1 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +2 -5
- mindspore/amp.py +4 -22
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +4 -4
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +43 -12
- mindspore/common/_grad_function.py +2 -1
- mindspore/common/_pijit_context.py +28 -7
- mindspore/common/_stub_tensor.py +1 -209
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +178 -53
- mindspore/common/_utils.py +9 -1
- mindspore/common/api.py +377 -203
- mindspore/common/dtype.py +108 -57
- mindspore/common/dump.py +11 -16
- mindspore/common/dynamic_shape/__init__.py +0 -0
- mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +17 -23
- mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
- mindspore/common/file_system.py +59 -9
- mindspore/common/generator.py +5 -3
- mindspore/common/hook_handle.py +33 -5
- mindspore/common/jit_config.py +1 -1
- mindspore/common/jit_trace.py +84 -105
- mindspore/common/np_dtype.py +3 -3
- mindspore/common/parameter.py +27 -29
- mindspore/common/recompute.py +5 -7
- mindspore/common/sparse_tensor.py +0 -3
- mindspore/common/symbol.py +0 -1
- mindspore/common/tensor.py +117 -131
- mindspore/communication/_comm_helper.py +46 -4
- mindspore/communication/management.py +79 -7
- mindspore/context.py +67 -55
- mindspore/dataset/__init__.py +1 -1
- mindspore/dataset/audio/transforms.py +1 -1
- mindspore/dataset/core/config.py +38 -4
- mindspore/dataset/engine/datasets.py +350 -322
- mindspore/dataset/engine/datasets_user_defined.py +70 -24
- mindspore/dataset/engine/iterators.py +2 -2
- mindspore/dataset/engine/obs/config_loader.py +2 -2
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
- mindspore/dataset/transforms/c_transforms.py +2 -2
- mindspore/dataset/transforms/py_transforms.py +7 -3
- mindspore/dataset/transforms/transforms.py +10 -6
- mindspore/dataset/vision/__init__.py +1 -1
- mindspore/dataset/vision/py_transforms.py +8 -8
- mindspore/dataset/vision/transforms.py +17 -5
- mindspore/dataset/vision/utils.py +632 -21
- mindspore/dataset/vision/validators.py +1 -0
- mindspore/device_context/ascend/device.py +1 -1
- mindspore/device_context/ascend/op_tuning.py +35 -1
- mindspore/device_context/gpu/__init__.py +2 -2
- mindspore/device_context/gpu/device.py +1 -1
- mindspore/device_context/gpu/op_precision.py +4 -2
- mindspore/device_context/gpu/op_tuning.py +6 -3
- mindspore/device_manager.py +16 -9
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -4
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/optim/adadelta.py +13 -20
- mindspore/experimental/optim/adagrad.py +15 -22
- mindspore/experimental/optim/adam.py +17 -24
- mindspore/experimental/optim/adamax.py +14 -22
- mindspore/experimental/optim/adamw.py +28 -34
- mindspore/experimental/optim/asgd.py +15 -25
- mindspore/experimental/optim/lr_scheduler.py +27 -45
- mindspore/experimental/optim/nadam.py +14 -24
- mindspore/experimental/optim/optimizer.py +13 -23
- mindspore/experimental/optim/radam.py +18 -24
- mindspore/experimental/optim/rmsprop.py +14 -25
- mindspore/experimental/optim/rprop.py +15 -26
- mindspore/experimental/optim/sgd.py +9 -19
- mindspore/hal/__init__.py +4 -4
- mindspore/hal/contiguous_tensors_handle.py +2 -2
- mindspore/hal/memory.py +27 -7
- mindspore/include/api/cell.h +65 -5
- mindspore/include/api/cfg.h +24 -7
- mindspore/include/api/context.h +1 -0
- mindspore/include/api/delegate.h +10 -2
- mindspore/include/api/dual_abi_helper.h +100 -19
- mindspore/include/api/graph.h +14 -1
- mindspore/include/api/kernel.h +16 -3
- mindspore/include/api/kernel_api.h +9 -1
- mindspore/include/api/metrics/accuracy.h +9 -0
- mindspore/include/api/model.h +8 -1
- mindspore/include/api/model_group.h +4 -0
- mindspore/include/api/model_parallel_runner.h +2 -0
- mindspore/include/api/status.h +48 -10
- mindspore/include/api/types.h +8 -3
- mindspore/include/c_api/model_c.h +0 -58
- mindspore/include/c_api/tensor_c.h +0 -26
- mindspore/include/dataset/constants.h +9 -0
- mindspore/include/dataset/vision_ascend.h +1 -1
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/cifar10.py +61 -11
- mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mindspore_ops_host.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +6 -46
- mindspore/mint/distributed/__init__.py +5 -0
- mindspore/mint/distributed/distributed.py +429 -23
- mindspore/mint/nn/__init__.py +1 -1
- mindspore/mint/nn/functional.py +53 -6
- mindspore/mint/nn/layer/_functions.py +163 -294
- mindspore/mint/nn/layer/activation.py +8 -6
- mindspore/mint/nn/layer/conv.py +140 -104
- mindspore/mint/nn/layer/normalization.py +11 -25
- mindspore/mint/optim/adam.py +19 -18
- mindspore/mint/optim/adamw.py +14 -8
- mindspore/mint/optim/sgd.py +5 -5
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/cell.py +491 -623
- mindspore/nn/grad/cell_grad.py +11 -12
- mindspore/nn/layer/activation.py +36 -36
- mindspore/nn/layer/basic.py +74 -77
- mindspore/nn/layer/channel_shuffle.py +4 -4
- mindspore/nn/layer/combined.py +4 -2
- mindspore/nn/layer/conv.py +117 -110
- mindspore/nn/layer/dense.py +9 -7
- mindspore/nn/layer/embedding.py +50 -52
- mindspore/nn/layer/image.py +38 -40
- mindspore/nn/layer/math.py +111 -112
- mindspore/nn/layer/normalization.py +56 -44
- mindspore/nn/layer/pooling.py +58 -63
- mindspore/nn/layer/rnn_cells.py +33 -33
- mindspore/nn/layer/rnns.py +56 -56
- mindspore/nn/layer/thor_layer.py +74 -73
- mindspore/nn/layer/transformer.py +11 -1
- mindspore/nn/learning_rate_schedule.py +20 -20
- mindspore/nn/loss/loss.py +79 -81
- mindspore/nn/optim/adam.py +4 -6
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +2 -0
- mindspore/nn/optim/lamb.py +1 -3
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/tft_wrapper.py +2 -3
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/utils.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -1
- mindspore/nn/probability/distribution/poisson.py +2 -1
- mindspore/nn/sparse/sparse.py +3 -3
- mindspore/nn/wrap/cell_wrapper.py +73 -42
- mindspore/nn/wrap/grad_reducer.py +37 -52
- mindspore/nn/wrap/loss_scale.py +72 -74
- mindspore/numpy/array_creations.py +7 -7
- mindspore/numpy/fft.py +1 -1
- mindspore/numpy/math_ops.py +5 -5
- mindspore/numpy/utils_const.py +1 -1
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
- mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/{experimental/es/__init__.py → ops/_op_impl/cpu/joinedstr_op.py} +12 -6
- mindspore/ops/_vmap/vmap_array_ops.py +31 -13
- mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +54 -13
- mindspore/ops/auto_generate/gen_extend_func.py +27 -145
- mindspore/ops/auto_generate/gen_ops_def.py +1027 -347
- mindspore/ops/auto_generate/gen_ops_prim.py +2341 -1117
- mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
- mindspore/ops/composite/__init__.py +10 -0
- mindspore/ops/composite/base.py +9 -5
- mindspore/ops/composite/multitype_ops/__init__.py +12 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +133 -109
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
- mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
- mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
- mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
- mindspore/ops/function/__init__.py +4 -1
- mindspore/ops/function/_add_attr_func.py +11 -6
- mindspore/ops/function/array_func.py +19 -102
- mindspore/ops/function/debug_func.py +8 -5
- mindspore/ops/function/grad/grad_func.py +5 -13
- mindspore/ops/function/math_func.py +77 -572
- mindspore/ops/function/nn_func.py +46 -94
- mindspore/ops/function/other_func.py +4 -1
- mindspore/ops/function/random_func.py +44 -5
- mindspore/ops/function/vmap_func.py +2 -1
- mindspore/ops/functional.py +4 -4
- mindspore/ops/functional_overload.py +594 -18
- mindspore/ops/op_info_register.py +21 -0
- mindspore/ops/operations/__init__.py +16 -11
- mindspore/ops/operations/_custom_ops_utils.py +689 -34
- mindspore/ops/operations/_inner_ops.py +14 -18
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +5 -51
- mindspore/ops/operations/comm_ops.py +186 -41
- mindspore/ops/operations/custom_ops.py +303 -177
- mindspore/ops/operations/debug_ops.py +59 -4
- mindspore/ops/operations/image_ops.py +13 -13
- mindspore/ops/operations/manually_defined/ops_def.py +27 -28
- mindspore/ops/operations/math_ops.py +8 -9
- mindspore/ops/operations/nn_ops.py +8 -40
- mindspore/ops/primitive.py +9 -20
- mindspore/ops/tensor_method.py +63 -15
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
- mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
- mindspore/ops_generate/api/functions_cc_generator.py +58 -10
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
- mindspore/ops_generate/common/base_generator.py +14 -0
- mindspore/ops_generate/common/gen_constants.py +8 -3
- mindspore/ops_generate/common/gen_utils.py +0 -19
- mindspore/ops_generate/common/op_proto.py +11 -4
- mindspore/ops_generate/common/template.py +88 -11
- mindspore/ops_generate/gen_ops.py +1 -1
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -16
- mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
- mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
- mindspore/parallel/_auto_parallel_context.py +16 -23
- mindspore/parallel/_cell_wrapper.py +113 -45
- mindspore/parallel/_parallel_serialization.py +4 -3
- mindspore/parallel/_ps_context.py +4 -6
- mindspore/parallel/_tensor.py +167 -12
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/transformer.py +17 -12
- mindspore/parallel/_utils.py +5 -11
- mindspore/parallel/auto_parallel.py +35 -14
- mindspore/parallel/checkpoint_convert.py +3 -3
- mindspore/parallel/checkpoint_transform.py +13 -7
- mindspore/parallel/cluster/process_entity/_api.py +88 -49
- mindspore/parallel/cluster/process_entity/_utils.py +95 -7
- mindspore/parallel/cluster/run.py +48 -7
- mindspore/parallel/function/__init__.py +8 -1
- mindspore/parallel/function/reshard_func.py +12 -12
- mindspore/parallel/nn/__init__.py +15 -2
- mindspore/parallel/nn/parallel_cell_wrapper.py +50 -14
- mindspore/parallel/nn/parallel_grad_reducer.py +7 -14
- mindspore/parallel/shard.py +10 -25
- mindspore/parallel/transform_safetensors.py +469 -174
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +12 -6
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
- mindspore/profiler/analysis/task_manager.py +1 -1
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +10 -9
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +43 -23
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
- mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
- mindspore/profiler/common/constant.py +16 -0
- mindspore/profiler/common/msprof_cmd_tool.py +2 -2
- mindspore/profiler/common/path_manager.py +9 -0
- mindspore/profiler/common/profiler_context.py +50 -29
- mindspore/profiler/common/profiler_info.py +0 -16
- mindspore/profiler/common/profiler_meta_data.py +1 -0
- mindspore/profiler/common/profiler_op_analyse.py +239 -0
- mindspore/profiler/common/profiler_output_path.py +23 -8
- mindspore/profiler/common/profiler_parameters.py +128 -35
- mindspore/profiler/dynamic_profile/__init__.py +0 -0
- mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
- mindspore/profiler/dynamic_profiler.py +374 -338
- mindspore/profiler/envprofiler.py +42 -12
- mindspore/profiler/experimental_config.py +112 -7
- mindspore/profiler/mstx.py +33 -12
- mindspore/profiler/platform/__init__.py +2 -3
- mindspore/profiler/platform/cpu_profiler.py +10 -4
- mindspore/profiler/platform/npu_profiler.py +30 -20
- mindspore/profiler/profiler.py +218 -154
- mindspore/profiler/profiler_action_controller.py +65 -77
- mindspore/profiler/profiler_interface.py +2 -2
- mindspore/profiler/schedule.py +10 -4
- mindspore/rewrite/common/config.py +1 -0
- mindspore/rewrite/common/namer.py +1 -0
- mindspore/rewrite/common/namespace.py +1 -0
- mindspore/rewrite/node/node.py +31 -11
- mindspore/rewrite/parsers/assign_parser.py +1 -1
- mindspore/rewrite/symbol_tree/symbol_tree.py +2 -2
- mindspore/run_check/_check_version.py +7 -10
- mindspore/runtime/__init__.py +8 -6
- mindspore/runtime/event.py +10 -4
- mindspore/runtime/executor.py +87 -45
- mindspore/runtime/memory.py +31 -32
- mindspore/runtime/thread_bind_core.py +299 -165
- mindspore/safeguard/rewrite_obfuscation.py +12 -13
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/_utils.py +17 -7
- mindspore/train/amp.py +43 -23
- mindspore/train/callback/__init__.py +5 -5
- mindspore/train/callback/_callback.py +2 -1
- mindspore/train/callback/_checkpoint.py +4 -14
- mindspore/train/callback/_flops_collector.py +11 -7
- mindspore/train/callback/_landscape.py +0 -1
- mindspore/train/callback/_train_fault_tolerance.py +98 -21
- mindspore/train/data_sink.py +15 -6
- mindspore/train/dataset_helper.py +14 -5
- mindspore/train/model.py +133 -69
- mindspore/train/serialization.py +168 -126
- mindspore/train/summary/summary_record.py +13 -2
- mindspore/train/train_thor/model_thor.py +2 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +0 -6
- mindspore/utils/runtime_execution_order_check.py +163 -77
- mindspore/utils/sdc_detect.py +68 -0
- mindspore/utils/utils.py +14 -17
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/METADATA +5 -4
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/RECORD +403 -442
- mindspore/_deprecated/jit.py +0 -198
- mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
- mindspore/communication/_hccl_management.py +0 -297
- mindspore/experimental/es/embedding_service.py +0 -891
- mindspore/experimental/es/embedding_service_layer.py +0 -581
- mindspore/profiler/common/validator/__init__.py +0 -14
- mindspore/profiler/common/validator/validate_path.py +0 -84
- mindspore/profiler/parser/__init__.py +0 -14
- mindspore/profiler/parser/aicpu_data_parser.py +0 -272
- mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
- mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
- mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
- mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
- mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
- mindspore/profiler/parser/ascend_flops_generator.py +0 -116
- mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
- mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
- mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
- mindspore/profiler/parser/ascend_op_generator.py +0 -334
- mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
- mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
- mindspore/profiler/parser/base_timeline_generator.py +0 -483
- mindspore/profiler/parser/container.py +0 -229
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
- mindspore/profiler/parser/flops_parser.py +0 -531
- mindspore/profiler/parser/framework_enum.py +0 -111
- mindspore/profiler/parser/framework_parser.py +0 -464
- mindspore/profiler/parser/framework_struct.py +0 -61
- mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
- mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
- mindspore/profiler/parser/hccl_parser.py +0 -573
- mindspore/profiler/parser/hwts_log_parser.py +0 -122
- mindspore/profiler/parser/integrator.py +0 -526
- mindspore/profiler/parser/memory_usage_parser.py +0 -277
- mindspore/profiler/parser/minddata_analyzer.py +0 -800
- mindspore/profiler/parser/minddata_parser.py +0 -186
- mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
- mindspore/profiler/parser/op_intermediate_parser.py +0 -149
- mindspore/profiler/parser/optime_parser.py +0 -250
- mindspore/profiler/parser/profiler_info.py +0 -213
- mindspore/profiler/parser/step_trace_parser.py +0 -666
- mindspore/utils/hooks.py +0 -81
- /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/WHEEL +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/entry_points.txt +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/top_level.txt +0 -0
@@ -16,14 +16,23 @@
 from mindspore._c_expression import _add_instance
 from mindspore._c_expression import _addcdiv_instance
 from mindspore._c_expression import _all_gather_matmul_instance
+from mindspore._c_expression import _any_instance
+from mindspore._c_expression import _bernoulli__instance
 from mindspore._c_expression import _bitwise_not_instance
 from mindspore._c_expression import _clamp_instance
+from mindspore._c_expression import _conv3d_instance
 from mindspore._c_expression import _div_instance
+from mindspore._c_expression import _einsum_instance
 from mindspore._c_expression import _empty_instance
+from mindspore._c_expression import _empty_like_instance
 from mindspore._c_expression import _floor_divide_instance
 from mindspore._c_expression import _fmod_instance
 from mindspore._c_expression import _gelu_instance
+from mindspore._c_expression import _gmm_instance
+from mindspore._c_expression import _gmm_backward_instance
+from mindspore._c_expression import _gmm_backward_fusion_instance
 from mindspore._c_expression import _greater_equal_instance
+from mindspore._c_expression import _index_add_instance
 from mindspore._c_expression import _kthvalue_instance
 from mindspore._c_expression import _lerp_instance
 from mindspore._c_expression import _matmul_reduce_scatter_instance
@@ -31,8 +40,10 @@ from mindspore._c_expression import _max_instance
 from mindspore._c_expression import _min_instance
 from mindspore._c_expression import _nansum_instance
 from mindspore._c_expression import _pixel_shuffle_instance
+from mindspore._c_expression import _quant_matmul_instance
 from mindspore._c_expression import _remainder_instance
 from mindspore._c_expression import _repeat_interleave_instance
+from mindspore._c_expression import _rmod_instance
 from mindspore._c_expression import _sub_instance
 from mindspore._c_expression import _where_instance
 from mindspore._c_expression import _xlogy_instance
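
Each `_*_instance` imported above is a pyboost callable exposed by the C++ layer, and every public function added in this diff wraps one of them with a one-line delegation. A minimal sketch of that pattern, shown once so the repeated hunks below are easier to read (the `functional_overload.any` example further down, together with the file list entry `mindspore/ops/functional_overload.py +594 -18`, suggests these hunks come from that module; this is an inference, not stated in the diff):

    # Pattern used throughout the module: a thin Python wrapper carries the
    # docstring, while dispatch and overload resolution happen in the C++ instance.
    from mindspore._c_expression import _any_instance

    def any(*args, **kwargs):  # intentionally shadows the builtin, as in the source
        return _any_instance(*args, **kwargs)
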
@@ -56,10 +67,10 @@ def add(*args, **kwargs):
     Args:
         input (Union[Tensor, number.Number, bool]): `input` is a number.Number or a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, number.Number, bool]): `other` is a number.Number or a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
 
     Keyword Args:
         alpha (number.Number, optional): A scaling factor applied to `other`, default ``1``.
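
The restored `bool` links above complete the `add` docstring; the `alpha` keyword it documents scales `other` before the addition, i.e. output = input + alpha * other. A small usage sketch (the `functional_overload.add` entry point is inferred from this file's own examples):

    import mindspore
    from mindspore.ops.functional_overload import add  # inferred entry point

    x = mindspore.tensor([1.0, 2.0, 3.0])
    y = mindspore.tensor([10.0, 10.0, 10.0])
    # alpha scales `other`: output = input + alpha * other
    print(add(x, y, alpha=0.1))  # expected [2. 3. 4.]
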
@@ -261,6 +272,61 @@ def all_gather_matmul(*args, **kwargs):
     return _all_gather_matmul_instance(*args, **kwargs)
 
 
+def any(*args, **kwargs):
+    r"""
+    any(input) -> Tensor
+
+    Check if ``True`` is present in `input` .
+
+    Args:
+        input (Tensor): The input tensor.
+
+    Returns:
+        Tensor
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> input = mindspore.tensor([[True, False], [True, True]])
+        >>> mindspore.ops.functional_overload.any(input)
+        Tensor(shape=[], dtype=Bool, value= True)
+
+    .. function:: any(input, dim, keepdim=False) -> Tensor
+        :noindex:
+
+    Check if ``True`` is present in the specified dimension of `input` .
+
+    Args:
+        input (Tensor): The input tensor.
+        dim (int): The dimensions to reduce.
+        keepdim (bool, optional): Whether the output tensor has dim retained or not. Default ``False`` .
+
+    Returns:
+        Tensor
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> input = mindspore.tensor([[True, False], [True, True]])
+        >>> mindspore.ops.functional_overload.any(input, dim=1)
+        Tensor(shape=[2], dtype=Bool, value= [ True, True])
+    """
+    return _any_instance(*args, **kwargs)
+
+
+def bernoulli_(*args, **kwargs):
+    r"""
+    bernoulli_(input, p, seed, offset) -> Tensor
+
+    Inner function, used for Tensor.bernoulli_.
+    """
+    return _bernoulli__instance(*args, **kwargs)
+
+
 def bitwise_not(*args, **kwargs):
     r"""
     bitwise_not(input) -> Tensor
@@ -373,6 +439,143 @@ def clip(*args, **kwargs):
     return _clamp_instance(*args, **kwargs)
 
 
+def conv3d(*args, **kwargs):
+    r"""
+    conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
+
+    Applies a 3D convolution over an input tensor. The input tensor is typically of
+    shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` or :math:`(C_{in}, D_{in}, H_{in}, W_{in})`,
+    where :math:`N` is batch size, :math:`C` is channel number, :math:`D, H, W` are the depth,
+    height and width of the feature graph, respectively.
+
+    The output is calculated based on formula:
+
+    .. math::
+
+        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
+        \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
+
+    where :math:`bias` is the output channel bias, :math:`ccor` is
+    the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_
+    , :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
+
+    Here are the indices' meanings:
+
+    - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
+      where :math:`N` is the batch size of the input.
+
+    - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
+      where :math:`C_{out}` is the number of
+      output channels, which is also equal to the number of kernels.
+
+    - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
+      where :math:`C_{in}` is the number of
+      input channels, which is also equal to the number of channels in the convolutional kernels.
+
+    Therefore, in the above formula, :math:`{bias}(C_{\text{out}_j})` represents the bias of the :math:`j`-th
+    output channel, :math:`{weight}(C_{\text{out}_j}, k)` represents the slice of the :math:`j`-th convolutional
+    kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+    channel in the :math:`i`-th batch of the input feature map.
+
+    The shape of the convolutional kernel is given by :math:`(kd, kh, kw)` where :math:`kd` , :math:`kh` and
+    :math:`kw` are the depth, height and width of the kernel, respectively.
+    If we consider the input and output channels as well as the `group` parameter, the complete kernel shape
+    will be :math:`(C_{out}, C_{in} / \text{group}, kd, kh, kw)`,
+    where `group` is the number of groups dividing `x`'s input channel when applying group convolution.
+
+    For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
+    <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
+
+    The following lists some of the limitations of the parameters.
+
+    - input -- The input to the conv3d. The input must have each dimension size within the range [1, int32_max].
+    - weight -- Filters of shape :math:`(C_{out}, C_{in} / groups, kd, kh, kw)`. The value of :math:`kh`
+      and :math:`kw` is in the range [1, 511]. The remaining values are in the range [1, int32_max].
+      And :math:`kh*kw*k0` is less than 65536 (k0 is 16; if data type is float32, k0 is 8).
+    - bias -- Bias Tensor with shape :math:`(C_{out})`. The shape must be equal to the first dimension of the weight.
+    - stride -- The distance of kernel moving. It can be an int number or
+      tuple (noted by :math:`(stride_d, stride_h, stride_w)`). stride_h and stride_w are in the range [1, 63].
+      stride_d is in the range [1, 255].
+    - padding -- If padding is an int number, it is in the range [0, 255].
+    - dilation -- The value is in the range [1, 255].
+    - groups -- The value is in the range [1, 65535].
+    - :math:`C_{in} \% \text{groups} == 0 \quad \text{and} \quad C_{out} \% \text{groups} == 0` .
+    - :math:`weight[1] == C_{in} / groups` .
+    - :math:`H_{in} + PadUp + PadDown >= (kh - 1) * DilationH + 1` .
+    - :math:`W_{in} + PadLeft + PadRight >= (kw - 1) * DilationW + 1` .
+    - :math:`D_{in} + PadFront + PadBack >= (kd - 1) * DilationD + 1` .
+    - :math:`H_{out} = (H_{in} + PadUp + PadDown - ((kh - 1) * DilationH + 1)) / StrideH + 1` .
+    - :math:`W_{out} = (W_{in} + PadLeft + PadRight - ((kw - 1) * DilationW + 1)) / StrideW + 1` .
+    - :math:`D_{out} = (D_{in} + PadFront + PadBack - ((kd - 1) * DilationD + 1)) / StrideD + 1` .
+    - :math:`(D_{in}+PadFront+PadBack - ((kd-1)*DilationD+1)) \% StrideD <= PadBack` .
+    - :math:`(H_{in}+PadUp+PadDown - ((kh-1)*Dilationh+1)) \% StrideH <= PadDown` .
+    - :math:`stride_d <= kernel_d` .
+    - :math:`PadUp < kh` and :math:`PadDown < kh` . When `padding` = ``'valid'``, both PadUp and PadDown are zeros.
+      When `padding` = ``'same'``, pad can be calculated by
+      :math:`floor(((H_{out}-1) * strideH + (kh - 1) * DilationH + 1 - H_{in}) / 2)` for the height dimension.
+      It is a similar way to calculate the padding for the depth and width dimensions. And the depth and width
+      dimensions also have the same constraints.
+    - :math:`((kh - 1) * DilationH - PadUp)` should be in [0, 255]. It is the same constraint for the depth
+      and width dimensions.
+    - If `padding` is ``'same'``, `stride` must be 1.
+
+    .. warning::
+        It is only supported on Atlas A2 Training Series Products.
+
+    Args:
+        input (Tensor): Tensor of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`.
+        weight (Tensor): Set size of kernel is :math:`(kd, kh,
+            kw)`, then the shape is :math:`(C_{out}, C_{in} / groups, kd, kh, kw)`.
+        bias (Tensor, optional): Bias Tensor with shape :math:`(C_{out})`.
+            When bias is ``None`` , zeros will be used. Default: ``None`` .
+        stride (Union(int, tuple[int], list[int]), optional): The distance of kernel moving, an int
+            number that represents the depth, the height and width of movement are both strides, or a
+            tuple of triple int numbers that
+            represent the depth, height and width of movement respectively. Default: ``1`` .
+        padding (Union(int, tuple[int], list[int], str), optional): Implicit paddings on both sides of the input `x`.
+            Can be a string, one integer or a tuple/list with 3 integers.
+            If `padding` is a string, the optional values are ``"same"`` , ``"valid"``.
+
+            - same: Adopts the way of completion. The height and width of the output will be equal to
+              the input `x` divided by stride. The padding will be evenly calculated in top and bottom,
+              left and right possibly. Otherwise, the last extra padding will be calculated from the bottom
+              and the right side. If this mode is set, `stride` must be 1.
+
+            - valid: Adopts the way of discarding. The possible largest height and width of output will be returned
+              without padding. Extra pixels will be discarded.
+
+            If `padding` is one integer, the paddings of top, bottom, left and right are the same, equal to padding.
+            If `padding` is a tuple/list with 3 integers, the padding of head, tail, top, bottom,
+            left and right equal to pad[0], pad[0], pad[1], pad[1], pad[2] and pad[2] correspondingly. Default: ``0`` .
+        dilation (Union[int, tuple[int], list[int]], optional): Controlling the space between the kernel points.
+            Default: ``1`` .
+        groups (int, optional): Splits `input` into groups. Default: ``1`` .
+
+    Returns:
+        Tensor, the same dtype as the `input`, with the shape :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
+        or :math:`(C_{out}, D_{out}, H_{out}, W_{out})`.
+
+    Raises:
+        TypeError: If `stride`, `padding` or `dilation` is neither an int nor a tuple.
+        TypeError: If `groups` is not an int.
+        TypeError: If `bias` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import mint
+        >>> x = mindspore.Tensor(np.random.randn(12, 1, 60, 50, 8), mindspore.float16)
+        >>> w = mindspore.Tensor(np.random.randn(26, 1, 2, 4, 4), mindspore.float16)
+        >>> out = mint.nn.functional.conv3d(x, w)
+        >>> print(out.shape)
+        (12, 26, 59, 47, 5)
+    """
+    return _conv3d_instance(*args, **kwargs)
+
+
 def div(*args, **kwargs):
     r"""
     div(input, other, *, rounding_mode=None) -> Tensor
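
As a sanity check, the output-shape formulas in the docstring above reproduce the example's output shape; a sketch with a hypothetical helper, stride, padding and dilation left at their defaults:

    def conv3d_out_dim(size_in, k, stride=1, pad=0, dilation=1):
        # Per spatial dim: (in + 2*pad - ((k - 1) * dilation + 1)) // stride + 1
        return (size_in + 2 * pad - ((k - 1) * dilation + 1)) // stride + 1

    # Docstring example: input (12, 1, 60, 50, 8), weight (26, 1, 2, 4, 4)
    d_out = conv3d_out_dim(60, 2)  # 59
    h_out = conv3d_out_dim(50, 4)  # 47
    w_out = conv3d_out_dim(8, 4)   # 5
    print((12, 26, d_out, h_out, w_out))  # (12, 26, 59, 47, 5), matching the example
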
@@ -386,7 +589,7 @@ def div(*args, **kwargs):
     .. note::
         - When the two inputs have different shapes, they must be able to broadcast to a common shape.
         - The two inputs can not be bool type at the same time,
-          [True, Tensor(True
+          [True, Tensor(True), Tensor(np.array([True]))] are all considered bool type.
         - The two inputs comply with the implicit type conversion rules to make the data types
           consistent.
 
@@ -438,9 +641,106 @@ def divide(*args, **kwargs):
     return _div_instance(*args, **kwargs)
 
 
+def einsum(*args, **kwargs):
+    r"""
+    According to the Einstein summation Convention (Einsum),
+    the product of the input tensor elements is summed along the specified dimension.
+    You can use this operator to perform diagonal, reducesum, transpose, matmul, mul, inner product operations, etc.
+
+    Note:
+        The sublist format is also supported. For example, einsum_ext(op1, sublist1, op2, sublist2, ..., sublist_out).
+        In this format, equation can be derived by the sublists which are made up of Python's Ellipsis and list of
+        integers in [0, 52). Each operand is followed by a sublist and an output sublist is at the end.
+        Dynamic shape, dynamic rank input is not supported in `graph mode (mode=mindspore.GRAPH_MODE)
+        <https://www.mindspore.cn/tutorials/en/master/compile/static_graph.html>`_.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        equation (str): Notation based on the Einstein summation convention, represent the operation you want to do.
+            The value can contain only letters, commas, ellipsis and arrow. The letters(must be in [a-zA-Z]) represent
+            input tensor dimension, commas(,) represent separate tensors, ellipsis indicates the tensor dimension that
+            you do not care about, the left of the arrow indicates the input tensors, and the right of it indicates the
+            desired output dimension. If there are no arrows in the equation, the letters that appear exactly once in
+            the equation will be part of the output, sorted in increasing alphabetical order. The output is computed by
+            multiplying the input operands element-wise, with their dimensions aligned based on the letters, and then
+            summing out the dimensions whose letters are not part of the output. If there is one arrow in the equation,
+            the output letters must appear at least once for some input operand and at most once for the output.
+        operands (Tensor): Input tensor used for calculation. The dtype of the tensor must be the same.
+
+    Returns:
+        Tensor, the shape of it can be obtained from the `equation` , and the dtype is the same as input tensors.
+
+    Raises:
+        TypeError: If `equation` is invalid, or the `equation` does not match the input tensor.
+        ValueError: If the number in sublist is not in [0, 52) in sublist format.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
+        >>> equation = "i->"
+        >>> output = ops.einsum_ext(equation, x)
+        >>> print(output)
+        7.0
+        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
+        >>> y = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32)
+        >>> equation = "i,i->i"
+        >>> output = ops.einsum_ext(equation, x, y)
+        >>> print(output)
+        [ 2.  8. 12.]
+        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
+        >>> y = Tensor(np.array([[2.0, 3.0], [1.0, 2.0], [4.0, 5.0]]), mindspore.float32)
+        >>> equation = "ij,jk->ik"
+        >>> output = ops.einsum_ext(equation, x, y)
+        >>> print(output)
+        [[16. 22.]
+         [37. 52.]]
+        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
+        >>> equation = "ij->ji"
+        >>> output = ops.einsum_ext(equation, x)
+        >>> print(output)
+        [[1. 4.]
+         [2. 5.]
+         [3. 6.]]
+        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
+        >>> equation = "ij->j"
+        >>> output = ops.einsum_ext(equation, x)
+        >>> print(output)
+        [5. 7. 9.]
+        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
+        >>> equation = "...->"
+        >>> output = ops.einsum_ext(equation, x)
+        >>> print(output)
+        21.0
+        >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
+        >>> y = Tensor(np.array([2.0, 4.0, 1.0]), mindspore.float32)
+        >>> equation = "j,i->ji"
+        >>> output = ops.einsum_ext(equation, x, y)
+        >>> print(output)
+        [[ 2.  4.  1.]
+         [ 4.  8.  2.]
+         [ 6. 12.  3.]]
+        >>> x = mindspore.Tensor([1, 2, 3, 4], mindspore.float32)
+        >>> y = mindspore.Tensor([1, 2], mindspore.float32)
+        >>> output = ops.einsum_ext(x, [..., 1], y, [..., 2], [..., 1, 2])
+        >>> print(output)
+        [[1. 2.]
+         [2. 4.]
+         [3. 6.]
+         [4. 8.]]
+    """
+    return _einsum_instance(*args, **kwargs)
+
+
 def empty(*args, **kwargs):
     r"""
-    empty(*size, dtype=None, device=None) -> Tensor
+    empty(*size, *, dtype=None, device=None) -> Tensor
 
     Creates a tensor with uninitialized data, whose shape, dtype and device are described by the argument `size`,
     `dtype` and `device` respectively.
@@ -450,22 +750,23 @@
 
     Args:
         size (Union[tuple[int], list[int], int]): The specified shape of output tensor. Can be variable numbers of
-            positive integers or
+            positive integers or tuple or list containing positive integers.
 
     Keyword Args:
         dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor. If `dtype` is ``None`` ,
            `mindspore.float32` will be used. Default: ``None`` .
-        device (string, optional): The specified device of the output tensor.
-
+        device (string, optional): The specified device of the output tensor. In PyNative mode, ``"Ascend"``, ``"npu"``,
+            ``"cpu"`` and ``"CPU"`` are supported. In graph mode O0, ``"Ascend"`` and ``"npu"`` are supported. If `device = None`,
+            `mindspore.context.device_target` will be used. Default ``None``.
 
     Returns:
-        Tensor, whose dtype and
+        Tensor, whose shape, dtype and device are defined by input.
 
     Raises:
        TypeError: If `size` is neither an int nor a tuple or list of int.
 
     Supported Platforms:
-        ``Ascend``
+        ``Ascend`` ``CPU``
 
     Examples:
         >>> import mindspore
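
The newly documented `device` keyword can be exercised as below; a sketch only, since the hunk truncates the updated example after the import (the `functional_overload.empty` entry point is inferred from this file's examples):

    import mindspore
    from mindspore.ops.functional_overload import empty  # inferred entry point

    # dtype falls back to mindspore.float32 when None; `device` now accepts
    # "CPU" in addition to "Ascend" (PyNative mode).
    out = empty(2, 3, dtype=mindspore.float32, device="CPU")
    print(out.shape)  # (2, 3); the contents are uninitialized
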
@@ -478,8 +779,55 @@ def empty(*args, **kwargs):
     return _empty_instance(*args, **kwargs)
 
 
+def empty_like(*args, **kwargs):
+    r"""
+    empty_like(input, *, dtype=None, device=None) -> Tensor
+
+    Returns an uninitialized Tensor with the same shape as the `input`. Its dtype is specified by `dtype` and its
+    device is specified by `device`.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): Tensor of any dimension.
+
+    Keyword Args:
+        dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype = None`, the
+            tensor will have the same dtype as input `input`. Default ``None``.
+        device (string, optional): The specified device of the output tensor. In PyNative mode, ``"Ascend"``, ``"npu"``,
+            ``"cpu"`` and ``"CPU"`` are supported. In graph mode O0, ``"Ascend"`` and ``"npu"`` are supported. If `device = None`,
+            the value set by :func:`mindspore.set_device` will be used. Default ``None``.
+
+    Returns:
+        Tensor, has the same shape, type and device as `input` but with uninitialized data (May be a random value).
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import ops, Tensor
+        >>> x = Tensor([[1, 2, 3], [4, 5, 6]])
+        >>> output1 = ops.empty_like(x)
+        >>> print(output1)
+        [[0 0 0]
+         [0 0 0]]
+        >>> output2 = ops.empty_like(x, dtype=mindspore.float64)
+        >>> print(output2)
+        [[0. 0. 0.]
+         [0. 0. 0.]]
+    """
+    return _empty_like_instance(*args, **kwargs)
+
+
 def floor_divide(*args, **kwargs):
     r"""
+    floor_divide(input, other) -> Tensor
+
     Divides the first input tensor by the second input tensor element-wise and round down to the closest integer.
 
     Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
@@ -641,6 +989,108 @@ def gelu(*args, **kwargs):
     return _gelu_instance(*args, **kwargs)
 
 
+def gmm(*args, **kwargs):
+    r"""
+    gmm(x, weight, bias=None, group_list=None, group_type=0, group_list_type=0) -> tuple[Tensor]
+
+    Grouping matrix multiplication.
+
+    .. warning::
+        - This is an experimental API that is subject to change or deletion.
+        - `group_type` must be a constant.
+        - Only support on Atlas A2 training series.
+        - When the type of `group_list` is tuple[int] or list[int], it should be a non-negative non-decreasing sequence,
+          indicating indexes of each group along the split axis. In this scenario, the arg `group_list_type` is useless.
+
+    .. note::
+        - When `group_type` is 2, the tensors in `x` must be non-continuous tensors which have
+          been transposed.
+        - Only when `group_type` is 0 and `bias` is None, the reverse derivative is supported,
+          which is implemented by ops.function.math_func.gmm_backward or through automatic differentiation.
+
+    Args:
+        x (tuple[Tensor]): The first tensors to be multiplied, whose num should be 1.
+        weight (tuple[Tensor]): The second tensors to be multiplied, whose num should be 1.
+        bias (tuple[Tensor], optional): Biases added to outputs, whose num should be 1.
+            The shape of each tensor in `bias` should be :math: `(group_list.shape[0], n)`
+            or :math: `(len(group_list), n)`. In the training scenario, the bias only supports None.
+            Default: ``None`` .
+        group_list (Union[Tensor, list[int], tuple[int]], optional): 1-D Tensor, list[int]
+            or tuple[int], indicating indexes or sizes of each group along the split axis.
+            When `group_list` is list[int] or tuple[int], its length should be less than or equal to 128.
+            When `group_list` is a Tensor, its size should be less than or equal to 1024.
+            Supported dtypes: int64.
+            Default: ``None`` .
+
+            - If `group_list_type` is 0, it must be a non-negative non-decreasing sequence.
+              And when `group_type` is 0, the last element in `group_list` should be equal to
+              the first dimension of the tensor in `x` . When `group_type` is 2, the last element
+              in `group_list` should be equal to the second dimension of the tensor in `x` .
+
+            - If `group_list_type` is 1, the values in `group_list` are the sizes of each group.
+        group_type (int, optional): Represents the axes that need to be grouped. For example,
+            :math: `C[m,n] = A[m,k] \times B[k,n]`. Default: ``0`` .
+
+            - If `group_type` is 0, it means that the m-axis is grouped, meaning that the shape
+              of each tensor in `x` should be :math: `(m, k)` , the shape of each tensor in `weight`
+              should be :math: `(group_list.shape[0], k, n)` or :math: `(len(group_list), k, n)`,
+              and the shape of each tensor in result would be :math: `(m, n)` .
+
+            - If `group_type` is 2, it means that the k-axis is grouped, meaning that
+              the shape of each tensor in `x` should be :math: `(m, k)`, the shape of each
+              tensor in `weight` should be :math: `(k, n)`, and the shape of each tensor
+              in result would be :math: `(group_list.shape[0], m, n)` or :math: `(len(group_list), m, n)`.
+        group_list_type (int, optional): If it's 0, the values in `group_list` are the cumsum
+            result of the sizes of each group. If it's 1, the values in `group_list` are the sizes
+            of each group. Default: ``0`` .
+
+    `x` , `weight` and `bias` only support the following 3 type combinations:
+
+    - x: float16, weight: float16, bias: float16
+    - x: bfloat16, weight: bfloat16, bias: float32
+    - x: float32, weight: float32, bias: float32
+
+    Returns:
+        tuple[Tensor], the results of grouping matrix multiplication.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor(np.random.uniform(0,1, (10, 20)).astype(np.float32))
+        >>> weight = Tensor(np.random.uniform(0,1, (4, 20, 8)).astype(np.float32))
+        >>> group_list = Tensor([2, 4, 2, 2])
+        >>> y = ops.function.math_func.gmm([x,], [weight,], group_list=group_list, group_list_type=1)
+        >>> print(y[0].shape)
+        (10, 8)
+        >>> group_list = [2, 6, 8, 10]
+        >>> y = ops.function.math_func.gmm([x,], [weight,], group_list=group_list, group_list_type=0)
+        >>> print(y[0].shape)
+        (10, 8)
+    """
+    return _gmm_instance(*args, **kwargs)
+
+
+def gmm_backward(*args, **kwargs):
+    r"""
+    gmm_backward(grad, x, weight, *, group_list=None, group_list_type=0) -> tuple[tuple[Tensor]]
+
+    The grad of ops.function.math_func.gmm.
+    """
+    return _gmm_backward_instance(*args, **kwargs)
+
+
+def gmm_backward_fusion(*args, **kwargs):
+    r"""
+    gmm_backward_fusion(grad, weight, *, group_list=None, group_list_type=0) -> tuple[tuple[Tensor]]
+
+    The grad of ops.function.math_func.gmm, only dx.
+    """
+    return _gmm_backward_fusion_instance(*args, **kwargs)
+
+
 def greater_equal(*args, **kwargs):
     r"""
     greater_equal(input, other) -> Tensor
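
The two `group_list` encodings accepted by `gmm` are interconvertible: `group_list_type=1` passes per-group sizes, while `group_list_type=0` passes their cumulative sums. A sketch matching the docstring's pair of equivalent calls:

    import numpy as np

    sizes = np.array([2, 4, 2, 2])   # group_list_type=1: sizes of each group
    bounds = np.cumsum(sizes)        # group_list_type=0: cumsum boundaries
    assert bounds.tolist() == [2, 6, 8, 10]
    # With group_type=0 the last boundary equals x.shape[0] (10 in the example).
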
@@ -667,10 +1117,10 @@ def greater_equal(*args, **kwargs):
 
     Args:
         input (Union[Tensor, Number]): The first input is a number
-            or a tensor whose data type is `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_ or `
+            or a tensor whose data type is `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_ or `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_.
         other (Union[Tensor, Number]): Second input. When the first input is a Tensor, the second input should be a Number,
-            or a Tensor of the number or
-            the second input must be a Tensor of number or
+            or a Tensor of the number or bool data type. When the first input is a Scalar,
+            the second input must be a Tensor of number or bool data type.
 
     Returns:
         Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
@@ -707,6 +1157,60 @@ def ge(*args, **kwargs):
|
|
|
707
1157
|
return _greater_equal_instance(*args, **kwargs)
|
|
708
1158
|
|
|
709
1159
|
|
|
+def index_add(*args, **kwargs):
+    r"""
+    index_add(input, dim, index, source, *, alpha=1) -> Tensor
+
+    Accumulate the elements of `alpha` times `source` into the `input` by adding to the index in the order given in `index`. For example, if ``dim == 0`` , ``index[i] == j`` , and ``alpha = -1`` , then the `i` th row of `source` is subtracted from the `j` th row of `input` . The `dim` th dimension of `source` must have the same size as the length of `index` , and all other dimensions must match `input`, or an error will be raised. For a 3-D tensor, the output is defined as follows:
+
+    .. math::
+        \begin{array}{ll}
+            input[index[i],\ :,\ :]\ +=\ alpha * source[i,\ :,\ :] \qquad \#if\ dim == 0 \\
+            input[:,\ \ index[i],\ :]\ +=\ alpha * source[:,\ \ i,\ :] \qquad \#if\ dim == 1 \\
+            input[:,\ :,\ \ index[i]]\ +=\ alpha * source[:,\ :,\ \ i] \qquad \#if\ dim == 2 \\
+        \end{array}
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The input Tensor.
+        dim (int): The dimension along which to index.
+        index (Tensor): Add the values of `input` and `source` along the `dim` dimension according to the specified index values, with data type int32. The `index` must be 1-D with the same size as the size of `source` in the `dim` dimension. The values of `index` should be in [0, b), where b is the size of `input` in the `dim` dimension.
+        source (Tensor): The input tensor with the values to add. Must have the same data type as `input`. The shape must be the same as `input` except in the `dim` th dimension.
+
+    Keyword Args:
+        alpha (number, optional): The scalar multiplier for `source`. Default: ``1``.
+
+    Returns:
+        Tensor, has the same shape and dtype as `input`.
+
+    Raises:
+        TypeError: If `index` or `source` is not a Tensor.
+        ValueError: If the value of `dim` is out of the dimension range of the `source` shape.
+        ValueError: If the rank of `index` is not the same as the rank of `source`.
+        ValueError: If `index` is not 1-D or the size of `index` is not equal to the size of `source` in the `dim` dimension.
+        ValueError: If the shape of `source` is not the same as that of `input` except in the `dim` axis.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
+        >>> index = Tensor(np.array([0, 2]), mindspore.int32)
+        >>> y = Tensor(np.array([[0.5, 1.0], [1.0, 1.5], [2.0, 2.5]]), mindspore.float32)
+        >>> output = mint.index_add(x, 1, index, y, alpha=1)
+        >>> print(output)
+        [[ 1.5  2.   4. ]
+         [ 5.   5.   7.5]
+         [ 9.   8.  11.5]]
+    """
+    return _index_add_instance(*args, **kwargs)
+
+
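The `.. math::` block in `index_add` above reduces, for the ``dim == 1`` case used in its example, to a plain accumulation loop; a NumPy reference sketch (not MindSpore code) that reproduces the printed output:

    >>> import numpy as np
    >>> x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
    >>> index = np.array([0, 2])
    >>> y = np.array([[0.5, 1.0], [1.0, 1.5], [2.0, 2.5]], dtype=np.float32)
    >>> for i, j in enumerate(index):   # x[:, index[i]] += alpha * y[:, i] with alpha = 1
    ...     x[:, j] += y[:, i]
    >>> print(x)
    [[ 1.5  2.   4. ]
     [ 5.   5.   7.5]
     [ 9.   8.  11.5]]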
 def kthvalue(*args, **kwargs):
     r"""
     Calculates the kth smallest value along the dimension specified by `dim` of the input
@@ -1151,6 +1655,60 @@ def pixel_shuffle(*args, **kwargs):
     return _pixel_shuffle_instance(*args, **kwargs)
 
 
+def quant_matmul(*args, **kwargs):
+    r"""
+    quant_matmul(x1, x2, scale, *, offset=None, pertoken_scale=None, bias=None, output_dtype=None, x1_dtype=None, x2_dtype=None, pertoken_scale_dtype=None, scale_dtype=None, group_sizes=None) -> Tensor
+
+    Used for quantized matrix multiplication.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+        Only supported on the David training series.
+
+    Args:
+        x1 (Tensor): Tensor of shape :math:`(*, M, K)` . The dimension of `x1` should be in [2, 6].
+        x2 (Tensor): Tensor of shape :math:`(*, K, N)` . The dimension of `x2` should be in [2, 6].
+        scale (Tensor): Tensor of shape :math:`(T,)` . T should be equal to 1 or N, where N is the last dimension of `x2`.
+
+    Keyword Args:
+        offset (Tensor, optional): Tensor of shape :math:`(T,)` . T should be equal to 1 or N, where N is the last dimension of `x2`. Default: ``None`` .
+        pertoken_scale (Tensor, optional): Tensor of shape :math:`(M,)` . M is the second-to-last dimension of `x1`. Default: ``None`` .
+            A valid Tensor must be passed to `pertoken_scale` ; ``None`` will cause an unexpected error.
+        bias (Tensor, optional): Tensor of shape :math:`(N,)` or :math:`(B, 1, N)` , where N is the last dimension of `x2`.
+            If the dimension of `output` is 2, 4, 5 or 6, `bias` must have shape :math:`(N,)` . Default: ``None`` .
+        output_dtype (:class:`mindspore.dtype`, optional): The dtype of `output`. Default: ``None`` .
+        x1_dtype (:class:`mindspore.dtype`, optional): Cast `x1` to `x1_dtype` before calculation. Default: ``None`` .
+        x2_dtype (:class:`mindspore.dtype`, optional): Cast `x2` to `x2_dtype` before calculation. Default: ``None`` .
+        pertoken_scale_dtype (:class:`mindspore.dtype`, optional): Cast `pertoken_scale` to `pertoken_scale_dtype` before calculation. Default: ``None`` .
+        scale_dtype (:class:`mindspore.dtype`, optional): Cast `scale` to `scale_dtype` before calculation. Default: ``None`` .
+        group_sizes (Union[tuple(int), list(int)], optional): A sequence of int elements. Must have 3 elements. Default: ``None`` .
+
+    Returns:
+        Tensor of shape :math:`(*, M, N)` .
+
+    Raises:
+        ValueError: If dtype of `x1` is int8 or int32.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore as ms
+        >>> from mindspore import ops, Tensor
+        >>> x1 = Tensor(np.random.randn(2, 3, 4), ms.float8_e4m3)
+        >>> x2 = Tensor(np.random.randn(2, 4, 5), ms.float8_e4m3)
+        >>> scale = Tensor(np.random.randn(1,), ms.float32)
+        >>> pertoken_scale = Tensor(np.random.randn(3,), ms.float32)
+        >>> output = ops.auto_generate.quant_matmul(x1, x2, scale, pertoken_scale=pertoken_scale, output_dtype=ms.bfloat16)
+        >>> print(output.shape)
+        (2, 3, 5)
+        >>> print(output.dtype)
+        BFloat16
+    """
+    return _quant_matmul_instance(*args, **kwargs)
+
+
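A common mental model for the per-token quantized matmul above: the low-precision product is rescaled per output channel by `scale` and per row by `pertoken_scale`. A NumPy sketch of that rescaling, under the assumption that it matches the kernel's semantics (float32 stands in for the fp8 inputs):

    >>> import numpy as np
    >>> x1 = np.random.randn(3, 4).astype(np.float32)
    >>> x2 = np.random.randn(4, 5).astype(np.float32)
    >>> scale = np.random.randn(1).astype(np.float32)           # per-channel (here broadcast) scale
    >>> pertoken_scale = np.random.randn(3).astype(np.float32)  # one scale per row of x1
    >>> y = (x1 @ x2) * scale[None, :] * pertoken_scale[:, None]
    >>> print(y.shape)
    (3, 5)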
 def remainder(*args, **kwargs):
     r"""
     remainder(input, other) -> Tensor
@@ -1171,10 +1729,10 @@ def remainder(*args, **kwargs):
         input (Union[Tensor, numbers.Number, bool]): The dividend is a numbers.Number or
             a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, numbers.Number, bool]): The divisor is a numbers.Number or
-            a bool or a tensor whose data type is number or bool
-            When the dividend is Scalar, the divisor must be a Tensor whose data type is number or bool
+            a bool or a tensor whose data type is number or bool when the dividend is a tensor.
+            When the dividend is Scalar, the divisor must be a Tensor whose data type is number or bool.
 
     Returns:
         Tensor, with dtype promoted and shape broadcasted.
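A minimal sketch of the promoted-dtype, broadcast remainder documented above (the result takes the sign of the divisor, as with Python's `%`):

    >>> import mindspore
    >>> from mindspore import Tensor, ops
    >>> x = Tensor([-4.0, 5.0, 6.0], mindspore.float32)
    >>> print(ops.remainder(x, 3))
    [2. 2. 0.]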
@@ -1242,6 +1800,13 @@ def repeat_interleave(*args, **kwargs):
     return _repeat_interleave_instance(*args, **kwargs)
 
 
+def rmod(*args, **kwargs):
+    r"""
+    rmod(input, other) -> Tensor
+    """
+    return _rmod_instance(*args, **kwargs)
+
+
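The new `rmod` above ships with a bare signature. Assuming it is the reflected counterpart of `remainder` (i.e. it computes `other % input`, backing `Tensor.__rmod__`), the behaviour can be reached through the operator rather than the wrapper:

    >>> import mindspore
    >>> from mindspore import Tensor
    >>> x = Tensor([3.0, 4.0], mindspore.float32)
    >>> print(5 % x)   # scalar % tensor dispatches to the reflected remainder
    [2. 1.]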
 def sub(*args, **kwargs):
     r"""
     sub(input, other, *, alpha=1) -> Tensor
@@ -1261,10 +1826,10 @@ def sub(*args, **kwargs):
     Args:
         input (Union[Tensor, number.Number, bool]): `input` is a number.Number or a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, number.Number, bool]): `other` is a number.Number or a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
 
     Keyword Args:
         alpha (number.Number, optional): A scaling factor applied to `other`, default ``1``.
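The `alpha` keyword documented above scales `other` before subtraction, i.e. the result is `input - alpha * other`; a minimal sketch via the `mint` namespace, which exposes the same signature:

    >>> import mindspore
    >>> from mindspore import Tensor, mint
    >>> x = Tensor([4.0, 5.0, 6.0], mindspore.float32)
    >>> y = Tensor([1.0, 2.0, 3.0], mindspore.float32)
    >>> print(mint.sub(x, y, alpha=2))
    [2. 1. 0.]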
@@ -1380,7 +1945,7 @@ def xlogy(*args, **kwargs):
         input (Union[Tensor, numbers.Number, bool]): The first input is a numbers.Number or
             a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, numbers.Number, bool]): The second input is a numbers.Number or
             a bool or a tensor whose data type is number or bool when the first input is a tensor.
             When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
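For context on the argument description patched above: `xlogy` computes `input * log(other)` element-wise; a minimal sketch:

    >>> import mindspore
    >>> from mindspore import Tensor, ops
    >>> x = Tensor([1.0, 2.0], mindspore.float32)
    >>> y = Tensor([3.0, 2.0], mindspore.float32)
    >>> print(ops.xlogy(x, y))
    [1.0986123 1.3862944]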
@@ -1413,17 +1978,26 @@ __all__ = [
     "__add__",
     "addcdiv",
     "all_gather_matmul",
+    "any",
+    "bernoulli_",
     "bitwise_not",
     "clamp",
     "clip",
+    "conv3d",
     "div",
     "divide",
+    "einsum",
     "empty",
+    "empty_like",
     "floor_divide",
     "fmod",
     "gelu",
+    "gmm",
+    "gmm_backward",
+    "gmm_backward_fusion",
     "greater_equal",
     "ge",
+    "index_add",
     "kthvalue",
     "lerp",
     "matmul_reduce_scatter",
@@ -1431,8 +2005,10 @@ __all__ = [
     "min",
     "nansum",
     "pixel_shuffle",
+    "quant_matmul",
     "remainder",
     "repeat_interleave",
+    "rmod",
     "sub",
     "__sub__",
     "where",