mindspore-2.7.0rc1-cp311-cp311-win_amd64.whl → mindspore-2.7.1-cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore has been flagged as potentially problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +5 -2
- mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +2 -2
- mindspore/_extends/builtin_operations.py +3 -3
- mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +3 -3
- mindspore/_extends/parse/compile_config.py +24 -1
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +6 -3
- mindspore/_extends/parse/parser.py +28 -22
- mindspore/_extends/parse/resources.py +1 -1
- mindspore/_extends/parse/standard_method.py +23 -2
- mindspore/_extends/parse/trope.py +2 -1
- mindspore/_extends/pijit/pijit_func_white_list.py +9 -27
- mindspore/amp.py +0 -18
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/base.py +29 -2
- mindspore/common/__init__.py +18 -12
- mindspore/common/_decorator.py +3 -2
- mindspore/common/_grad_function.py +3 -1
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +371 -96
- mindspore/common/_utils.py +7 -43
- mindspore/common/api.py +434 -135
- mindspore/common/dtype.py +98 -57
- mindspore/common/dump.py +7 -108
- mindspore/common/dynamic_shape/__init__.py +0 -0
- mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +15 -23
- mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
- mindspore/common/file_system.py +59 -9
- mindspore/common/hook_handle.py +82 -3
- mindspore/common/jit_config.py +5 -1
- mindspore/common/jit_trace.py +27 -12
- mindspore/common/lazy_inline.py +5 -3
- mindspore/common/np_dtype.py +3 -3
- mindspore/common/parameter.py +17 -127
- mindspore/common/recompute.py +4 -13
- mindspore/common/tensor.py +50 -217
- mindspore/communication/_comm_helper.py +11 -1
- mindspore/communication/comm_func.py +138 -4
- mindspore/communication/management.py +85 -1
- mindspore/config/op_info.config +0 -15
- mindspore/context.py +20 -106
- mindspore/dataset/__init__.py +1 -1
- mindspore/dataset/audio/transforms.py +1 -1
- mindspore/dataset/core/config.py +35 -1
- mindspore/dataset/engine/datasets.py +338 -319
- mindspore/dataset/engine/datasets_user_defined.py +38 -22
- mindspore/dataset/engine/datasets_vision.py +1 -1
- mindspore/dataset/engine/validators.py +1 -15
- mindspore/dataset/transforms/c_transforms.py +2 -2
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/vision/__init__.py +1 -1
- mindspore/dataset/vision/py_transforms.py +8 -8
- mindspore/dataset/vision/transforms.py +17 -5
- mindspore/dataset/vision/utils.py +632 -21
- mindspore/device_context/ascend/op_tuning.py +35 -1
- mindspore/dnnl.dll +0 -0
- mindspore/{profiler/common/validator → graph}/__init__.py +9 -1
- mindspore/graph/custom_pass.py +55 -0
- mindspore/include/api/cell.h +28 -4
- mindspore/include/api/cfg.h +24 -7
- mindspore/include/api/context.h +1 -0
- mindspore/include/api/delegate.h +0 -2
- mindspore/include/api/dual_abi_helper.h +100 -19
- mindspore/include/api/graph.h +14 -1
- mindspore/include/api/kernel.h +16 -3
- mindspore/include/api/kernel_api.h +9 -1
- mindspore/include/api/metrics/accuracy.h +9 -0
- mindspore/include/api/model.h +5 -1
- mindspore/include/api/model_group.h +4 -0
- mindspore/include/api/model_parallel_runner.h +2 -0
- mindspore/include/api/status.h +48 -10
- mindspore/include/api/types.h +6 -1
- mindspore/include/dataset/constants.h +9 -0
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/__init__.py +3 -3
- mindspore/mindrecord/common/exceptions.py +1 -0
- mindspore/mindrecord/config.py +1 -1
- mindspore/{parallel/mpi → mindrecord/core}/__init__.py +4 -1
- mindspore/mindrecord/{shardheader.py → core/shardheader.py} +2 -1
- mindspore/mindrecord/{shardindexgenerator.py → core/shardindexgenerator.py} +1 -1
- mindspore/mindrecord/{shardreader.py → core/shardreader.py} +2 -1
- mindspore/mindrecord/{shardsegment.py → core/shardsegment.py} +2 -2
- mindspore/mindrecord/{shardutils.py → core/shardutils.py} +1 -1
- mindspore/mindrecord/{shardwriter.py → core/shardwriter.py} +1 -1
- mindspore/mindrecord/filereader.py +4 -4
- mindspore/mindrecord/filewriter.py +5 -5
- mindspore/mindrecord/mindpage.py +2 -2
- mindspore/mindrecord/tools/cifar10.py +4 -3
- mindspore/mindrecord/tools/cifar100.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +1 -1
- mindspore/mindrecord/tools/cifar10_to_mr.py +6 -6
- mindspore/mindrecord/tools/csv_to_mr.py +1 -1
- mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
- mindspore/mindrecord/tools/mnist_to_mr.py +1 -1
- mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -1
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_cluster.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_hardware_abstract.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_ops_host.dll → mindspore_ops_cpu.dll} +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mindspore_runtime_utils.dll +0 -0
- mindspore/mindspore_tools.dll +0 -0
- mindspore/mint/__init__.py +15 -10
- mindspore/mint/distributed/__init__.py +4 -0
- mindspore/mint/distributed/distributed.py +392 -69
- mindspore/mint/nn/__init__.py +2 -16
- mindspore/mint/nn/functional.py +4 -110
- mindspore/mint/nn/layer/__init__.py +0 -2
- mindspore/mint/nn/layer/_functions.py +1 -2
- mindspore/mint/nn/layer/activation.py +0 -6
- mindspore/mint/nn/layer/basic.py +0 -47
- mindspore/mint/nn/layer/conv.py +10 -10
- mindspore/mint/nn/layer/normalization.py +11 -16
- mindspore/mint/nn/layer/pooling.py +0 -4
- mindspore/nn/__init__.py +1 -3
- mindspore/nn/cell.py +231 -239
- mindspore/nn/layer/activation.py +4 -2
- mindspore/nn/layer/basic.py +56 -14
- mindspore/nn/layer/container.py +16 -0
- mindspore/nn/layer/embedding.py +4 -169
- mindspore/nn/layer/image.py +1 -1
- mindspore/nn/layer/normalization.py +2 -1
- mindspore/nn/layer/thor_layer.py +4 -85
- mindspore/nn/optim/ada_grad.py +0 -1
- mindspore/nn/optim/adafactor.py +0 -1
- mindspore/nn/optim/adam.py +32 -127
- mindspore/nn/optim/adamax.py +0 -1
- mindspore/nn/optim/asgd.py +0 -1
- mindspore/nn/optim/ftrl.py +8 -102
- mindspore/nn/optim/lamb.py +1 -4
- mindspore/nn/optim/lars.py +0 -3
- mindspore/nn/optim/lazyadam.py +25 -218
- mindspore/nn/optim/momentum.py +5 -43
- mindspore/nn/optim/optimizer.py +6 -55
- mindspore/nn/optim/proximal_ada_grad.py +0 -1
- mindspore/nn/optim/rmsprop.py +0 -1
- mindspore/nn/optim/rprop.py +0 -1
- mindspore/nn/optim/sgd.py +0 -1
- mindspore/nn/optim/tft_wrapper.py +2 -4
- mindspore/nn/optim/thor.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +7 -8
- mindspore/nn/probability/bijector/gumbel_cdf.py +2 -2
- mindspore/nn/probability/bijector/power_transform.py +20 -21
- mindspore/nn/probability/bijector/scalar_affine.py +5 -5
- mindspore/nn/probability/bijector/softplus.py +13 -14
- mindspore/nn/probability/distribution/_utils/utils.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +39 -5
- mindspore/nn/wrap/grad_reducer.py +4 -89
- mindspore/numpy/array_creations.py +4 -4
- mindspore/numpy/fft.py +9 -9
- mindspore/numpy/utils_const.py +1 -1
- mindspore/{nn/reinforcement → onnx}/__init__.py +5 -8
- mindspore/onnx/onnx_export.py +137 -0
- mindspore/opencv_core4110.dll +0 -0
- mindspore/opencv_imgcodecs4110.dll +0 -0
- mindspore/{opencv_imgproc452.dll → opencv_imgproc4110.dll} +0 -0
- mindspore/ops/__init__.py +2 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +38 -2
- mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
- mindspore/ops/_op_impl/aicpu/__init__.py +0 -10
- mindspore/ops/_op_impl/cpu/__init__.py +1 -5
- mindspore/ops/_op_impl/cpu/{buffer_append.py → joinedstr_op.py} +8 -8
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +28 -24
- mindspore/ops/auto_generate/gen_extend_func.py +6 -11
- mindspore/ops/auto_generate/gen_ops_def.py +385 -154
- mindspore/ops/auto_generate/gen_ops_prim.py +5676 -5167
- mindspore/ops/communication.py +97 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +16 -2
- mindspore/ops/composite/multitype_ops/__init__.py +3 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +150 -8
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/composite/multitype_ops/add_impl.py +7 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +27 -0
- mindspore/ops/function/__init__.py +2 -0
- mindspore/ops/function/array_func.py +24 -18
- mindspore/ops/function/comm_func.py +3883 -0
- mindspore/ops/function/debug_func.py +7 -6
- mindspore/ops/function/grad/grad_func.py +4 -12
- mindspore/ops/function/math_func.py +89 -86
- mindspore/ops/function/nn_func.py +92 -313
- mindspore/ops/function/random_func.py +9 -18
- mindspore/ops/functional.py +4 -1
- mindspore/ops/functional_overload.py +377 -30
- mindspore/ops/operations/__init__.py +2 -5
- mindspore/ops/operations/_custom_ops_utils.py +7 -9
- mindspore/ops/operations/_inner_ops.py +12 -50
- mindspore/ops/operations/_rl_inner_ops.py +0 -933
- mindspore/ops/operations/array_ops.py +5 -50
- mindspore/ops/operations/comm_ops.py +95 -17
- mindspore/ops/operations/custom_ops.py +237 -22
- mindspore/ops/operations/debug_ops.py +33 -35
- mindspore/ops/operations/manually_defined/ops_def.py +39 -318
- mindspore/ops/operations/math_ops.py +5 -5
- mindspore/ops/operations/nn_ops.py +3 -3
- mindspore/ops/operations/sparse_ops.py +0 -83
- mindspore/ops/primitive.py +4 -27
- mindspore/ops/tensor_method.py +88 -10
- mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +5 -5
- mindspore/ops_generate/aclnn/gen_aclnn_implement.py +8 -8
- mindspore/ops_generate/api/functions_cc_generator.py +53 -4
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +25 -11
- mindspore/ops_generate/common/gen_constants.py +11 -10
- mindspore/ops_generate/common/op_proto.py +18 -1
- mindspore/ops_generate/common/template.py +102 -245
- mindspore/ops_generate/common/template_utils.py +212 -0
- mindspore/ops_generate/gen_custom_ops.py +69 -0
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +78 -7
- mindspore/ops_generate/op_def_py/base_op_prim_py_generator.py +360 -0
- mindspore/ops_generate/op_def_py/custom_op_prim_py_generator.py +140 -0
- mindspore/ops_generate/op_def_py/op_def_py_generator.py +54 -7
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -312
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +74 -17
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +22 -5
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +0 -16
- mindspore/ops_generate/pyboost/op_template_parser.py +3 -2
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +21 -5
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +2 -2
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +30 -10
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +10 -3
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +19 -9
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +71 -28
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +10 -9
- mindspore/ops_generate/pyboost/pyboost_utils.py +27 -16
- mindspore/ops_generate/resources/yaml_loader.py +13 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +2 -2
- mindspore/parallel/_auto_parallel_context.py +5 -15
- mindspore/parallel/_cell_wrapper.py +1 -1
- mindspore/parallel/_parallel_serialization.py +4 -6
- mindspore/parallel/_ps_context.py +2 -2
- mindspore/parallel/_utils.py +34 -17
- mindspore/parallel/auto_parallel.py +23 -9
- mindspore/parallel/checkpoint_transform.py +20 -2
- mindspore/parallel/cluster/process_entity/_api.py +28 -33
- mindspore/parallel/cluster/process_entity/_utils.py +9 -5
- mindspore/parallel/cluster/run.py +5 -3
- mindspore/{experimental/llm_boost/ascend_native → parallel/distributed}/__init__.py +21 -22
- mindspore/parallel/distributed/distributed_data_parallel.py +393 -0
- mindspore/parallel/distributed/flatten_grad_buffer.py +295 -0
- mindspore/parallel/function/reshard_func.py +6 -5
- mindspore/parallel/nn/parallel_cell_wrapper.py +40 -3
- mindspore/parallel/nn/parallel_grad_reducer.py +0 -8
- mindspore/parallel/shard.py +7 -21
- mindspore/parallel/strategy.py +336 -0
- mindspore/parallel/transform_safetensors.py +127 -20
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +13 -9
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +1 -1
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +1 -1
- mindspore/profiler/common/constant.py +5 -0
- mindspore/profiler/common/file_manager.py +9 -0
- mindspore/profiler/common/msprof_cmd_tool.py +40 -4
- mindspore/profiler/common/path_manager.py +65 -24
- mindspore/profiler/common/profiler_context.py +27 -14
- mindspore/profiler/common/profiler_info.py +3 -3
- mindspore/profiler/common/profiler_meta_data.py +1 -0
- mindspore/profiler/common/profiler_op_analyse.py +10 -6
- mindspore/profiler/common/profiler_path_manager.py +13 -0
- mindspore/profiler/common/util.py +30 -3
- mindspore/profiler/dynamic_profiler.py +91 -46
- mindspore/profiler/envprofiler.py +30 -5
- mindspore/profiler/experimental_config.py +18 -2
- mindspore/profiler/platform/cpu_profiler.py +10 -4
- mindspore/profiler/platform/npu_profiler.py +34 -7
- mindspore/profiler/profiler.py +193 -145
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +2 -2
- mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +108 -24
- mindspore/runtime/__init__.py +9 -6
- mindspore/runtime/executor.py +35 -0
- mindspore/runtime/memory.py +113 -0
- mindspore/runtime/thread_bind_core.py +1 -1
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/{experimental/llm_boost → tools}/__init__.py +5 -5
- mindspore/tools/data_dump.py +130 -0
- mindspore/tools/sdc_detect.py +91 -0
- mindspore/tools/stress_detect.py +63 -0
- mindspore/train/__init__.py +6 -6
- mindspore/train/_utils.py +8 -21
- mindspore/train/amp.py +6 -7
- mindspore/train/callback/_callback.py +2 -1
- mindspore/train/callback/_checkpoint.py +1 -17
- mindspore/train/callback/_flops_collector.py +10 -6
- mindspore/train/callback/_train_fault_tolerance.py +72 -25
- mindspore/train/data_sink.py +5 -9
- mindspore/train/dataset_helper.py +5 -5
- mindspore/train/model.py +41 -230
- mindspore/train/serialization.py +160 -401
- mindspore/train/train_thor/model_thor.py +2 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +6 -3
- mindspore/utils/dlpack.py +92 -0
- mindspore/utils/dryrun.py +1 -1
- mindspore/utils/runtime_execution_order_check.py +10 -0
- mindspore/utils/sdc_detect.py +14 -12
- mindspore/utils/stress_detect.py +43 -0
- mindspore/utils/utils.py +152 -16
- mindspore/version.py +1 -1
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/METADATA +3 -2
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/RECORD +330 -344
- mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
- mindspore/communication/_hccl_management.py +0 -297
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -207
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +0 -52
- mindspore/experimental/llm_boost/atb/__init__.py +0 -23
- mindspore/experimental/llm_boost/atb/boost_base.py +0 -385
- mindspore/experimental/llm_boost/atb/llama_boost.py +0 -137
- mindspore/experimental/llm_boost/atb/qwen_boost.py +0 -124
- mindspore/experimental/llm_boost/register.py +0 -130
- mindspore/experimental/llm_boost/utils.py +0 -31
- mindspore/include/OWNERS +0 -7
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/nn/optim/_dist_optimizer_registry.py +0 -111
- mindspore/nn/reinforcement/_batch_read_write.py +0 -142
- mindspore/nn/reinforcement/_tensors_queue.py +0 -152
- mindspore/nn/reinforcement/tensor_array.py +0 -145
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +0 -113
- mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +0 -96
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +0 -42
- mindspore/ops/_op_impl/cpu/buffer_get.py +0 -28
- mindspore/ops/_op_impl/cpu/buffer_sample.py +0 -28
- mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +0 -42
- mindspore/ops/operations/_tensor_array.py +0 -359
- mindspore/ops/operations/rl_ops.py +0 -288
- mindspore/parallel/_offload_context.py +0 -275
- mindspore/parallel/_recovery_context.py +0 -115
- mindspore/parallel/_transformer/__init__.py +0 -35
- mindspore/parallel/_transformer/layers.py +0 -765
- mindspore/parallel/_transformer/loss.py +0 -251
- mindspore/parallel/_transformer/moe.py +0 -693
- mindspore/parallel/_transformer/op_parallel_config.py +0 -222
- mindspore/parallel/_transformer/transformer.py +0 -3124
- mindspore/parallel/mpi/_mpi_config.py +0 -116
- mindspore/profiler/common/validator/validate_path.py +0 -84
- mindspore/train/memory_profiling_pb2.py +0 -298
- mindspore/utils/hooks.py +0 -81
- /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/WHEEL +0 -0
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/top_level.txt +0 -0
mindspore/ops/functional.py
CHANGED
@@ -20,7 +20,7 @@ from mindspore.common._register_for_tensor import tensor_operator_registry
 from mindspore.ops import _constants
 from mindspore.ops.function import *
 from mindspore.ops.function.array_func import chunk_ext, zero_
-from mindspore.ops.function.math_func import all, argmax_ext, float_power_ext, erfinv_, tanh_, bernoulli_ext
+from mindspore.ops.function.math_func import all, argmax_ext, float_power_ext, erfinv_, tanh_, bernoulli_ext, bernoulli_
 from mindspore.ops.function.random_func import random_, uniform_ext, uniform_, normal_, exponential_
 from mindspore.ops import operations as P
 from mindspore.ops.operations import array_ops
@@ -266,6 +266,7 @@ setattr(tensor_operator_registry, 'erf', erf)
 setattr(tensor_operator_registry, 'erfc', erfc)
 setattr(tensor_operator_registry, 'standard_normal', P.StandardNormal)
 setattr(tensor_operator_registry, 'sigmoid', sigmoid)
+setattr(tensor_operator_registry, 'sigmoid_', auto_generate.inplace_sigmoid)
 setattr(tensor_operator_registry, 'median', Median)
 setattr(tensor_operator_registry, 'tanh', tanh)
 setattr(tensor_operator_registry, 'tanh_', tanh_)
@@ -397,6 +398,7 @@ setattr(tensor_operator_registry, 'inplace_scatter_add', auto_generate.inplace_s
 setattr(tensor_operator_registry, 'slice_scatter', slice_scatter)
 setattr(tensor_operator_registry, 'select_scatter', select_scatter)
 setattr(tensor_operator_registry, 'bernoulli', bernoulli_ext)
+setattr(tensor_operator_registry, 'bernoulli_', bernoulli_)
 setattr(tensor_operator_registry, 'poisson', P.Poisson)
 setattr(tensor_operator_registry, 'randperm', P.Randperm)
 setattr(tensor_operator_registry, 'multinomial', multinomial)
@@ -451,6 +453,7 @@ setattr(tensor_operator_registry, 'ne', ne)
 setattr(tensor_operator_registry, 'not_equal', not_equal)
 setattr(tensor_operator_registry, 'sgn', sgn)
 setattr(tensor_operator_registry, 'sign', sign)
+setattr(tensor_operator_registry, 'sign_', auto_generate.inplace_sign)
 setattr(tensor_operator_registry, 'signbit', signbit)
 setattr(tensor_operator_registry, 'sinh', sinh)
 setattr(tensor_operator_registry, 'trunc', trunc)
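Note: the three new registry entries above are what in-place Tensor methods resolve through. A minimal sketch of what this enables, assuming Tensor.sigmoid_ and Tensor.sign_ dispatch via tensor_operator_registry as the entry names suggest:

import numpy as np
import mindspore as ms
from mindspore import Tensor

x = Tensor(np.array([-1.0, 0.0, 2.0]), ms.float32)
x.sigmoid_()  # in-place sigmoid, resolved through the new 'sigmoid_' entry
x.sign_()     # in-place sign; sigmoid outputs are positive, so every element becomes 1
print(x)      # [1. 1. 1.]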
mindspore/ops/functional_overload.py
CHANGED
@@ -17,8 +17,11 @@ from mindspore._c_expression import _add_instance
 from mindspore._c_expression import _addcdiv_instance
 from mindspore._c_expression import _all_gather_matmul_instance
 from mindspore._c_expression import _any_instance
+from mindspore._c_expression import _bernoulli__instance
 from mindspore._c_expression import _bitwise_not_instance
 from mindspore._c_expression import _clamp_instance
+from mindspore._c_expression import _conv1d_instance
+from mindspore._c_expression import _conv2d_instance
 from mindspore._c_expression import _conv3d_instance
 from mindspore._c_expression import _div_instance
 from mindspore._c_expression import _einsum_instance
@@ -31,6 +34,7 @@ from mindspore._c_expression import _gmm_instance
 from mindspore._c_expression import _gmm_backward_instance
 from mindspore._c_expression import _gmm_backward_fusion_instance
 from mindspore._c_expression import _greater_equal_instance
+from mindspore._c_expression import _imag_instance
 from mindspore._c_expression import _index_add_instance
 from mindspore._c_expression import _kthvalue_instance
 from mindspore._c_expression import _lerp_instance
@@ -40,6 +44,7 @@ from mindspore._c_expression import _min_instance
 from mindspore._c_expression import _nansum_instance
 from mindspore._c_expression import _pixel_shuffle_instance
 from mindspore._c_expression import _quant_matmul_instance
+from mindspore._c_expression import _real_instance
 from mindspore._c_expression import _remainder_instance
 from mindspore._c_expression import _repeat_interleave_instance
 from mindspore._c_expression import _rmod_instance
@@ -66,10 +71,10 @@ def add(*args, **kwargs):
     Args:
         input (Union[Tensor, number.Number, bool]): `input` is a number.Number or a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, number.Number, bool]): `other` is a number.Number or a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.

     Keyword Args:
         alpha (number.Number, optional): A scaling factor applied to `other`, default ``1``.
@@ -317,6 +322,15 @@ def any(*args, **kwargs):
     return _any_instance(*args, **kwargs)


+def bernoulli_(*args, **kwargs):
+    r"""
+    bernoulli_(input, p, seed, offset) -> Tensor
+
+    Inner function, used for Tensor.bernoulli_.
+    """
+    return _bernoulli__instance(*args, **kwargs)
+
+
 def bitwise_not(*args, **kwargs):
     r"""
     bitwise_not(input) -> Tensor
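The bernoulli_ wrapper above is internal plumbing: seed and offset come from the framework's generator state, not from user code. A hedged sketch of the public entry point it backs (the exact public signature of Tensor.bernoulli_ is an assumption here, mirroring the usual in-place convention):

import numpy as np
import mindspore as ms
from mindspore import Tensor

x = Tensor(np.zeros((2, 3)), ms.float32)
x.bernoulli_(0.5)  # fill x in place with Bernoulli(p=0.5) samples of 0.0 / 1.0
print(x)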
@@ -429,6 +443,245 @@ def clip(*args, **kwargs):
     return _clamp_instance(*args, **kwargs)


+def conv1d(*args, **kwargs):
+    r"""
+    conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
+
+    Applies a 1D convolution over an input tensor. The input tensor is typically
+    of shape :math:`(N, C_{in}, L_{in})`,
+    where :math:`N` is batch size, :math:`C` is channel number, :math:`L` is sequence length.
+
+    The output is calculated based on formula:
+
+    .. math::
+
+        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
+        \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
+
+    where :math:`bias` is the output channel bias, :math:`ccor` is
+    the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
+    :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
+
+    - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
+      where :math:`N` is the batch size of the input.
+
+    - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
+      where :math:`C_{out}` is the number of
+      output channels, which is also equal to the number of kernels.
+
+    - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
+      where :math:`C_{in}` is the number of
+      input channels, which is also equal to the number of channels in the convolutional kernels.
+
+    Therefore, in the above formula, :math:`{bias}(C_{\text{out}_j})` represents the bias of the :math:`j`-th
+    output channel, :math:`{weight}(C_{\text{out}_j}, k)` represents the slice of the :math:`j`-th convolutional
+    kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+    channel in the :math:`i`-th batch of the input feature map.
+
+    The shape of the convolutional kernel is given by :math:`(\text{kernel_size})`,
+    where :math:`\text{kernel_size}` is the length of the kernel.
+    If we consider the input and output channels as well as the `groups` parameter, the complete kernel shape
+    will be :math:`(C_{out}, C_{in} / \text{groups}, \text{kernel_size})`,
+    where `groups` is the number of groups dividing `x`'s input channel when applying groups convolution.
+
+    For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
+    <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
+
+    Args:
+        input (Tensor): Tensor of shape :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`.
+        weight (Tensor): Tensor of shape
+            :math:`(C_{out}, C_{in} / \text{groups}, \text{kernel_size})`, then the size of kernel
+            is :math:`(\text{kernel_size})`.
+        bias (Tensor, optional): Bias Tensor with shape :math:`(C_{out})`.
+            When bias is ``None`` , zeros will be used. Default: ``None`` .
+        stride (Union[int, tuple[int], list[int]], optional): The movement stride of the 1D convolution kernel.
+            The data type is an integer or a tuple of one integer. Default: ``1`` .
+        padding (Union[int, tuple[int], list[int], str], optional): The number of padding
+            on the input.
+            The data type is an integer or a tuple of one integer or string {`valid`, `same`}.
+            The value should be greater than or equal to 0. Default: ``0`` .
+
+            - ``"same"``: Pad the input around its edges so that the shape of input and output
+              are the same when `stride` is set to ``1``.
+              The amount of padding is calculated by the operator internally. If the amount is even, it is
+              uniformly distributed around the input; if it is odd, the excess amount goes to the right side.
+              If this mode is set, `stride` must be 1.
+
+            - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+              possible length. Extra sequence that could not complete a full stride will
+              be discarded.
+
+        dilation (Union[int, tuple[int], list[int]], optional): Specifies the dilation rate to use for
+            dilated convolution. It can be a single int or a tuple of 1 integer.
+            Assuming :math:`dilation=(d)`, the convolutional kernel samples the input with a
+            spacing of :math:`d-1` elements in the length direction.
+            Default: ``1`` .
+        groups (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
+            divisible by `groups`. If the groups is equal to `in_channels` and `out_channels`,
+            this 1D convolution layer also can be called 1D depthwise convolution layer. Default: ``1`` .
+            The following restraints should be met:
+
+            - :math:`(C_{in} \text{ % } \text{groups} == 0)`
+            - :math:`(C_{out} \text{ % } \text{groups} == 0)`
+            - :math:`(C_{out} >= \text{groups})`
+            - :math:`(\text{weight[1]} = C_{in} / \text{groups})`
+
+    Returns:
+        Tensor, the value that applied 1D convolution. The shape is :math:`(N, C_{out}, L_{out})`.
+        To see how different pad modes affect the output shape, please refer to
+        :class:`mindspore.mint.nn.Conv1d` for more details.
+
+    Raises:
+        RuntimeError: On Ascend, due to the limitation of the L1 cache size of different NPU chips, if the input
+            size or kernel size is too large, it may trigger an error.
+        TypeError: If `in_channels`, `out_channels` or `groups` is not an int.
+        TypeError: If `kernel_size`, `stride` or `dilation` is neither an int nor a tuple.
+        ValueError: Args and size of the input feature map should satisfy the output formula to ensure that the size of
+            the output feature map is positive; otherwise, an error will be reported.
+        ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
+        ValueError: If `padding` is less than 0.
+        ValueError: If `padding` is ``"same"`` and `stride` is not equal to 1.
+        ValueError: The input parameters do not satisfy the convolution output formula.
+        ValueError: The `kernel_size` cannot exceed the size of the input feature map.
+        ValueError: The value of `padding` cannot cause the calculation area to exceed the input size.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops, mint
+        >>> x = Tensor(np.ones([10, 32, 32]), mindspore.float32)
+        >>> weight = Tensor(np.ones([32, 32, 3]), mindspore.float32)
+        >>> output = mint.nn.functional.conv1d(x, weight)
+        >>> print(output.shape)
+        (10, 32, 30)
+    """
+    return _conv1d_instance(*args, **kwargs)
+
+
+def conv2d(*args, **kwargs):
+    r"""
+    Applies a 2D convolution over an input tensor. The input tensor is typically of
+    shape :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`,
+    where :math:`N` is batch size, :math:`C` is channel number, :math:`H` is feature height, :math:`W` is feature width.
+
+    The output is calculated based on formula:
+
+    .. math::
+
+        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
+        \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
+
+    where :math:`bias` is the output channel bias, :math:`ccor` is
+    the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
+    :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
+
+    - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
+      where :math:`N` is the batch size of the input.
+
+    - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
+      where :math:`C_{out}` is the number of output channels, which is also equal to the number of kernels.
+
+    - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
+      where :math:`C_{in}` is the number of
+      input channels, which is also equal to the number of channels in the convolutional kernels.
+
+    Therefore, in the above formula, :math:`{bias}(C_{out_j})` represents the bias of the :math:`j`-th
+    output channel, :math:`{weight}(C_{out_j}, k)` represents the slice of the :math:`j`-th convolutional
+    kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+    channel in the :math:`i`-th batch of the input feature map.
+
+    The shape of the convolutional kernel is given by :math:`(\text{kernel_size[0]}, \text{kernel_size[1]})`,
+    where :math:`\text{kernel_size[0]}` and :math:`\text{kernel_size[1]}` are the height and width of the kernel,
+    respectively.
+    If we consider the input and output channels as well as the `group` parameter, the complete kernel shape
+    will be :math:`(C_{out}, C_{in} / \text{group}, \text{kernel_size[0]}, \text{kernel_size[1]})`,
+    where `group` is the number of groups dividing `x`'s input channel when applying group convolution.
+
+    For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
+    <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_ and
+    `ConvNets <http://cs231n.github.io/convolutional-networks/>`_.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`.
+        weight (Tensor): Tensor of shape
+            :math:`(N, C_{in} / \text{groups}, \text{kernel_size[0]}, \text{kernel_size[1]})`, then the size of kernel
+            is :math:`(\text{kernel_size[0]}, \text{kernel_size[1]})`.
+        bias (Tensor, optional): Bias Tensor with shape :math:`(C_{out})`.
+            When bias is ``None`` , zeros will be used. Default: ``None`` .
+        stride (Union(int, tuple[int], list[int]), optional): The distance of kernel moving, an int number that
+            represents the height and width of movement are both strides, or a tuple of two int numbers that
+            represent height and width of movement respectively. Default: ``1`` .
+        padding (Union[int, tuple[int], list[int], str], optional): The number of padding
+            on the height and width directions of the input.
+            The data type is an integer or a tuple of two integers or string {`valid`, `same`}. If `padding` is an
+            integer, then `padding_{H}` and `padding_{W}` are all equal to `padding`.
+            If `padding` is a tuple of 2 integers, then `padding_{H}` and `padding_{W}`
+            are equal to `padding[0]` and `padding[1]` respectively.
+            The value should be greater than or equal to 0. Default: ``0`` .
+
+            - ``"same"``: Pad the input around its edges so that the shape of input and output
+              are the same when `stride` is set to ``1``.
+              The amount of padding is calculated by the operator internally. If the amount is even, it is
+              uniformly distributed around the input; if it is odd, the excess amount goes to the right/bottom side.
+              If this mode is set, `stride` must be 1.
+
+            - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+              possible height and width. Extra pixels that could not complete a full stride will
+              be discarded.
+
+        dilation (Union(int, tuple[int], list[int]), optional): Gaps between kernel elements. The data type
+            is int or a tuple of 2 integers. Specifies the dilation rate to use for dilated convolution.
+            If set to be :math:`k > 1`,
+            there will be :math:`k - 1` pixels skipped for each sampling location. Its value must
+            be greater than or equal to 1 and bounded by the height and width of the input `x`. Default: ``1`` .
+        groups (int, optional): Splits `input` into groups. Default: ``1`` .
+
+            - :math:`(C_{in} \text{ % } \text{groups} == 0)` , :math:`(C_{out} \text{ % } \text{groups} == 0)` ,
+              :math:`(C_{out} >= \text{groups})` , :math:`(\text{kernel_size[1]} = C_{in} / \text{groups})`
+
+    Returns:
+        Tensor, the value that applied 2D convolution. The shape is :math:`(N, C_{out}, H_{out}, W_{out})`.
+        To see how different pad modes affect the output shape, please refer to
+        :class:`mindspore.mint.nn.Conv2d` for more details.
+
+    Raises:
+        ValueError: Args and size of the input feature map should satisfy the output formula to ensure that the size of
+            the output feature map is positive; otherwise, an error will be reported. For more details on the output
+            formula, please refer to :class:`mindspore.mint.nn.Conv2d`.
+        RuntimeError: On Ascend, due to the limitation of the L1 cache size of different NPU chips, if the input
+            size or kernel size is too large, it may trigger an error.
+        TypeError: If `in_channels` , `out_channels` or `groups` is not an int.
+        TypeError: If `kernel_size` , `stride` or `dilation` is neither an int nor a tuple.
+        TypeError: If `bias` is not a Tensor.
+        ValueError: If the shape of `bias` is not :math:`(C_{out})` .
+        ValueError: If `stride` or `dilation` is less than 1.
+        ValueError: If `padding` is ``"same"`` and `stride` is not equal to 1.
+        ValueError: The input parameters do not satisfy the convolution output formula.
+        ValueError: The `kernel_size` cannot exceed the size of the input feature map.
+        ValueError: The value of `padding` cannot cause the calculation area to exceed the input size.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops, mint
+        >>> x = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32)
+        >>> weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)
+        >>> output = mint.nn.functional.conv2d(x, weight)
+        >>> print(output.shape)
+        (10, 32, 30, 30)
+    """
+    return _conv2d_instance(*args, **kwargs)
+
+
 def conv3d(*args, **kwargs):
     r"""
     conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
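The groups constraints documented for conv1d/conv2d admit the depthwise case (groups == C_in == C_out); a small illustrative sketch (shapes chosen here for illustration, not taken from the diff):

import mindspore
import numpy as np
from mindspore import Tensor, mint

x = Tensor(np.ones([10, 32, 32]), mindspore.float32)
# depthwise: weight[1] == C_in / groups == 1, per the restraints above
weight = Tensor(np.ones([32, 1, 3]), mindspore.float32)
output = mint.nn.functional.conv1d(x, weight, groups=32)
print(output.shape)  # (10, 32, 30)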
@@ -579,7 +832,7 @@ def div(*args, **kwargs):
     .. note::
         - When the two inputs have different shapes, they must be able to broadcast to a common shape.
        - The two inputs can not be bool type at the same time,
-          [True, Tensor(True
+          [True, Tensor(True), Tensor(np.array([True]))] are all considered bool type.
        - The two inputs comply with the implicit type conversion rules to make the data types
          consistent.

@@ -730,13 +983,10 @@ def einsum(*args, **kwargs):

 def empty(*args, **kwargs):
     r"""
-    empty(*size, *, dtype=None, device=None) -> Tensor
+    empty(*size, *, dtype=None, device=None, pin_memory=False) -> Tensor

     Creates a tensor with uninitialized data, whose shape, dtype and device are described by the argument `size`,
-    `dtype` and `device` respectively.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
+    `dtype` and `device` respectively. If `pin_memory` is True, the tensor will be allocated in pinned memory.

     Args:
         size (Union[tuple[int], list[int], int]): The specified shape of output tensor. Can be variable numbers of
@@ -745,15 +995,18 @@ def empty(*args, **kwargs):
     Keyword Args:
         dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor. If `dtype` is ``None`` ,
             `mindspore.float32` will be used. Default: ``None`` .
-        device (
+        device (str, optional): The specified device of the output tensor. In PyNative mode, ``"Ascend"``, ``"npu"``,
             ``"cpu"`` and ``"CPU"`` are supported. In graph mode O0, ``"Ascend"`` and ``"npu"`` are supported. If `device = None`,
             `mindspore.context.device_target` will be used. Default ``None``.
+        pin_memory (bool, optional): If set `pin_memory` to True, the tensor will be allocated in pinned memory, and `device`
+            should be ``"cpu"`` or ``"CPU"`` . Default ``False``.

     Returns:
         Tensor, whose shape, dtype and device are defined by input.

     Raises:
         TypeError: If `size` is neither an int nor a tuple or list of int.
+        RuntimeError: If `pin_memory` is True, and `device` is neither ``"cpu"`` nor ``"CPU"`` .

     Supported Platforms:
         ``Ascend`` ``CPU``
@@ -771,13 +1024,10 @@ def empty(*args, **kwargs):

 def empty_like(*args, **kwargs):
     r"""
-    empty_like(input, *, dtype=None, device=None) -> Tensor
+    empty_like(input, *, dtype=None, device=None, pin_memory=False) -> Tensor

     Returns an uninitialized Tensor with the same shape as the `input`. Its dtype is specified by `dtype` and its
-    device is specified by `device`.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
+    device is specified by `device`. If `pin_memory` is True, the tensor will be allocated in pinned memory.

     Args:
         input (Tensor): Tensor of any dimension.
@@ -785,15 +1035,18 @@ def empty_like(*args, **kwargs):
     Keyword Args:
         dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype = None`, the
             tensor will have the same dtype as input `input`. Default ``None``.
-        device (
+        device (str, optional): The specified device of the output tensor. In PyNative mode, ``"Ascend"``, ``"npu"``,
             ``"cpu"`` and ``"CPU"`` are supported. In graph mode O0, ``"Ascend"`` and ``"npu"`` are supported. If `device = None`,
             the value set by :func:`mindspore.set_device` will be used. Default ``None``.
+        pin_memory (bool, optional): If set `pin_memory` to True, the tensor will be allocated in pinned memory, and `device`
+            should be ``"cpu"`` or ``"CPU"`` . Default ``False``.

     Returns:
         Tensor, has the same shape, type and device as `input` but with uninitialized data (May be a random value).

     Raises:
         TypeError: If `input` is not a Tensor.
+        RuntimeError: If `pin_memory` is True, and `device` is neither ``"cpu"`` nor ``"CPU"`` .

     Supported Platforms:
         ``Ascend`` ``CPU``
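Both empty and empty_like gain the same pin_memory keyword; a short sketch of the documented contract (device strings as listed above, CPU-only pinning):

import mindspore as ms
from mindspore import mint

x = mint.empty(2, 3, dtype=ms.float32, device="cpu", pin_memory=True)  # pinned host buffer
y = mint.empty_like(x, device="cpu", pin_memory=True)                  # same shape/dtype, also pinned
# pin_memory=True with an Ascend/npu device raises RuntimeError, per the Raises sections above.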
@@ -833,9 +1086,6 @@ def floor_divide(*args, **kwargs):
     where the :math:`floor` indicates the Floor operator. For more details,
     please refer to the :class:`mindspore.mint.floor` operator.

-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
     Args:
         input (Union[Tensor, Number, bool]): The first input is a number or
             a bool or a tensor whose data type is number or bool.
@@ -1107,10 +1357,10 @@ def greater_equal(*args, **kwargs):

     Args:
         input (Union[Tensor, Number]): The first input is a number
-            or a tensor whose data type is `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_ or `
+            or a tensor whose data type is `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_ or `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_.
         other (Union[Tensor, Number]): Second input. When the first input is a Tensor, the second input should be a Number,
-            or a Tensor of the number or
-            the second input must be a Tensor of number or
+            or a Tensor of the number or bool data type. When the first input is a Scalar,
+            the second input must be a Tensor of number or bool data type.

     Returns:
         Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
@@ -1147,6 +1397,54 @@ def ge(*args, **kwargs):
     return _greater_equal_instance(*args, **kwargs)


+def imag(*args, **kwargs):
+    r"""
+    imag(input) -> Tensor
+
+    Return a new tensor containing the imaginary values of the input tensor.
+    The returned tensor and input tensor share the same underlying storage.
+
+    Note:
+        - Only support Pynative mode.
+        - Only support complex64 and complex128 tensors.
+
+    Args:
+        input (Tensor): The input tensor, the data type must be complex64 or complex128.
+
+    Returns:
+        Tensor, the shape is same as `input`. The data type is float32 if `input` is complex64, float64 when `input` is complex128.
+
+    Raises:
+        TypeError: If dtype of `input` is not complex64 or complex128.
+        ValueError: If input tensor has no storage info.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, ops, context
+        >>> context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
+        >>> real = Tensor([1.1, 2.1, 3.1], mindspore.float32)
+        >>> imag = Tensor([4.1, 5.1, 6.1], mindspore.float32)
+        >>> x = ops.Complex()(real, imag)
+        >>> output = ops.functional_overload.imag(x)
+        >>> print(output)
+        [4.1 5.1 6.1]
+        >>> print(output.dtype)
+        Float32
+        >>> real = Tensor([1.1, 2.1, 3.1], mindspore.float64)
+        >>> imag = Tensor([4.1, 5.1, 6.1], mindspore.float64)
+        >>> x = ops.Complex()(real, imag)
+        >>> output = ops.functional_overload.imag(x)
+        >>> print(output)
+        [4.1 5.1 6.1]
+        >>> print(output.dtype)
+        Float64
+    """
+    return _imag_instance(*args, **kwargs)
+
+
 def index_add(*args, **kwargs):
     r"""
     index_add(input, dim, index, source, *, alpha=1) -> Tensor
@@ -1615,9 +1913,6 @@ def pixel_shuffle(*args, **kwargs):
     For detailed introduction to the pixel_shuffle algorithm, refer to
     `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/abs/1609.05158>`_ .

-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
     Args:
         input (Tensor): Tensor of shape :math:`(*, C \times r^2, H, W)` . The dimension of `input` is larger than 2,
             and the length of third to last dimension can be divisible by the square of `upscale_factor`.
@@ -1699,6 +1994,53 @@ def quant_matmul(*args, **kwargs):
     return _quant_matmul_instance(*args, **kwargs)


+def real(*args, **kwargs):
+    r"""
+    real(input) -> Tensor
+
+    Return a new tensor containing the real values of the input tensor. If input is real, it is returned unchanged.
+    The returned tensor and input tensor share the same underlying storage.
+
+    Note:
+        Only support Pynative mode.
+
+    Args:
+        input (Tensor): The input tensor.
+
+    Returns:
+        Tensor, the shape is same as `input`. The data type is float32 if `input` is complex64, float64 when `input` is complex128.
+        Otherwise, the data type is the same as `input`.
+
+    Raises:
+        ValueError: If input tensor has no storage info.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, ops, context
+        >>> context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
+        >>> real = Tensor([1.1, 2.1, 3.1], mindspore.float32)
+        >>> imag = Tensor([4.1, 5.1, 6.1], mindspore.float32)
+        >>> x = ops.Complex()(real, imag)
+        >>> output = ops.functional_overload.real(x)
+        >>> print(output)
+        [1.1 2.1 3.1]
+        >>> print(output.dtype)
+        Float32
+        >>> real = Tensor([1.1, 2.1, 3.1], mindspore.float64)
+        >>> imag = Tensor([4.1, 5.1, 6.1], mindspore.float64)
+        >>> x = ops.Complex()(real, imag)
+        >>> output = ops.functional_overload.real(x)
+        >>> print(output)
+        [1.1 2.1 3.1]
+        >>> print(output.dtype)
+        Float64
+    """
+    return _real_instance(*args, **kwargs)
+
+
 def remainder(*args, **kwargs):
     r"""
     remainder(input, other) -> Tensor
@@ -1719,10 +2061,10 @@ def remainder(*args, **kwargs):
         input (Union[Tensor, numbers.Number, bool]): The dividend is a numbers.Number or
             a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, numbers.Number, bool]): The divisor is a numbers.Number or
-            a bool or a tensor whose data type is number or bool
-            When the dividend is Scalar, the divisor must be a Tensor whose data type is number or bool
+            a bool or a tensor whose data type is number or bool when the dividend is a tensor.
+            When the dividend is Scalar, the divisor must be a Tensor whose data type is number or bool.

     Returns:
         Tensor, with dtype promoted and shape broadcasted.
@@ -1816,10 +2158,10 @@ def sub(*args, **kwargs):
     Args:
         input (Union[Tensor, number.Number, bool]): `input` is a number.Number or a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, number.Number, bool]): `other` is a number.Number or a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.

     Keyword Args:
         alpha (number.Number, optional): A scaling factor applied to `other`, default ``1``.
@@ -1935,7 +2277,7 @@ def xlogy(*args, **kwargs):
         input (Union[Tensor, numbers.Number, bool]): The first input is a numbers.Number or
             a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `
+            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, numbers.Number, bool]): The second input is a numbers.Number or
             a bool or a tensor whose data type is number or bool when the first input is a tensor.
             When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
@@ -1969,9 +2311,12 @@ __all__ = [
     "addcdiv",
     "all_gather_matmul",
     "any",
+    "bernoulli_",
     "bitwise_not",
     "clamp",
     "clip",
+    "conv1d",
+    "conv2d",
     "conv3d",
     "div",
     "divide",
@@ -1986,6 +2331,7 @@ __all__ = [
     "gmm_backward_fusion",
     "greater_equal",
     "ge",
+    "imag",
     "index_add",
     "kthvalue",
     "lerp",
@@ -1995,6 +2341,7 @@ __all__ = [
     "nansum",
     "pixel_shuffle",
     "quant_matmul",
+    "real",
     "remainder",
     "repeat_interleave",
     "rmod",
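In total, 2.7.1 adds five public names to ops.functional_overload. A quick smoke test of the exports (assumes a build where the backing _c_expression instances are present):

from mindspore.ops import functional_overload

for name in ("bernoulli_", "conv1d", "conv2d", "imag", "real"):
    assert hasattr(functional_overload, name), name  # new in 2.7.1 per the __all__ diff above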
mindspore/ops/operations/__init__.py
CHANGED
@@ -55,7 +55,7 @@ from .comm_ops import (AllGather, AllReduce, Reduce, NeighborExchange, NeighborE
     Broadcast, CollectiveGather, CollectiveScatter, Barrier, Send, Receive, BatchISendIRecv,
     _MirrorOperator, _MirrorMiniStepOperator, _MiniStepAllGather, ReduceOp, _VirtualDataset,
     _VirtualOutput, _VirtualDiv, _GetTensorSlice, _VirtualAdd, _VirtualAssignAdd, _VirtualAccuGrad,
-    _HostAllGather, _HostReduceScatter, _MirrorMicroStepOperator, _MicroStepAllGather,
+    _HostAllGather, _HostReduceScatter, _MirrorMicroStepOperator, _MicroStepAllGather, AlltoAllVC,
     _VirtualPipelineEnd, AlltoAllV, ReduceScatter, _VirtualAssignKvCache, AllGatherV, ReduceScatterV)
 from .control_ops import GeSwitch, Merge
 from .custom_ops import (Custom, CustomOpBuilder)
@@ -129,7 +129,6 @@ from .random_ops import (RandomChoiceWithMask, StandardNormal, Gamma, RandomGamm
     LogUniformCandidateSampler, TruncatedNormal, LogNormalReverse, NonDeterministicInts,
     ParameterizedTruncatedNormal, RandomPoisson, MultinomialWithReplacement, RandomShuffle,
     RandpermV2)
-from .rl_ops import (BufferAppend, BufferGetItem, BufferSample)
 from .sparse_ops import (
     SparseToDense, SparseTensorDenseMatmul, SparseTensorDenseAdd, SparseSlice)
 from .spectral_ops import (BartlettWindow, BlackmanWindow)
@@ -406,6 +405,7 @@ __all__ = [
     "AllReduce",
     "AllGatherV",
     "ReduceScatterV",
+    "AlltoAllVC",
     "Reduce",
     "_AllSwap",
     "ReduceScatter",
@@ -532,9 +532,6 @@ __all__ = [
     "HShrink",
     "PyExecute",
     "PyFunc",
-    "BufferAppend",
-    "BufferGetItem",
-    "BufferSample",
     "Erfinv",
     "Conj",
     "Real",