mindspore 2.7.0__cp311-cp311-win_amd64.whl → 2.7.1__cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +4 -1
- mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
- mindspore/_extends/parse/compile_config.py +24 -1
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +6 -2
- mindspore/_extends/parse/resources.py +1 -1
- mindspore/_extends/parse/standard_method.py +8 -1
- mindspore/_extends/parse/trope.py +2 -1
- mindspore/_extends/pijit/pijit_func_white_list.py +7 -22
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/base.py +29 -2
- mindspore/common/_decorator.py +3 -2
- mindspore/common/_grad_function.py +3 -1
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +275 -64
- mindspore/common/_utils.py +0 -44
- mindspore/common/api.py +285 -35
- mindspore/common/dump.py +7 -108
- mindspore/common/dynamic_shape/auto_dynamic_shape.py +1 -3
- mindspore/common/hook_handle.py +60 -0
- mindspore/common/jit_config.py +5 -1
- mindspore/common/jit_trace.py +27 -12
- mindspore/common/lazy_inline.py +5 -3
- mindspore/common/parameter.py +13 -107
- mindspore/common/recompute.py +4 -11
- mindspore/common/tensor.py +16 -169
- mindspore/communication/_comm_helper.py +11 -1
- mindspore/communication/comm_func.py +138 -4
- mindspore/communication/management.py +85 -1
- mindspore/config/op_info.config +0 -15
- mindspore/context.py +5 -85
- mindspore/dataset/engine/datasets.py +8 -4
- mindspore/dataset/engine/datasets_vision.py +1 -1
- mindspore/dataset/engine/validators.py +1 -15
- mindspore/dnnl.dll +0 -0
- mindspore/{experimental/llm_boost/ascend_native → graph}/__init__.py +7 -7
- mindspore/graph/custom_pass.py +55 -0
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/__init__.py +3 -3
- mindspore/mindrecord/common/exceptions.py +1 -0
- mindspore/mindrecord/config.py +1 -1
- mindspore/{parallel/mpi → mindrecord/core}/__init__.py +4 -1
- mindspore/mindrecord/{shardheader.py → core/shardheader.py} +2 -1
- mindspore/mindrecord/{shardindexgenerator.py → core/shardindexgenerator.py} +1 -1
- mindspore/mindrecord/{shardreader.py → core/shardreader.py} +2 -1
- mindspore/mindrecord/{shardsegment.py → core/shardsegment.py} +2 -2
- mindspore/mindrecord/{shardutils.py → core/shardutils.py} +1 -1
- mindspore/mindrecord/{shardwriter.py → core/shardwriter.py} +1 -1
- mindspore/mindrecord/filereader.py +4 -4
- mindspore/mindrecord/filewriter.py +5 -5
- mindspore/mindrecord/mindpage.py +2 -2
- mindspore/mindrecord/tools/cifar10.py +1 -1
- mindspore/mindrecord/tools/cifar100.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +1 -1
- mindspore/mindrecord/tools/cifar10_to_mr.py +1 -1
- mindspore/mindrecord/tools/csv_to_mr.py +1 -1
- mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
- mindspore/mindrecord/tools/mnist_to_mr.py +1 -1
- mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -1
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_cluster.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_hardware_abstract.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_ops_host.dll → mindspore_ops_cpu.dll} +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mindspore_runtime_utils.dll +0 -0
- mindspore/mindspore_tools.dll +0 -0
- mindspore/mint/__init__.py +15 -10
- mindspore/mint/distributed/distributed.py +182 -62
- mindspore/mint/nn/__init__.py +2 -16
- mindspore/mint/nn/functional.py +4 -110
- mindspore/mint/nn/layer/__init__.py +0 -2
- mindspore/mint/nn/layer/activation.py +0 -6
- mindspore/mint/nn/layer/basic.py +0 -47
- mindspore/mint/nn/layer/conv.py +4 -4
- mindspore/mint/nn/layer/normalization.py +8 -13
- mindspore/mint/nn/layer/pooling.py +0 -4
- mindspore/nn/__init__.py +1 -3
- mindspore/nn/cell.py +16 -66
- mindspore/nn/layer/basic.py +49 -1
- mindspore/nn/layer/container.py +16 -0
- mindspore/nn/layer/embedding.py +4 -169
- mindspore/nn/layer/normalization.py +2 -1
- mindspore/nn/layer/thor_layer.py +4 -85
- mindspore/nn/optim/ada_grad.py +0 -1
- mindspore/nn/optim/adafactor.py +0 -1
- mindspore/nn/optim/adam.py +31 -124
- mindspore/nn/optim/adamax.py +0 -1
- mindspore/nn/optim/asgd.py +0 -1
- mindspore/nn/optim/ftrl.py +8 -102
- mindspore/nn/optim/lamb.py +0 -1
- mindspore/nn/optim/lars.py +0 -3
- mindspore/nn/optim/lazyadam.py +25 -218
- mindspore/nn/optim/momentum.py +5 -43
- mindspore/nn/optim/optimizer.py +6 -55
- mindspore/nn/optim/proximal_ada_grad.py +0 -1
- mindspore/nn/optim/rmsprop.py +0 -1
- mindspore/nn/optim/rprop.py +0 -1
- mindspore/nn/optim/sgd.py +0 -1
- mindspore/nn/optim/tft_wrapper.py +0 -1
- mindspore/nn/optim/thor.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +7 -8
- mindspore/nn/probability/bijector/gumbel_cdf.py +2 -2
- mindspore/nn/probability/bijector/power_transform.py +20 -21
- mindspore/nn/probability/bijector/scalar_affine.py +5 -5
- mindspore/nn/probability/bijector/softplus.py +13 -14
- mindspore/nn/wrap/grad_reducer.py +4 -74
- mindspore/numpy/array_creations.py +2 -2
- mindspore/numpy/fft.py +9 -9
- mindspore/{nn/reinforcement → onnx}/__init__.py +5 -8
- mindspore/onnx/onnx_export.py +137 -0
- mindspore/opencv_core4110.dll +0 -0
- mindspore/opencv_imgcodecs4110.dll +0 -0
- mindspore/{opencv_imgproc452.dll → opencv_imgproc4110.dll} +0 -0
- mindspore/ops/__init__.py +2 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +38 -2
- mindspore/ops/_op_impl/aicpu/__init__.py +0 -10
- mindspore/ops/_op_impl/cpu/__init__.py +0 -5
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +16 -22
- mindspore/ops/auto_generate/gen_extend_func.py +2 -7
- mindspore/ops/auto_generate/gen_ops_def.py +98 -141
- mindspore/ops/auto_generate/gen_ops_prim.py +12708 -12686
- mindspore/ops/communication.py +97 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +15 -1
- mindspore/ops/composite/multitype_ops/__init__.py +3 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +150 -8
- mindspore/ops/composite/multitype_ops/add_impl.py +7 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +27 -0
- mindspore/ops/function/__init__.py +1 -0
- mindspore/ops/function/array_func.py +14 -12
- mindspore/ops/function/comm_func.py +3883 -0
- mindspore/ops/function/debug_func.py +3 -4
- mindspore/ops/function/math_func.py +45 -54
- mindspore/ops/function/nn_func.py +75 -294
- mindspore/ops/function/random_func.py +9 -18
- mindspore/ops/functional.py +2 -0
- mindspore/ops/functional_overload.py +354 -18
- mindspore/ops/operations/__init__.py +2 -5
- mindspore/ops/operations/_custom_ops_utils.py +7 -9
- mindspore/ops/operations/_inner_ops.py +1 -38
- mindspore/ops/operations/_rl_inner_ops.py +0 -933
- mindspore/ops/operations/array_ops.py +1 -0
- mindspore/ops/operations/comm_ops.py +94 -2
- mindspore/ops/operations/custom_ops.py +228 -19
- mindspore/ops/operations/debug_ops.py +27 -29
- mindspore/ops/operations/manually_defined/ops_def.py +27 -306
- mindspore/ops/operations/nn_ops.py +2 -2
- mindspore/ops/operations/sparse_ops.py +0 -83
- mindspore/ops/primitive.py +1 -17
- mindspore/ops/tensor_method.py +72 -3
- mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +5 -5
- mindspore/ops_generate/aclnn/gen_aclnn_implement.py +8 -8
- mindspore/ops_generate/api/functions_cc_generator.py +53 -4
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +25 -11
- mindspore/ops_generate/common/gen_constants.py +11 -10
- mindspore/ops_generate/common/op_proto.py +18 -1
- mindspore/ops_generate/common/template.py +102 -245
- mindspore/ops_generate/common/template_utils.py +212 -0
- mindspore/ops_generate/gen_custom_ops.py +69 -0
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +78 -7
- mindspore/ops_generate/op_def_py/base_op_prim_py_generator.py +360 -0
- mindspore/ops_generate/op_def_py/custom_op_prim_py_generator.py +140 -0
- mindspore/ops_generate/op_def_py/op_def_py_generator.py +54 -7
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -312
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +74 -17
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +22 -5
- mindspore/ops_generate/pyboost/op_template_parser.py +3 -2
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +21 -5
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +2 -2
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +30 -10
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +10 -3
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +19 -9
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +71 -28
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +10 -9
- mindspore/ops_generate/pyboost/pyboost_utils.py +27 -16
- mindspore/ops_generate/resources/yaml_loader.py +13 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +2 -2
- mindspore/parallel/_cell_wrapper.py +1 -1
- mindspore/parallel/_parallel_serialization.py +1 -4
- mindspore/parallel/_utils.py +29 -6
- mindspore/parallel/checkpoint_transform.py +18 -2
- mindspore/parallel/cluster/process_entity/_api.py +24 -32
- mindspore/parallel/cluster/process_entity/_utils.py +9 -5
- mindspore/{experimental/llm_boost/atb → parallel/distributed}/__init__.py +21 -23
- mindspore/parallel/distributed/distributed_data_parallel.py +393 -0
- mindspore/parallel/distributed/flatten_grad_buffer.py +295 -0
- mindspore/parallel/strategy.py +336 -0
- mindspore/parallel/transform_safetensors.py +117 -16
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +3 -0
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +1 -1
- mindspore/profiler/common/constant.py +5 -0
- mindspore/profiler/common/file_manager.py +9 -0
- mindspore/profiler/common/msprof_cmd_tool.py +38 -2
- mindspore/profiler/common/path_manager.py +56 -24
- mindspore/profiler/common/profiler_context.py +2 -12
- mindspore/profiler/common/profiler_info.py +3 -3
- mindspore/profiler/common/profiler_path_manager.py +13 -0
- mindspore/profiler/common/util.py +30 -3
- mindspore/profiler/experimental_config.py +2 -1
- mindspore/profiler/platform/npu_profiler.py +33 -6
- mindspore/run_check/_check_version.py +108 -24
- mindspore/runtime/__init__.py +3 -2
- mindspore/runtime/executor.py +11 -3
- mindspore/runtime/memory.py +112 -0
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/{experimental/llm_boost → tools}/__init__.py +5 -5
- mindspore/tools/data_dump.py +130 -0
- mindspore/tools/sdc_detect.py +91 -0
- mindspore/tools/stress_detect.py +63 -0
- mindspore/train/__init__.py +6 -6
- mindspore/train/_utils.py +5 -18
- mindspore/train/amp.py +6 -4
- mindspore/train/callback/_checkpoint.py +0 -9
- mindspore/train/callback/_train_fault_tolerance.py +69 -18
- mindspore/train/data_sink.py +1 -5
- mindspore/train/model.py +38 -211
- mindspore/train/serialization.py +126 -387
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +6 -3
- mindspore/utils/dlpack.py +92 -0
- mindspore/utils/dryrun.py +1 -1
- mindspore/utils/runtime_execution_order_check.py +10 -0
- mindspore/utils/sdc_detect.py +14 -12
- mindspore/utils/stress_detect.py +43 -0
- mindspore/utils/utils.py +144 -8
- mindspore/version.py +1 -1
- {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/METADATA +3 -2
- {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/RECORD +254 -267
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -210
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +0 -52
- mindspore/experimental/llm_boost/atb/boost_base.py +0 -385
- mindspore/experimental/llm_boost/atb/llama_boost.py +0 -137
- mindspore/experimental/llm_boost/atb/qwen_boost.py +0 -124
- mindspore/experimental/llm_boost/register.py +0 -130
- mindspore/experimental/llm_boost/utils.py +0 -31
- mindspore/include/OWNERS +0 -7
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/nn/optim/_dist_optimizer_registry.py +0 -111
- mindspore/nn/reinforcement/_batch_read_write.py +0 -142
- mindspore/nn/reinforcement/_tensors_queue.py +0 -152
- mindspore/nn/reinforcement/tensor_array.py +0 -145
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +0 -113
- mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +0 -96
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +0 -42
- mindspore/ops/_op_impl/cpu/buffer_append.py +0 -28
- mindspore/ops/_op_impl/cpu/buffer_get.py +0 -28
- mindspore/ops/_op_impl/cpu/buffer_sample.py +0 -28
- mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +0 -42
- mindspore/ops/operations/_tensor_array.py +0 -359
- mindspore/ops/operations/rl_ops.py +0 -288
- mindspore/parallel/_offload_context.py +0 -275
- mindspore/parallel/_recovery_context.py +0 -115
- mindspore/parallel/_transformer/__init__.py +0 -35
- mindspore/parallel/_transformer/layers.py +0 -765
- mindspore/parallel/_transformer/loss.py +0 -251
- mindspore/parallel/_transformer/moe.py +0 -693
- mindspore/parallel/_transformer/op_parallel_config.py +0 -222
- mindspore/parallel/_transformer/transformer.py +0 -3124
- mindspore/parallel/mpi/_mpi_config.py +0 -116
- mindspore/train/memory_profiling_pb2.py +0 -298
- {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/WHEEL +0 -0
- {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/top_level.txt +0 -0
mindspore/ops/functional_overload.py

@@ -20,6 +20,8 @@ from mindspore._c_expression import _any_instance
 from mindspore._c_expression import _bernoulli__instance
 from mindspore._c_expression import _bitwise_not_instance
 from mindspore._c_expression import _clamp_instance
+from mindspore._c_expression import _conv1d_instance
+from mindspore._c_expression import _conv2d_instance
 from mindspore._c_expression import _conv3d_instance
 from mindspore._c_expression import _div_instance
 from mindspore._c_expression import _einsum_instance

@@ -32,6 +34,7 @@ from mindspore._c_expression import _gmm_instance
 from mindspore._c_expression import _gmm_backward_instance
 from mindspore._c_expression import _gmm_backward_fusion_instance
 from mindspore._c_expression import _greater_equal_instance
+from mindspore._c_expression import _imag_instance
 from mindspore._c_expression import _index_add_instance
 from mindspore._c_expression import _kthvalue_instance
 from mindspore._c_expression import _lerp_instance

@@ -41,6 +44,7 @@ from mindspore._c_expression import _min_instance
 from mindspore._c_expression import _nansum_instance
 from mindspore._c_expression import _pixel_shuffle_instance
 from mindspore._c_expression import _quant_matmul_instance
+from mindspore._c_expression import _real_instance
 from mindspore._c_expression import _remainder_instance
 from mindspore._c_expression import _repeat_interleave_instance
 from mindspore._c_expression import _rmod_instance
@@ -439,6 +443,245 @@ def clip(*args, **kwargs):
     return _clamp_instance(*args, **kwargs)
 
 
+def conv1d(*args, **kwargs):
+    r"""
+    conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
+
+    Applies a 1D convolution over an input tensor. The input tensor is typically
+    of shape :math:`(N, C_{in}, L_{in})`,
+    where :math:`N` is batch size, :math:`C` is channel number, :math:`L` is sequence length.
+
+    The output is calculated based on formula:
+
+    .. math::
+
+        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
+        \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
+
+    where :math:`bias` is the output channel bias, :math:`ccor` is
+    the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
+    :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
+
+    - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
+      where :math:`N` is the batch size of the input.
+
+    - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
+      where :math:`C_{out}` is the number of
+      output channels, which is also equal to the number of kernels.
+
+    - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
+      where :math:`C_{in}` is the number of
+      input channels, which is also equal to the number of channels in the convolutional kernels.
+
+    Therefore, in the above formula, :math:`{bias}(C_{\text{out}_j})` represents the bias of the :math:`j`-th
+    output channel, :math:`{weight}(C_{\text{out}_j}, k)` represents the slice of the :math:`j`-th convolutional
+    kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+    channel in the :math:`i`-th batch of the input feature map.
+
+    The shape of the convolutional kernel is given by :math:`(\text{kernel_size})`,
+    where :math:`\text{kernel_size}` is the length of the kernel.
+    If we consider the input and output channels as well as the `groups` parameter, the complete kernel shape
+    will be :math:`(C_{out}, C_{in} / \text{groups}, \text{kernel_size})`,
+    where `groups` is the number of groups dividing `x`'s input channel when applying group convolution.
+
+    For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
+    <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
+
+    Args:
+        input (Tensor): Tensor of shape :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`.
+        weight (Tensor): Tensor of shape
+            :math:`(C_{out}, C_{in} / \text{groups}, \text{kernel_size})`, then the size of kernel
+            is :math:`(\text{kernel_size})`.
+        bias (Tensor, optional): Bias Tensor with shape :math:`(C_{out})`.
+            When bias is ``None`` , zeros will be used. Default: ``None`` .
+        stride (Union[int, tuple[int], list[int]], optional): The movement stride of the 1D convolution kernel.
+            The data type is an integer or a tuple of one integer. Default: ``1`` .
+        padding (Union[int, tuple[int], list[int], str], optional): The number of padding
+            on the input.
+            The data type is an integer or a tuple of one integer or string {`valid`, `same`}.
+            The value should be greater than or equal to 0. Default: ``0`` .
+
+            - ``"same"``: Pad the input around its edges so that the shape of input and output
+              are the same when `stride` is set to ``1``.
+              The amount of padding is calculated by the operator internally. If the amount is even, it is
+              uniformly distributed around the input; if it is odd, the excess amount goes to the right side.
+              If this mode is set, `stride` must be 1.
+
+            - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+              possible length. Extra sequence that could not complete a full stride will
+              be discarded.
+
+        dilation (Union[int, tuple[int], list[int]], optional): Specifies the dilation rate to use for
+            dilated convolution. It can be a single int or a tuple of 1 integer.
+            Assuming :math:`dilation=(d)`, the convolutional kernel samples the input with a
+            spacing of :math:`d-1` elements in the length direction.
+            Default: ``1`` .
+        groups (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
+            divisible by `groups`. If `groups` is equal to `in_channels` and `out_channels`,
+            this 1D convolution layer also can be called 1D depthwise convolution layer. Default: ``1`` .
+            The following restraints should be met:
+
+            - :math:`(C_{in} \text{ % } \text{groups} == 0)`
+            - :math:`(C_{out} \text{ % } \text{groups} == 0)`
+            - :math:`(C_{out} >= \text{groups})`
+            - :math:`(\text{weight[1]} = C_{in} / \text{groups})`
+
+    Returns:
+        Tensor, the value that applied 1D convolution. The shape is :math:`(N, C_{out}, L_{out})`.
+        To see how different pad modes affect the output shape, please refer to
+        :class:`mindspore.mint.nn.Conv1d` for more details.
+
+    Raises:
+        RuntimeError: On Ascend, due to the limitation of the L1 cache size of different NPU chips, if input size or
+            kernel size is too large, it may trigger an error.
+        TypeError: If `in_channels`, `out_channels` or `groups` is not an int.
+        TypeError: If `kernel_size`, `stride` or `dilation` is neither an int nor a tuple.
+        ValueError: Args and size of the input feature map should satisfy the output formula to ensure that the size of
+            the output feature map is positive; otherwise, an error will be reported.
+        ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
+        ValueError: If `padding` is less than 0.
+        ValueError: If `padding` is `same` and `stride` is not equal to 1.
+        ValueError: The input parameters do not satisfy the convolution output formula.
+        ValueError: The `kernel_size` cannot exceed the size of the input feature map.
+        ValueError: The value of `padding` cannot cause the calculation area to exceed the input size.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops, mint
+        >>> x = Tensor(np.ones([10, 32, 32]), mindspore.float32)
+        >>> weight = Tensor(np.ones([32, 32, 3]), mindspore.float32)
+        >>> output = mint.nn.functional.conv1d(x, weight)
+        >>> print(output.shape)
+        (10, 32, 30)
+    """
+    return _conv1d_instance(*args, **kwargs)
+
+
+def conv2d(*args, **kwargs):
+    r"""
+    Applies a 2D convolution over an input tensor. The input tensor is typically of
+    shape :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`,
+    where :math:`N` is batch size, :math:`C` is channel number, :math:`H` is feature height, :math:`W` is feature width.
+
+    The output is calculated based on formula:
+
+    .. math::
+
+        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
+        \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
+
+    where :math:`bias` is the output channel bias, :math:`ccor` is
+    the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
+    :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
+
+    - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
+      where :math:`N` is the batch size of the input.
+
+    - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
+      where :math:`C_{out}` is the number of output channels, which is also equal to the number of kernels.
+
+    - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
+      where :math:`C_{in}` is the number of
+      input channels, which is also equal to the number of channels in the convolutional kernels.
+
+    Therefore, in the above formula, :math:`{bias}(C_{out_j})` represents the bias of the :math:`j`-th
+    output channel, :math:`{weight}(C_{out_j}, k)` represents the slice of the :math:`j`-th convolutional
+    kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+    channel in the :math:`i`-th batch of the input feature map.
+
+    The shape of the convolutional kernel is given by :math:`(\text{kernel_size[0]}, \text{kernel_size[1]})`,
+    where :math:`\text{kernel_size[0]}` and :math:`\text{kernel_size[1]}` are the height and width of the kernel,
+    respectively.
+    If we consider the input and output channels as well as the `groups` parameter, the complete kernel shape
+    will be :math:`(C_{out}, C_{in} / \text{groups}, \text{kernel_size[0]}, \text{kernel_size[1]})`,
+    where `groups` is the number of groups dividing `x`'s input channel when applying group convolution.
+
+    For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
+    <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_ and
+    `ConvNets <http://cs231n.github.io/convolutional-networks/>`_.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`.
+        weight (Tensor): Tensor of shape
+            :math:`(C_{out}, C_{in} / \text{groups}, \text{kernel_size[0]}, \text{kernel_size[1]})`, then the size of kernel
+            is :math:`(\text{kernel_size[0]}, \text{kernel_size[1]})`.
+        bias (Tensor, optional): Bias Tensor with shape :math:`(C_{out})`.
+            When bias is ``None`` , zeros will be used. Default: ``None`` .
+        stride (Union(int, tuple[int], list[int]), optional): The distance of kernel moving, an int number that
+            represents both the height and width of movement, or a tuple of two int numbers that
+            represent height and width of movement respectively. Default: ``1`` .
+        padding (Union[int, tuple[int], list[int], str], optional): The number of padding
+            on the height and width directions of the input.
+            The data type is an integer or a tuple of two integers or string {`valid`, `same`}. If `padding` is an
+            integer, then `padding_{H}` and `padding_{W}` are both equal to `padding`.
+            If `padding` is a tuple of 2 integers, then `padding_{H}` and `padding_{W}`
+            are equal to `padding[0]` and `padding[1]` respectively.
+            The value should be greater than or equal to 0. Default: ``0`` .
+
+            - ``"same"``: Pad the input around its edges so that the shape of input and output
+              are the same when `stride` is set to ``1``.
+              The amount of padding is calculated by the operator internally. If the amount is even, it is
+              uniformly distributed around the input; if it is odd, the excess amount goes to the right/bottom side.
+              If this mode is set, `stride` must be 1.
+
+            - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+              possible height and width. Extra pixels that could not complete a full stride will
+              be discarded.
+
+        dilation (Union(int, tuple[int], list[int]), optional): Gaps between kernel elements. The data type
+            is int or a tuple of 2 integers. Specifies the dilation rate to use for dilated convolution.
+            If set to be :math:`k > 1`,
+            there will be :math:`k - 1` pixels skipped for each sampling location. Its value must
+            be greater than or equal to 1 and bounded by the height and width of the input `x`. Default: ``1`` .
+        groups (int, optional): Splits `input` into groups. Default: ``1`` .
+
+            - :math:`(C_{in} \text{ % } \text{groups} == 0)` , :math:`(C_{out} \text{ % } \text{groups} == 0)` ,
+              :math:`(C_{out} >= \text{groups})` , :math:`(\text{weight[1]} = C_{in} / \text{groups})`
+
+    Returns:
+        Tensor, the value that applied 2D convolution. The shape is :math:`(N, C_{out}, H_{out}, W_{out})`.
+        To see how different pad modes affect the output shape, please refer to
+        :class:`mindspore.mint.nn.Conv2d` for more details.
+
+    Raises:
+        ValueError: Args and size of the input feature map should satisfy the output formula to ensure that the size of
+            the output feature map is positive; otherwise, an error will be reported. For more details on the output
+            formula, please refer to :class:`mindspore.mint.nn.Conv2d`.
+        RuntimeError: On Ascend, due to the limitation of the L1 cache size of different NPU chips, if input size or
+            kernel size is too large, it may trigger an error.
+        TypeError: If `in_channels` , `out_channels` or `groups` is not an int.
+        TypeError: If `kernel_size` , `stride` or `dilation` is neither an int nor a tuple.
+        TypeError: If `bias` is not a Tensor.
+        ValueError: If the shape of `bias` is not :math:`(C_{out})` .
+        ValueError: If `stride` or `dilation` is less than 1.
+        ValueError: If `padding` is `same` and `stride` is not equal to 1.
+        ValueError: The input parameters do not satisfy the convolution output formula.
+        ValueError: The `kernel_size` cannot exceed the size of the input feature map.
+        ValueError: The value of `padding` cannot cause the calculation area to exceed the input size.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops, mint
+        >>> x = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32)
+        >>> weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)
+        >>> output = mint.nn.functional.conv2d(x, weight)
+        >>> print(output.shape)
+        (10, 32, 30, 30)
+    """
+    return _conv2d_instance(*args, **kwargs)
+
+
 def conv3d(*args, **kwargs):
     r"""
     conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
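Both new wrappers forward straight to the C++ singletons (`_conv1d_instance`, `_conv2d_instance`). As a quick cross-check of the docstring examples above, the expected output length follows the standard cross-correlation size formula; a minimal sketch in plain Python (the helper name `conv1d_out_len` is ours, not MindSpore API):

    # L_out = floor((L_in + 2*padding - dilation*(kernel_size - 1) - 1) / stride) + 1
    def conv1d_out_len(l_in, kernel_size, stride=1, padding=0, dilation=1):
        return (l_in + 2 * padding - dilation * (kernel_size - 1) - 1) // stride + 1

    # Matches the conv1d example: x (10, 32, 32) with weight (32, 32, 3) -> (10, 32, 30)
    assert conv1d_out_len(32, kernel_size=3) == 30
    # The conv2d example applies the same formula to each spatial dim: 32 -> 30, hence (10, 32, 30, 30)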
@@ -740,13 +983,10 @@ def einsum(*args, **kwargs):
 
 def empty(*args, **kwargs):
     r"""
-    empty(*size, *, dtype=None, device=None) -> Tensor
+    empty(*size, *, dtype=None, device=None, pin_memory=False) -> Tensor
 
     Creates a tensor with uninitialized data, whose shape, dtype and device are described by the argument `size`,
-    `dtype` and `device` respectively.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
+    `dtype` and `device` respectively. If `pin_memory` is True, the tensor will be allocated in pinned memory.
 
     Args:
         size (Union[tuple[int], list[int], int]): The specified shape of output tensor. Can be variable numbers of

@@ -755,15 +995,18 @@ def empty(*args, **kwargs):
     Keyword Args:
         dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor. If `dtype` is ``None`` ,
             `mindspore.float32` will be used. Default: ``None`` .
-        device (
+        device (str, optional): The specified device of the output tensor. In PyNative mode, ``"Ascend"``, ``"npu"``,
             ``"cpu"`` and ``"CPU"`` are supported. In graph mode O0, ``"Ascend"`` and ``"npu"`` are supported. If `device = None`,
             `mindspore.context.device_target` will be used. Default ``None``.
+        pin_memory (bool, optional): If `pin_memory` is set to True, the tensor will be allocated in pinned memory, and `device`
+            should be ``"cpu"`` or ``"CPU"`` . Default ``False``.
 
     Returns:
         Tensor, whose shape, dtype and device are defined by input.
 
     Raises:
         TypeError: If `size` is neither an int nor a tuple or list of int.
+        RuntimeError: If `pin_memory` is True, and `device` is neither ``"cpu"`` nor ``"CPU"`` .
 
     Supported Platforms:
         ``Ascend`` ``CPU``

@@ -781,13 +1024,10 @@ def empty(*args, **kwargs):
 
 def empty_like(*args, **kwargs):
     r"""
-    empty_like(input, *, dtype=None, device=None) -> Tensor
+    empty_like(input, *, dtype=None, device=None, pin_memory=False) -> Tensor
 
     Returns an uninitialized Tensor with the same shape as the `input`. Its dtype is specified by `dtype` and its
-    device is specified by `device`.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
+    device is specified by `device`. If `pin_memory` is True, the tensor will be allocated in pinned memory.
 
     Args:
         input (Tensor): Tensor of any dimension.

@@ -795,15 +1035,18 @@ def empty_like(*args, **kwargs):
     Keyword Args:
         dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype = None`, the
             tensor will have the same dtype as input `input`. Default ``None``.
-        device (
+        device (str, optional): The specified device of the output tensor. In PyNative mode, ``"Ascend"``, ``"npu"``,
             ``"cpu"`` and ``"CPU"`` are supported. In graph mode O0, ``"Ascend"`` and ``"npu"`` are supported. If `device = None`,
             the value set by :func:`mindspore.set_device` will be used. Default ``None``.
+        pin_memory (bool, optional): If `pin_memory` is set to True, the tensor will be allocated in pinned memory, and `device`
+            should be ``"cpu"`` or ``"CPU"`` . Default ``False``.
 
     Returns:
         Tensor, has the same shape, type and device as `input` but with uninitialized data (may be a random value).
 
     Raises:
         TypeError: If `input` is not a Tensor.
+        RuntimeError: If `pin_memory` is True, and `device` is neither ``"cpu"`` nor ``"CPU"`` .
 
     Supported Platforms:
         ``Ascend`` ``CPU``
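The `pin_memory` flag added to `empty`/`empty_like` only applies to CPU allocations. A minimal usage sketch based on the docstrings above (not validated against the 2.7.1 wheel):

    import mindspore
    from mindspore import mint

    # Uninitialized CPU tensor backed by pinned (page-locked) host memory.
    x = mint.empty(2, 3, dtype=mindspore.float32, device="cpu", pin_memory=True)
    # Same shape/dtype, also pinned; per the docs a non-CPU device here raises RuntimeError.
    y = mint.empty_like(x, device="cpu", pin_memory=True)
    print(y.shape)  # (2, 3), contents uninitialized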
@@ -843,9 +1086,6 @@ def floor_divide(*args, **kwargs):
     where the :math:`floor` indicates the Floor operator. For more details,
     please refer to the :class:`mindspore.mint.floor` operator.
 
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
     Args:
         input (Union[Tensor, Number, bool]): The first input is a number or
             a bool or a tensor whose data type is number or bool.
@@ -1157,6 +1397,54 @@ def ge(*args, **kwargs):
     return _greater_equal_instance(*args, **kwargs)
 
 
+def imag(*args, **kwargs):
+    r"""
+    imag(input) -> Tensor
+
+    Return a new tensor containing the imaginary values of the input tensor.
+    The returned tensor and input tensor share the same underlying storage.
+
+    Note:
+        - Only support Pynative mode.
+        - Only support complex64 and complex128 tensors.
+
+    Args:
+        input (Tensor): The input tensor, the data type must be complex64 or complex128.
+
+    Returns:
+        Tensor, the shape is same as `input`. The data type is float32 if `input` is complex64, float64 when `input` is complex128.
+
+    Raises:
+        TypeError: If dtype of `input` is not complex64 or complex128.
+        ValueError: If input tensor has no storage info.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, ops, context
+        >>> context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
+        >>> real = Tensor([1.1, 2.1, 3.1], mindspore.float32)
+        >>> imag = Tensor([4.1, 5.1, 6.1], mindspore.float32)
+        >>> x = ops.Complex()(real, imag)
+        >>> output = ops.functional_overload.imag(x)
+        >>> print(output)
+        [4.1 5.1 6.1]
+        >>> print(output.dtype)
+        Float32
+        >>> real = Tensor([1.1, 2.1, 3.1], mindspore.float64)
+        >>> imag = Tensor([4.1, 5.1, 6.1], mindspore.float64)
+        >>> x = ops.Complex()(real, imag)
+        >>> output = ops.functional_overload.imag(x)
+        >>> print(output)
+        [4.1 5.1 6.1]
+        >>> print(output.dtype)
+        Float64
+    """
+    return _imag_instance(*args, **kwargs)
+
+
 def index_add(*args, **kwargs):
     r"""
     index_add(input, dim, index, source, *, alpha=1) -> Tensor
@@ -1625,9 +1913,6 @@ def pixel_shuffle(*args, **kwargs):
     For detailed introduction to the pixel_shuffle algorithm, refer to
     `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/abs/1609.05158>`_ .
 
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
     Args:
         input (Tensor): Tensor of shape :math:`(*, C \times r^2, H, W)` . The dimension of `input` is larger than 2,
             and the length of third to last dimension can be divisible by the square of `upscale_factor`.
@@ -1709,6 +1994,53 @@ def quant_matmul(*args, **kwargs):
     return _quant_matmul_instance(*args, **kwargs)
 
 
+def real(*args, **kwargs):
+    r"""
+    real(input) -> Tensor
+
+    Return a new tensor containing the real values of the input tensor. If input is real, it is returned unchanged.
+    The returned tensor and input tensor share the same underlying storage.
+
+    Note:
+        Only support Pynative mode.
+
+    Args:
+        input (Tensor): The input tensor.
+
+    Returns:
+        Tensor, the shape is same as `input`. The data type is float32 if `input` is complex64, float64 when `input` is complex128.
+        Otherwise, the data type is the same as `input`.
+
+    Raises:
+        ValueError: If input tensor has no storage info.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, ops, context
+        >>> context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
+        >>> real = Tensor([1.1, 2.1, 3.1], mindspore.float32)
+        >>> imag = Tensor([4.1, 5.1, 6.1], mindspore.float32)
+        >>> x = ops.Complex()(real, imag)
+        >>> output = ops.functional_overload.real(x)
+        >>> print(output)
+        [1.1 2.1 3.1]
+        >>> print(output.dtype)
+        Float32
+        >>> real = Tensor([1.1, 2.1, 3.1], mindspore.float64)
+        >>> imag = Tensor([4.1, 5.1, 6.1], mindspore.float64)
+        >>> x = ops.Complex()(real, imag)
+        >>> output = ops.functional_overload.real(x)
+        >>> print(output)
+        [1.1 2.1 3.1]
+        >>> print(output.dtype)
+        Float64
+    """
+    return _real_instance(*args, **kwargs)
+
+
 def remainder(*args, **kwargs):
     r"""
     remainder(input, other) -> Tensor
@@ -1983,6 +2315,8 @@ __all__ = [
     "bitwise_not",
     "clamp",
     "clip",
+    "conv1d",
+    "conv2d",
     "conv3d",
     "div",
     "divide",

@@ -1997,6 +2331,7 @@ __all__ = [
     "gmm_backward_fusion",
     "greater_equal",
     "ge",
+    "imag",
     "index_add",
     "kthvalue",
     "lerp",

@@ -2006,6 +2341,7 @@ __all__ = [
     "nansum",
     "pixel_shuffle",
     "quant_matmul",
+    "real",
     "remainder",
     "repeat_interleave",
     "rmod",
mindspore/ops/operations/__init__.py

@@ -55,7 +55,7 @@ from .comm_ops import (AllGather, AllReduce, Reduce, NeighborExchange, NeighborE
                        Broadcast, CollectiveGather, CollectiveScatter, Barrier, Send, Receive, BatchISendIRecv,
                        _MirrorOperator, _MirrorMiniStepOperator, _MiniStepAllGather, ReduceOp, _VirtualDataset,
                        _VirtualOutput, _VirtualDiv, _GetTensorSlice, _VirtualAdd, _VirtualAssignAdd, _VirtualAccuGrad,
-                       _HostAllGather, _HostReduceScatter, _MirrorMicroStepOperator, _MicroStepAllGather,
+                       _HostAllGather, _HostReduceScatter, _MirrorMicroStepOperator, _MicroStepAllGather, AlltoAllVC,
                        _VirtualPipelineEnd, AlltoAllV, ReduceScatter, _VirtualAssignKvCache, AllGatherV, ReduceScatterV)
 from .control_ops import GeSwitch, Merge
 from .custom_ops import (Custom, CustomOpBuilder)

@@ -129,7 +129,6 @@ from .random_ops import (RandomChoiceWithMask, StandardNormal, Gamma, RandomGamm
                          LogUniformCandidateSampler, TruncatedNormal, LogNormalReverse, NonDeterministicInts,
                          ParameterizedTruncatedNormal, RandomPoisson, MultinomialWithReplacement, RandomShuffle,
                          RandpermV2)
-from .rl_ops import (BufferAppend, BufferGetItem, BufferSample)
 from .sparse_ops import (
     SparseToDense, SparseTensorDenseMatmul, SparseTensorDenseAdd, SparseSlice)
 from .spectral_ops import (BartlettWindow, BlackmanWindow)

@@ -406,6 +405,7 @@ __all__ = [
     "AllReduce",
     "AllGatherV",
     "ReduceScatterV",
+    "AlltoAllVC",
     "Reduce",
     "_AllSwap",
     "ReduceScatter",

@@ -532,9 +532,6 @@ __all__ = [
     "HShrink",
     "PyExecute",
     "PyFunc",
-    "BufferAppend",
-    "BufferGetItem",
-    "BufferSample",
     "Erfinv",
     "Conj",
     "Real",
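Since `rl_ops` and the `Buffer*` primitives are gone from `mindspore.ops.operations`, code that imported them under 2.7.0 now fails at import time. A hedged compatibility guard (illustrative only, not part of the package):

    # Guard for callers that used the replay-buffer ops removed in 2.7.1.
    try:
        from mindspore.ops.operations import BufferAppend, BufferGetItem, BufferSample
    except ImportError:
        BufferAppend = BufferGetItem = BufferSample = None  # removed; callers must migrate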
mindspore/ops/operations/_custom_ops_utils.py

@@ -228,20 +228,18 @@ class ExtensionBuilder:
             source_file.write(content)
 
     def _run_ninja_build(self, module_name):
-        """Run ninja build."""
+        """Run ninja build and log output to .build_log.txt"""
         cmd = ['ninja', '-v']
         env = os.environ.copy()
+        log_file = os.path.join(self.build_dir, '.build_log.txt')
 
         try:
-
-
+            with open(log_file, 'w', encoding='utf-8') as f:
+                # If the build succeeds, do nothing with the output (silent)
+                subprocess.run(cmd, stdout=f, stderr=f, cwd=self.build_dir, check=True, env=env)
         except subprocess.CalledProcessError as e:
-
-
-            stdout_output = e.stdout.decode() if e.stdout else ""
-            full_output = stderr_output + stdout_output
-
-            # Format the error message
+            with open(log_file, 'r', encoding='utf-8') as rf:
+                full_output = rf.read()
             msg = f"Error building extension '{module_name}': {full_output}"
 
             # In multi-card situation, only one process build the library.
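The rewritten `_run_ninja_build` redirects ninja's stdout and stderr into `.build_log.txt` and re-reads the file only on failure, so a successful build stays silent while a failed one surfaces the full log. The same pattern in isolation (a standalone sketch using only the standard library, not the MindSpore code itself):

    import os
    import subprocess

    def run_and_log(cmd, build_dir):
        """Run cmd in build_dir, capturing all output in .build_log.txt; raise with the log on failure."""
        log_file = os.path.join(build_dir, '.build_log.txt')
        try:
            with open(log_file, 'w', encoding='utf-8') as f:
                # One handle for both streams keeps the log interleaved in order.
                subprocess.run(cmd, stdout=f, stderr=f, cwd=build_dir, check=True)
        except subprocess.CalledProcessError as exc:
            with open(log_file, 'r', encoding='utf-8') as rf:
                raise RuntimeError(f"{cmd!r} failed:\n{rf.read()}") from exc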
mindspore/ops/operations/_inner_ops.py

@@ -1413,6 +1413,7 @@ class PsROIPooling(PrimitiveWithInfer):
 
     @prim_attr_register
     def __init__(self, pooled_height, pooled_width, num_rois, spatial_scale, out_dim, group_size):
+
         """Initialize PsROIPooling"""
         validator.check_value_type("pooled_height", pooled_height, [int], self.name)
         validator.check_value_type("pooled_width", pooled_width, [int], self.name)

@@ -1726,44 +1727,6 @@ class Format(PrimitiveWithInfer):
         return {'dtype': mstype.string, 'shape': [], 'value': value}
 
 
-class FlattenConcat(Primitive):
-    """
-    Flatten input tensors and concatenate them into several chunk tensors grouped by data types.
-
-    Args:
-        fusion_size (int): Maximum memory chunk size in bytes, 0 for unlimited. Default: 0.
-
-    Inputs:
-        - **tensors** (tuple[Tensor], list[Tensor]) - The input Tensors to be flattened and concatenated.
-
-    Outputs:
-        tuple[Tensor], result chunk tensors.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> from mindspore.ops.operations import _inner_ops as inner
-        >>> t1 = Tensor(np.array([1]).astype(np.float32))
-        >>> t2 = Tensor(np.array([2]).astype(np.float32))
-        >>> t3 = Tensor(np.array([3]).astype(np.float64))
-        >>> t4 = Tensor(np.array([4]).astype(np.float32))
-        >>> t5 = Tensor(np.array([5]).astype(np.float64))
-        >>> chunks = inner.FlattenConcat()([t1, t2, t2, t3, t4, t5])
-        >>> print(chunks[0].asnumpy())
-        >>> print(chunks[1].asnumpy())
-        [1. 2. 4.]
-        [3. 5.]
-    """
-
-    @prim_attr_register
-    def __init__(self, fusion_size=0):
-        """Initialize FlattenConcat"""
-        validator.check_non_negative_int(fusion_size, 'fusion_size', self.name)
-        self.fusion_size = fusion_size
-        self.add_prim_attr('fusion_size', fusion_size)
-
-
 class KMeansCentroids(PrimitiveWithInfer):
     """
     Calculate the segment_sum, segment_count, kmean_total_sum that are clustering results