mindspore-2.3.0-cp39-cp39-win_amd64.whl → mindspore-2.4.0-cp39-cp39-win_amd64.whl
This diff shows the published contents of two package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +3 -1
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +50 -9
- mindspore/_extends/parse/compile_config.py +41 -0
- mindspore/_extends/parse/parser.py +9 -7
- mindspore/_extends/parse/standard_method.py +52 -14
- mindspore/_extends/pijit/pijit_func_white_list.py +350 -24
- mindspore/amp.py +24 -10
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/common/__init__.py +6 -4
- mindspore/common/_pijit_context.py +190 -0
- mindspore/common/_register_for_tensor.py +2 -1
- mindspore/common/_tensor_overload.py +139 -0
- mindspore/common/api.py +102 -87
- mindspore/common/dump.py +5 -6
- mindspore/common/generator.py +1 -7
- mindspore/common/hook_handle.py +14 -26
- mindspore/common/mindir_util.py +2 -2
- mindspore/common/parameter.py +46 -13
- mindspore/common/recompute.py +39 -9
- mindspore/common/sparse_tensor.py +7 -3
- mindspore/common/tensor.py +209 -29
- mindspore/communication/__init__.py +1 -1
- mindspore/communication/_comm_helper.py +38 -3
- mindspore/communication/comm_func.py +310 -55
- mindspore/communication/management.py +14 -14
- mindspore/context.py +123 -22
- mindspore/dataset/__init__.py +1 -1
- mindspore/dataset/audio/__init__.py +1 -1
- mindspore/dataset/core/config.py +7 -0
- mindspore/dataset/core/validator_helpers.py +7 -0
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +72 -44
- mindspore/dataset/engine/datasets_audio.py +7 -7
- mindspore/dataset/engine/datasets_standard_format.py +53 -3
- mindspore/dataset/engine/datasets_text.py +20 -20
- mindspore/dataset/engine/datasets_user_defined.py +174 -104
- mindspore/dataset/engine/datasets_vision.py +33 -33
- mindspore/dataset/engine/iterators.py +29 -0
- mindspore/dataset/engine/obs/util.py +7 -0
- mindspore/dataset/engine/queue.py +114 -60
- mindspore/dataset/engine/serializer_deserializer.py +2 -2
- mindspore/dataset/engine/validators.py +34 -14
- mindspore/dataset/text/__init__.py +1 -4
- mindspore/dataset/transforms/__init__.py +0 -3
- mindspore/dataset/utils/line_reader.py +2 -0
- mindspore/dataset/vision/__init__.py +1 -4
- mindspore/dataset/vision/utils.py +1 -1
- mindspore/dataset/vision/validators.py +2 -1
- mindspore/dnnl.dll +0 -0
- mindspore/{nn/extend → experimental/es}/__init__.py +4 -11
- mindspore/experimental/es/embedding_service.py +883 -0
- mindspore/{nn/layer → experimental/es}/embedding_service_layer.py +218 -30
- mindspore/experimental/llm_boost/__init__.py +21 -0
- mindspore/{nn/extend/layer → experimental/llm_boost/atb}/__init__.py +4 -8
- mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
- mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
- mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
- mindspore/experimental/llm_boost/register.py +129 -0
- mindspore/experimental/llm_boost/utils.py +31 -0
- mindspore/experimental/optim/adamw.py +85 -0
- mindspore/experimental/optim/optimizer.py +3 -0
- mindspore/hal/__init__.py +3 -3
- mindspore/hal/contiguous_tensors_handle.py +175 -0
- mindspore/hal/stream.py +18 -0
- mindspore/include/api/model_group.h +13 -1
- mindspore/include/api/types.h +10 -10
- mindspore/include/dataset/config.h +2 -2
- mindspore/include/dataset/constants.h +2 -2
- mindspore/include/dataset/execute.h +2 -2
- mindspore/include/dataset/vision.h +4 -0
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +1 -1
- mindspore/mindrecord/filewriter.py +68 -51
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mint/__init__.py +495 -46
- mindspore/mint/distributed/__init__.py +31 -0
- mindspore/mint/distributed/distributed.py +254 -0
- mindspore/mint/nn/__init__.py +266 -21
- mindspore/mint/nn/functional.py +125 -19
- mindspore/mint/nn/layer/__init__.py +39 -0
- mindspore/mint/nn/layer/activation.py +133 -0
- mindspore/mint/nn/layer/normalization.py +477 -0
- mindspore/mint/nn/layer/pooling.py +110 -0
- mindspore/mint/optim/adamw.py +28 -7
- mindspore/mint/special/__init__.py +63 -0
- mindspore/multiprocessing/__init__.py +2 -1
- mindspore/nn/__init__.py +0 -1
- mindspore/nn/cell.py +275 -93
- mindspore/nn/layer/activation.py +211 -44
- mindspore/nn/layer/basic.py +113 -3
- mindspore/nn/layer/embedding.py +120 -2
- mindspore/nn/layer/normalization.py +101 -5
- mindspore/nn/layer/padding.py +34 -48
- mindspore/nn/layer/pooling.py +161 -7
- mindspore/nn/layer/transformer.py +3 -3
- mindspore/nn/loss/__init__.py +2 -2
- mindspore/nn/loss/loss.py +84 -6
- mindspore/nn/optim/__init__.py +2 -1
- mindspore/nn/optim/adadelta.py +1 -1
- mindspore/nn/optim/adam.py +1 -1
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/tft_wrapper.py +127 -0
- mindspore/nn/wrap/cell_wrapper.py +12 -23
- mindspore/nn/wrap/grad_reducer.py +5 -5
- mindspore/nn/wrap/loss_scale.py +17 -3
- mindspore/numpy/__init__.py +1 -1
- mindspore/numpy/array_creations.py +65 -68
- mindspore/numpy/array_ops.py +64 -60
- mindspore/numpy/fft.py +610 -75
- mindspore/numpy/logic_ops.py +11 -10
- mindspore/numpy/math_ops.py +85 -84
- mindspore/numpy/utils_const.py +4 -4
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +6 -4
- mindspore/ops/_grad_experimental/grad_comm_ops.py +47 -3
- mindspore/ops/_grad_experimental/grad_math_ops.py +0 -22
- mindspore/ops/_vmap/vmap_array_ops.py +2 -4
- mindspore/ops/_vmap/vmap_math_ops.py +17 -1
- mindspore/ops/_vmap/vmap_nn_ops.py +43 -2
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +85 -7
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +2 -0
- mindspore/ops/auto_generate/gen_extend_func.py +734 -13
- mindspore/ops/auto_generate/gen_ops_def.py +2420 -381
- mindspore/ops/auto_generate/gen_ops_prim.py +5196 -1659
- mindspore/ops/auto_generate/pyboost_inner_prim.py +176 -56
- mindspore/ops/composite/base.py +85 -48
- mindspore/ops/composite/multitype_ops/_compile_utils.py +1 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -2
- mindspore/ops/function/__init__.py +22 -0
- mindspore/ops/function/array_func.py +490 -153
- mindspore/ops/function/debug_func.py +113 -1
- mindspore/ops/function/fft_func.py +15 -2
- mindspore/ops/function/grad/grad_func.py +3 -2
- mindspore/ops/function/math_func.py +558 -207
- mindspore/ops/function/nn_func.py +817 -383
- mindspore/ops/function/other_func.py +3 -2
- mindspore/ops/function/random_func.py +184 -8
- mindspore/ops/function/reshard_func.py +13 -11
- mindspore/ops/function/sparse_unary_func.py +1 -1
- mindspore/ops/function/vmap_func.py +3 -2
- mindspore/ops/functional.py +24 -14
- mindspore/ops/op_info_register.py +3 -3
- mindspore/ops/operations/__init__.py +6 -1
- mindspore/ops/operations/_grad_ops.py +2 -76
- mindspore/ops/operations/_infer_ops.py +1 -1
- mindspore/ops/operations/_inner_ops.py +71 -94
- mindspore/ops/operations/array_ops.py +12 -146
- mindspore/ops/operations/comm_ops.py +42 -53
- mindspore/ops/operations/custom_ops.py +83 -19
- mindspore/ops/operations/debug_ops.py +42 -10
- mindspore/ops/operations/manually_defined/_inner.py +12 -0
- mindspore/ops/operations/manually_defined/ops_def.py +265 -10
- mindspore/ops/operations/math_ops.py +12 -223
- mindspore/ops/operations/nn_ops.py +20 -114
- mindspore/ops/operations/other_ops.py +7 -4
- mindspore/ops/operations/random_ops.py +46 -1
- mindspore/ops/primitive.py +18 -6
- mindspore/ops_generate/arg_dtype_cast.py +2 -0
- mindspore/ops_generate/gen_aclnn_implement.py +11 -11
- mindspore/ops_generate/gen_constants.py +36 -0
- mindspore/ops_generate/gen_ops.py +67 -52
- mindspore/ops_generate/gen_ops_inner_prim.py +1 -1
- mindspore/ops_generate/gen_pyboost_func.py +131 -47
- mindspore/ops_generate/op_proto.py +10 -3
- mindspore/ops_generate/pyboost_utils.py +14 -1
- mindspore/ops_generate/template.py +43 -21
- mindspore/parallel/__init__.py +3 -1
- mindspore/parallel/_auto_parallel_context.py +28 -8
- mindspore/parallel/_cell_wrapper.py +83 -0
- mindspore/parallel/_parallel_serialization.py +47 -19
- mindspore/parallel/_tensor.py +81 -11
- mindspore/parallel/_utils.py +13 -1
- mindspore/parallel/algo_parameter_config.py +5 -5
- mindspore/parallel/checkpoint_transform.py +46 -39
- mindspore/parallel/cluster/process_entity/__init__.py +1 -1
- mindspore/parallel/cluster/process_entity/_api.py +31 -23
- mindspore/parallel/cluster/process_entity/_utils.py +2 -27
- mindspore/parallel/parameter_broadcast.py +3 -4
- mindspore/parallel/shard.py +162 -31
- mindspore/parallel/transform_safetensors.py +993 -0
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/common/constant.py +29 -0
- mindspore/profiler/common/registry.py +47 -0
- mindspore/profiler/common/util.py +28 -0
- mindspore/profiler/dynamic_profiler.py +694 -0
- mindspore/profiler/envprofiling.py +17 -19
- mindspore/profiler/parser/ascend_analysis/constant.py +18 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +25 -4
- mindspore/profiler/parser/ascend_analysis/function_event.py +43 -19
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +31 -26
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +56 -10
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +55 -8
- mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +27 -20
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +9 -2
- mindspore/profiler/parser/ascend_msprof_exporter.py +5 -4
- mindspore/profiler/parser/ascend_timeline_generator.py +27 -25
- mindspore/profiler/parser/base_timeline_generator.py +19 -25
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +25 -12
- mindspore/profiler/parser/framework_parser.py +1 -391
- mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
- mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
- mindspore/profiler/parser/memory_usage_parser.py +0 -154
- mindspore/profiler/parser/profiler_info.py +78 -6
- mindspore/profiler/profiler.py +153 -0
- mindspore/profiler/profiling.py +280 -412
- mindspore/rewrite/__init__.py +1 -2
- mindspore/rewrite/common/namespace.py +4 -4
- mindspore/rewrite/symbol_tree/symbol_tree.py +3 -3
- mindspore/run_check/_check_version.py +36 -103
- mindspore/safeguard/rewrite_obfuscation.py +591 -247
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +4 -3
- mindspore/train/_utils.py +28 -2
- mindspore/train/amp.py +171 -53
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +85 -22
- mindspore/train/callback/_cluster_monitor.py +1 -1
- mindspore/train/callback/_flops_collector.py +1 -0
- mindspore/train/callback/_loss_monitor.py +3 -3
- mindspore/train/callback/_on_request_exit.py +134 -31
- mindspore/train/callback/_summary_collector.py +5 -5
- mindspore/train/callback/_tft_register.py +352 -0
- mindspore/train/dataset_helper.py +7 -3
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/roc.py +4 -4
- mindspore/train/mind_ir_pb2.py +44 -39
- mindspore/train/model.py +134 -58
- mindspore/train/serialization.py +336 -112
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +21 -0
- mindspore/utils/utils.py +60 -0
- mindspore/version.py +1 -1
- {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/METADATA +6 -2
- {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/RECORD +258 -252
- mindspore/include/c_api/ms/abstract.h +0 -67
- mindspore/include/c_api/ms/attribute.h +0 -197
- mindspore/include/c_api/ms/base/handle_types.h +0 -43
- mindspore/include/c_api/ms/base/macros.h +0 -32
- mindspore/include/c_api/ms/base/status.h +0 -33
- mindspore/include/c_api/ms/base/types.h +0 -283
- mindspore/include/c_api/ms/context.h +0 -102
- mindspore/include/c_api/ms/graph.h +0 -160
- mindspore/include/c_api/ms/node.h +0 -606
- mindspore/include/c_api/ms/tensor.h +0 -161
- mindspore/include/c_api/ms/value.h +0 -84
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/nn/extend/basic.py +0 -140
- mindspore/nn/extend/embedding.py +0 -143
- mindspore/nn/extend/layer/normalization.py +0 -109
- mindspore/nn/extend/pooling.py +0 -117
- mindspore/nn/layer/embedding_service.py +0 -531
- mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +0 -93
- mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +0 -66
- mindspore/ops/extend/__init__.py +0 -53
- mindspore/ops/extend/array_func.py +0 -218
- mindspore/ops/extend/math_func.py +0 -76
- mindspore/ops/extend/nn_func.py +0 -308
- mindspore/ops/silent_check.py +0 -162
- mindspore/profiler/parser/msadvisor_analyzer.py +0 -82
- mindspore/profiler/parser/msadvisor_parser.py +0 -240
- mindspore/train/callback/_mindio_ttp.py +0 -443
- {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/WHEEL +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/entry_points.txt +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/top_level.txt +0 -0
@@ -17,6 +17,82 @@ from mindspore.common import dtype as mstype
 from mindspore.ops.auto_generate.pyboost_inner_prim import *


+def acos(input):
+    r"""
+    Computes arccosine of input tensors element-wise.
+
+    .. math::
+
+        out_i = \cos^{-1}(input_i)
+
+    Args:
+        input (Tensor): The shape of tensor is
+            :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
+
+    Returns:
+        Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
+        >>> output = ops.acos_ext(input)
+        >>> print(output)
+        [0.7377037 1.5307857 1.2661037 0.9764114]
+    """
+    return acos_impl(input)
+
+
+def acosh(input):
+    r"""
+    Computes inverse hyperbolic cosine of the inputs element-wise.
+
+    .. math::
+
+        out_i = \cosh^{-1}(input_i)
+
+    .. note::
+        Given an input tensor input, the function computes inverse hyperbolic cosine of every element.
+        Input range is [1, inf].
+
+    Args:
+        input (Tensor): The input tensor of inverse hyperbolic cosine function.
+
+    Returns:
+        Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
+        >>> output = ops.acosh_ext(input)
+        >>> print(output)
+        [0. 0.9624236 1.7627472 5.298292 ]
+    """
+    return acosh_impl(input)
+
+
+def adaptive_avg_pool2d_grad(grad_output, x):
+    r"""
+    None
+    """
+    return adaptive_avg_pool2d_grad_impl(grad_output, x)
+
+
 def add(input, other, alpha=1):
     r"""
     Adds scaled other value to input Tensor.
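The doctest values in the new `acos` wrapper can be cross-checked against NumPy's reference implementation; a minimal sketch, assuming only NumPy (the `ops.acos_ext` entry point itself requires a MindSpore 2.4 install):

    import numpy as np

    # Same inputs as the acos doctest above; np.arccos is the reference.
    x = np.array([0.74, 0.04, 0.30, 0.56], dtype=np.float32)
    print(np.arccos(x))  # [0.7377037 1.5307857 1.2661037 0.9764114]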
@@ -34,12 +110,12 @@ def add(input, other, alpha=1):
     Args:
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         alpha (number.Number): A scaling factor applied to `other`, default 1.

     Returns:
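This hunk only repairs the truncated `number`/`bool_` documentation links; the semantics of `add` are unchanged. Going by the `alpha` description above (a scaling factor applied to `other`; the exact formula is not shown in this hunk), the result should correspond to this NumPy sketch:

    import numpy as np

    x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    y = np.array([4.0, 5.0, 6.0], dtype=np.float32)
    alpha = 2
    # `other` is scaled by `alpha` before the addition.
    print(x + alpha * y)  # [ 9. 12. 15.]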
@@ -106,6 +182,104 @@ def argmax(input, dim=None, keepdim=False):
     return argmax_impl(input, dim, keepdim)


+def argmin(input, dim=None, keepdim=False):
+    r"""
+    Return the indices of the minimum values of a tensor across a dimension.
+
+    Args:
+        input (Tensor): Input tensor.
+        dim (Union[int, None], optional): Specify the axis for calculation. If `dim` is ``None`` , the indices of the minimum
+            value within the flattened input will be returned. Default: ``None`` .
+        keepdim (bool, optional): Whether the output tensor retains the specified
+            dimension. Ignored if `dim` is None. Default: ``False`` .
+
+    Returns:
+        Tensor, indices of the minimum values of the input tensor across a dimension.
+
+    Raises:
+        TypeError: If `keepdim` is not bool.
+        ValueError: If `dim` is out of range.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor
+        >>> from mindspore import mint
+        >>> x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
+        >>> output = mint.argmin(x, dim=-1)
+        >>> print(output)
+        [0 1 2]
+    """
+    return argmin_impl(input, dim, keepdim)
+
+
+def asin(input):
+    r"""
+    Computes arcsine of input tensors element-wise.
+
+    .. math::
+
+        out_i = \sin^{-1}(input_i)
+
+    Args:
+        input (Tensor): The shape of tensor is
+            :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
+
+    Returns:
+        Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
+        >>> output = ops.asin_ext(input)
+        >>> print(output)
+        [0.8330927 0.04001068 0.30469266 0.59438497 ]
+    """
+    return asin_impl(input)
+
+
+def asinh(input):
+    r"""
+    Computes inverse hyperbolic sine of the input element-wise.
+
+    .. math::
+
+        out_i = \sinh^{-1}(input_i)
+
+    Args:
+        input (Tensor): The input tensor of inverse hyperbolic sine function.
+
+    Returns:
+        Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32)
+        >>> output = ops.asinh_ext(input)
+        >>> print(output)
+        [-2.3124385 1.1947632 1.8184465 5.298342 ]
+    """
+    return asinh_impl(input)
+
+
 def atan2(input, other):
     r"""
     Returns arctangent of input/other element-wise.
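For the new `argmin`, `mint.argmin(x, dim=-1)` in the doctest corresponds to a row-wise argmin, which NumPy reproduces without Ascend hardware:

    import numpy as np

    x = np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]], dtype=np.float32)
    # Index of the minimum along the last axis, as in the mint.argmin doctest.
    print(np.argmin(x, axis=-1))  # [0 1 2]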
@@ -124,7 +298,9 @@ def atan2(input, other):
         its shape is able to broadcast with `input`.

     Returns:
-        Tensor, the shape is the same as the one after broadcasting
+        Tensor, the shape is the same as the one after broadcasting.
+        The dtype of output is float32 when dtype of `input` is in
+        [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.

     Raises:
         TypeError: If `input` or `other` is not a Tensor or scalar.
@@ -147,6 +323,39 @@ def atan2(input, other):
     return atan2_impl(input, other)


+def atan(input):
+    r"""
+    Computes the trigonometric inverse tangent of the input element-wise.
+
+    .. math::
+
+        out_i = \tan^{-1}(input_i)
+
+    Args:
+        input (Tensor): The shape of tensor is
+            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
+
+    Returns:
+        Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([1.0, 0.0]), mindspore.float32)
+        >>> output = ops.atan_ext(input)
+        >>> print(output)
+        [0.7853982 0. ]
+    """
+    return atan_impl(input)
+
+
 def bmm(input, mat2):
     r"""
     Performs batch matrix-matrix multiplication of two three-dimensional tensors.
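As with the other inverse trigonometric wrappers added here, the `atan` doctest agrees with NumPy's reference:

    import numpy as np

    x = np.array([1.0, 0.0], dtype=np.float32)
    print(np.arctan(x))  # [0.7853982 0.       ]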
@@ -254,6 +463,56 @@ def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
     return fold_impl(input, converted_output_size, converted_kernel_size, converted_dilation, converted_padding, converted_stride)


+def copy(variable, value):
+    r"""
+    None
+    """
+    return copy_impl(variable, value)
+
+
+def cummin(input, dim):
+    r"""
+    Returns a tuple (values, indices) where `values` is the cumulative minimum value of input Tensor `input`
+    along the dimension `dim`, and `indices` is the index location of each minimum value.
+
+    .. math::
+        \begin{array}{ll} \\
+            y_{i} = \min(x_{1}, x_{2}, ... , x_{i})
+        \end{array}
+
+    Args:
+        input (Tensor): The input Tensor, The dimension must be greater than 0.
+        dim (int): Operation dimension. The value of `dim` must be in the range `[-input.ndim, input.ndim - 1]`.
+
+    Returns:
+        tuple [Tensor], tuple of 2 Tensors, containing the cumulative minimum of elements and the index.
+        The shape of each output tensor is the same as that of input `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `input` is a Tensor, but the type is complex or bool.
+        TypeError: If `dim` is not an int.
+        ValueError: If `dim` is out the range of `[-input.ndim, input.ndim - 1]`.
+
+    .. note::
+        O2 mode is not supported in Ascend.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> from mindspore import Tensor, ops
+        >>> import mindspore
+        >>> a = Tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220], mindspore.float32)
+        >>> output = ops.cummin_ext(a, dim=0)
+        >>> print(output[0])
+        [-0.2284 -0.6628 -0.6628 -0.6628 -1.3298 -1.3298]
+        >>> print(output[1])
+        [0 1 1 1 4 4]
+    """
+    return cummin_impl(input, dim)
+
+
 def cumsum(input, dim, dtype=None):
     r"""
     Computes the cumulative sum of input Tensor along `dim`.
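The running-minimum formula in the new `cummin` docstring, y_i = min(x_1, ..., x_i), plus the index of each minimum, can be sketched in NumPy for verification (for this input the first-occurrence index matches the doctest; tie-breaking on the Ascend kernel is not specified in the hunk):

    import numpy as np

    a = np.array([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220])
    values = np.minimum.accumulate(a)  # y_i = min(x_1, ..., x_i)
    # Index of the smallest element seen so far.
    indices = np.array([np.argmin(a[:i + 1]) for i in range(len(a))])
    print(values)   # [-0.2284 -0.6628 -0.6628 -0.6628 -1.3298 -1.3298]
    print(indices)  # [0 1 1 1 4 4]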
@@ -365,8 +624,6 @@ def flatten(input, start_dim=0, end_dim=-1):

     Args:
         input (Tensor): The input Tensor.
-
-    Keyword Args:
         start_dim (int, optional): The first dimension to flatten. Default: ``0`` .
         end_dim (int, optional): The last dimension to flatten. Default: ``-1`` .

@@ -386,15 +643,58 @@ def flatten(input, start_dim=0, end_dim=-1):
     Examples:
         >>> import mindspore
         >>> import numpy as np
-        >>> from mindspore import Tensor,
+        >>> from mindspore import Tensor, ops
         >>> input_x = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
-        >>> output =
+        >>> output = ops.auto_generate.flatten_ext(input_x)
         >>> print(output.shape)
         (24,)
     """
     return flatten_impl(input, start_dim, end_dim)


+def histc(input, bins=100, min=0, max=0):
+    r"""
+    Computes the histogram of a tensor.
+
+    The elements are sorted into equal width bins between `min` and `max`.
+    If `min` and `max` are both zero, the minimum and maximum values of the data are used.
+
+    Elements lower than min or higher than max are ignored.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+        If input is int64, valid values fit within int32; exceeding this may cause precision errors.
+
+    Args:
+        input (Tensor): the input tensor.
+        bins (int, optional): Number of histogram bins, optional. If specified, must be positive. Default: ``100`` .
+        min (int, float, optional): the lower end of the range (inclusive), optional. Default: ``0`` .
+        max (int, float, optional): the upper end of the range (inclusive), optional. Default: ``0`` .
+
+    Returns:
+        A 1-D Tensor, has the same type as `input` with the shape :math:`(bins, )`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `input` datatype is not in support list.
+        TypeError: If attr `min` or `max` is not float or int.
+        TypeError: If attr `bins` is not int.
+        ValueError: If attr value `min` > `max`.
+        ValueError: If attr `bins` <= 0.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor([1., 2, 1])
+        >>> y = ops.histc_ext(x, bins=4, min=0, max=3)
+        >>> print(y)
+        [0 2 1 0]
+    """
+    return histc_impl(input, bins, min, max)
+
+
 def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
     r"""
     Extracts sliding local blocks from a batched input tensor.
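With explicit `min`/`max`, `histc_ext` behaves like a fixed-range histogram; `np.histogram` over the range `(0, 3)` reproduces the doctest counts:

    import numpy as np

    x = np.array([1., 2., 1.])
    # Four equal-width bins over [0, 3], as in the histc_ext doctest.
    counts, _edges = np.histogram(x, bins=4, range=(0, 3))
    print(counts)  # [0 2 1 0]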
@@ -513,6 +813,79 @@ def index_select(input, dim, index):
     return index_select_impl(input, dim, index)


+def inplace_add(input, other, alpha=1):
+    r"""
+    None
+    """
+    return inplace_add_impl(input, other, alpha)
+
+
+def inplace_adds(input, other, alpha=1):
+    r"""
+    None
+    """
+    return inplace_adds_impl(input, other, alpha)
+
+
+def l1_loss(input, target, reduction='mean'):
+    r"""
+    Calculate the mean absolute error between the `input` value and the `target` value.
+
+    Assuming that the :math:`x` and :math:`y` are the predicted value and target value,
+    both are one-dimensional tensors of length :math:`N`, and `reduction` is set to ``'none'`` ,
+    then calculate the loss of :math:`x` and :math:`y` without dimensionality reduction.
+
+    The formula is as follows:
+
+    .. math::
+        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad \text{with } l_n = \left| x_n - y_n \right|,
+
+    where :math:`N` is the batch size.
+
+    If `reduction` is ``'mean'`` or ``'sum'`` , then:
+
+    .. math::
+        \ell(x, y) =
+        \begin{cases}
+            \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\
+            \operatorname{sum}(L),  & \text{if reduction} = \text{'sum'.}
+        \end{cases}
+
+    Args:
+        input (Tensor): Predicted value, Tensor of any dimension.
+        target (Tensor): Target value, usually has the same shape as the `input`.
+            If `input` and `target` have different shapes, make sure they can broadcast to each other.
+        reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
+            ``'sum'`` . Default: ``'mean'`` .
+
+            - ``'none'``: no reduction will be applied.
+            - ``'mean'``: compute and return the mean of elements in the output. Notice: At least one of the input and target is float type when the reduction is ``'mean'`` .
+            - ``'sum'``: the output elements will be summed.
+
+    Returns:
+        Tensor or Scalar, if `reduction` is ``'none'`` , return a Tensor with same shape and dtype as `input`.
+        Otherwise, a scalar value will be returned.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `target` is not a Tensor.
+        ValueError: If `reduction` is not one of ``'none'`` , ``'mean'`` or ``'sum'`` .
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> from mindspore import Tensor, ops
+        >>> from mindspore import dtype as mstype
+        >>> x = Tensor([[1, 2, 3], [4, 5, 6]], mstype.float32)
+        >>> target = Tensor([[6, 5, 4], [3, 2, 1]], mstype.float32)
+        >>> output = ops.l1_loss_ext(x, target, reduction="mean")
+        >>> print(output)
+        3.0
+    """
+    return l1_loss_impl(input, target, converted_reduction)
+
+
 def leaky_relu(input, negative_slope=0.01):
     r"""
     leaky_relu activation function. The element of `input` less than 0 times `negative_slope` .
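For `reduction='mean'`, the `l1_loss` formula above reduces to the plain mean absolute error, which makes the doctest value easy to verify in NumPy:

    import numpy as np

    x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    target = np.array([[6, 5, 4], [3, 2, 1]], dtype=np.float32)
    # l_n = |x_n - y_n|, then the mean over all elements.
    print(np.abs(x - target).mean())  # 3.0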
@@ -560,6 +933,89 @@ def leaky_relu(input, negative_slope=0.01):
     return leaky_relu_impl(input, negative_slope)


+def log_softmax(input, dim=None, dtype=None):
+    r"""
+    Applies the Log Softmax function to the input tensor on the specified axis.
+    Supposes a slice in the given axis, :math:`x` for each element :math:`x_i`,
+    the Log Softmax function is shown as follows:
+
+    .. math::
+        \text{output}(x_i) = \log \left(\frac{\exp(x_i)} {\sum_{j = 0}^{N-1}\exp(x_j)}\right),
+
+    where :math:`N` is the length of the Tensor.
+
+    Args:
+        input (Tensor): The input Tensor.
+        dim (int, optional): The axis to perform the Log softmax operation. Default: ``None`` .
+
+    Keyword Args:
+        dtype (:class:`mindspore.dtype`, optional): The desired dtype of returned Tensor. If not set to None, the input
+            Tensor will be cast to `dtype` before the operation is performed. This is useful for preventing overflows.
+            If set to None, stay the same as original Tensor. Default: ``None`` . Supported data type is {float16, float32, double, bfloat16}.
+
+    Returns:
+        Tensor, with the same shape as the input.
+
+    Raises:
+        TypeError: If `dim` is not an int.
+        ValueError: If `dim` is not in range [-len(input.shape), len(input.shape)).
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> logits = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
+        >>> output = ops.auto_generate.log_softmax(logits, dim=-1)
+        >>> print(output)
+        [-4.4519143 -3.4519143 -2.4519143 -1.4519144 -0.4519144]
+    """
+    return log_softmax_impl(input, dim, dtype)
+
+
+def logaddexp(input, other):
+    r"""
+    Computes the logarithm of the sum of exponentiations of the inputs.
+    This function is useful in statistics where the calculated probabilities of events may be
+    so small as to exceed the range of normal floating point numbers.
+
+    .. math::
+
+        out_i = \log(\exp(input_i) + \exp(other_i))
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): Input Tensor. The dtype of `input` must be float.
+        other (Tensor): Input Tensor. The dtype of `other` must be float.
+            If the shape of `input` is not equal to the shape of `other`,
+            they must be broadcastable to a common shape (which becomes the shape of the output).
+
+    Returns:
+        Tensor, with the same dtype as `input` and `other`.
+
+    Raises:
+        TypeError: If `input` or `other` is not a Tensor.
+        TypeError: The dtype of `input` or `other` is not float.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x1 = Tensor(np.array([1, 2, 3]).astype(np.float16))
+        >>> x2 = Tensor(np.array(2).astype(np.float16))
+        >>> output = ops.logaddexp_ext(x1, x2)
+        >>> print(output)
+        [2.312 2.693 3.312]
+    """
+    return logaddexp_impl(input, other)
+
+
 def matmul(input, mat2):
     r"""
     None
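NumPy ships the same numerically stable primitive as the new `logaddexp` wrapper, so the float16 doctest values can be checked directly:

    import numpy as np

    x1 = np.array([1, 2, 3], dtype=np.float16)
    x2 = np.float16(2)
    # log(exp(x1) + exp(x2)), computed without overflowing float16.
    print(np.logaddexp(x1, x2))  # [2.312 2.693 3.312]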
@@ -684,6 +1140,138 @@ def mean(input, axis=None, keep_dims=False, dtype=None):
     return mean_impl(input, axis, keep_dims, dtype)


+def mish(input):
+    r"""
+    Computes MISH (A Self Regularized Non-Monotonic Neural Activation Function)
+    of input tensors element-wise.
+
+    The formula is defined as follows:
+
+    .. math::
+        \text{mish}(input) = input * \tanh(softplus(\text{input}))
+
+    See more details in `A Self Regularized Non-Monotonic Neural Activation Function
+    <https://arxiv.org/abs/1908.08681>`_.
+
+    Mish Activation Function Graph:
+
+    .. image:: ../images/Mish.png
+        :align: center
+
+    Args:
+        input (Tensor): The input of MISH. Supported dtypes:
+
+            - Ascend: float16, float32.
+
+    Returns:
+        Tensor, has the same type and shape as the `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If dtype of `input` is not float16 or float32.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, ops
+        >>> import numpy as np
+        >>> x = Tensor(np.array([[-1.1, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
+        >>> output = ops.mish(x)
+        >>> print(output)
+        [[-3.0764845e-01 3.9974124e+00 -2.6832507e-03]
+        [ 1.9439589e+00 -3.3576239e-02 8.9999990e+00]]
+    """
+    return mish_impl(input)
+
+
+def mse_loss(input, target, reduction='mean'):
+    r"""
+    Calculates the mean squared error between the predicted value and the label value.
+
+    For detailed information, please refer to :class:`mindspore.nn.MSELoss`.
+
+    Args:
+        input (Tensor): Tensor of any dimension. The data type needs to be consistent with the `target`.
+            It should also be broadcastable with the `target`.
+        target (Tensor): The input label. Tensor of any dimension. The data type needs to be consistent with the `input`.
+            It should also be broadcastable with the `input`.
+        reduction (str, optional): Apply specific reduction method to the output: ``'mean'`` , ``'none'`` ,
+            ``'sum'`` . Default: ``'mean'`` .
+
+            - ``'none'``: no reduction will be applied.
+            - ``'mean'``: compute and return the mean of elements in the output.
+            - ``'sum'``: the output elements will be summed.
+
+    Returns:
+        - Tensor. If `reduction` is ``'mean'`` or ``'sum'``, the shape of output is `Tensor Scalar`.
+        - If reduction is ``'none'``, the shape of output is the broadcasted shape of **input** and **target** .
+
+    Raises:
+        ValueError: If `reduction` is not one of ``'mean'`` , ``'sum'`` or ``'none'``.
+        ValueError: If `input` and `target` are not broadcastable.
+        TypeError: If `input` and `target` are in different data type.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> logits = Tensor(np.array([1, 2, 3]), mindspore.float32)
+        >>> labels = Tensor(np.array([[1, 1, 1], [1, 2, 2]]), mindspore.float32)
+        >>> output = ops.mse_loss_ext(logits, labels, reduction='none')
+        >>> print(output)
+        [[0. 1. 4.]
+        [0. 0. 1.]]
+    """
+    return mse_loss_impl(input, target, converted_reduction)
+
+
+def outer(input, vec2):
+    r"""
+    Return outer product of `input` and `vec2`. If `input` is a vector of size :math:`n`
+    and `vec2` is a vector of size :math:`m` , then output must be a matrix of shape :math:`(n, m)` .
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    .. note::
+        This function does not broadcast.
+
+    Args:
+        input (Tensor): 1-D input vector.
+        vec2 (Tensor): 1-D input vector.
+
+    Returns:
+        out, 2-D matrix, the outer product of two vectors.
+
+    Raises:
+        TypeError: If `input` or `vec2` is not a Tensor.
+        TypeError: The implicitly converted data types of `input` and `vec2` are not one of float16, float32, float64, bool, uint8, int8, int16, int32, int64, complex64, complex128, bfloat16
+        ValueError: If the dimension of `input` or `vec2` is not equal to 1.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor
+        >>> from mindspore import ops
+        >>> input = Tensor(np.array([7, 8, 9]), mindspore.int32)
+        >>> vec2 = Tensor(np.array([7, 10, 11]), mindspore.int32)
+        >>> out = ops.outer(input, vec2)
+        >>> print(out)
+        [[49 70 77]
+        [56 80 88]
+        [63 90 99]]
+    """
+    return outer_impl(input, vec2)
+
+
 def prod(input, axis=None, keep_dims=False, dtype=None):
     r"""
     Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. And also can
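`outer` is the standard outer product without broadcasting, so `np.outer` reproduces the doctest matrix:

    import numpy as np

    a = np.array([7, 8, 9], dtype=np.int32)
    b = np.array([7, 10, 11], dtype=np.int32)
    # out[i, j] = a[i] * b[j]
    print(np.outer(a, b))
    # [[49 70 77]
    #  [56 80 88]
    #  [63 90 99]]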
@@ -762,6 +1350,56 @@ def prod(input, axis=None, keep_dims=False, dtype=None):
     return prod_impl(input, axis, keep_dims, dtype)


+def selu(input):
+    r"""
+    Activation function SELU (Scaled exponential Linear Unit).
+
+    The activation function is defined as:
+
+    .. math::
+        E_{i} =
+        scale *
+        \begin{cases}
+        x_{i}, &\text{if } x_{i} \geq 0; \cr
+        \text{alpha} * (\exp(x_i) - 1), &\text{otherwise.}
+        \end{cases}
+
+    where :math:`alpha` and :math:`scale` are pre-defined constants (:math:`alpha=1.67326324`
+    and :math:`scale=1.05070098`).
+
+    See more details in `Self-Normalizing Neural Networks <https://arxiv.org/abs/1706.02515>`_.
+
+    SELU Activation Function Graph:
+
+    .. image:: ../images/SeLU.png
+        :align: center
+
+    Args:
+        input (Tensor): Tensor of any dimension.
+            The data type is float16, float32, bfloat16.
+
+    Returns:
+        Tensor, with the same type and shape as the `input`.
+
+    Raises:
+        TypeError: If dtype of `input` is not float16, float32, bfloat16.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> import numpy as np
+        >>> input = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
+        >>> output = mint.nn.functional.selu(input)
+        >>> print(output)
+        [[-1.1113307 4.202804 -1.7575096]
+        [ 2.101402 -1.7462534 9.456309 ]]
+    """
+    return selu_impl(input)
+
+
 def softplus(input, beta=1, threshold=20):
     r"""
     Applies softplus function to `input` element-wise.
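The SELU constants quoted in the docstring are enough to reproduce the doctest output; a NumPy sketch of the formula (not the Ascend kernel):

    import numpy as np

    alpha, scale = 1.67326324, 1.05070098  # constants from the docstring
    x = np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]], dtype=np.float32)
    # scale * x for x >= 0, scale * alpha * (exp(x) - 1) otherwise.
    print(scale * np.where(x >= 0, x, alpha * (np.exp(x) - 1)))
    # [[-1.1113307  4.202804  -1.7575096]
    #  [ 2.101402  -1.7462534  9.456309 ]]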
@@ -867,12 +1505,12 @@ def sub(input, other, alpha=1):
     Args:
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         alpha (number.Number): A scaling factor applied to `other`, default 1.

     Returns:
@@ -978,3 +1616,86 @@ def topk(input, k, dim=-1, largest=True, sorted=True):
     """
     return topk_impl(input, k, dim, largest, sorted)

+
+def trace(input):
+    r"""
+    Returns a new tensor that is the sum of the `input` main trace.
+
+    Note:
+        Input must be tensor.
+
+    Args:
+        input (Tensor): 2-D Tensor.
+
+    Returns:
+        Tensor, when the data type of `input` is integer or bool, its data type is int64, otherwise it is the same as `input`, and size equals to 1.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        ValueError: If the dimension of `input` is not equal to 2.
+        TypeError: If the dtype of `input` is not one of float16, float32, float64, bool, uint8, int8, int16, int32, int64, complex64, complex128, bfloat16.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([[10, 11, 12], [13, 14, 15], [16, 17, 18]]), mindspore.float32)
+        >>> output = ops.trace_ext(input)
+        >>> print(output)
+        42.0
+        >>> input = Tensor(np.arange(1, 13).reshape(3, 4), mindspore.float32)
+        >>> output = ops.trace_ext(input)
+        >>> print(output)
+        18.0
+        >>> input = Tensor(np.arange(12, 0, -1).reshape(4, 3), mindspore.float32)
+        >>> output = ops.trace_ext(input)
+        >>> print(output)
+        24.0
+    """
+    return trace_impl(input)
+
+
+def transpose(input, dim0, dim1):
+    r"""
+    Interchange two axes of a tensor.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input(Tensor): Input tensor.
+        dim0 (int): First axis.
+        dim1 (int): Second axis.
+
+    Returns:
+        Transposed tensor, has the same data type as `input`.
+
+    Raises:
+        TypeError: If argument `input` is not Tensor.
+        TypeError: If `dim0` or `dim1` is not integer.
+        ValueError: If `dim0` or `dim1` is not in the range of :math:`[-ndim, ndim-1]`.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import mint
+        >>> from mindspore import Tensor
+        >>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
+        >>> output = mint.transpose(input, 0, 2)
+        >>> print(output.shape)
+        (4, 3, 2)
+    """
+    return transpose_impl(input, dim0, dim1)
+
+
+def tril(input, diagonal=0):
+    r"""
+    None
+    """
+    return tril_impl(input, diagonal)
+
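For the non-square inputs in the `trace_ext` doctest, the sum runs along the main diagonal up to the shorter axis, exactly as `np.trace` does:

    import numpy as np

    print(np.trace(np.array([[10., 11, 12], [13, 14, 15], [16, 17, 18]])))  # 42.0
    print(np.trace(np.arange(1, 13, dtype=np.float32).reshape(3, 4)))       # 1 + 6 + 11 = 18.0
    print(np.trace(np.arange(12, 0, -1, dtype=np.float32).reshape(4, 3)))   # 12 + 8 + 4 = 24.0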