mindspore-2.3.0rc1-cp38-cp38-manylinux1_x86_64.whl → mindspore-2.3.0rc2-cp38-cp38-manylinux1_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (223)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +1 -1
  3. mindspore/_akg/akg/utils/tbe_codegen_utils.py +13 -3
  4. mindspore/_c_dataengine.cpython-38-x86_64-linux-gnu.so +0 -0
  5. mindspore/_c_expression.cpython-38-x86_64-linux-gnu.so +0 -0
  6. mindspore/_checkparam.py +20 -0
  7. mindspore/_extends/parse/parser.py +1 -1
  8. mindspore/_extends/parse/standard_method.py +6 -5
  9. mindspore/_mindspore_offline_debug.cpython-38-x86_64-linux-gnu.so +0 -0
  10. mindspore/amp.py +5 -5
  11. mindspore/boost/boost_cell_wrapper.py +1 -1
  12. mindspore/boost/group_loss_scale_manager.py +1 -1
  13. mindspore/common/__init__.py +4 -2
  14. mindspore/common/_register_for_recompute.py +48 -0
  15. mindspore/common/_stub_tensor.py +1 -0
  16. mindspore/common/api.py +56 -4
  17. mindspore/common/dtype.py +5 -3
  18. mindspore/common/dump.py +2 -2
  19. mindspore/common/hook_handle.py +51 -4
  20. mindspore/common/initializer.py +1 -1
  21. mindspore/common/jit_config.py +17 -6
  22. mindspore/common/parameter.py +7 -2
  23. mindspore/common/recompute.py +247 -0
  24. mindspore/common/sparse_tensor.py +2 -2
  25. mindspore/common/symbol.py +1 -1
  26. mindspore/common/tensor.py +74 -36
  27. mindspore/communication/__init__.py +3 -3
  28. mindspore/communication/management.py +30 -30
  29. mindspore/context.py +28 -15
  30. mindspore/dataset/__init__.py +5 -5
  31. mindspore/dataset/audio/__init__.py +2 -2
  32. mindspore/dataset/audio/transforms.py +51 -51
  33. mindspore/dataset/callback/ds_callback.py +2 -2
  34. mindspore/dataset/engine/cache_client.py +1 -1
  35. mindspore/dataset/engine/datasets.py +3 -3
  36. mindspore/dataset/engine/datasets_audio.py +14 -14
  37. mindspore/dataset/engine/datasets_standard_format.py +3 -3
  38. mindspore/dataset/engine/datasets_text.py +38 -38
  39. mindspore/dataset/engine/datasets_user_defined.py +3 -3
  40. mindspore/dataset/engine/datasets_vision.py +68 -68
  41. mindspore/dataset/text/__init__.py +3 -3
  42. mindspore/dataset/text/transforms.py +26 -26
  43. mindspore/dataset/transforms/__init__.py +1 -1
  44. mindspore/dataset/vision/__init__.py +3 -3
  45. mindspore/dataset/vision/transforms.py +92 -92
  46. mindspore/dataset/vision/utils.py +1 -1
  47. mindspore/experimental/optim/adadelta.py +2 -2
  48. mindspore/experimental/optim/adagrad.py +2 -2
  49. mindspore/experimental/optim/adam.py +2 -2
  50. mindspore/experimental/optim/adamax.py +2 -2
  51. mindspore/experimental/optim/adamw.py +2 -2
  52. mindspore/experimental/optim/asgd.py +2 -2
  53. mindspore/experimental/optim/lr_scheduler.py +24 -20
  54. mindspore/experimental/optim/nadam.py +2 -2
  55. mindspore/experimental/optim/optimizer.py +1 -1
  56. mindspore/experimental/optim/radam.py +2 -2
  57. mindspore/experimental/optim/rmsprop.py +2 -2
  58. mindspore/experimental/optim/rprop.py +2 -2
  59. mindspore/experimental/optim/sgd.py +2 -2
  60. mindspore/hal/stream.py +2 -0
  61. mindspore/include/mindapi/base/types.h +5 -0
  62. mindspore/lib/libdnnl.so.2 +0 -0
  63. mindspore/lib/libmindspore.so +0 -0
  64. mindspore/lib/libmindspore_backend.so +0 -0
  65. mindspore/lib/libmindspore_common.so +0 -0
  66. mindspore/lib/libmindspore_core.so +0 -0
  67. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  68. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  69. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  70. mindspore/lib/libmindspore_shared_lib.so +0 -0
  71. mindspore/lib/libopencv_core.so.4.5 +0 -0
  72. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  73. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  74. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  75. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6 -6
  76. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  77. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  78. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  79. mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
  80. mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
  81. mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
  82. mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
  83. mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
  84. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  85. mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
  86. mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
  87. mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
  88. mindspore/log.py +2 -2
  89. mindspore/mint/__init__.py +457 -0
  90. mindspore/mint/nn/__init__.py +430 -0
  91. mindspore/mint/nn/functional.py +424 -0
  92. mindspore/mint/optim/__init__.py +24 -0
  93. mindspore/mint/optim/adamw.py +186 -0
  94. mindspore/multiprocessing/__init__.py +4 -0
  95. mindspore/nn/__init__.py +3 -0
  96. mindspore/nn/cell.py +51 -47
  97. mindspore/nn/extend/__init__.py +29 -0
  98. mindspore/nn/extend/basic.py +140 -0
  99. mindspore/nn/extend/embedding.py +143 -0
  100. mindspore/nn/extend/layer/__init__.py +27 -0
  101. mindspore/nn/extend/layer/normalization.py +107 -0
  102. mindspore/nn/extend/pooling.py +117 -0
  103. mindspore/nn/generator.py +297 -0
  104. mindspore/nn/layer/basic.py +109 -1
  105. mindspore/nn/layer/container.py +2 -2
  106. mindspore/nn/layer/conv.py +6 -6
  107. mindspore/nn/layer/embedding.py +1 -1
  108. mindspore/nn/layer/normalization.py +21 -43
  109. mindspore/nn/layer/padding.py +4 -0
  110. mindspore/nn/optim/ada_grad.py +2 -2
  111. mindspore/nn/optim/adadelta.py +1 -1
  112. mindspore/nn/optim/adafactor.py +1 -1
  113. mindspore/nn/optim/adam.py +7 -7
  114. mindspore/nn/optim/adamax.py +2 -2
  115. mindspore/nn/optim/adasum.py +2 -2
  116. mindspore/nn/optim/asgd.py +2 -2
  117. mindspore/nn/optim/ftrl.py +1 -1
  118. mindspore/nn/optim/lamb.py +3 -3
  119. mindspore/nn/optim/lars.py +1 -1
  120. mindspore/nn/optim/lazyadam.py +2 -2
  121. mindspore/nn/optim/momentum.py +2 -2
  122. mindspore/nn/optim/optimizer.py +2 -2
  123. mindspore/nn/optim/proximal_ada_grad.py +2 -2
  124. mindspore/nn/optim/rmsprop.py +2 -2
  125. mindspore/nn/optim/rprop.py +2 -2
  126. mindspore/nn/optim/sgd.py +2 -2
  127. mindspore/nn/optim/thor.py +2 -2
  128. mindspore/nn/wrap/cell_wrapper.py +9 -9
  129. mindspore/nn/wrap/grad_reducer.py +5 -5
  130. mindspore/ops/_grad_experimental/grad_comm_ops.py +4 -2
  131. mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -2
  132. mindspore/ops/_vmap/vmap_math_ops.py +27 -8
  133. mindspore/ops/_vmap/vmap_nn_ops.py +66 -8
  134. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +73 -1
  135. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +12 -3
  136. mindspore/ops/auto_generate/gen_arg_handler.py +24 -0
  137. mindspore/ops/auto_generate/gen_extend_func.py +274 -0
  138. mindspore/ops/auto_generate/gen_ops_def.py +889 -22
  139. mindspore/ops/auto_generate/gen_ops_prim.py +3541 -253
  140. mindspore/ops/auto_generate/pyboost_inner_prim.py +282 -0
  141. mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -1
  142. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +9 -0
  143. mindspore/ops/extend/__init__.py +9 -1
  144. mindspore/ops/extend/array_func.py +134 -27
  145. mindspore/ops/extend/math_func.py +3 -3
  146. mindspore/ops/extend/nn_func.py +363 -2
  147. mindspore/ops/function/__init__.py +19 -2
  148. mindspore/ops/function/array_func.py +463 -439
  149. mindspore/ops/function/clip_func.py +7 -18
  150. mindspore/ops/function/grad/grad_func.py +5 -5
  151. mindspore/ops/function/linalg_func.py +4 -4
  152. mindspore/ops/function/math_func.py +260 -243
  153. mindspore/ops/function/nn_func.py +825 -62
  154. mindspore/ops/function/random_func.py +73 -4
  155. mindspore/ops/function/sparse_unary_func.py +1 -1
  156. mindspore/ops/function/vmap_func.py +1 -1
  157. mindspore/ops/functional.py +2 -2
  158. mindspore/ops/op_info_register.py +1 -31
  159. mindspore/ops/operations/__init__.py +2 -3
  160. mindspore/ops/operations/_grad_ops.py +2 -107
  161. mindspore/ops/operations/_inner_ops.py +5 -5
  162. mindspore/ops/operations/_sequence_ops.py +2 -2
  163. mindspore/ops/operations/array_ops.py +11 -233
  164. mindspore/ops/operations/comm_ops.py +32 -32
  165. mindspore/ops/operations/custom_ops.py +7 -89
  166. mindspore/ops/operations/manually_defined/ops_def.py +329 -4
  167. mindspore/ops/operations/math_ops.py +13 -163
  168. mindspore/ops/operations/nn_ops.py +9 -316
  169. mindspore/ops/operations/random_ops.py +1 -1
  170. mindspore/ops/operations/sparse_ops.py +3 -3
  171. mindspore/ops/primitive.py +2 -2
  172. mindspore/ops_generate/arg_dtype_cast.py +12 -3
  173. mindspore/ops_generate/arg_handler.py +24 -0
  174. mindspore/ops_generate/gen_ops_inner_prim.py +2 -0
  175. mindspore/ops_generate/gen_pyboost_func.py +13 -6
  176. mindspore/ops_generate/pyboost_utils.py +2 -17
  177. mindspore/parallel/__init__.py +3 -2
  178. mindspore/parallel/_auto_parallel_context.py +106 -1
  179. mindspore/parallel/_parallel_serialization.py +34 -2
  180. mindspore/parallel/_utils.py +16 -0
  181. mindspore/parallel/algo_parameter_config.py +4 -4
  182. mindspore/parallel/checkpoint_transform.py +249 -77
  183. mindspore/parallel/cluster/process_entity/_api.py +1 -1
  184. mindspore/parallel/parameter_broadcast.py +1 -1
  185. mindspore/parallel/shard.py +1 -1
  186. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +1 -0
  187. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +17 -5
  188. mindspore/profiler/parser/ascend_msprof_exporter.py +3 -3
  189. mindspore/profiler/parser/ascend_msprof_generator.py +10 -3
  190. mindspore/profiler/parser/ascend_op_generator.py +26 -9
  191. mindspore/profiler/parser/ascend_timeline_generator.py +7 -4
  192. mindspore/profiler/parser/profiler_info.py +11 -1
  193. mindspore/profiler/profiling.py +13 -5
  194. mindspore/rewrite/api/node.py +12 -12
  195. mindspore/rewrite/api/symbol_tree.py +11 -11
  196. mindspore/run_check/_check_version.py +1 -1
  197. mindspore/safeguard/rewrite_obfuscation.py +2 -2
  198. mindspore/train/amp.py +4 -4
  199. mindspore/train/anf_ir_pb2.py +8 -2
  200. mindspore/train/callback/_backup_and_restore.py +2 -2
  201. mindspore/train/callback/_callback.py +4 -4
  202. mindspore/train/callback/_checkpoint.py +2 -2
  203. mindspore/train/callback/_early_stop.py +2 -2
  204. mindspore/train/callback/_landscape.py +4 -4
  205. mindspore/train/callback/_loss_monitor.py +2 -2
  206. mindspore/train/callback/_on_request_exit.py +2 -2
  207. mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
  208. mindspore/train/callback/_summary_collector.py +2 -2
  209. mindspore/train/callback/_time_monitor.py +2 -2
  210. mindspore/train/dataset_helper.py +8 -3
  211. mindspore/train/loss_scale_manager.py +2 -2
  212. mindspore/train/metrics/metric.py +3 -3
  213. mindspore/train/mind_ir_pb2.py +22 -17
  214. mindspore/train/model.py +15 -15
  215. mindspore/train/serialization.py +18 -18
  216. mindspore/train/summary/summary_record.py +7 -7
  217. mindspore/train/train_thor/convert_utils.py +3 -3
  218. mindspore/version.py +1 -1
  219. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +1 -1
  220. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +223 -209
  221. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
  222. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
  223. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
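
Before the function-level diff, a quick orientation: the file list shows several brand-new modules (`mindspore/mint/*`, `mindspore/nn/extend/*`, `mindspore/common/recompute.py`), and the docstring examples in the hunk below import from the new `mindspore.mint` namespace. Here is a minimal sketch of how those entry points are exercised, assembled purely from the docstring examples shown below; that `flatten`, `stack`, and `leaky_relu` are all importable from `mindspore.mint` in rc2 is inferred from those examples, not independently verified.

# Sketch assembled from the docstring examples in the diff below; the exact
# export surface of the new `mindspore.mint` namespace in 2.3.0rc2 is an
# assumption based on those examples.
import numpy as np
import mindspore
from mindspore import Tensor, mint

x = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
print(mint.flatten(x).shape)        # (24,) -- all dims flattened by default

a = Tensor(np.array([0, 1]).astype(np.float32))
b = Tensor(np.array([2, 3]).astype(np.float32))
print(mint.stack([a, b], 0))        # [[0. 1.] [2. 3.]] -- new leading dim

inp = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
print(mint.leaky_relu(inp, negative_slope=0.2))  # negatives scaled by 0.2
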
@@ -73,6 +73,273 @@ def add(input, other, alpha=1):
     return add_impl(input, other, alpha)


+def bmm(input, mat2):
+    r"""
+    Performs batch matrix-matrix multiplication of two three-dimensional tensors.
+
+    .. math::
+        \text{output}[b, i, j] = \text{input}[b, i, k] @ \text{mat2}[b, k, j]
+
+    Args:
+        input (Tensor): The first batch of matrices to be multiplied. Must be a three-dimensional tensor.
+        mat2 (Tensor): The second batch of matrices to be multiplied. Must be a three-dimensional tensor.
+
+    Returns:
+        Tensor, the output tensor of shape `(b, n, p)`, where each matrix is the product of the corresponding matrices in the input batches.
+
+    Raises:
+        TypeError: If `input` or `mat2` is not a three-dimensional tensor.
+        ValueError: If the length of the third dimension of `input` is not equal to the length of the second dimension of `mat2`.
+        ValueError: If the batch sizes of the inputs do not match.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor
+        >>> from mindspore.ops.extend import bmm
+        >>> a = Tensor(np.ones(shape=[2, 3, 4]), mindspore.float32)
+        >>> b = Tensor(np.ones(shape=[2, 4, 5]), mindspore.float32)
+        >>> output = bmm(a, b)
+        >>> print(output.shape)
+        (2, 3, 5)
+    """
+    return bmm_impl(input, mat2)
+
+
+def ffn(x, weight1, weight2, expertTokens=None, bias1=None, bias2=None, scale=None, offset=None, deqScale1=None, deqScale2=None, antiquant_scale1=None, antiquant_scale2=None, antiquant_offset1=None, antiquant_offset2=None, activation='fastgelu', inner_precise=0):
+    r"""
+    None
+    """
+    return ffn_impl(x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2, converted_activation, inner_precise)
+
+
+def flatten(input, start_dim=0, end_dim=-1):
+    r"""
+    Flatten a tensor along dimensions from `start_dim` to `end_dim`.
+
+    Args:
+        input (Tensor): The input Tensor.
+
+    Keyword Args:
+        start_dim (int, optional): The first dimension to flatten. Default: ``0``.
+        end_dim (int, optional): The last dimension to flatten. Default: ``-1``.
+
+    Returns:
+        Tensor. If no dimensions are flattened, returns the original `input`, otherwise returns the flattened Tensor.
+        If `input` is a 0-dimensional Tensor, a 1-dimensional Tensor will be returned.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `start_dim` or `end_dim` is not int.
+        ValueError: If `start_dim` is greater than `end_dim` after canonicalization.
+        ValueError: If `start_dim` or `end_dim` is not in range of [-input.dim, input.dim-1].
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, mint
+        >>> input_x = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
+        >>> output = mint.flatten(input_x)
+        >>> print(output.shape)
+        (24,)
+    """
+    return flatten_impl(input, start_dim, end_dim)
+
+
+def leaky_relu(input, negative_slope=0.01):
+    r"""
+    leaky_relu activation function. Elements of `input` that are less than 0 are multiplied by `negative_slope`.
+
+    The activation function is defined as:
+
+    .. math::
+        \text{leaky\_relu}(input) = \begin{cases}input, &\text{if } input \geq 0; \cr
+        \alpha \times input, &\text{otherwise.}\end{cases}
+
+    where :math:`\alpha` represents the `negative_slope` parameter.
+
+    For more details, see `Rectifier Nonlinearities Improve Neural Network Acoustic Models
+    <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`_.
+
+    LeakyReLU Activation Function Graph:
+
+    .. image:: ../images/LeakyReLU.png
+        :align: center
+
+    Args:
+        input (Tensor): The input of leaky_relu is a Tensor of any dimension.
+        negative_slope (Union[int, float]): Slope of the activation function when the element of `input` is less than 0.
+            Default: ``0.01``.
+
+    Returns:
+        Tensor, has the same type and shape as the `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `negative_slope` is not a float or an int.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, mint
+        >>> input = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
+        >>> print(mint.leaky_relu(input, negative_slope=0.2))
+        [[-0.2  4.  -1.6]
+         [ 2.  -1.   9. ]]
+    """
+    return leaky_relu_impl(input, negative_slope)
+
+
+def matmul(input, mat2):
+    r"""
+    None
+    """
+    return matmul_impl(input, mat2)
+
+
+def mean(input, axis=None, keep_dims=False, dtype=None):
+    r"""
+    By default, reduces all dimensions of a tensor by averaging all of their elements.
+    Alternatively, reduces `input` along the specified `axis`. `keep_dims`
+    determines whether the dimensions of the output and input are the same.
+
+    Note:
+        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
+
+    Args:
+        input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
+            :math:`(N, *)` where :math:`*` means any number of additional dimensions.
+        axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None``,
+            reduce all dimensions. Only constant value is allowed. Assume the rank of `input` is r,
+            and the value range is [-r,r).
+        keep_dims (bool): If ``True``, keep these reduced dimensions and the length is 1.
+            If ``False``, don't keep these dimensions. Default: ``False``.
+        dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None``.
+
+    Returns:
+        Tensor, has the same data type as input tensor.
+
+        - If `axis` is ``None``, and `keep_dims` is ``False``,
+          the output is a 0-D tensor representing the mean of all elements in the input tensor.
+        - If `axis` is int, set as 1, and `keep_dims` is ``False``,
+          the shape of output is :math:`(x_0, x_2, ..., x_R)`.
+        - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False``,
+          the shape of output is :math:`(x_0, x_3, ..., x_R)`.
+        - If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False``,
+          the shape of output is :math:`(x_0, x_3, ..., x_R)`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
+        TypeError: If `keep_dims` is not a bool.
+        ValueError: If `axis` is out of range.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
+        >>> output = ops.mean(x, 1, keep_dims=True)
+        >>> result = output.shape
+        >>> print(result)
+        (3, 1, 5, 6)
+        >>> # case 1: Reduces a dimension by averaging all elements in the dimension.
+        >>> x = Tensor(np.array([[[2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2]],
+        ...                      [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
+        ...                      [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
+        ...            mindspore.float32)
+        >>> output = ops.mean(x)
+        >>> print(output)
+        5.0
+        >>> print(output.shape)
+        ()
+        >>> # case 2: Reduces a dimension along the axis 0
+        >>> output = ops.mean(x, 0, True)
+        >>> print(output)
+        [[[4. 4. 4. 4. 4. 4.]
+          [5. 5. 5. 5. 5. 5.]
+          [6. 6. 6. 6. 6. 6.]]]
+        >>> # case 3: Reduces a dimension along the axis 1
+        >>> output = ops.mean(x, 1, True)
+        >>> print(output)
+        [[[2. 2. 2. 2. 2. 2.]]
+         [[5. 5. 5. 5. 5. 5.]]
+         [[8. 8. 8. 8. 8. 8.]]]
+        >>> # case 4: Reduces a dimension along the axis 2
+        >>> output = ops.mean(x, 2, True)
+        >>> print(output)
+        [[[ 2.]
+          [ 2.]
+          [ 2.]]
+         [[ 4.]
+          [ 5.]
+          [ 6.]]
+         [[ 6.]
+          [ 8.]
+          [10.]]]
+    """
+    return mean_impl(input, axis, keep_dims, dtype)
+
+
+def softplus(input, beta=1, threshold=20):
+    r"""
+    None
+    """
+    return softplus_impl(input, beta, threshold)
+
+
+def stack(tensors, dim=0):
+    r"""
+    Stacks a list of tensors along the specified dim.
+
+    Stacks a list of input tensors with the same rank `R`; the output is a tensor of rank `(R+1)`.
+
+    Given input tensors of shape :math:`(x_1, x_2, ..., x_R)`. Set the number of input tensors as `N`.
+    If :math:`dim \ge 0`, the shape of the output tensor is
+    :math:`(x_1, x_2, ..., x_{dim}, N, x_{dim+1}, ..., x_R)`.
+
+    Args:
+        tensors (Union[tuple, list]): A Tuple or list of Tensor objects with the same shape and type.
+        dim (int): Dimension to stack. The range is [-(R+1), R+1). Default: ``0``.
+
+    Returns:
+        Tensor. A stacked Tensor with the same type as `tensors`.
+
+    Raises:
+        TypeError: If the data types of elements in `tensors` are not the same.
+        ValueError: If the length of `tensors` is not greater than zero;
+            or if `dim` is out of the range [-(R+1), R+1);
+            or if the shapes of elements in `tensors` are not the same.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> import numpy as np
+        >>> data1 = Tensor(np.array([0, 1]).astype(np.float32))
+        >>> data2 = Tensor(np.array([2, 3]).astype(np.float32))
+        >>> output = mint.stack([data1, data2], 0)
+        >>> print(output)
+        [[0. 1.]
+         [2. 3.]]
+    """
+    return stack_impl(tensors, dim)
+
+
 def sub(input, other, alpha=1):
     r"""
     Subtracts scaled other value from input Tensor.
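
Three of the wrappers added above (`ffn`, `matmul`, `softplus`) ship with literal `None` docstrings, and `ffn`'s return statement references a `converted_activation` name that is not defined anywhere in this hunk, so either its definition lives outside the shown context or the generated code is broken as released. Below is a hedged sketch of the two self-explanatory ones, assuming they mirror the conventional matmul/softplus contracts and assuming the same `mindspore.ops.extend` export path the `bmm` example uses.

# Hedged sketch only: the import path is assumed to match the bmm docstring
# above, and the semantics are assumed to be the conventional ones, since the
# released docstrings are literally "None".
import numpy as np
import mindspore
from mindspore import Tensor
from mindspore.ops.extend import matmul, softplus  # assumed export path

x = Tensor(np.ones((4, 8)), mindspore.float32)
w = Tensor(np.ones((8, 2)), mindspore.float32)
print(matmul(x, w).shape)  # expected (4, 2), assuming standard matmul

t = Tensor(np.array([0.1, 0.2, 30.0]), mindspore.float32)
# Assuming the usual contract: softplus(x) = log(1 + exp(beta*x)) / beta,
# falling back to the identity for inputs where beta*x > threshold.
print(softplus(t, beta=1, threshold=20))
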
@@ -128,3 +395,10 @@ def sub(input, other, alpha=1):
     """
     return sub_impl(input, other, alpha)

+
+def topk(input, k, dim=-1, largest=True, sorted=True):
+    r"""
+    None
+    """
+    return topk_impl(input, k, dim, largest, sorted)
+
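
The second hunk appends `topk`, again with a `None` docstring. Its signature matches the conventional top-k contract (the largest or smallest `k` entries along `dim`, plus their indices); here is a sketch under that assumption, with the same assumed export path as above.

# Assumed behavior: (values, indices) of the k largest entries along `dim`,
# in sorted order when sorted=True -- the conventional top-k contract. The
# export path and the tuple return are assumptions, not verified against rc2.
import numpy as np
import mindspore
from mindspore import Tensor
from mindspore.ops.extend import topk  # assumed export path

x = Tensor(np.array([1.0, 5.0, 3.0, 2.0]), mindspore.float32)
values, indices = topk(x, 2)
print(values)   # expected [5. 3.]
print(indices)  # expected [1 2]
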