mindspore-2.3.0rc1-cp37-cp37m-manylinux1_x86_64.whl → mindspore-2.3.0rc2-cp37-cp37m-manylinux1_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (226)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +1 -1
  3. mindspore/_akg/akg/utils/tbe_codegen_utils.py +13 -3
  4. mindspore/_c_dataengine.cpython-37m-x86_64-linux-gnu.so +0 -0
  5. mindspore/_c_expression.cpython-37m-x86_64-linux-gnu.so +0 -0
  6. mindspore/_checkparam.py +20 -0
  7. mindspore/_extends/parse/parser.py +1 -1
  8. mindspore/_extends/parse/standard_method.py +6 -5
  9. mindspore/_mindspore_offline_debug.cpython-37m-x86_64-linux-gnu.so +0 -0
  10. mindspore/amp.py +5 -5
  11. mindspore/bin/cache_admin +0 -0
  12. mindspore/bin/cache_server +0 -0
  13. mindspore/boost/boost_cell_wrapper.py +1 -1
  14. mindspore/boost/group_loss_scale_manager.py +1 -1
  15. mindspore/common/__init__.py +4 -2
  16. mindspore/common/_register_for_recompute.py +48 -0
  17. mindspore/common/_stub_tensor.py +1 -0
  18. mindspore/common/api.py +56 -4
  19. mindspore/common/dtype.py +5 -3
  20. mindspore/common/dump.py +2 -2
  21. mindspore/common/hook_handle.py +51 -4
  22. mindspore/common/initializer.py +1 -1
  23. mindspore/common/jit_config.py +17 -6
  24. mindspore/common/parameter.py +7 -2
  25. mindspore/common/recompute.py +247 -0
  26. mindspore/common/sparse_tensor.py +2 -2
  27. mindspore/common/symbol.py +1 -1
  28. mindspore/common/tensor.py +74 -36
  29. mindspore/communication/__init__.py +3 -3
  30. mindspore/communication/management.py +30 -30
  31. mindspore/context.py +28 -15
  32. mindspore/dataset/__init__.py +5 -5
  33. mindspore/dataset/audio/__init__.py +2 -2
  34. mindspore/dataset/audio/transforms.py +51 -51
  35. mindspore/dataset/callback/ds_callback.py +2 -2
  36. mindspore/dataset/engine/cache_client.py +1 -1
  37. mindspore/dataset/engine/datasets.py +3 -3
  38. mindspore/dataset/engine/datasets_audio.py +14 -14
  39. mindspore/dataset/engine/datasets_standard_format.py +3 -3
  40. mindspore/dataset/engine/datasets_text.py +38 -38
  41. mindspore/dataset/engine/datasets_user_defined.py +3 -3
  42. mindspore/dataset/engine/datasets_vision.py +68 -68
  43. mindspore/dataset/text/__init__.py +3 -3
  44. mindspore/dataset/text/transforms.py +26 -26
  45. mindspore/dataset/transforms/__init__.py +1 -1
  46. mindspore/dataset/vision/__init__.py +3 -3
  47. mindspore/dataset/vision/transforms.py +92 -92
  48. mindspore/dataset/vision/utils.py +1 -1
  49. mindspore/experimental/optim/adadelta.py +2 -2
  50. mindspore/experimental/optim/adagrad.py +2 -2
  51. mindspore/experimental/optim/adam.py +2 -2
  52. mindspore/experimental/optim/adamax.py +2 -2
  53. mindspore/experimental/optim/adamw.py +2 -2
  54. mindspore/experimental/optim/asgd.py +2 -2
  55. mindspore/experimental/optim/lr_scheduler.py +24 -20
  56. mindspore/experimental/optim/nadam.py +2 -2
  57. mindspore/experimental/optim/optimizer.py +1 -1
  58. mindspore/experimental/optim/radam.py +2 -2
  59. mindspore/experimental/optim/rmsprop.py +2 -2
  60. mindspore/experimental/optim/rprop.py +2 -2
  61. mindspore/experimental/optim/sgd.py +2 -2
  62. mindspore/hal/stream.py +2 -0
  63. mindspore/include/mindapi/base/types.h +5 -0
  64. mindspore/lib/libdnnl.so.2 +0 -0
  65. mindspore/lib/libmindspore.so +0 -0
  66. mindspore/lib/libmindspore_backend.so +0 -0
  67. mindspore/lib/libmindspore_common.so +0 -0
  68. mindspore/lib/libmindspore_core.so +0 -0
  69. mindspore/lib/libmindspore_glog.so.0 +0 -0
  70. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  71. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  72. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  73. mindspore/lib/libmindspore_shared_lib.so +0 -0
  74. mindspore/lib/libopencv_core.so.4.5 +0 -0
  75. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  76. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  77. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  78. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6 -6
  79. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  80. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  81. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  82. mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
  83. mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
  84. mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
  85. mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
  86. mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
  87. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  88. mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
  89. mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
  90. mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
  91. mindspore/log.py +2 -2
  92. mindspore/mint/__init__.py +457 -0
  93. mindspore/mint/nn/__init__.py +430 -0
  94. mindspore/mint/nn/functional.py +424 -0
  95. mindspore/mint/optim/__init__.py +24 -0
  96. mindspore/mint/optim/adamw.py +186 -0
  97. mindspore/multiprocessing/__init__.py +4 -0
  98. mindspore/nn/__init__.py +3 -0
  99. mindspore/nn/cell.py +51 -47
  100. mindspore/nn/extend/__init__.py +29 -0
  101. mindspore/nn/extend/basic.py +140 -0
  102. mindspore/nn/extend/embedding.py +143 -0
  103. mindspore/nn/extend/layer/__init__.py +27 -0
  104. mindspore/nn/extend/layer/normalization.py +107 -0
  105. mindspore/nn/extend/pooling.py +117 -0
  106. mindspore/nn/generator.py +297 -0
  107. mindspore/nn/layer/basic.py +109 -1
  108. mindspore/nn/layer/container.py +2 -2
  109. mindspore/nn/layer/conv.py +6 -6
  110. mindspore/nn/layer/embedding.py +1 -1
  111. mindspore/nn/layer/normalization.py +21 -43
  112. mindspore/nn/layer/padding.py +4 -0
  113. mindspore/nn/optim/ada_grad.py +2 -2
  114. mindspore/nn/optim/adadelta.py +1 -1
  115. mindspore/nn/optim/adafactor.py +1 -1
  116. mindspore/nn/optim/adam.py +7 -7
  117. mindspore/nn/optim/adamax.py +2 -2
  118. mindspore/nn/optim/adasum.py +2 -2
  119. mindspore/nn/optim/asgd.py +2 -2
  120. mindspore/nn/optim/ftrl.py +1 -1
  121. mindspore/nn/optim/lamb.py +3 -3
  122. mindspore/nn/optim/lars.py +1 -1
  123. mindspore/nn/optim/lazyadam.py +2 -2
  124. mindspore/nn/optim/momentum.py +2 -2
  125. mindspore/nn/optim/optimizer.py +2 -2
  126. mindspore/nn/optim/proximal_ada_grad.py +2 -2
  127. mindspore/nn/optim/rmsprop.py +2 -2
  128. mindspore/nn/optim/rprop.py +2 -2
  129. mindspore/nn/optim/sgd.py +2 -2
  130. mindspore/nn/optim/thor.py +2 -2
  131. mindspore/nn/wrap/cell_wrapper.py +9 -9
  132. mindspore/nn/wrap/grad_reducer.py +5 -5
  133. mindspore/ops/_grad_experimental/grad_comm_ops.py +4 -2
  134. mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -2
  135. mindspore/ops/_vmap/vmap_math_ops.py +27 -8
  136. mindspore/ops/_vmap/vmap_nn_ops.py +66 -8
  137. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +73 -1
  138. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +12 -3
  139. mindspore/ops/auto_generate/gen_arg_handler.py +24 -0
  140. mindspore/ops/auto_generate/gen_extend_func.py +274 -0
  141. mindspore/ops/auto_generate/gen_ops_def.py +889 -22
  142. mindspore/ops/auto_generate/gen_ops_prim.py +3541 -253
  143. mindspore/ops/auto_generate/pyboost_inner_prim.py +282 -0
  144. mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -1
  145. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +9 -0
  146. mindspore/ops/extend/__init__.py +9 -1
  147. mindspore/ops/extend/array_func.py +134 -27
  148. mindspore/ops/extend/math_func.py +3 -3
  149. mindspore/ops/extend/nn_func.py +363 -2
  150. mindspore/ops/function/__init__.py +19 -2
  151. mindspore/ops/function/array_func.py +463 -439
  152. mindspore/ops/function/clip_func.py +7 -18
  153. mindspore/ops/function/grad/grad_func.py +5 -5
  154. mindspore/ops/function/linalg_func.py +4 -4
  155. mindspore/ops/function/math_func.py +260 -243
  156. mindspore/ops/function/nn_func.py +825 -62
  157. mindspore/ops/function/random_func.py +73 -4
  158. mindspore/ops/function/sparse_unary_func.py +1 -1
  159. mindspore/ops/function/vmap_func.py +1 -1
  160. mindspore/ops/functional.py +2 -2
  161. mindspore/ops/op_info_register.py +1 -31
  162. mindspore/ops/operations/__init__.py +2 -3
  163. mindspore/ops/operations/_grad_ops.py +2 -107
  164. mindspore/ops/operations/_inner_ops.py +5 -5
  165. mindspore/ops/operations/_sequence_ops.py +2 -2
  166. mindspore/ops/operations/array_ops.py +11 -233
  167. mindspore/ops/operations/comm_ops.py +32 -32
  168. mindspore/ops/operations/custom_ops.py +7 -89
  169. mindspore/ops/operations/manually_defined/ops_def.py +329 -4
  170. mindspore/ops/operations/math_ops.py +13 -163
  171. mindspore/ops/operations/nn_ops.py +9 -316
  172. mindspore/ops/operations/random_ops.py +1 -1
  173. mindspore/ops/operations/sparse_ops.py +3 -3
  174. mindspore/ops/primitive.py +2 -2
  175. mindspore/ops_generate/arg_dtype_cast.py +12 -3
  176. mindspore/ops_generate/arg_handler.py +24 -0
  177. mindspore/ops_generate/gen_ops_inner_prim.py +2 -0
  178. mindspore/ops_generate/gen_pyboost_func.py +13 -6
  179. mindspore/ops_generate/pyboost_utils.py +2 -17
  180. mindspore/parallel/__init__.py +3 -2
  181. mindspore/parallel/_auto_parallel_context.py +106 -1
  182. mindspore/parallel/_parallel_serialization.py +34 -2
  183. mindspore/parallel/_utils.py +16 -0
  184. mindspore/parallel/algo_parameter_config.py +4 -4
  185. mindspore/parallel/checkpoint_transform.py +249 -77
  186. mindspore/parallel/cluster/process_entity/_api.py +1 -1
  187. mindspore/parallel/parameter_broadcast.py +1 -1
  188. mindspore/parallel/shard.py +1 -1
  189. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +1 -0
  190. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +17 -5
  191. mindspore/profiler/parser/ascend_msprof_exporter.py +3 -3
  192. mindspore/profiler/parser/ascend_msprof_generator.py +10 -3
  193. mindspore/profiler/parser/ascend_op_generator.py +26 -9
  194. mindspore/profiler/parser/ascend_timeline_generator.py +7 -4
  195. mindspore/profiler/parser/profiler_info.py +11 -1
  196. mindspore/profiler/profiling.py +13 -5
  197. mindspore/rewrite/api/node.py +12 -12
  198. mindspore/rewrite/api/symbol_tree.py +11 -11
  199. mindspore/run_check/_check_version.py +1 -1
  200. mindspore/safeguard/rewrite_obfuscation.py +2 -2
  201. mindspore/train/amp.py +4 -4
  202. mindspore/train/anf_ir_pb2.py +8 -2
  203. mindspore/train/callback/_backup_and_restore.py +2 -2
  204. mindspore/train/callback/_callback.py +4 -4
  205. mindspore/train/callback/_checkpoint.py +2 -2
  206. mindspore/train/callback/_early_stop.py +2 -2
  207. mindspore/train/callback/_landscape.py +4 -4
  208. mindspore/train/callback/_loss_monitor.py +2 -2
  209. mindspore/train/callback/_on_request_exit.py +2 -2
  210. mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
  211. mindspore/train/callback/_summary_collector.py +2 -2
  212. mindspore/train/callback/_time_monitor.py +2 -2
  213. mindspore/train/dataset_helper.py +8 -3
  214. mindspore/train/loss_scale_manager.py +2 -2
  215. mindspore/train/metrics/metric.py +3 -3
  216. mindspore/train/mind_ir_pb2.py +22 -17
  217. mindspore/train/model.py +15 -15
  218. mindspore/train/serialization.py +18 -18
  219. mindspore/train/summary/summary_record.py +7 -7
  220. mindspore/train/train_thor/convert_utils.py +3 -3
  221. mindspore/version.py +1 -1
  222. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +1 -1
  223. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +226 -212
  224. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
  225. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
  226. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
@@ -31,6 +31,9 @@ from mindspore.ops.operations._inner_ops import DynamicBroadcastTo
 from mindspore.ops.operations._sequence_ops import TupleToTensor
 from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
 from mindspore.ops.operations._sequence_ops import TensorToList
+from mindspore.ops.auto_generate import OnesLikeExt, ZerosLikeExt, FillScalar, FillTensor, Arange, Chunk
+from mindspore.ops.auto_generate.gen_ops_prim import SplitTensor
+from mindspore.ops.auto_generate.gen_ops_prim import SplitWithSize, RepeatInterleave
 
 from mindspore.ops.operations.array_ops import (
     UniqueConsecutive,
@@ -48,8 +51,6 @@ from mindspore.ops.operations.array_ops import (
     Expand,
     Lstsq,
     Mvlgamma,
-    Tril,
-    Argmax,
     ArgMaxWithValue,
     ArgMinWithValue
 )
@@ -61,8 +62,9 @@ from mindspore._c_expression import Tensor as Tensor_
 from mindspore.ops._utils.utils import ms_arrange
 
 from mindspore.ops.auto_generate import cat, range, scatter_nd, deepcopy, masked_fill, diagonal, expand_dims, \
-    nonzero, reverse, transpose, unsorted_segment_sum, diag, gather, gather_d, gather_nd, reshape, broadcast_to, \
-    strided_slice
+    nonzero, flip, transpose, tril, triu, unsorted_segment_sum, diag, gather, gather_d, gather_nd, reshape, \
+    broadcast_to, strided_slice, ones, zeros, max_, min_, select
+from mindspore.ops.auto_generate.gen_ops_prim import scatter_add_ext_op
 from mindspore.ops.operations.manually_defined import tile, rank, scalar_cast
 
 arg_max_with_value_ = ArgMaxWithValue()
@@ -83,7 +85,6 @@ lstsq_ = Lstsq()
 masked_select_ = P.MaskedSelect()
 matrix_band_part_ = P.array_ops.MatrixBandPart()
 ones_ = P.Ones()
-ones_like_ = P.OnesLike()
 population_count_ = P.PopulationCount()
 range_ = P.Range()
 rank_ = P.Rank()
@@ -99,6 +100,8 @@ scatter_mul_ = P.ScatterMul()
 scatter_nd_ = P.ScatterNd()
 scatter_update_ = P.ScatterUpdate()
 shape_ = P.Shape()
+split_tensor = SplitTensor()
+split_with_size = SplitWithSize()
 size_ = P.Size()
 tensor_scatter_add_ = P.TensorScatterAdd()
 tensor_scatter_div_ = P.TensorScatterDiv()
@@ -119,7 +122,15 @@ unsorted_segment_max_ = P.UnsortedSegmentMax()
 unsorted_segment_min_ = P.UnsortedSegmentMin()
 unsorted_segment_prod_ = P.UnsortedSegmentProd()
 unsorted_segment_sum_ = P.UnsortedSegmentSum()
+ones_like_ = P.OnesLike()
 zeros_like_ = P.ZerosLike()
+ones_like_ext_ = OnesLikeExt()
+zeros_like_ext_ = ZerosLikeExt()
+fill_scalar_ = FillScalar()
+fill_tensor_ = FillTensor()
+arange_ = Arange()
+chunk_ = Chunk()
+repeat_interleave_ = RepeatInterleave()
 
 
 @_primexpr
@@ -258,16 +269,84 @@ def arange(start=0, end=None, step=1, *, dtype=None):
     return data
 
 
+def arange_ext(start=0, end=None, step=1, *, dtype=None):
+    r"""
+    Creates a sequence of numbers that begins at `start` and extends by increments of
+    `step` up to but not including `end`.
+
+    Args:
+        start (Union[float, int, Tensor], optional): The start of the interval.
+            If Tensor, the shape must be :math:`()` . Default: ``0`` .
+        end (Union[float, int, Tensor], optional): The end of the interval, exclusive.
+            If Tensor, the shape must be :math:`()`.
+            Default: ``None`` . If ``None`` , it defaults to the value of `start`, and 0 is used as the starting value.
+        step (Union[float, int, Tensor], optional): Number that increments `start`.
+            If Tensor, the shape must be :math:`()`. Default: ``1`` .
+
+    Keyword Args:
+        dtype (mindspore.dtype, optional): The required data type of returned Tensor. Default: ``None`` .
+            When `dtype` is not specified or ``None``:
+
+            If `start`, `end`, and `step` are all integers, the dtype of output is int64,
+
+            If `start`, `end`, and `step` contain at least one floating-point number, the dtype of output is float32.
+
+    Returns:
+        A 1-D Tensor, with the same type as the inputs.
+
+    Raises:
+        TypeError: If `start`, `end` or `step` is not an int or a float or a TensorScalar(Special Tensor with shape ())
+            in valid dtypes.
+        ValueError: If `step` = 0.
+        ValueError: If `start` >= `end` when `step` > 0.
+        ValueError: If `start` <= `end` when `step` < 0.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore as ms
+        >>> from mindspore import Tensor, mint
+        >>> output = mint.arange(1, 6)
+        >>> print(output)
+        [1 2 3 4 5]
+        >>> print(output.dtype)
+        Int64
+        >>> output = mint.arange(0, 3, 1.2)
+        >>> print(output)
+        [0.  1.2 2.4]
+        >>> print(output.dtype)
+        Float32
+        >>> output = mint.arange(7, 1, -2)
+        >>> print(output)
+        [7 5 3]
+        >>> print(output.dtype)
+        Int64
+        >>> output = mint.arange(ms.Tensor(12.0, dtype=ms.float64), 2, ms.Tensor(-1.0, dtype=ms.float32))
+        >>> print(output)
+        [12. 11. 10.  9.  8.  7.  6.  5.  4.  3.]
+        >>> print(output.dtype)
+        Float32
+    """
+    if end is None:
+        start, end = 0, start
+
+    out = arange_(start, end, step)
+    if dtype is not None:
+        out = cast_(out, dtype)
+    return out
+
+
 def concat(tensors, axis=0):
     """
     Alias for :func:`mindspore.ops.cat()`.
 
     Tutorial Examples:
-        - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/r2.3.q1/beginner/tensor.html#tensor-operation>`_
+        - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/master/beginner/tensor.html#tensor-operation>`_
         - `Vision Transformer Image Classification - Building ViT as a whole
-          <https://mindspore.cn/tutorials/application/en/r2.3.q1/cv/vit.html#building-vit-as-a-whole>`_
+          <https://mindspore.cn/tutorials/application/en/master/cv/vit.html#building-vit-as-a-whole>`_
         - `Sentiment Classification Implemented by RNN - Dense
-          <https://mindspore.cn/tutorials/application/en/r2.3.q1/nlp/sentiment_analysis.html#dense>`_
+          <https://mindspore.cn/tutorials/application/en/master/nlp/sentiment_analysis.html#dense>`_
     """
     return cat(tensors, axis)
 
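
Note on the new `arange_ext` (surfaced as `mint.arange` in its own examples): the `end is None` branch gives it Python's `range` convention, where a single positional argument is the stop value with an implicit start of 0. A minimal pure-Python sketch of just that normalization (the actual computation goes through the `arange_` primitive bound earlier in this file):

```python
def _normalize_arange_args(start=0, end=None, step=1):
    # One positional value is treated as the stop, as in Python's range().
    if end is None:
        start, end = 0, start
    return start, end, step

assert _normalize_arange_args(5) == (0, 5, 1)       # arange(5)  -> 0..4
assert _normalize_arange_args(1, 6) == (1, 6, 1)    # arange(1, 6)
assert _normalize_arange_args(7, 1, -2) == (7, 1, -2)
```
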
@@ -391,25 +470,25 @@ def hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype
     return out
 
 
-def where(condition, x, y):
+def where(condition, input, other):
     r"""
-    Selects elements from `x` or `y` based on `condition` and returns a tensor.
+    Selects elements from `input` or `other` based on `condition` and returns a tensor.
 
     .. math::
-        output_i = \begin{cases} x_i,\quad &if\ condition_i \\ y_i,\quad &otherwise \end{cases}
+        output_i = \begin{cases} input_i,\quad &if\ condition_i \\ other_i,\quad &otherwise \end{cases}
 
     Args:
-        condition (Tensor[bool]): If True, yield `x`, otherwise yield `y`.
-        x (Union[Tensor, Scalar]): When `condition` is True, values to select from.
-        y (Union[Tensor, Scalar]): When `condition` is False, values to select from.
+        condition (Tensor[bool]): If True, yield `input`, otherwise yield `other`.
+        input (Union[Tensor, Scalar]): When `condition` is True, values to select from.
+        other (Union[Tensor, Scalar]): When `condition` is False, values to select from.
 
     Returns:
-        Tensor, elements are selected from `x` and `y`.
+        Tensor, elements are selected from `input` and `other`.
 
     Raises:
         TypeError: If `condition` is not a Tensor.
-        TypeError: If both `x` and `y` are scalars.
-        ValueError: If `condition`, `x` and `y` can not broadcast to each other.
+        TypeError: If both `input` and `other` are scalars.
+        ValueError: If `condition`, `input` and `other` can not broadcast to each other.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -426,25 +505,15 @@ def where(condition, x, y):
         [[0. 1.]
          [2. 1.]]
     """
-    if not isinstance(condition, Tensor):
-        raise TypeError(f"For 'where', 'condition' must be a Tensor, but got {type(condition)}.")
-    if isinstance(x, (int, float)):
-        if not isinstance(y, Tensor):
-            raise TypeError(
-                f"For 'where', at least one of 'x' and 'y' should be Tensor, but got x:{type(x)}, y:{type(y)}."
-            )
-        x = cast_(x, y.dtype)
-    elif isinstance(y, (int, float)):
-        if not isinstance(x, Tensor):
-            raise TypeError(
-                f"For 'where', at least one of 'x' and 'y' should be Tensor, but got x:{type(x)}, y:{type(y)}."
-            )
-        y = cast_(y, x.dtype)
-    output_shape = _calc_broadcast_shape(x.shape, y.shape, condition.shape)
-    condition = broadcast_to(condition, output_shape)
-    x = broadcast_to(x, output_shape)
-    y = broadcast_to(y, output_shape)
-    return tensor_select_(condition, x, y)
+    return tensor_select_(condition, input, other)
+
+
+def reverse(x, axis):
+    """
+    :func:`mindspore.ops.reverse` will be deprecated in the future.
+    Please use :func:`mindspore.ops.flip` instead.
+    """
+    return flip(x, axis)
 
 
 def ravel(input):
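
The Python-side validation, scalar casting and `_calc_broadcast_shape` expansion are gone: `where` now forwards straight to the `Select` primitive (which handles broadcasting itself), and `reverse` becomes a thin deprecation shim over the newly imported `flip`. A NumPy sketch of the semantics that are preserved (NumPy used here only as a behavioural reference, not the MindSpore kernel):

```python
import numpy as np

condition = np.array([[True, False], [False, True]])
x = np.array([[1.0, 2.0], [3.0, 4.0]])
# Scalars still broadcast against the tensor operand.
print(np.where(condition, x, 0.0))
# [[1. 0.]
#  [0. 4.]]
```
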
@@ -648,8 +717,8 @@ def fill(type, shape, value):  # pylint: disable=redefined-outer-name
 
     Args:
         type (mindspore.dtype): The specified type of output tensor. The data type only supports
-            `bool_ <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_ and
-            `number <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_ .
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ and
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ .
         shape (Union(Tensor, tuple[int])): The specified shape of output tensor.
         value (Union(Tensor, number.Number, bool)): Value to fill the returned tensor.
 
@@ -724,6 +793,45 @@ def full(size, fill_value, *, dtype=None):  # pylint: disable=redefined-outer-name
     return ops.fill(dtype, size, fill_value)
 
 
+def full_ext(size, fill_value, *, dtype=None):  # pylint: disable=redefined-outer-name
+    """
+    Create a Tensor of the specified shape and fill it with the specified value.
+
+    Args:
+        size (Union(tuple[int], list[int])): The specified shape of output tensor.
+        fill_value (number.Number): Value to fill the returned tensor. Complex numbers are not supported for now.
+
+    Keyword Args:
+        dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for details,
+            please refer to :class:`mindspore.dtype` . Default: ``None`` .
+
+    Returns:
+        Tensor.
+
+    Raises:
+        TypeError: If `size` is not a tuple or list.
+        ValueError: The element in `size` is less than 0.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> from mindspore import ops
+        >>> output = ops.full((2, 2), 1)
+        >>> print(output)
+        [[1. 1.]
+         [1. 1.]]
+        >>> output = ops.full((3, 3), 0)
+        >>> print(output)
+        [[0. 0. 0.]
+         [0. 0. 0.]
+         [0. 0. 0.]]
+    """
+    if isinstance(fill_value, Tensor):
+        return fill_tensor_(size, fill_value, dtype)
+    return fill_scalar_(size, fill_value, dtype)
+
+
 def full_like(input, fill_value, *, dtype=None):
     """
     Return a Tensor of the same shape as `input` and filled with `fill_value`.
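
`full_ext` is a thin two-way dispatch: Tensor fill values go to the new `FillTensor` primitive, plain Python scalars to `FillScalar` (both imported at the top of this diff). A pure-Python sketch of the dispatch shape, with string stand-ins for the primitive calls:

```python
def full_ext_dispatch(size, fill_value, dtype=None):
    # Mirrors the branch in full_ext above; the real code calls the primitive
    # instances fill_tensor_ / fill_scalar_ bound near the top of the file.
    kind = "FillTensor" if hasattr(fill_value, "shape") else "FillScalar"
    return kind, size, fill_value, dtype

print(full_ext_dispatch((2, 2), 1))  # ('FillScalar', (2, 2), 1, None)
```
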
@@ -834,6 +942,45 @@ def chunk(input, chunks, axis=0):
     return res
 
 
+def chunk_ext(input, chunks, dim=0):
+    """
+    Cut the input Tensor into `chunks` sub-tensors along the specified axis.
+
+    Note:
+        This function may return less than the specified number of chunks!
+
+    Args:
+        input (Tensor): A Tensor to be cut.
+        chunks (int): Number of sub-tensors to cut.
+        dim (int, optional): Specify the dimensions that you want to split. Default: ``0`` .
+
+    Returns:
+        A tuple of sub-tensors.
+
+    Raises:
+        TypeError: If argument `input` is not Tensor.
+        TypeError: The sum of `chunks` is not int.
+        TypeError: If argument `dim` is not int.
+        ValueError: If argument `dim` is out of range of :math:`[-input.ndim, input.ndim)` .
+        ValueError: If argument `chunks` is not positive number.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore
+        >>> from mindspore import Tensor
+        >>> input_x = np.arange(9).astype("float32")
+        >>> output = mindspore.mint.chunk(Tensor(input_x), 3)
+        >>> print(output)
+        (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00,  1.00000000e+00,  2.00000000e+00]),
+        Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00,  4.00000000e+00,  5.00000000e+00]),
+        Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00,  7.00000000e+00,  8.00000000e+00]))
+    """
+    return chunk_(input, chunks, dim)
+
+
 def fills(x, value):
     """
     `fills` is deprecated, please use `ops.fill` instead.
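
The "may return less than the specified number of chunks" note follows from the usual chunk-size rule: each chunk holds `ceil(len / chunks)` elements. A pure-Python illustration of that arithmetic (an assumption based on the documented behaviour, not taken from the kernel):

```python
import math

def chunk_sizes(length, chunks):
    # Each chunk takes ceil(length / chunks) elements; the tail may be short.
    size = math.ceil(length / chunks)
    return [min(size, length - start) for start in range(0, length, size)]

print(chunk_sizes(9, 3))  # [3, 3, 3] -> exactly 3 chunks
print(chunk_sizes(6, 4))  # [2, 2, 2] -> only 3 chunks despite chunks=4
```
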
@@ -853,55 +1000,46 @@ def fills(x, value):
     return fills_(x, value_)
 
 
-def ones(shape, dtype=None):  # pylint: disable=redefined-outer-name
-    r"""
-    Creates a tensor filled with value ones.
-
-    Creates a tensor with shape described by the first argument and fills it with value ones in type of the second
-    argument.
+def ones_like(input, *, dtype=None):
+    """
+    Returns a Tensor with a value of 1 and its shape is the same as the input.
 
     Args:
-        shape (Union[tuple[int], int, Tensor]): The specified shape of output tensor. Only positive integer or
-            tuple or Tensor containing positive integers are allowed. If it is a Tensor,
-            it must be a 0-D or 1-D Tensor with int32 or int64 dtypes.
-        dtype (:class:`mindspore.dtype`): The specified type of output tensor. If `dtype` is ``None`` ,
-            `mindspore.float32` will be used. Default: ``None`` .
+        input (Tensor): Tensor of any dimension.
+
+    Keyword Args:
+        dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is ``None`` ,
+            the dtype of the input tensor will be used. Default: ``None`` .
 
     Returns:
-        Tensor, has the same type and shape as input shape value.
+        Tensor, has the same shape as `input` but filled with ones.
 
     Raises:
-        TypeError: If `shape` is not tuple, int or Tensor.
+        TypeError: If `input` is not a Tensor.
 
     Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
 
    Examples:
-        >>> import mindspore
-        >>> from mindspore import ops
-        >>> output = ops.ones((2, 2), mindspore.float32)
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
+        >>> output = ops.ones_like(x)
        >>> print(output)
-        [[1. 1.]
-         [1. 1.]]
+        [[1 1]
+         [1 1]]
     """
-    _dtype = mstype.float32 if dtype is None else dtype
-    value = Tensor(1, _dtype)
-    if isinstance(shape, int):
-        shape = tuple([shape])
-    elif isinstance(shape, list):
-        if not shape:
-            shape = Tensor_(shape, dtype=mstype.int64)
-        else:
-            shape = Tensor(shape, dtype=mstype.int64)
-    elif isinstance(shape, Tensor) and shape.ndim == 0 and shape.size == 1:
-        shape = shape.reshape(1)
-    output = fillv2_(shape, value)
+    output = ones_like_(input)
+    _dtype = input.dtype if dtype is None else dtype
+    output = cast_(output, _dtype)
     return output
 
 
-def ones_like(input, *, dtype=None):
-    """
-    Returns a Tensor with a value of 1 and its shape is the same as the input.
+def zeros_like(input, *, dtype=None):
+    r"""
+    Creates a tensor filled with 0, with the same size as input, and the given dtype.
+
+    If `dtype = None`, the tensor will have the same dtype as input `input`.
 
     Args:
         input (Tensor): Tensor of any dimension.
@@ -911,73 +1049,63 @@ def ones_like(input, *, dtype=None):
             the dtype of the input tensor will be used. Default: ``None`` .
 
     Returns:
-        Tensor, has the same shape as `input` but filled with ones.
+        Tensor, filled with 0.
 
     Raises:
-        TypeError: If `input` is not a Tensor.
+        TypeError: If dtype is not a MindSpore dtype.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
+        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
-        >>> output = ops.ones_like(x)
+        >>> x = Tensor(np.arange(4).reshape(2, 2))
+        >>> output = ops.zeros_like(x, dtype=mindspore.float32)
        >>> print(output)
-        [[1 1]
-         [1 1]]
+        [[0. 0.]
+         [0. 0.]]
     """
-    output = ones_like_(input)
     _dtype = input.dtype if dtype is None else dtype
+    output = zeros_like_(input)
     output = cast_(output, _dtype)
     return output
 
 
-def zeros(size, dtype=None):  # pylint: disable=redefined-outer-name
-    r"""
-    Creates a tensor filled with 0 with shape described by `shape` and fills it with value 0 in type of `dtype`.
+def ones_like_ext(input, *, dtype=None):
+    """
+    Returns a Tensor with a value of 1 and its shape is the same as the input.
 
     Args:
-        size (Union[tuple[int], int, Tensor]): The specified shape of output tensor. Only positive integer or
-            tuple or Tensor containing positive integers are allowed. If it is a Tensor,
-            it must be a 0-D or 1-D Tensor with int32 or int64 dtypes.
-        dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor. If `dtype` is ``None`` ,
-            mindspore.float32 will be used. Default: ``None`` .
+        input (Tensor): Tensor of any dimension.
+
+    Keyword Args:
+        dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is ``None`` ,
+            the dtype of the input tensor will be used. Default: ``None`` .
 
     Returns:
-        Tensor, has the same dtype and size as input.
+        Tensor, has the same shape as `input` but filled with ones.
 
     Raises:
-        TypeError: If `size` is not tuple, int or Tensor.
+        TypeError: If `input` is not a Tensor.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> import mindspore
-        >>> from mindspore import ops
-        >>> output = ops.zeros((2, 2), mindspore.float32)
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
+        >>> output = ops.mint.ones_like(x)
        >>> print(output)
-        [[0. 0.]
-         [0. 0.]]
+        [[1 1]
+         [1 1]]
     """
-    _dtype = mstype.float32 if dtype is None else dtype
-    value = Tensor(0, _dtype)
-    if isinstance(size, int):
-        size = tuple([size])
-    elif isinstance(size, list):
-        if not size:
-            size = Tensor_(size, dtype=mstype.int64)
-        else:
-            size = Tensor(size, dtype=mstype.int64)
-    elif isinstance(size, Tensor) and size.ndim == 0 and size.size == 1:
-        size = size.reshape(1)
-    output = fillv2_(size, value)
-    return output
+    return ones_like_ext_(input, dtype)
 
 
-def zeros_like(input, *, dtype=None):
+def zeros_like_ext(input, *, dtype=None):
     r"""
     Creates a tensor filled with 0, with the same size as input, and the given dtype.
 
@@ -1004,15 +1132,12 @@ def zeros_like(input, *, dtype=None):
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.arange(4).reshape(2, 2))
-        >>> output = ops.zeros_like(x, dtype=mindspore.float32)
+        >>> output = ops.mint.zeros_like(x, dtype=mindspore.float32)
        >>> print(output)
        [[0. 0.]
         [0. 0.]]
     """
-    _dtype = input.dtype if dtype is None else dtype
-    output = zeros_like_(input)
-    output = cast_(output, _dtype)
-    return output
+    return zeros_like_ext_(input, dtype)
 
 
 ##############################
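
Net effect of this group of hunks: the hand-rolled `ones`/`zeros` built on `fillv2_` are deleted in favour of the generated `ones`/`zeros` imported from `mindspore.ops.auto_generate` (see the import hunk near the top), and the new `ones_like_ext`/`zeros_like_ext` push dtype resolution into the `OnesLikeExt`/`ZerosLikeExt` kernels. The dtype rule the old Python wrappers implemented with an explicit `cast_`, shown as a NumPy reference:

```python
import numpy as np

def zeros_like_ref(x, dtype=None):
    # dtype=None keeps the input dtype; otherwise the result is cast,
    # matching the cast_(output, _dtype) step of the old wrapper.
    return np.zeros_like(x, dtype=dtype)

print(zeros_like_ref(np.arange(4).reshape(2, 2), np.float32))
```
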
@@ -1273,7 +1398,7 @@ def size(input_x):
 
     Args:
         input_x (Tensor): Input parameters, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is
-            `number <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_.
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
 
     Returns:
         int. A scalar representing the elements' size of `input_x`, tensor is the number of elements
@@ -1517,171 +1642,6 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
     return reshape_(input, new_shape)
 
 
-@constexpr
-def _check_select_type_match(scalar, tensor_type, scalar_name, tensor_name):
-    if isinstance(scalar, int) and tensor_type != mstype.int32:
-        raise TypeError(f"For functional operator[select], the input[{scalar_name}] is int, "
-                        f"then the input[{tensor_name}] must be a Tensor of int32.")
-    if isinstance(scalar, float) and tensor_type != mstype.float32:
-        raise TypeError(f"For functional operator[select], the input[{scalar_name}] is float, "
-                        f"then the input[{tensor_name}] must be a Tensor of float32.")
-
-
-@_primexpr
-def _check_select_shape_match(input_shape, cond_shape, tensor_name):
-    if input_shape != cond_shape:
-        raise ValueError(f"For functional operator[select], the cond shape must be same as {tensor_name} shape.")
-
-
-@constexpr
-def _check_select_type(is_cond_tensor, is_x_scalar, is_y_scalar, is_x_tensor, is_y_tensor):
-    if not is_cond_tensor:
-        raise TypeError(f"For functional operator[select], the input[cond] must be a Tensor.")
-    if is_x_scalar and not is_y_tensor:
-        raise TypeError(f"For functional operator[select], the input[x] is int or float, "
-                        f"then the input[y] must be a Tensor.")
-    if is_y_scalar and not is_x_tensor:
-        raise TypeError(f"For functional operator[select], the input[y] is int or float, "
-                        f"then the input[x] must be a Tensor.")
-
-
-@constexpr
-def _check_select_shape_same(cond_shape, x_shape, y_shape):
-    """Check if input of select has same shape."""
-    return cond_shape == x_shape and x_shape == y_shape and cond_shape == y_shape
-
-
-@constexpr
-def get_max_value(x, y, z):
-    """Get the maximum value of x, y and z."""
-    if x >= y and x >= z:
-        return x
-    if y >= x and y >= z:
-        return y
-    return z
-
-
-@constexpr
-def _calc_broadcast_shape(cond_shape, x_shape, y_shape):
-    """Calculate broadcast shape for select"""
-    converted_shape = []
-    cond_reverse = cond_shape[::-1]
-    x_reverse = x_shape[::-1]
-    y_reverse = y_shape[::-1]
-    max_len = get_max_value(len(cond_reverse), len(x_reverse), len(y_reverse))
-    i = 0
-    while i < max_len:
-        cond_element = 1 if i >= len(cond_reverse) else cond_reverse[i]
-        x_element = 1 if i >= len(x_reverse) else x_reverse[i]
-        y_element = 1 if i >= len(y_reverse) else y_reverse[i]
-        broadcast_element = get_max_value(cond_element, x_element, y_element)
-        if cond_element not in (1, broadcast_element):
-            raise ValueError(f"For select, condition input can not broadcast at index {i}")
-        if x_element not in (1, broadcast_element):
-            raise ValueError(f"For select, x input can not broadcast at index {i}")
-        if y_element not in (1, broadcast_element):
-            raise ValueError(f"For select, y input can not broadcast at index {i}")
-        converted_shape.append(broadcast_element)
-        i = i + 1
-    converted_shape.reverse()
-    return tuple(converted_shape)
-
-
-def select(cond, x, y):
-    r"""
-    The conditional tensor determines whether the corresponding element in the output must be
-    selected from `x` (if true) or `y` (if false) based on the value of each element.
-
-    It can be defined as:
-
-    .. math::
-        out_i = \begin{cases}
-        x_i, & \text{if } cond_i \\
-        y_i, & \text{otherwise}
-        \end{cases}
-
-    Args:
-        cond (Tensor[bool]): The condition tensor, decides which element is chosen.
-            The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
-        x (Union[Tensor, int, float]): The first Tensor or number to be selected.
-            If x is a Tensor, the shape is or can be broadcadt to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
-            If x is an int or a float, it will be cast to the type of int32 or float32,
-            and broadcast to the same shape as y. One of x and y must be a Tensor.
-        y (Union[Tensor, int, float]): The second Tensor or number to be selected.
-            If y is a Tensor, The shape is or can be broadcadt to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
-            If y is an int or a float, it will be cast to the type of int32 or float32,
-            and broadcast to the same shape as x. One of x and y must be a Tensor.
-
-    Returns:
-        Tensor, has the same shape as `cond`.
-
-    Raises:
-        TypeError: If `x` or `y` is not a Tensor, int or float.
-        ValueError: The shapes of inputs can not be broadcast.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> # 1) Both inputs are Tensor
-        >>>
-        >>> cond = Tensor([True, False])
-        >>> x = Tensor([2,3], mindspore.float32)
-        >>> y = Tensor([1,2], mindspore.float32)
-        >>> output = ops.select(cond, x, y)
-        >>> print(output)
-        [2. 2.]
-        >>> # 2) y is a float
-        >>> cond = Tensor([True, False])
-        >>> x = Tensor([2,3], mindspore.float32)
-        >>> y = 2.0
-        >>> output = ops.select(cond, x, y)
-        >>> print(output)
-        [2. 2.]
-    """
-    is_x_scalar = isinstance(x, (int, float))
-    is_y_scalar = isinstance(y, (int, float))
-    is_x_tensor = isinstance(x, Tensor)
-    is_y_tensor = isinstance(y, Tensor)
-    is_cond_tensor = isinstance(cond, Tensor)
-    _check_select_type(is_cond_tensor, is_x_scalar, is_y_scalar, is_x_tensor, is_y_tensor)
-    input_x = x
-    input_y = y
-    if is_x_scalar:
-        _check_select_shape_match(y.shape, cond.shape, "y")
-        _check_select_type_match(x, y.dtype, "x", "y")
-        input_x = zeros_like_(y) + x
-        if isinstance(x, int):
-            input_x = cast_(input_x, mstype.int32)
-        else:
-            input_x = cast_(input_x, mstype.float32)
-
-    if is_y_scalar:
-        _check_select_shape_match(x.shape, cond.shape, "x")
-        _check_select_type_match(y, x.dtype, "y", "x")
-        input_y = zeros_like_(x) + y
-        if isinstance(y, int):
-            input_y = cast_(input_y, mstype.int32)
-        else:
-            input_y = cast_(input_y, mstype.float32)
-
-    if is_x_tensor and is_y_tensor and is_cond_tensor:
-        x_shape = ops.shape(x)
-        y_shape = ops.shape(y)
-        cond_shape = ops.shape(cond)
-        all_constant = ops.isconstant(cond_shape) and ops.isconstant(x_shape) and ops.isconstant(y_shape)
-        if all_constant and not _check_select_shape_same(cond_shape, x_shape, y_shape):
-            broadcast_shape = _calc_broadcast_shape(cond_shape, x_shape, y_shape)
-            new_cond = ops.broadcast_to(cond, broadcast_shape)
-            new_x = ops.broadcast_to(x, broadcast_shape)
-            new_y = ops.broadcast_to(y, broadcast_shape)
-            return tensor_select_(new_cond, new_x, new_y)
-
-    return tensor_select_(cond, input_x, input_y)
-
-
 def slice(input_x, begin, size):
     r"""
     Slices a tensor in the specified shape.
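
With this hunk the functional `select` and its helper checks move out of Python: `select` is now imported from `mindspore.ops.auto_generate` (import hunk above), so the shape/type validation and broadcast-shape computation happen inside the generated primitive. The deleted `_calc_broadcast_shape` implemented standard right-aligned three-way broadcasting, which NumPy computes identically:

```python
import numpy as np

# Equivalent of the removed _calc_broadcast_shape((2, 1), (1, 3), (2, 3)):
# align shapes from the right, each axis takes the non-1 size (or errors).
print(np.broadcast_shapes((2, 1), (1, 3), (2, 3)))  # (2, 3)
```
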
@@ -2882,7 +2842,7 @@ def gather_elements(input, dim, index):
         int32, int64. The value range of each index element is `[-input.shape(dim), input.shape(dim))`.
 
     Returns:
-        Tensor, has the same shape as `index` tensor and has the same data type with `input`.
+        Tensor, has the same shape as `index` and has the same data type with `input`.
 
     Raises:
         TypeError: If dtype of `dim` or `index` is neither int32 nor int64.
@@ -3281,6 +3241,66 @@ def scatter(input, axis, index, src):
     return ops.tensor_scatter_elements(input_x=input, indices=index, updates=src, axis=axis)
 
 
+def scatter_add_ext(input, dim, index, src):
+    """
+    Update the value in `src` to `input` according to the specified index.
+
+    Args:
+        input (Tensor): The target tensor. The rank of `input` must be at least 1.
+        dim (int): Which axis to scatter. Accepted range is [-r, r) where r = rank(input).
+        index (Tensor): The index to do update operation whose data type must be mindspore.int32 or
+            mindspore.int64. Same rank as `input` . And accepted range is [-s, s) where s is the size along axis.
+        src (Tensor): The tensor doing the update operation with `input` , has the same type as `input` ,
+            and the shape of `src` should be equal to the shape of `index` .
+
+    Returns:
+        Tensor, has the same shape and type as `input` .
+
+    Raises:
+        TypeError: If `index` is neither int32 nor int64.
+        ValueError: If anyone of the rank among `input` , `index` and `src` less than 1.
+        ValueError: If the shape of `src` is not equal to the shape of `index` .
+        ValueError: If the rank of `src` is not equal to the rank of `input` .
+        RuntimeError: If the data type of `input` and `src` conversion of Parameter
+            is required when data type conversion of Parameter is not supported.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore as ms
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([[1, 2, 3, 4, 5]]), dtype=ms.float32)
+        >>> src = Tensor(np.array([[8, 8]]), dtype=ms.float32)
+        >>> index = Tensor(np.array([[2, 4]]), dtype=ms.int64)
+        >>> out = ops.scatter_add_ext(input=input, dim=1, index=index, src=src)
+        >>> print(out)
+        [[ 1.  2. 11.  4. 13.]]
+        >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
+        >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
+        >>> index = Tensor(np.array([[0, 0, 0], [2, 2, 2], [4, 4, 4]]), dtype=ms.int64)
+        >>> out = ops.scatter_add_ext(input=input, dim=0, index=index, src=src)
+        >>> print(out)
+        [[1. 2. 3. 0. 0.]
+         [0. 0. 0. 0. 0.]
+         [4. 5. 6. 0. 0.]
+         [0. 0. 0. 0. 0.]
+         [7. 8. 9. 0. 0.]]
+        >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
+        >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
+        >>> index = Tensor(np.array([[0, 2, 4], [0, 2, 4], [0, 2, 4]]), dtype=ms.int64)
+        >>> out = ops.scatter_add_ext(input=input, dim=1, index=index, src=src)
+        >>> print(out)
+        [[1. 0. 2. 0. 3.]
+         [4. 0. 5. 0. 6.]
+         [7. 0. 8. 0. 9.]
+         [0. 0. 0. 0. 0.]
+         [0. 0. 0. 0. 0.]]
+    """
+    return scatter_add_ext_op(input, dim, index, src)
+
+
 def _get_slice_scatter_const(x_shape, axis, start, end, step):
     r"""
     Calculate the rank of input, embedded dimensions and index.
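
A NumPy cross-check of the `dim=0` example above, using `np.add.at` as the reference semantics `out[index[i, j], j] += src[i, j]` (NumPy is an analogy here, not the Ascend/GPU kernel):

```python
import numpy as np

out = np.zeros((5, 5), dtype=np.float32)
src = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
index = np.array([[0, 0, 0], [2, 2, 2], [4, 4, 4]])
cols = np.broadcast_to(np.arange(3), index.shape)
np.add.at(out, (index, cols), src)  # rows 0, 2 and 4 accumulate src rows
print(out)  # matches the docstring output
```
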
@@ -4622,7 +4642,6 @@ def _split_sub_tensors(x, split_size_or_sections, axis):
         sub_tensors.append(sliced_tensor)
     return sub_tensors
 
-
 def split(tensor, split_size_or_sections, axis=0):
     """
     Splits the Tensor into chunks along the given axis.
@@ -4690,127 +4709,52 @@ def split(tensor, split_size_or_sections, axis=0):
                         f"but got {type(split_size_or_sections)}")
     return tuple(res)
 
-
-def tril(input, diagonal=0):  # pylint: disable=redefined-outer-name
+def split_ext(tensor, split_size_or_sections, axis=0):
     """
-    Returns the lower triangle part of 'input' (elements that contain the diagonal and below),
-    and set the other elements to zeros.
-
-    Args:
-        input (Tensor): A Tensor with shape :math:`(x_1, x_2, ..., x_R)`. The rank must be at least 2.
-            Supporting all number types including bool.
-        diagonal (int, optional): An optional attribute indicates the diagonal to consider, default: 0,
-            indicating the main diagonal.
-
-    Returns:
-        Tensor, the same shape and data type as the input `x`.
-
-    Raises:
-        TypeError: If `x` is not a Tensor.
-        TypeError: If `diagonal` is not an int.
-        TypeError: If the type of `x` is neither number nor bool.
-        ValueError: If the rank of `x` is less than 2.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> result = ops.tril(x)
-        >>> print(result)
-        [[ 1  0  0  0]
-         [ 5  6  0  0]
-         [10 11 12  0]
-         [14 15 16 17]]
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> result = ops.tril(x, diagonal=1)
-        >>> print(result)
-        [[ 1  2  0  0]
-         [ 5  6  7  0]
-         [10 11 12 13]
-         [14 15 16 17]]
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> result = ops.tril(x, diagonal=-1)
-        >>> print(result)
-        [[ 0  0  0  0]
-         [ 5  0  0  0]
-         [10 11  0  0]
-         [14 15 16  0]]
-    """
-    tril_ = Tril(diagonal)
-    return tril_(input)
-
-
-def triu(input, diagonal=0):  # pylint: disable=redefined-outer-name
-    r"""
-    Returns the upper triangle part of 'input' (elements that contain the diagonal and below),
-    and set the other elements to zeros.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
+    Splits the Tensor into chunks along the given axis.
 
     Args:
-        input (Tensor): The input tensor with shape :math:`(M, N, *)` where * means any number of additional dimensions.
-        diagonal (int, optional): An optional attribute indicates the diagonal to consider, default: 0,
-            indicating the main diagonal.
+        tensor (Tensor): A Tensor to be divided.
+        split_size_or_sections (Union[int, tuple(int), list(int)]):
+            If `split_size_or_sections` is an int type, `tensor` will be split into equally sized chunks,
+            each chunk with size `split_size_or_sections`. Last chunk will be smaller than `split_size_or_sections`
+            if `tensor.shape[axis]` is not divisible by `split_size_or_sections`.
+            If `split_size_or_sections` is a list type, then `tensor` will be split into len(split_size_or_sections)
+            chunks with sizes `split_size_or_sections` along the given `axis`.
+        axis (int): The axis along which to split. Default: ``0`` .
 
     Returns:
-        Tensor, a tensor has the same shape and data type as input.
+        A tuple of sub-tensors.
 
     Raises:
-        TypeError: If `diagonal` is not an int.
-        TypeError: If `input` is not a Tensor.
-        ValueError: If the dimension of `input` is less than 2.
+        TypeError: If argument `tensor` is not Tensor.
+        TypeError: If argument `axis` is not Tensor.
+        ValueError: If argument `axis` is out of range of :math:`[-tensor.ndim, tensor.ndim)` .
+        TypeError: If each element in `split_size_or_sections` is not integer.
+        TypeError: If argument `split_size_or_sections` is not int, tuple(int) or list(int).
+        ValueError: The sum of `split_size_or_sections` is not equal to x.shape[axis].
 
     Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
+        ``Ascend``
 
     Examples:
         >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> result = ops.triu(x)
-        >>> print(result)
-        [[ 1  2  3  4]
-         [ 0  6  7  8]
-         [ 0  0 12 13]
-         [ 0  0  0 17]]
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> result = ops.triu(x, diagonal=1)
-        >>> print(result)
-        [[ 0  2  3  4]
-         [ 0  0  7  8]
-         [ 0  0  0 13]
-         [ 0  0  0  0]]
-        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
-        ...                      [ 5,  6,  7,  8],
-        ...                      [10, 11, 12, 13],
-        ...                      [14, 15, 16, 17]]))
-        >>> result = ops.triu(x, diagonal=-1)
-        >>> print(result)
-        [[ 1  2  3  4]
-         [ 5  6  7  8]
-         [ 0 11 12 13]
-         [ 0  0 16 17]]
-    """
-    return _get_cache_prim(P.Triu)(diagonal)(input)
+        >>> from mindspore import ops, Tensor
+        >>> input_x = np.arange(9).astype("float32")
+        >>> output = ops.split(Tensor(input_x), 3)
+        >>> print(output)
+        (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00,  1.00000000e+00,  2.00000000e+00]),
+        Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00,  4.00000000e+00,  5.00000000e+00]),
+        Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00,  7.00000000e+00,  8.00000000e+00]))
+    """
+    if isinstance(split_size_or_sections, int):
+        res = split_tensor(tensor, split_size_or_sections, axis)
+    elif isinstance(split_size_or_sections, (list, tuple)):
+        res = split_with_size(tensor, split_size_or_sections, axis)
+    else:
+        raise TypeError(f"Type of Argument `split_size_or_sections` should be integer, tuple(int) or list(int), " \
+                        f"but got {type(split_size_or_sections)}")
+    return res
 
 
 @_primexpr
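
The hand-written `tril`/`triu` wrappers deleted here are replaced by the generated `tril`/`triu` now imported from `mindspore.ops.auto_generate` (see the import hunk), and `split_ext` swaps the old Python slicing loop for two generated primitives: an `int` size dispatches to `SplitTensor`, an explicit section list to `SplitWithSize`. The size rule for the int case, as a pure-Python sketch of the documented behaviour (not the primitive itself):

```python
def split_sections(length, split_size):
    # Equal chunks of split_size; the last chunk absorbs the remainder.
    sections = [split_size] * (length // split_size)
    if length % split_size:
        sections.append(length % split_size)  # last chunk may be smaller
    return sections

print(split_sections(9, 3))   # [3, 3, 3]
print(split_sections(10, 3))  # [3, 3, 3, 1]
```
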
@@ -5191,7 +5135,7 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None):  # pylint: disable=redefined-outer-name
     if not input.shape:
         return (input, Tensor(0, dtype=mstype.int64))
     if axis is None:
-        return (reduce_max_(input), Tensor(0, dtype=mstype.int64))
+        return (max_(input), Tensor(0, dtype=mstype.int64))
     if initial is not None and not isinstance(initial, numbers.Number):
         raise TypeError(f"For 'max', 'initial' must be a scalar, but got {type(initial)}")
     if axis is not None and not isinstance(axis, int):
@@ -5202,51 +5146,6 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None):  # pylint: disable=redefined-outer-name
     return values, indices
 
 
-def argmax(input, dim=None, keepdim=False):
-    """
-    Return the indices of the maximum values of a tensor across a dimension.
-
-    Args:
-        input (Tensor): Input tensor.
-        dim (Union[int, None], optional): The dimension to reduce. If `dim` is ``None`` , the indices of the maximum
-            value within the flattened input will be returned. Default: ``None`` .
-        keepdim (bool, optional): Whether the output tensor retains the specified
-            dimension. Ignored if `dim` is None. Default: ``False`` .
-
-    Returns:
-        Tensor, indices of the maximum values across a dimension.
-
-    Raises:
-        TypeError: If `keepdim` is not bool.
-        ValueError: If `dim` is out of range.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
-        >>> output = ops.argmax(x, dim=-1)
-        >>> print(output)
-        [1 0 0]
-    """
-    _check_attr_dtype("keepdim", keepdim, [bool], "argmax")
-    if not input.shape:
-        return Tensor(0)
-    if input.dtype == mstype.bool_:
-        input = input.astype(mstype.int32)
-    is_dim_none = False
-    if dim is None:
-        input = reshape_(input, (-1,))
-        dim = 0
-        is_dim_none = True
-    out = _get_cache_prim(Argmax)(dim, mstype.int64)(input)
-    if keepdim and not is_dim_none:
-        out = expand_dims(out, dim)
-    return out
-
-
 def min(input, axis=None, keepdims=False, *, initial=None, where=None):  # pylint: disable=redefined-outer-name
     """
     Calculates the minimum value along with the given axis for the input tensor. It returns the minimum values and
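
The deleted wrapper implemented `dim=None` by flattening the input before calling the `Argmax` primitive; `'argmax'` also leaves `__all__` at the bottom of this diff, so the public symbol presumably resolves from the generated/manually-defined ops modules that change elsewhere in this release (an inference from the file list, not confirmed by this hunk). The flattening behaviour it implemented, as a NumPy reference:

```python
import numpy as np

x = np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]], dtype=np.float32)
print(np.argmax(x))           # 6 -> index into the flattened array (dim=None)
print(np.argmax(x, axis=-1))  # [1 0 0], the example from the old docstring
```
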
@@ -5307,7 +5206,7 @@ def min(input, axis=None, keepdims=False, *, initial=None, where=None):  # pylint: disable=redefined-outer-name
     if not input.shape:
         return (input, Tensor(0, dtype=mstype.int64))
     if axis is None:
-        return (reduce_min_(input), Tensor(0, dtype=mstype.int64))
+        return (min_(input), Tensor(0, dtype=mstype.int64))
     if initial is not None and not isinstance(initial, numbers.Number):
         raise TypeError(f"For 'min', 'initial' must be a scalar, but got {type(initial)}")
     if axis is not None and not isinstance(axis, int):
@@ -5455,7 +5354,7 @@ def topk(input, k, dim=None, largest=True, sorted=True):
 
     Args:
         input (Tensor): Input to be computed, data type must be float16, float32 or int32.
-        k (int): The number of top or bottom elements to be computed along the last dimension, constant input is needed.
+        k (int): The number of top or bottom elements to be computed along the last dimension.
         dim (int, optional): The dimension to sort along. Default: ``None`` .
         largest (bool, optional): If largest is ``False`` then the k smallest elements are returned.
             Default: ``True`` .
@@ -5522,6 +5421,80 @@ def topk(input, k, dim=None, largest=True, sorted=True):
     return res
 
 
+def topk_ext(input, k, dim=-1, largest=True, sorted=True):
+    r"""
+    Finds values and indices of the `k` largest or smallest entries along a given dimension.
+
+    .. warning::
+        - If sorted is set to False, it will use the aicpu operator, the performance may be reduced. In addition, due to
+          different memory layout and traversal methods on different platforms, the display order of calculation results
+          may be inconsistent when `sorted` is False.
+
+    If the `input` is a one-dimensional Tensor, finds the `k` largest or smallest entries in the Tensor,
+    and outputs its value and index as a Tensor. values[`k`] is the `k` largest item in `input`,
+    and its index is indices [`k`].
+
+    For a multi-dimensional matrix,
+    calculates the first or last `k` entries in a given dimension, therefore:
+
+    .. math::
+
+        values.shape = indices.shape
+
+    If the two compared elements are the same, the one with the smaller index value is returned first.
+
+    Args:
+        input (Tensor): Input to be computed, data type must be float16, float32 or int32.
+        k (int): The number of top or bottom elements to be computed along the last dimension.
+        dim (int, optional): The dimension to sort along. Default: ``-1`` .
+        largest (bool, optional): If largest is ``False`` then the k smallest elements are returned.
+            Default: ``True`` .
+        sorted (bool, optional): If ``True`` , the obtained elements will be sorted by the values in descending order.
+            If ``False`` , the obtained elements will not be sorted. Default: ``True`` .
+
+    Returns:
+        A tuple consisting of `values` and `indexes`.
+
+        - values (Tensor): The `k` largest or smallest elements in each slice of the given dimension.
+        - indices (Tensor): The indices of values within the last dimension of input.
+
+    Raises:
+        TypeError: If `sorted` is not a bool.
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `k` is not an int.
+        TypeError: If dtype of `input` is not one of the following: float16, float32 or int32.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore as ms
+        >>> from mindspore import ops
+        >>> x = ms.Tensor([[0.5368, 0.2447, 0.4302, 0.9673],
+        ...                [0.4388, 0.6525, 0.4685, 0.1868],
+        ...                [0.3563, 0.5152, 0.9675, 0.8230]], dtype=ms.float32)
+        >>> output = ops.topk_ext(x, 2, dim=1)
+        >>> print(output)
+        (Tensor(shape=[3, 2], dtype=Float32, value=
+        [[ 9.67299998e-01,  5.36800027e-01],
+         [ 6.52499974e-01,  4.68499988e-01],
+         [ 9.67499971e-01,  8.23000014e-01]]), Tensor(shape=[3, 2], dtype=Int32, value=
+        [[3, 0],
+         [1, 2],
+         [2, 3]]))
+        >>> output2 = ops.topk(x, 2, dim=1, largest=False)
+        >>> print(output2)
+        (Tensor(shape=[3, 2], dtype=Float32, value=
+        [[ 2.44700000e-01,  4.30200011e-01],
+         [ 1.86800003e-01,  4.38800007e-01],
+         [ 3.56299996e-01,  5.15200019e-01]]), Tensor(shape=[3, 2], dtype=Int32, value=
+        [[1, 2],
+         [3, 0],
+         [0, 1]]))
+    """
+    return _get_cache_prim(ops.auto_generate.TopkExt)()(input, k, dim, largest, sorted)
+
+
 def expand(input_x, size):
     r"""
     :func:`mindspore.ops.expand` will be deprecated in the future.
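
A NumPy cross-check for the first `topk_ext` example; a stable descending argsort reproduces the documented tie rule ("the one with the smaller index value is returned first"):

```python
import numpy as np

x = np.array([[0.5368, 0.2447, 0.4302, 0.9673],
              [0.4388, 0.6525, 0.4685, 0.1868],
              [0.3563, 0.5152, 0.9675, 0.8230]], dtype=np.float32)
# Negate for descending order; "stable" keeps earlier indices first on ties.
idx = np.argsort(-x, axis=1, kind="stable")[:, :2]
print(idx)                                 # [[3 0] [1 2] [2 3]]
print(np.take_along_axis(x, idx, axis=1))  # matches the values above
```
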
@@ -6318,6 +6291,53 @@ def repeat_interleave(input, repeats, axis=None):
     return output
 
 
+def repeat_interleave_ext(tensor, repeats, axis=None, output_size=None):
+    r"""
+    Repeat elements of a tensor.
+
+    Args:
+        tensor (Tensor): The input tensor.
+        repeats (Union[int, list, tuple, Tensor]): The number of repetitions for each element.
+        axis (int, optional): The axis along which to repeat; if ``None``, the input is flattened
+            and axis 0 is used. Default: ``None`` .
+        output_size (int, optional): Calculated output size along the specified axis.
+
+    Returns:
+        Tensor, the repeated tensor.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import mint
+        >>> from mindspore import Tensor
+        >>> tensor = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
+        >>> repeats = 2
+        >>> axis = 0
+        >>> output = mint.repeat_interleave(tensor, repeats, axis)
+        >>> print(output)
+        [[0 1 2]
+         [0 1 2]
+         [3 4 5]
+         [3 4 5]]
+    """
+    if axis is None:
+        tensor = tensor.ravel()
+        axis = 0
+
+    size = tensor.shape[axis]
+    if output_size is None:
+        if isinstance(repeats, int):
+            output_size = size * repeats
+        elif len(repeats) == 1:
+            output_size = size * repeats[0]
+        else:
+            output_size = sum(repeats)
+
+    return repeat_interleave_(tensor, repeats, axis, output_size)
+
+
 def repeat_elements(x, rep, axis=0):
     """
     Repeat elements of a tensor along an axis, like `numpy.repeat` .
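
`output_size` lets the kernel know the result length along the repeat axis up front; when the caller omits it, the wrapper derives it from `repeats`. That derivation in isolation:

```python
def infer_output_size(dim_size, repeats):
    # Mirrors the branch in repeat_interleave_ext above.
    if isinstance(repeats, int):
        return dim_size * repeats       # every element repeated equally
    if len(repeats) == 1:
        return dim_size * repeats[0]    # one shared repeat count
    return sum(repeats)                 # per-element repeat counts

print(infer_output_size(3, 2))          # 6
print(infer_output_size(3, [1, 2, 3]))  # 6
```
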
@@ -6462,8 +6482,10 @@ __all__ = [
     'ger',
     'ones',
     'ones_like',
+    'ones_like_ext',
     'zeros',
     'zeros_like',
+    'zeros_like_ext',
     'shape',
     'shape_',
     'reverse',
@@ -6471,6 +6493,7 @@ __all__ = [
     'hamming_window',
     'chunk',
     'full',
+    'full_ext',
     'full_like',
     'dyn_shape',
     'rank',
@@ -6529,6 +6552,7 @@ __all__ = [
     'narrow',
     'ravel',
     'scatter_add',
+    'scatter_add_ext',
     'scatter_mul',
     'scatter_max',
     'scatter_min',
@@ -6557,7 +6581,6 @@ __all__ = [
     'index_fill',
     'index_select',
     'max',
-    'argmax',
     'min',
     'unsorted_segment_sum',
     'population_count',
@@ -6584,6 +6607,7 @@ __all__ = [
     'aminmax',
     'sort',
     'top_k',
-    'deepcopy'
+    'deepcopy',
+    'flip'
 ]
 __all__.sort()