mindspore-2.3.0-cp310-cp310-win_amd64.whl → mindspore-2.4.1-cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (275)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +3 -1
  3. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  6. mindspore/_checkparam.py +50 -9
  7. mindspore/_extends/parse/compile_config.py +41 -0
  8. mindspore/_extends/parse/parser.py +9 -7
  9. mindspore/_extends/parse/standard_method.py +52 -14
  10. mindspore/_extends/pijit/pijit_func_white_list.py +350 -24
  11. mindspore/amp.py +24 -10
  12. mindspore/common/__init__.py +6 -4
  13. mindspore/common/_pijit_context.py +190 -0
  14. mindspore/common/_register_for_tensor.py +2 -1
  15. mindspore/common/_tensor_overload.py +139 -0
  16. mindspore/common/api.py +102 -87
  17. mindspore/common/dump.py +5 -6
  18. mindspore/common/generator.py +1 -7
  19. mindspore/common/hook_handle.py +14 -26
  20. mindspore/common/initializer.py +51 -15
  21. mindspore/common/mindir_util.py +2 -2
  22. mindspore/common/parameter.py +62 -15
  23. mindspore/common/recompute.py +39 -9
  24. mindspore/common/sparse_tensor.py +7 -3
  25. mindspore/common/tensor.py +183 -37
  26. mindspore/communication/__init__.py +1 -1
  27. mindspore/communication/_comm_helper.py +38 -3
  28. mindspore/communication/comm_func.py +315 -60
  29. mindspore/communication/management.py +14 -14
  30. mindspore/context.py +132 -22
  31. mindspore/dataset/__init__.py +1 -1
  32. mindspore/dataset/audio/__init__.py +1 -1
  33. mindspore/dataset/core/config.py +7 -0
  34. mindspore/dataset/core/validator_helpers.py +7 -0
  35. mindspore/dataset/engine/cache_client.py +1 -1
  36. mindspore/dataset/engine/datasets.py +72 -44
  37. mindspore/dataset/engine/datasets_audio.py +7 -7
  38. mindspore/dataset/engine/datasets_standard_format.py +53 -3
  39. mindspore/dataset/engine/datasets_text.py +20 -20
  40. mindspore/dataset/engine/datasets_user_defined.py +174 -104
  41. mindspore/dataset/engine/datasets_vision.py +33 -33
  42. mindspore/dataset/engine/iterators.py +29 -0
  43. mindspore/dataset/engine/obs/util.py +7 -0
  44. mindspore/dataset/engine/queue.py +114 -60
  45. mindspore/dataset/engine/serializer_deserializer.py +2 -2
  46. mindspore/dataset/engine/validators.py +34 -14
  47. mindspore/dataset/text/__init__.py +1 -4
  48. mindspore/dataset/transforms/__init__.py +0 -3
  49. mindspore/dataset/utils/line_reader.py +2 -0
  50. mindspore/dataset/vision/__init__.py +1 -4
  51. mindspore/dataset/vision/utils.py +1 -1
  52. mindspore/dataset/vision/validators.py +2 -1
  53. mindspore/{nn/extend → experimental/es}/__init__.py +4 -11
  54. mindspore/experimental/es/embedding_service.py +883 -0
  55. mindspore/{nn/layer → experimental/es}/embedding_service_layer.py +218 -30
  56. mindspore/experimental/llm_boost/__init__.py +21 -0
  57. mindspore/{nn/extend/layer → experimental/llm_boost/atb}/__init__.py +4 -8
  58. mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
  59. mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
  60. mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
  61. mindspore/experimental/llm_boost/register.py +129 -0
  62. mindspore/experimental/llm_boost/utils.py +31 -0
  63. mindspore/experimental/optim/adamw.py +85 -0
  64. mindspore/experimental/optim/optimizer.py +3 -0
  65. mindspore/hal/__init__.py +3 -3
  66. mindspore/hal/contiguous_tensors_handle.py +175 -0
  67. mindspore/hal/stream.py +18 -0
  68. mindspore/include/api/model_group.h +13 -1
  69. mindspore/include/api/types.h +10 -10
  70. mindspore/include/dataset/config.h +2 -2
  71. mindspore/include/dataset/constants.h +2 -2
  72. mindspore/include/dataset/execute.h +2 -2
  73. mindspore/include/dataset/vision.h +4 -0
  74. mindspore/log.py +1 -1
  75. mindspore/mindrecord/filewriter.py +68 -51
  76. mindspore/mindspore_backend.dll +0 -0
  77. mindspore/mindspore_common.dll +0 -0
  78. mindspore/mindspore_core.dll +0 -0
  79. mindspore/mindspore_np_dtype.dll +0 -0
  80. mindspore/mindspore_ops.dll +0 -0
  81. mindspore/mint/__init__.py +983 -46
  82. mindspore/mint/distributed/__init__.py +31 -0
  83. mindspore/mint/distributed/distributed.py +254 -0
  84. mindspore/mint/nn/__init__.py +268 -23
  85. mindspore/mint/nn/functional.py +125 -19
  86. mindspore/mint/nn/layer/__init__.py +39 -0
  87. mindspore/mint/nn/layer/activation.py +133 -0
  88. mindspore/mint/nn/layer/normalization.py +477 -0
  89. mindspore/mint/nn/layer/pooling.py +110 -0
  90. mindspore/mint/optim/adamw.py +26 -13
  91. mindspore/mint/special/__init__.py +63 -0
  92. mindspore/multiprocessing/__init__.py +2 -1
  93. mindspore/nn/__init__.py +0 -1
  94. mindspore/nn/cell.py +276 -96
  95. mindspore/nn/layer/activation.py +211 -44
  96. mindspore/nn/layer/basic.py +137 -10
  97. mindspore/nn/layer/embedding.py +137 -2
  98. mindspore/nn/layer/normalization.py +101 -5
  99. mindspore/nn/layer/padding.py +34 -48
  100. mindspore/nn/layer/pooling.py +161 -7
  101. mindspore/nn/layer/transformer.py +3 -3
  102. mindspore/nn/loss/__init__.py +2 -2
  103. mindspore/nn/loss/loss.py +84 -6
  104. mindspore/nn/optim/__init__.py +2 -1
  105. mindspore/nn/optim/adadelta.py +1 -1
  106. mindspore/nn/optim/adam.py +1 -1
  107. mindspore/nn/optim/lamb.py +1 -1
  108. mindspore/nn/optim/tft_wrapper.py +124 -0
  109. mindspore/nn/wrap/cell_wrapper.py +12 -23
  110. mindspore/nn/wrap/grad_reducer.py +5 -5
  111. mindspore/nn/wrap/loss_scale.py +17 -3
  112. mindspore/numpy/__init__.py +1 -1
  113. mindspore/numpy/array_creations.py +65 -68
  114. mindspore/numpy/array_ops.py +64 -60
  115. mindspore/numpy/fft.py +610 -75
  116. mindspore/numpy/logic_ops.py +11 -10
  117. mindspore/numpy/math_ops.py +85 -84
  118. mindspore/numpy/utils_const.py +4 -4
  119. mindspore/opencv_core452.dll +0 -0
  120. mindspore/opencv_imgcodecs452.dll +0 -0
  121. mindspore/opencv_imgproc452.dll +0 -0
  122. mindspore/ops/__init__.py +6 -4
  123. mindspore/ops/_grad_experimental/grad_array_ops.py +0 -11
  124. mindspore/ops/_grad_experimental/grad_comm_ops.py +67 -4
  125. mindspore/ops/_grad_experimental/grad_math_ops.py +0 -22
  126. mindspore/ops/_vmap/vmap_array_ops.py +2 -4
  127. mindspore/ops/_vmap/vmap_math_ops.py +17 -1
  128. mindspore/ops/_vmap/vmap_nn_ops.py +43 -2
  129. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +91 -7
  130. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +2 -0
  131. mindspore/ops/auto_generate/gen_extend_func.py +767 -13
  132. mindspore/ops/auto_generate/gen_ops_def.py +2452 -364
  133. mindspore/ops/auto_generate/gen_ops_prim.py +5442 -1756
  134. mindspore/ops/auto_generate/pyboost_inner_prim.py +176 -56
  135. mindspore/ops/composite/base.py +85 -48
  136. mindspore/ops/composite/multitype_ops/_compile_utils.py +1 -0
  137. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -2
  138. mindspore/ops/function/__init__.py +22 -0
  139. mindspore/ops/function/array_func.py +492 -153
  140. mindspore/ops/function/debug_func.py +113 -1
  141. mindspore/ops/function/fft_func.py +15 -2
  142. mindspore/ops/function/grad/grad_func.py +3 -2
  143. mindspore/ops/function/math_func.py +564 -207
  144. mindspore/ops/function/nn_func.py +817 -383
  145. mindspore/ops/function/other_func.py +3 -2
  146. mindspore/ops/function/random_func.py +402 -12
  147. mindspore/ops/function/reshard_func.py +13 -11
  148. mindspore/ops/function/sparse_unary_func.py +1 -1
  149. mindspore/ops/function/vmap_func.py +3 -2
  150. mindspore/ops/functional.py +24 -14
  151. mindspore/ops/op_info_register.py +3 -3
  152. mindspore/ops/operations/__init__.py +7 -2
  153. mindspore/ops/operations/_grad_ops.py +2 -76
  154. mindspore/ops/operations/_infer_ops.py +1 -1
  155. mindspore/ops/operations/_inner_ops.py +71 -94
  156. mindspore/ops/operations/array_ops.py +14 -146
  157. mindspore/ops/operations/comm_ops.py +63 -53
  158. mindspore/ops/operations/custom_ops.py +83 -19
  159. mindspore/ops/operations/debug_ops.py +42 -10
  160. mindspore/ops/operations/manually_defined/_inner.py +12 -0
  161. mindspore/ops/operations/manually_defined/ops_def.py +273 -20
  162. mindspore/ops/operations/math_ops.py +12 -223
  163. mindspore/ops/operations/nn_ops.py +20 -114
  164. mindspore/ops/operations/other_ops.py +7 -4
  165. mindspore/ops/operations/random_ops.py +46 -1
  166. mindspore/ops/primitive.py +18 -6
  167. mindspore/ops_generate/arg_dtype_cast.py +2 -0
  168. mindspore/ops_generate/gen_aclnn_implement.py +11 -11
  169. mindspore/ops_generate/gen_constants.py +36 -0
  170. mindspore/ops_generate/gen_ops.py +67 -52
  171. mindspore/ops_generate/gen_ops_inner_prim.py +1 -1
  172. mindspore/ops_generate/gen_pyboost_func.py +131 -47
  173. mindspore/ops_generate/op_proto.py +10 -3
  174. mindspore/ops_generate/pyboost_utils.py +14 -1
  175. mindspore/ops_generate/template.py +43 -21
  176. mindspore/parallel/__init__.py +3 -1
  177. mindspore/parallel/_auto_parallel_context.py +31 -9
  178. mindspore/parallel/_cell_wrapper.py +85 -0
  179. mindspore/parallel/_parallel_serialization.py +47 -19
  180. mindspore/parallel/_tensor.py +127 -13
  181. mindspore/parallel/_utils.py +53 -22
  182. mindspore/parallel/algo_parameter_config.py +5 -5
  183. mindspore/parallel/checkpoint_transform.py +46 -39
  184. mindspore/parallel/cluster/process_entity/__init__.py +1 -1
  185. mindspore/parallel/cluster/process_entity/_api.py +31 -23
  186. mindspore/parallel/cluster/process_entity/_utils.py +2 -27
  187. mindspore/parallel/parameter_broadcast.py +3 -4
  188. mindspore/parallel/shard.py +162 -31
  189. mindspore/parallel/transform_safetensors.py +1146 -0
  190. mindspore/profiler/__init__.py +2 -1
  191. mindspore/profiler/common/constant.py +29 -0
  192. mindspore/profiler/common/registry.py +47 -0
  193. mindspore/profiler/common/util.py +28 -0
  194. mindspore/profiler/dynamic_profiler.py +694 -0
  195. mindspore/profiler/envprofiling.py +17 -19
  196. mindspore/profiler/parser/ascend_analysis/constant.py +18 -0
  197. mindspore/profiler/parser/ascend_analysis/file_manager.py +25 -4
  198. mindspore/profiler/parser/ascend_analysis/function_event.py +43 -19
  199. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +31 -26
  200. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +56 -10
  201. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +55 -8
  202. mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
  203. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +27 -20
  204. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +9 -2
  205. mindspore/profiler/parser/ascend_msprof_exporter.py +5 -4
  206. mindspore/profiler/parser/ascend_timeline_generator.py +27 -25
  207. mindspore/profiler/parser/base_timeline_generator.py +19 -25
  208. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +25 -12
  209. mindspore/profiler/parser/framework_parser.py +1 -391
  210. mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
  211. mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
  212. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
  213. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
  214. mindspore/profiler/parser/memory_usage_parser.py +0 -154
  215. mindspore/profiler/parser/profiler_info.py +78 -6
  216. mindspore/profiler/profiler.py +153 -0
  217. mindspore/profiler/profiling.py +285 -413
  218. mindspore/rewrite/__init__.py +1 -2
  219. mindspore/rewrite/common/namespace.py +4 -4
  220. mindspore/rewrite/symbol_tree/symbol_tree.py +3 -3
  221. mindspore/run_check/_check_version.py +39 -104
  222. mindspore/safeguard/rewrite_obfuscation.py +591 -247
  223. mindspore/train/__init__.py +4 -3
  224. mindspore/train/_utils.py +105 -19
  225. mindspore/train/amp.py +171 -53
  226. mindspore/train/callback/__init__.py +2 -2
  227. mindspore/train/callback/_callback.py +4 -4
  228. mindspore/train/callback/_checkpoint.py +97 -31
  229. mindspore/train/callback/_cluster_monitor.py +1 -1
  230. mindspore/train/callback/_flops_collector.py +1 -0
  231. mindspore/train/callback/_loss_monitor.py +3 -3
  232. mindspore/train/callback/_on_request_exit.py +145 -31
  233. mindspore/train/callback/_summary_collector.py +5 -5
  234. mindspore/train/callback/_tft_register.py +375 -0
  235. mindspore/train/dataset_helper.py +15 -3
  236. mindspore/train/metrics/metric.py +3 -3
  237. mindspore/train/metrics/roc.py +4 -4
  238. mindspore/train/mind_ir_pb2.py +44 -39
  239. mindspore/train/model.py +154 -58
  240. mindspore/train/serialization.py +342 -128
  241. mindspore/utils/__init__.py +21 -0
  242. mindspore/utils/utils.py +60 -0
  243. mindspore/version.py +1 -1
  244. {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/METADATA +13 -7
  245. {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/RECORD +248 -242
  246. mindspore/include/c_api/ms/abstract.h +0 -67
  247. mindspore/include/c_api/ms/attribute.h +0 -197
  248. mindspore/include/c_api/ms/base/handle_types.h +0 -43
  249. mindspore/include/c_api/ms/base/macros.h +0 -32
  250. mindspore/include/c_api/ms/base/status.h +0 -33
  251. mindspore/include/c_api/ms/base/types.h +0 -283
  252. mindspore/include/c_api/ms/context.h +0 -102
  253. mindspore/include/c_api/ms/graph.h +0 -160
  254. mindspore/include/c_api/ms/node.h +0 -606
  255. mindspore/include/c_api/ms/tensor.h +0 -161
  256. mindspore/include/c_api/ms/value.h +0 -84
  257. mindspore/mindspore_shared_lib.dll +0 -0
  258. mindspore/nn/extend/basic.py +0 -140
  259. mindspore/nn/extend/embedding.py +0 -143
  260. mindspore/nn/extend/layer/normalization.py +0 -109
  261. mindspore/nn/extend/pooling.py +0 -117
  262. mindspore/nn/layer/embedding_service.py +0 -531
  263. mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +0 -93
  264. mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +0 -66
  265. mindspore/ops/extend/__init__.py +0 -53
  266. mindspore/ops/extend/array_func.py +0 -218
  267. mindspore/ops/extend/math_func.py +0 -76
  268. mindspore/ops/extend/nn_func.py +0 -308
  269. mindspore/ops/silent_check.py +0 -162
  270. mindspore/profiler/parser/msadvisor_analyzer.py +0 -82
  271. mindspore/profiler/parser/msadvisor_parser.py +0 -240
  272. mindspore/train/callback/_mindio_ttp.py +0 -443
  273. {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/WHEEL +0 -0
  274. {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/entry_points.txt +0 -0
  275. {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/top_level.txt +0 -0
@@ -26,25 +26,35 @@ import mindspore as ms
 from mindspore import log as logger
 import mindspore.ops as ops
 from mindspore.common import dtype as mstype
+from mindspore.common.generator import default_generator
 from mindspore.ops import operations as P
 from mindspore.ops import composite as C
 from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
 from mindspore.ops.primitive import constexpr, _primexpr
 from mindspore.ops.operations._inner_ops import TileSize
-from mindspore.ops.auto_generate import Cummin, BatchMatMul, LinSpaceExt, Norm
+from mindspore.ops.auto_generate import Cummin, BatchMatMul, BernoulliExt, lin_space_ext_op, BitwiseAndScalar,\
+    BitwiseAndTensor, BitwiseOrScalar, BitwiseOrTensor, BitwiseXorScalar, BitwiseXorTensor, RemainderTensorTensor,\
+    RemainderTensorScalar, RemainderScalarTensor
 from mindspore.ops import auto_generate
 from mindspore.ops.operations.math_ops import STFT
 from mindspore.ops.operations.math_ops import LuUnpack
-from mindspore.ops.operations.math_ops import Roll
+from mindspore.ops.auto_generate.pyboost_inner_prim import roll_impl, cross_impl
 from mindspore.ops.operations.math_ops import Ormqr
 from mindspore.ops.operations.math_ops import DivMod
 from mindspore.ops.operations.array_ops import MatrixSetDiagV3, Transpose
-from mindspore.ops.auto_generate import (minimum, maximum, mul, sin, sinc, sinh, cummax, real, conj, add, sub, cos, cosh,
+from mindspore.ops.auto_generate import (minimum, maximum, mul, sin, sinc, sinh, cummax, real, conj, add, sub, cos,
+                                         cosh, nan_to_num, norm_op, lp_norm_v2_op,
                                          matrix_exp, sqrt, rsqrt, square, trace, nextafter, abs, acos, acosh, angle,
                                          asin, asinh, atan, atan2, atanh, ceil, equal, erf, erfc, erfinv, exp, expm1,
                                          floor, floor_divide, floor_mod, gcd, greater, greater_equal, less, less_equal,
-                                         log, log1p, neg, not_equal, pow, round, isfinite, argmax_ext, mean_ext_op,
-                                         sum_ext_op, prod_ext_op, all, matrix_inverse_ext, atan2_ext, sign)
+                                         log, log1p, neg, not_equal, pow, round_op, isfinite, argmax_ext, mean_ext_op,
+                                         sum_ext_op, prod_ext_op, all, matrix_inverse_ext, atan2_ext, sign, acos_ext,
+                                         acosh_ext, asin_ext, asinh_ext, atan_ext, tan, median_ext_op, median_dim_op,
+                                         xlogy_op, xlogy_scalar_other_op, xlogy_scalar_self_op, trunc, histc_ext)
+
+
+
+from mindspore.ops.auto_generate.gen_ops_def import add_ext, sub_ext, bmm_ext
 from mindspore.ops.auto_generate import tanh
 from mindspore.nn import layer
 from mindspore._checkparam import check_is_number
@@ -133,6 +143,7 @@ transpose_ = P.Transpose()
 xdivy_ = P.Xdivy()
 tensor_div_ = P.Div()
 tensor_divmod_ = DivMod()
+generator_step_ = Tensor(10, mstype.int64)
 
 #####################################
 # Private Operation Functions.
@@ -150,6 +161,7 @@ atan2_ = P.Atan2()
 atan_ = P.Atan()
 atanh_ = P.Atanh()
 batch_matmul_ = BatchMatMul()
+bernoulli_ext_ = BernoulliExt()
 bessel_i0_ = BesselI0()
 bessel_i0e_ = P.BesselI0e()
 bessel_i1_ = BesselI1()
@@ -217,9 +229,7 @@ slice_ = P.Slice()
 size_ = P.Size()
 scalar_to_tensor_ = P.ScalarToTensor()
 shape_ = P.Shape()
-sign_ = P.Sign()
 sparse_segment_mean_ = SparseSegmentMean()
-tan_ = P.Tan()
 tanh_ = P.Tanh()
 tensor_round_ = P.Round()
 tile_ = P.Tile()
@@ -230,12 +240,20 @@ truncate_mod_ = P.TruncateMod()
 xlogy_ = P.Xlogy()
 zeros_ = P.Zeros()
 zeta_ = P.Zeta()
+bitwise_and_scalar_ = BitwiseAndScalar()
+bitwise_and_tensor_ = BitwiseAndTensor()
+bitwise_or_scalar_ = BitwiseOrScalar()
+bitwise_or_tensor_ = BitwiseOrTensor()
+bitwise_xor_scalar_ = BitwiseXorScalar()
+bitwise_xor_tensor_ = BitwiseXorTensor()
 
 
 #####################################
 # Element-wise Operation Functions.
 #####################################
-
+remainder_tensor_tensor_ = RemainderTensorTensor()
+remainder_tensor_scalar_ = RemainderTensorScalar()
+remainder_scalar_tensor_ = RemainderScalarTensor()
 
 def addn(x):
     """
@@ -1558,22 +1576,41 @@ def t(input):
     return input
 
 
-def tan(input):
+def xlogy(input, other):
     r"""
-    Computes tangent of `input` element-wise.
+    Computes the first input tensor multiplied by the logarithm of second input tensor element-wise.
+    Returns zero when `input` is zero.
 
     .. math::
 
-        out_i = \tan(input_i)
+        out_i = input_{i}\ln{other_{i}}
+
+    Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
+    The inputs must be two tensors or one tensor and one scalar.
+    When the inputs are two tensors, the shapes of them could be broadcast.
+    When the inputs are one tensor and one scalar,
+    the scalar could only be a constant.
+
+    .. warning::
+        - On Ascend, the data type of `input` and `other` must be float16 or float32.
 
     Args:
-        input (Tensor): The input Tensor, valid for any dimensions.
+        input (Union[Tensor, numbers.Number, bool]): The first input is a numbers.Number or
+            a bool or a tensor whose data type is
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+        other (Union[Tensor, numbers.Number, bool]): The second input is a numbers.Number or
+            a bool when the first input is a tensor or a tensor whose data type is number or bool\_.
+            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
 
     Returns:
-        Tensor, has the same shape as `input`.
+        Tensor, the shape is the same as the one after broadcasting,
+        and the data type is the one with higher precision or higher digits among the two inputs.
 
     Raises:
-        TypeError: If `input` is not a Tensor.
+        TypeError: If `input` and `other` is not a numbers.Number or a bool or a Tensor.
+        TypeError: If dtype of `input` and `other` is not in [float16, float32, float64, complex64, complex128].
+        ValueError: If `input` could not be broadcast to a tensor with shape of `other`.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -1582,52 +1619,51 @@ def tan(input):
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
-        >>> output = ops.tan(input)
+        >>> input = Tensor(np.array([-5, 0, 4]), mindspore.float32)
+        >>> other = Tensor(np.array([2, 2, 2]), mindspore.float32)
+        >>> output = ops.xlogy(input, other)
         >>> print(output)
-        [-1.5574081 0. 1.5574081]
+        [-3.465736 0. 2.7725887]
     """
-    return tan_(input)
+    if isinstance(input, Tensor) and isinstance(other, Tensor) and input.dtype == mstype.bool_ \
+            and other.dtype == mstype.bool_:
+        input = input.astype(mstype.float32)
+        other = other.astype(mstype.float32)
+    return xlogy_(input, other)
 
 
-def xlogy(input, other):
+def xlogy_ext(input, other):
     r"""
-    Computes the first input tensor multiplied by the logarithm of second input tensor element-wise.
+    Computes the first input multiplied by the logarithm of second input element-wise.
     Returns zero when `input` is zero.
 
     .. math::
 
-        out_i = input_{i}\ln{other_{i}}
+        out_i = input_{i}\log{other_{i}}
 
     Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
     The inputs must be two tensors or one tensor and one scalar.
     When the inputs are two tensors, the shapes of them could be broadcast.
-    When the inputs are one tensor and one scalar,
-    the scalar could only be a constant.
-
-    .. warning::
-        - On Ascend, the data type of `input` and `other` must be float16 or float32.
 
     Args:
-        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
+        input (Union[Tensor, numbers.Number, bool]): The first input is a numbers.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
-        other (Union[Tensor, number.Number, bool]): The second input is a number.Number or
-            a bool when the first input is a tensor or a tensor whose data type is number or bool\_.
-            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+        other (Union[Tensor, numbers.Number, bool]): The second input is a numbers.Number or
+            a bool or a tensor whose data type is number or bool when the first input is a tensor.
+            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
 
     Returns:
         Tensor, the shape is the same as the one after broadcasting,
         and the data type is the one with higher precision or higher digits among the two inputs.
 
     Raises:
-        TypeError: If `input` and `other` is not a number.Number or a bool or a Tensor.
-        TypeError: If dtype of `input` and `other` is not in [float16, float32, float64, complex64, complex128].
+        TypeError: If `input` and `other` is not a numbers.Number or a bool or a Tensor.
         ValueError: If `input` could not be broadcast to a tensor with shape of `other`.
 
     Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
+        ``Ascend``
 
     Examples:
         >>> import mindspore
@@ -1635,16 +1671,17 @@ def xlogy(input, other):
         >>> from mindspore import Tensor, ops
         >>> input = Tensor(np.array([-5, 0, 4]), mindspore.float32)
         >>> other = Tensor(np.array([2, 2, 2]), mindspore.float32)
-        >>> output = ops.xlogy(input, other)
+        >>> output = ops.xlogy_ext(input, other)
         >>> print(output)
         [-3.465736 0. 2.7725887]
     """
-    if isinstance(input, Tensor) and isinstance(other, Tensor) and input.dtype == mstype.bool_ \
-            and other.dtype == mstype.bool_:
-        input = input.astype(mstype.float32)
-        other = other.astype(mstype.float32)
-    return xlogy_(input, other)
-
+    if isinstance(input, Tensor) and isinstance(other, Tensor):
+        return xlogy_op(input, other)
+    if isinstance(input, Tensor) and isinstance(other, (float, int, bool)):
+        return xlogy_scalar_other_op(input, other)
+    if isinstance(input, (float, int, bool)) and isinstance(other, Tensor):
+        return xlogy_scalar_self_op(input, other)
+    raise TypeError(f"For 'xlogy', at least one of input and other should be Tensor.")
 
 
 def arccosh(input):
     r"""
@@ -1665,6 +1702,16 @@ def arccosh(input):
     return acosh_(input)
 
 
+def arccosh_ext(input):
+    r"""
+    Alias for :func:`mindspore.ops.acosh_ext`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+    """
+    return acosh_ext(input)
+
+
 def arcsin(x):
     r"""
     Alias for :func:`mindspore.ops.asin`.
@@ -1675,6 +1722,16 @@ def arcsin(x):
     return asin_(x)
 
 
+def arcsin_ext(x):
+    r"""
+    Alias for :func:`mindspore.ops.asin_ext`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+    """
+    return asin_ext(x)
+
+
 def arctan(input):
     r"""
     Alias for :func:`mindspore.ops.atan`.
@@ -1694,6 +1751,16 @@ def arctan(input):
     return atan_(input)
 
 
+def arctan_ext(input):
+    r"""
+    Alias for :func:`mindspore.ops.atan_ext`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+    """
+    return atan_ext(input)
+
+
 def arctan2(input, other):
     r"""
     Alias for :func:`mindspore.ops.atan2`.
@@ -1790,6 +1857,16 @@ def arccos(input):
     return acos(input)
 
 
+def arccos_ext(input):
+    """
+    Alias for :func:`mindspore.ops.acos_ext` .
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+    """
+    return acos_ext(input)
+
+
 def arcsinh(input):
     r"""
     Alias for :func:`mindspore.ops.asinh`.
@@ -1800,6 +1877,16 @@ def arcsinh(input):
     return asinh(input)
 
 
+def arcsinh_ext(input):
+    r"""
+    Alias for :func:`mindspore.ops.asinh_ext`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+    """
+    return asinh_ext(input)
+
+
 def arctanh(input):
     r"""
     Alias for :func:`mindspore.ops.atanh`.
@@ -1850,6 +1937,46 @@ def bitwise_and(input, other):
     return bitwise_and_(input, other)
 
 
+def bitwise_and_ext(input, other):
+    r"""
+    Returns bitwise `and` of two tensors element-wise.
+
+    .. math::
+
+        out_i = input_{i} \wedge other_{i}
+
+    Note:
+        Args of `input` and `other` comply with the implicit type conversion rules to
+        make the data types consistent.
+        If they have different data types, the lower precision data type will be converted to
+        the relatively highest precision data type.
+
+    Args:
+        input (Tensor): The input tensor.
+        other (Tensor, Number.number): The input tensor or scalar. It has the same shape
+            with `input` or its shape is able to broadcast with `input`.
+
+    Returns:
+        Tensor, the shape is the same as the one after broadcasting, and the data type is same as `input`.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
+        >>> other = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
+        >>> output = ops.bitwise_and_ext(input, other)
+        >>> print(output)
+        [ 0 0 1 -1 1 0 1]
+    """
+    if not isinstance(other, Tensor):
+        return bitwise_and_scalar_(input, other)
+    return bitwise_and_tensor_(input, other)
+
+
 def bitwise_or(input, other):
     r"""
     Returns bitwise `or` of two tensors element-wise.
@@ -1890,6 +2017,46 @@ def bitwise_or(input, other):
     return bitwise_or_(input, other)
 
 
+def bitwise_or_ext(input, other):
+    r"""
+    Returns bitwise `or` of two tensors element-wise.
+
+    .. math::
+
+        out_i = input_{i} \mid other_{i}
+
+    Note:
+        Args of `input` and `other` comply with the implicit type conversion rules to
+        make the data types consistent.
+        If they have different data types, the lower precision data type will be converted to
+        the relatively highest precision data type.
+
+    Args:
+        input (Tensor): The input tensor.
+        other (Tensor, Number.number): The input tensor or scalar. It has the same shape
+            with `input` or its shape is able to broadcast with `input`.
+
+    Returns:
+        Tensor, the shape is the same as the one after broadcasting, and the data type is same as `input`.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
+        >>> other = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
+        >>> output = ops.bitwise_or_ext(input, other)
+        >>> print(output)
+        [ 0 1 1 -1 -1 3 3]
+    """
+    if not isinstance(other, Tensor):
+        return bitwise_or_scalar_(input, other)
+    return bitwise_or_tensor_(input, other)
+
+
 def bitwise_xor(input, other):
     r"""
     Returns bitwise `xor` of two tensors element-wise.
@@ -1930,6 +2097,46 @@ def bitwise_xor(input, other):
     return bitwise_xor_(input, other)
 
 
+def bitwise_xor_ext(input, other):
+    r"""
+    Returns bitwise `xor` of two tensors element-wise.
+
+    .. math::
+
+        out_i = input_{i} \oplus other_{i}
+
+    Note:
+        Args of `input` and `other` comply with the implicit type conversion rules to
+        make the data types consistent.
+        If they have different data types, the lower precision data type will be converted to
+        the relatively highest precision data type.
+
+    Args:
+        input (Tensor): The input tensor.
+        other (Tensor, Number.number): The input tensor or scalar. It has the same shape
+            with `input` or its shape is able to broadcast with `input`.
+
+    Returns:
+        Tensor, the shape is the same as the one after broadcasting, and the data type is same as `input`.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
+        >>> other = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
+        >>> output = ops.bitwise_xor_ext(input, other)
+        >>> print(output)
+        [ 0 1 0 0 -2 3 2]
+    """
+    if not isinstance(other, Tensor):
+        return bitwise_xor_scalar_(input, other)
+    return bitwise_xor_tensor_(input, other)
+
+
 def bitwise_left_shift(input, other):
     r"""
     Perform a left bitwise shift operation on the `input` element-wise, where the number of bits to shift is
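Note: all three new `bitwise_*_ext` wrappers share the same dispatch shape — a scalar `other` routes to the `*_scalar_` primitive, a Tensor `other` to the `*_tensor_` primitive. The doctest outputs above are ordinary two's-complement bitwise results and can be cross-checked with NumPy:

    import numpy as np

    a = np.array([0, 0, 1, -1, 1, 1, 1], dtype=np.int16)
    b = np.array([0, 1, 1, -1, -1, 2, 3], dtype=np.int16)
    print(a & b)  # [ 0  0  1 -1  1  0  1]
    print(a | b)  # [ 0  1  1 -1 -1  3  3]
    print(a ^ b)  # [ 0  1  0  0 -2  3  2]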
@@ -2554,39 +2761,42 @@ def linspace_ext(start, end, steps, *, dtype=None):
         &output = [start, start+step, start+2*step, ... , end]
         \end{aligned}
 
+    .. warning::
+        Atlas training series does not support int16 dtype currently.
+
     Args:
-        start (Union[Tensor, Number]): Start value of interval.
-            If `start` is Tensor, data type must be float32 or float64 and with shape of 0-D.
-        end (Union[Tensor, Number]): Last value of interval.
-            If `end` is Tensor, data type must be float32 or float64 and with shape of 0-D.
-        steps (Union[Tensor, int]): Number of ticks in the interval, inclusive of start and end.
-            Must be positive int number or 0D int32/int64 Tensor.
+        start (Union[float, int]): Start value of interval.
+            It can be a float or integer.
+        end (Union[float, int]): Last value of interval.
+            It can be a float or integer.
+        steps (int): Number of ticks in the interval, inclusive of start and end.
+            Must be positive integer.
 
     Keyword Args:
-        dtype (mindspore.dtype, optional): The output Tensor data type. Default: ``None`` , the data type of output
-            Tensor is float32.
+        dtype (mindspore.dtype, optional): The output Tensor data type. Default: ``None`` ,
+            in which case the data type of output Tensor is float32.
 
     Returns:
-        Tensor, has the shape of :math:`(steps,)`.
+        Tensor, has the shape of :math:`(steps,)`, with dtype specified by `dtype`.
 
     Raises:
-        TypeError: If dtype of `start` or dtype of `end` is not supported.
-        ValueError: If shape of `start` or shape of `end` is not 0-D.
-        TypeError: If `steps` is not int or 0D int32/int64 Tensor.
-        ValueError: If `steps` is not positive int number.
+        TypeError: If type of `start` or dtype of `end` is not supported.
+        ValueError: If `steps` is not positive integer.
 
     Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
+        ``Ascend``
 
     Examples:
-        >>> start = Tensor(1, mindspore.float32)
-        >>> end = Tensor(10, mindspore.float32)
+        >>> import mindspore as ms
+        >>> from mindspore import ops
+        >>> start = 1
+        >>> end = 10
         >>> steps = 5
-        >>> output = ops.function.math_func.linspace_ext(start, end, steps, dtype=mindspore.float32)
+        >>> output = ops.function.math_func.linspace_ext(start, end, steps, dtype=ms.float32)
         >>> print(output)
         [ 1. 3.25 5.5 7.75 10. ]
     """
-    return _get_cache_prim(LinSpaceExt)()(start, end, steps, dtype)
+    return lin_space_ext_op(start, end, steps, dtype)
 
 
 def det(input):
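Note: `linspace_ext` now takes plain Python scalars and routes through the generated `lin_space_ext_op` instead of a cached `LinSpaceExt` primitive. The doctest output follows from the usual linspace arithmetic, sketched here in plain Python for reference:

    # step = (end - start) / (steps - 1) = (10 - 1) / 4 = 2.25
    start, end, steps = 1, 10, 5
    step = (end - start) / (steps - 1)
    print([start + i * step for i in range(steps)])  # [1.0, 3.25, 5.5, 7.75, 10.0]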
@@ -2872,34 +3082,6 @@ def truncate_mod(x, y):
     return truncate_mod_(x, y)
 
 
-def trunc(input):
-    r"""
-    Returns a new tensor with the truncated integer values of the elements of the input tensor.
-
-    Args:
-        input (Tensor): The input tensor.
-
-    Returns:
-        Tensor, the same shape and data type as the input.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([3.4742, 0.5466, -0.8008, -3.9079]), mindspore.float32)
-        >>> output = ops.trunc(x)
-        >>> print(output)
-        [3. 0. 0. -3.]
-    """
-    return trunc_(input)
-
-
 def ldexp(x, other):
     """
     Multiplies input Tensor by :math:`2^{other}` element-wise.
@@ -2996,6 +3178,7 @@ def logit(input, eps=None):
     logit_ = _get_cache_prim(P.Logit)(eps)
     return logit_(input)
 
+
 #####################################
 # Comparison Operation Functions.
 #####################################
@@ -3031,8 +3214,8 @@ def le(input, other):
     Args:
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
             the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
             When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -3081,8 +3264,8 @@ def gt(input, other):
     Args:
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ .
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ .
         other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
             the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
             When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -3460,66 +3643,6 @@ def is_complex(input):
     return input.dtype in mstype.complex_type
 
 
-def nan_to_num(input, nan=0.0, posinf=None, neginf=None):
-    """
-    Replace the `NaN`, positive infinity and negative infinity values in 'input' with the
-    specified values in `nan`, `posinf` and `neginf` respectively.
-
-    Args:
-        input (Tensor): The shape of tensor is :math:`(input_1, input_2, ..., input_R)`.
-            With float32 or float16 data type.
-        nan (float): The replace value of 'NaN'. Default value is 0.0.
-        posinf (float): the value to replace positive infinity values with. Default: ``None``,
-            replacing positive infinity with the maximum value supported by the data type of `input`.
-        neginf (float): the value to replace negative infinity values with. Default: ``None``,
-            replacing negative infinity with the minimum value supported by the data type of `input`.
-
-    Returns:
-        Tensor, has the same shape and dtype as the `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If dtype of `input` is not float16 or float32.
-
-    Supported Platforms:
-        ``Ascend`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([float('nan'), float('inf'), -float('inf'), 5.0]), mindspore.float32)
-        >>> output = ops.nan_to_num(input, 1.0, 2.0, 3.0)
-        >>> print(output)
-        [1. 2. 3. 5.0]
-    """
-    if not isinstance(input, (Tensor, Tensor_)):
-        raise TypeError("the input x must be Tensor!")
-    if nan is not None:
-        if not isinstance(nan, float):
-            raise TypeError("the parameter nan's dtype must be float.")
-    else:
-        nan = 0.0
-    if posinf is not None:
-        if not isinstance(posinf, float):
-            raise TypeError("the parameter posinf's dtype must be float.")
-    else:
-        if input.dtype == mstype.float16:
-            posinf = (float)(np.finfo(np.float16).max)
-        elif input.dtype == mstype.float32:
-            posinf = (float)(np.finfo(np.float32).max)
-    if neginf is not None:
-        if not isinstance(neginf, float):
-            raise TypeError("the parameter neginf's dtype must be float.")
-    else:
-        if input.dtype == mstype.float16:
-            neginf = (float)(np.finfo(np.float16).min)
-        elif input.dtype == mstype.float32:
-            neginf = (float)(np.finfo(np.float32).min)
-    _nan_to_num = _get_cache_prim(NanToNum)(nan=nan, posinf=posinf, neginf=neginf)
-    return _nan_to_num(input)
-
-
 def fmax(input, other):
     r"""
     Computes the maximum of input tensors element-wise.
@@ -3606,6 +3729,51 @@ def fmin(input, other):
     return fmin_(input, other)
 
 
+def median_ext(input, dim=None, keepdim=False):
+    r"""
+    Output the median on the specified dimension ``dim`` and its corresponding index.
+    If ``dim`` is None, calculate the median of all elements in the Tensor.
+
+    Args:
+        input (Tensor): A Tensor of any dimension whose data type is uint8, int16, int32, int64, float16 or float32.
+        dim (int, optional): Specify the axis for calculation. Default: ``None`` .
+        keepdim (bool, optional): Whether the output tensor need to retain ``dim`` dimension or not.
+            Default: ``False`` .
+
+    Returns:
+        - y (Tensor) - Output median, with the same data type as ``input`` .
+
+          - If ``dim`` is ``None`` , ``y`` only has one element.
+          - If ``keepdim`` is ``True`` , the ``y`` has the same shape as the ``input`` except the shape
+            of ``y`` in dimension `dim` is size 1.
+          - Otherwise, the ``y`` lacks `dim` dimension than input.
+
+        - indices (Tensor) - The index of the median. Shape is consistent with ``y`` , with a data type of int64.
+
+    Raises:
+        TypeError: If dtype of ``input`` is not one of the following: uint8, int16, int32, int64, float16 or float32.
+        TypeError: If input ``input`` is not a Tensor.
+        TypeError: If ``dim`` is not a int.
+        TypeError: If ``keepdim`` is not a bool.
+        ValueError: If ``dim`` is not in range of [-x.dim, x.dim-1].
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor(np.array([[0.57, 0.11, 0.21],[0.38, 0.50, 0.57], [0.36, 0.16, 0.44]]).astype(np.float32))
+        >>> y = ops.function.math_func.median_ext(x, dim=0, keepdim=False)
+        >>> print(y)
+        (Tensor(shape=[3], dtype=Float32, value= [ 3.79999995e-01, 1.59999996e-01, 4.39999998e-01]),
+        Tensor(shape=[3], dtype=Int64, value= [1, 2, 2]))
+    """
+    if dim is None:
+        return median_ext_op(input)
+    return median_dim_op(input, dim, keepdim)
+
+
 def median(input, axis=-1, keepdims=False):
     r"""
     Computes the median and indices of input tensor.
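Note: `median_ext` dispatches to `median_ext_op` (global median, no index) when `dim` is None, and to `median_dim_op` otherwise. Since each column in the doctest above has an odd number of elements, the values and indices can be cross-checked with NumPy:

    import numpy as np

    x = np.array([[0.57, 0.11, 0.21],
                  [0.38, 0.50, 0.57],
                  [0.36, 0.16, 0.44]], dtype=np.float32)
    print(np.median(x, axis=0))      # [0.38 0.16 0.44]
    print(np.argsort(x, axis=0)[1])  # row index of each column's median: [1 2 2]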
@@ -3959,7 +4127,7 @@ def histc(input, bins=100, min=0., max=0.):
         max (int, float, optional): An optional float of the upper end of the range (inclusive). Default: ``0.0`` .
 
     Returns:
-        Tensor, 1-D Tensor with type int32.
+        1-D Tensor. If the input is int32, the output returns int32, otherwise it returns float32.
 
     Raises:
         TypeError: If `input` is not a Tensor.
@@ -3977,7 +4145,7 @@ def histc(input, bins=100, min=0., max=0.):
         >>> x = Tensor([1., 2, 1])
         >>> y = ops.histc(x, bins=4, min=0.0, max=3.0)
         >>> print(y)
-        [0 2 1 0]
+        [0. 2. 1. 0.]
     """
     if not isinstance(min, (int, float)):
         raise TypeError(f"For 'histc', parameter 'min' must be an int or float, but got {type(min)}.")
@@ -4049,7 +4217,7 @@ def logaddexp(input, other):
 
     Args:
         input (Tensor): Input Tensor. The dtype of `input` must be float.
-        other (Tensor): Input Tensor. The dtype of `input` must be float.
+        other (Tensor): Input Tensor. The dtype of `other` must be float.
             If the shape of `input` is not equal to the shape of `other`,
             they must be broadcastable to a common shape (which becomes the shape of the output).
 
@@ -4078,7 +4246,7 @@ def logaddexp(input, other):
     if not isinstance(other, (Tensor, Tensor_)):
         raise TypeError(f"For logaddexp, the other must be a Tensor, but got {type(other)}.")
     if not ops.is_floating_point(input) or not ops.is_floating_point(other):
-        raise TypeError(f"For logaddexp2, the dtype of 'input' and 'other' must be float,"
+        raise TypeError(f"For logaddexp, the dtype of 'input' and 'other' must be float,"
                         f"but got {input.dtype} and {other.dtype}.")
     m = maximum(input, other)
     abs_val = abs(input - other)
@@ -4991,7 +5159,7 @@ def bernoulli(input, p=0.5, seed=None):
             positive integer. Default: ``None`` , means using the current timestamp.
 
     Returns:
-        output (Tensor), with the same shape and type as `input` .
+        output (Tensor), with the same shape and type as `input`.
 
     Raises:
         TypeError: If dtype of `input` is not one of: int8, uint8, int16, int32, int64, bool, float32, float64.
@@ -5027,6 +5195,61 @@ def bernoulli(input, p=0.5, seed=None):
     return bernoulli_(input, p)
 
 
+def bernoulli_ext(input, *, generator=None):
+    r"""
+    Sample from the Bernoulli distribution and randomly set the i^{th} element of the `output` to (0 or 1) according to
+    the i^{th} probability value given in the `input`.
+
+    .. math::
+        output_{i} \sim Bernoulli(p=input_{i})
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The input tensor of Bernoulli distribution, where the i^{th} element 'input_{i}' represents the
+            probability that the corresponding output element 'output_{i}' is set to '1', therefore each element in
+            'input' have to be in the range '[0,1]'. Supported dtype: float16, float32, float64, bfloat16
+            (only supported by Atlas A2 training series products).
+
+    Keyword Args:
+        generator (:class:`mindspore.Generator`, optional): a pseudorandom number generator.
+            Default: ``None``, uses the default pseudorandom number generator.
+
+    Returns:
+        output (Tensor): The output tensor, with the same shape and dtype as `input`.
+
+    Raises:
+        TypeError: If dtype of `input` is not one of: float16, float32, float64, bfloat16.
+        ValueError: If any element of the `input` is not in the range [0, 1].
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor
+        >>> from mindspore import ops
+        >>> input_x = Tensor(np.ones((3, 3)), mindspore.float32)
+        >>> output = ops.bernoulli_ext(input_x)
+        >>> print(output)
+        [[ 1. 1. 1.]
+         [ 1. 1. 1.]
+         [ 1. 1. 1.]]
+        >>> input_x = Tensor(np.zeros((3, 3)), mindspore.float32)
+        >>> output = ops.bernoulli_ext(input_x)
+        >>> print(output)
+        [[ 0. 0. 0.]
+         [ 0. 0. 0.]
+         [ 0. 0. 0.]]
+    """
+    if generator is None:
+        generator = default_generator
+    seed, offset = generator._step(generator_step_)  # pylint: disable=protected-access
+    return bernoulli_ext_(input, seed, offset)
+
+
 def bessel_i1(x):
     r"""
     Computes modified Bessel function of the first kind, order 1 element-wise.
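Note: unlike the seed-based `bernoulli`, `bernoulli_ext` draws its randomness from a `mindspore.Generator`: `generator._step(generator_step_)` advances the generator state by 10 (the value of the new module-level `generator_step_` constant) and hands a (seed, offset) pair to the `BernoulliExt` primitive. A hedged usage sketch with an explicit generator, assuming an Ascend build since the op is listed as Ascend-only:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    gen = ms.Generator()
    gen.manual_seed(42)  # reproducible stream, independent of the default generator
    probs = Tensor(np.full((2, 3), 0.5), ms.float32)
    sample = ops.bernoulli_ext(probs, generator=gen)  # each entry is 0. or 1.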
@@ -7276,14 +7499,14 @@ def _compute_vector_norm_inf(x, dim, keepdims, norm_func):
     return ret_norm
 
 
-def norm_ext(A, ord=None, dim=None, keepdim=False, *, dtype=None):
+def norm_ext(input, p='fro', dim=None, keepdim=False, *, dtype=None):
     r"""
     Returns the matrix norm or vector norm of a given tensor.
 
-    `ord` is the calculation mode of norm. The following norm modes are supported.
+    `p` is the calculation mode of norm. The following norm modes are supported.
 
     ====================== ================================ ==========================================
-    `ord`                  norm for matrices                norm for vectors
+    `p`                    norm for matrices                norm for vectors
     ====================== ================================ ==========================================
     `None` (default)       Frobenius norm                   `2`-norm (see below)
     `'fro'`                Frobenius norm                   -- not supported --
@@ -7291,44 +7514,31 @@ def norm_ext(A, ord=None, dim=None, keepdim=False, *, dtype=None):
     `inf`                  :math:`max(sum(abs(x), dim=1))`  :math:`max(abs(x))`
     `-inf`                 :math:`min(sum(abs(x), dim=1))`  :math:`min(abs(x))`
     `0`                    -- not supported --              :math:`sum(x != 0)`
-    `1`                    :math:`max(sum(abs(x), dim=0))`  as below
-    `-1`                   :math:`min(sum(abs(x), dim=0))`  as below
-    `2`                    largest singular value           as below
-    `-2`                   smallest singular value          as below
     other `int` or `float` -- not supported --              :math:`sum(abs(x)^{ord})^{(1 / ord)}`
     ====================== ================================ ==========================================
 
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
     Args:
-        A (Tensor): Tensor of shape :math:`(*, n)` or :math:`(*, m, n)` where * is zero or more batch dimensions.
-        ord (Union[int, float, inf, -inf, 'fro', 'nuc'], optional): norm's mode. refer to the table above for
-            behavior. Default: ``None`` .
+        input (Tensor): The input of norm with data type of bfloat16, float16 or float32.
+            The shape is :math:`(*)` where :math:`*` means, any number of additional dimensions.
+        p (Union[int, float, inf, -inf, 'fro', 'nuc'], optional): norm's mode. refer to the table above for
+            behavior. Default: ``fro`` .
         dim (Union[int, Tuple(int)], optional): calculate the dimension of vector norm or matrix norm.
             Default: ``None`` .
-
-            - When `dim` is int, it will be calculated by vector norm.
-
-            - When `dim` is a 2-tuple, it will be calculated by matrix norm.
-
-            - If `dim` is None and `ord` is None, `A` will be flattened to 1D and the 2-norm
-              of the vector will be calculated.
-
-            - If `dim` is None and `ord` is not None, `A` must be 1D or 2D.
-
         keepdim (bool): whether the output Tensor retains the original dimension. Default: ``False`` .
 
     Keyword Args:
-        dtype (:class:`mindspore.dtype`, optional): When set, `A` will be converted to the specified type,
+        dtype (:class:`mindspore.dtype`, optional): When set, `input` will be converted to the specified type,
             `dtype`, before execution, and dtype of returned Tensor will also be `dtype`. Default: ``None`` .
 
     Returns:
-        Tensor, the result of norm calculation on the specified dimension, `dim`, has the same dtype as `A`.
+        Tensor, the result of norm calculation on the specified dimension, `dim`, has the same dtype as `input`.
 
     Raises:
         ValueError: If `dim` is out of range.
         TypeError: If `dim` is neither an int nor a tuple of int.
-        TypeError: If `A` is a vector and `ord` is a str.
-        ValueError: If `A` is a matrices and `ord` is not in valid mode.
-        ValueError: If `A` is a matrices and `ord` is an integer but not in [1, -1, 2, -2].
         ValueError: If two elements of `dim` is same after normalize.
         ValueError: If any elements of `dim` is out of range.
 
@@ -7336,23 +7546,38 @@ def norm_ext(A, ord=None, dim=None, keepdim=False, *, dtype=None):
         ``Ascend``
 
     Note:
-        Currently, it only support `ops.function.math_func.norm_ext(A)`.
+        Currently, it only support `ops.function.math_func.norm_ext(input, p=number)`.
 
     Examples:
         >>> import mindspore as ms
        >>> from mindspore import ops
         >>> data_range = ops.arange(-13, 13, dtype=ms.float32)
-        >>> # Exclude 0 from original data for 0 is invalid input when `ord` is negative.
         >>> x = data_range[data_range != 0]
         >>> y = x.reshape(5, 5)
-        >>> print(ops.function.math_func.norm_ext(x))
+        >>> print(ops.function.math_func.norm_ext(x, 2.0))
         38.327538
-        >>> print(ops.norm(x, 0))
-        25.0
     """
-    norm_ext_op = Norm()
-    return norm_ext_op(A, ord, dim, keepdim, dtype)
-
+    if not isinstance(input, (Tensor, Tensor_)):
+        raise TypeError(f"For `norm_ext`, the `input` must be Tensor!, but get {type(input)}.")
+
+    if (dim is not None) or keepdim or (dtype is not None):
+        raise ValueError(f"For `norm_ext`, the value of `dim`, `keepdim` and `dtype` must be default value currently.")
+
+    if isinstance(p, (int, float)):
+        if float(p) in [0.0, 1.0, 2.0, 3.0]:
+            return norm_op(input, p, dim, keepdim, dtype)
+        if input.dtype in [mstype.bfloat16, mstype.float16, mstype.float32]:
+            return lp_norm_v2_op(input, p, dim, keepdim, 0.0)
+        dtype = input.dtype
+        input = ops.cast(input, mstype.float32)
+        return ops.cast(lp_norm_v2_op(input, p, dim, keepdim, 0.0), dtype)
+
+    if p == 'fro':
+        if isinstance(dim, (list, tuple)) and len(dim) > 2:
+            raise ValueError(f"For `norm_ext`, the size of `dim` cannot be greater than 2 "
+                             f"when the mode of norm is `fro`.")
+        return norm_op(input, 2.0, dim, keepdim, dtype)
+    raise ValueError(f"For `norm_ext`, the value of `p` cannot be `{p}` currently.")
 
 def vector_norm(x, ord=2, axis=None, keepdims=False, *, dtype=None):
     r"""
@@ -7822,7 +8047,7 @@ def kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None):
         Tensor, a Kaiser window.
 
     Raises:
-        TypeError: If `window_length` or `beta` is not an integer.
+        TypeError: If `window_length` is not an integer.
         TypeError: If `periodic` is not a variable of Boolean type.
         ValueError: If `window_length` is negative.
 
@@ -8405,6 +8630,50 @@ def baddbmm(input, batch1, batch2, beta=1, alpha=1):
  return y


+ def baddbmm_ext(input, batch1, batch2, *, beta=1, alpha=1):
+ r"""
+ The result is the sum of the input and a batch matrix-matrix product of matrices in batch1 and batch2.
+ The formula is defined as follows:
+
+ .. math::
+     \text{out}_{i} = \beta \text{input}_{i} + \alpha (\text{batch1}_{i} \mathbin{@} \text{batch2}_{i})
+
+ Args:
+ input (Tensor): The input Tensor. When batch1 is a :math:`(C, W, T)` Tensor and batch2 is a
+ :math:`(C, T, H)` Tensor, input must be broadcastable with a :math:`(C, W, H)` Tensor.
+ batch1 (Tensor): :math:`batch1` in the above formula. Must be a 3-D Tensor with the same dtype as input.
+ batch2 (Tensor): :math:`batch2` in the above formula. Must be a 3-D Tensor with the same dtype as input.
+
+ Keyword Args:
+ beta (Union[float, int], optional): multiplier for input. Default: ``1`` .
+ alpha (Union[float, int], optional): multiplier for :math:`batch1 @ batch2`. Default: ``1`` .
+
+ Returns:
+ Tensor, has the same dtype as input; the shape will be :math:`(C, W, H)`.
+
+ Raises:
+ TypeError: If `input`, `batch1` or `batch2` is not a Tensor.
+ TypeError: If the dtypes of `input`, `batch1` and `batch2` are different.
+ ValueError: If `batch1` and `batch2` are not 3-D tensors.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor(np.ones([1, 3, 3]).astype(np.float32))
+ >>> batch1 = Tensor(np.ones([1, 3, 4]).astype(np.float32))
+ >>> batch2 = Tensor(np.ones([1, 4, 3]).astype(np.float32))
+ >>> output = ops.baddbmm_ext(input, batch1, batch2)
+ >>> print(output)
+ [[[5. 5. 5.]
+ [5. 5. 5.]
+ [5. 5. 5.]]]
+ """
+ return ops.auto_generate.baddbmm(input, batch1, batch2, beta, alpha)
+
+
  def log2(input):
  r"""
  Returns a new Tensor by taking the base 2 logarithm of the elements in the input Tensor.
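A NumPy cross-check of the baddbmm_ext formula, out = beta * input + alpha * (batch1 @ batch2), mirroring the doctest above rather than MindSpore internals:

    import numpy as np

    inp = np.ones((1, 3, 3), dtype=np.float32)
    b1 = np.ones((1, 3, 4), dtype=np.float32)  # (C, W, T)
    b2 = np.ones((1, 4, 3), dtype=np.float32)  # (C, T, H)
    out = 1 * inp + 1 * (b1 @ b2)              # beta = alpha = 1
    print(out[0, 0])  # [5. 5. 5.]: each entry is 1 plus the sum of 4 ones over T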
@@ -8563,16 +8832,12 @@ def roll(input, shifts, dims=None):
  >>> import mindspore as ms
  >>> from mindspore import ops
  >>> from mindspore import Tensor
- >>> input_x = Tensor(np.array([0, 1, 2, 3, 4]).astype(np.float32))
- >>> output = ops.roll(input_x, shifts=2, dims=0)
+ >>> input = Tensor(np.array([0, 1, 2, 3, 4]).astype(np.float32))
+ >>> output = ops.roll(input, shifts=2, dims=0)
  >>> print(output)
  [3. 4. 0. 1. 2.]
  """
- _shape = input.shape
- if dims is None:
-     flatten_x = input.reshape(-1)
-     return Roll(shifts, 0)(flatten_x).reshape(_shape)
- return Roll(shifts, dims)(input)
+ return roll_impl(input, shifts, dims)


  def xdivy(x, y):
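np.roll has the same semantics as the refactored ops.roll above, which makes it a convenient way to sanity-check the doctest:

    import numpy as np

    x = np.array([0., 1., 2., 3., 4.], dtype=np.float32)
    print(np.roll(x, shift=2, axis=0))  # [3. 4. 0. 1. 2.]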
@@ -8752,7 +9017,6 @@ def _check_is_tensor(param_name, input, cls_name):
  raise TypeError(f"For {cls_name}, {param_name} must be a Tensor, but got {type(input)}.")


-
  def any(input, axis=None, keep_dims=False):
  r"""
  Reduces a dimension of `input` by the "logical OR" of all elements in the dimension, by default. And also can
@@ -8763,7 +9027,7 @@ def any(input, axis=None, keep_dims=False):
  The `axis` with tensor type is only used for compatibility with older versions and is not recommended.

  Args:
- input (Tensor): Input Tensor, has the shape :math:`(N, *)` where :math:`*` means,
+ input (Tensor): Input Tensor (bool), has the shape :math:`(N, *)` where :math:`*` means
  any number of additional dimensions.
  axis (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce.
  Suppose the rank of `input` is r, `axis` must be in the range [-rank(input), rank(input)).
@@ -8785,7 +9049,7 @@ def any(input, axis=None, keep_dims=False):

  Raises:
  TypeError: If `keep_dims` is not a bool.
- TypeError: If `input` is not a Tensor.
+ TypeError: If `input` is not a Tensor (bool).
  TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.

@@ -8864,6 +9128,59 @@ def remainder(input, other):
  return out


+ def remainder_ext(input, other):
+ r"""
+ Computes the remainder of `input` divided by `other` element-wise. The result has the same sign as the divisor and
+ its absolute value is less than that of `other`.
+
+ Supports broadcasting to a common shape and implicit type promotion.
+
+ .. math::
+
+     remainder(input, other) = input - input.div(other, rounding\_mode="floor") * other
+
+ Note:
+ Complex inputs are not supported. At least one input needs to be a tensor, and they cannot both be bool tensors.
+
+ Args:
+ input (Union[Tensor, numbers.Number, bool]): The dividend is a numbers.Number or
+ a bool or a tensor whose data type is
+ `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+ `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+ other (Union[Tensor, numbers.Number, bool]): The divisor is a numbers.Number or
+ a bool or a tensor whose data type is number or bool\_ when the dividend is a tensor.
+ When the dividend is a Scalar, the divisor must be a Tensor whose data type is number or bool\_.
+
+ Returns:
+ Tensor, with dtype promoted and shape broadcasted.
+
+ Raises:
+ TypeError: If `input` and `other` are not of types: (tensor, tensor), (tensor, number), (tensor, bool),
+ (number, tensor) or (bool, tensor).
+ ValueError: If `input` and `other` are not broadcastable.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> x = Tensor(np.array([-4.0, 5.0, 6.0]).astype(np.float32))
+ >>> y = Tensor(np.array([3.0, 2.0, 3.0]).astype(np.float64))
+ >>> output = ops.remainder_ext(x, y)
+ >>> print(output)
+ [2. 1. 0.]
+ """
+
+ if isinstance(input, Tensor) and isinstance(other, Tensor):
+     return remainder_tensor_tensor_(input, other)
+ if isinstance(input, Tensor) and isinstance(other, (float, int, bool)):
+     return remainder_tensor_scalar_(input, other)
+ if isinstance(input, (float, int, bool)) and isinstance(other, Tensor):
+     return remainder_scalar_tensor_(input, other)
+ raise TypeError("For 'remainder', inputs should either be two tensors, or a tensor and a scalar.")
+
+
  def accumulate_n(x):
  r"""
  Computes accumulation of all input tensors element-wise.
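Python's % operator and np.remainder use the same sign-follows-the-divisor convention documented for remainder_ext, so they serve as quick references:

    import numpy as np

    x = np.array([-4.0, 5.0, 6.0], dtype=np.float32)
    y = np.array([3.0, 2.0, 3.0], dtype=np.float32)
    print(np.remainder(x, y))  # [2. 1. 0.]
    print(-4.0 % 3.0)          # 2.0, taking the divisor's sign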
@@ -9344,8 +9661,7 @@ def cross(input, other, dim=None):
  """
  if dim is None:
      dim = -65530
- cross_op = _get_cache_prim(P.Cross)(dim=dim)
- return cross_op(input, other)
+ return cross_impl(input, other, dim)


  def _einsum_convert_num_to_char(num):
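For the common 3-vector case, np.cross computes the same cross product that ops.cross dispatches to; the -65530 default here appears to act as the backend's "no dim was given" sentinel:

    import numpy as np

    a = np.array([1., 0., 0.])
    b = np.array([0., 1., 0.])
    print(np.cross(a, b))  # [0. 0. 1.]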
@@ -9874,10 +10190,6 @@ def logical_xor(input, other):
  >>> print(output)
  [False True]
  """
- if isinstance(input, Tensor) and input.dtype != mstype.bool_:
-     input = input.astype(mstype.bool_)
- if isinstance(other, Tensor) and other.dtype != mstype.bool_:
-     other = other.astype(mstype.bool_)
  return logical_xor_(input, other)

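With the implicit bool cast removed above, non-bool tensors are presumably handled (or rejected) by the logical_xor_ primitive itself, so callers should pass bool tensors explicitly. NumPy analogue of the documented behaviour:

    import numpy as np

    print(np.logical_xor([True, True], [True, False]))  # [False  True]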
 
@@ -11598,6 +11910,50 @@ def batch_dot(x1, x2, axes=None):
  return final_result


+ def round(input, *, decimals=0):
+ r"""
+ Rounds a tensor element-wise to the given number of decimals, with ties rounded half to even.
+
+ .. math::
+     out_i \approx input_i
+
+ .. note::
+ The input data types supported by the Ascend platform include
+ bfloat16 (Atlas training series products are not supported), float16, float32, float64, int32, and int64.
+
+ Args:
+ input (Tensor): The input tensor.
+
+ Keyword Args:
+ decimals (int, optional): Number of decimal places to round to (default: 0). If decimals is negative,
+ it specifies the number of positions to the left of the decimal point. A single-element Tensor is
+ also accepted and converted to an int.
+
+ Returns:
+ Tensor, has the same shape and type as the `input`.
+
+ Raises:
+ TypeError: If `input` is not a Tensor.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> input = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
+ >>> output = ops.round(input)
+ >>> print(output)
+ [ 1. 2. 2. 2. -4.]
+ >>> input = Tensor(np.array([0.81, 1.52, 2.35, 2.53, -4.57]), mindspore.float32)
+ >>> output = ops.round(input, decimals=1)
+ >>> print(output)
+ [ 0.8 1.5 2.4 2.5 -4.6]
+ """
+ return round_op(input, decimals)
+
+
  __all__ = [
  'addn',
  'absolute',
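Python's built-in round() uses the same half-to-even (banker's) rounding, so it can sanity-check the doctest values above (bearing in mind that binary floats are inexact, e.g. 2.35 is stored slightly above 2.35):

    print([round(v) for v in (0.8, 1.5, 2.3, 2.5, -4.5)])           # [1, 2, 2, 2, -4]
    print([round(v, 1) for v in (0.81, 1.52, 2.35, 2.53, -4.57)])   # [0.8, 1.5, 2.4, 2.5, -4.6]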
@@ -11821,6 +12177,7 @@ __all__ = [
  'accumulate_n',
  'iou',
  'baddbmm',
+ 'baddbmm_ext',
  'bmm',
  'trapz',
  'cholesky',