mindspore-2.3.0-cp310-cp310-win_amd64.whl → mindspore-2.4.1-cp310-cp310-win_amd64.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (275)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +3 -1
  3. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  6. mindspore/_checkparam.py +50 -9
  7. mindspore/_extends/parse/compile_config.py +41 -0
  8. mindspore/_extends/parse/parser.py +9 -7
  9. mindspore/_extends/parse/standard_method.py +52 -14
  10. mindspore/_extends/pijit/pijit_func_white_list.py +350 -24
  11. mindspore/amp.py +24 -10
  12. mindspore/common/__init__.py +6 -4
  13. mindspore/common/_pijit_context.py +190 -0
  14. mindspore/common/_register_for_tensor.py +2 -1
  15. mindspore/common/_tensor_overload.py +139 -0
  16. mindspore/common/api.py +102 -87
  17. mindspore/common/dump.py +5 -6
  18. mindspore/common/generator.py +1 -7
  19. mindspore/common/hook_handle.py +14 -26
  20. mindspore/common/initializer.py +51 -15
  21. mindspore/common/mindir_util.py +2 -2
  22. mindspore/common/parameter.py +62 -15
  23. mindspore/common/recompute.py +39 -9
  24. mindspore/common/sparse_tensor.py +7 -3
  25. mindspore/common/tensor.py +183 -37
  26. mindspore/communication/__init__.py +1 -1
  27. mindspore/communication/_comm_helper.py +38 -3
  28. mindspore/communication/comm_func.py +315 -60
  29. mindspore/communication/management.py +14 -14
  30. mindspore/context.py +132 -22
  31. mindspore/dataset/__init__.py +1 -1
  32. mindspore/dataset/audio/__init__.py +1 -1
  33. mindspore/dataset/core/config.py +7 -0
  34. mindspore/dataset/core/validator_helpers.py +7 -0
  35. mindspore/dataset/engine/cache_client.py +1 -1
  36. mindspore/dataset/engine/datasets.py +72 -44
  37. mindspore/dataset/engine/datasets_audio.py +7 -7
  38. mindspore/dataset/engine/datasets_standard_format.py +53 -3
  39. mindspore/dataset/engine/datasets_text.py +20 -20
  40. mindspore/dataset/engine/datasets_user_defined.py +174 -104
  41. mindspore/dataset/engine/datasets_vision.py +33 -33
  42. mindspore/dataset/engine/iterators.py +29 -0
  43. mindspore/dataset/engine/obs/util.py +7 -0
  44. mindspore/dataset/engine/queue.py +114 -60
  45. mindspore/dataset/engine/serializer_deserializer.py +2 -2
  46. mindspore/dataset/engine/validators.py +34 -14
  47. mindspore/dataset/text/__init__.py +1 -4
  48. mindspore/dataset/transforms/__init__.py +0 -3
  49. mindspore/dataset/utils/line_reader.py +2 -0
  50. mindspore/dataset/vision/__init__.py +1 -4
  51. mindspore/dataset/vision/utils.py +1 -1
  52. mindspore/dataset/vision/validators.py +2 -1
  53. mindspore/{nn/extend → experimental/es}/__init__.py +4 -11
  54. mindspore/experimental/es/embedding_service.py +883 -0
  55. mindspore/{nn/layer → experimental/es}/embedding_service_layer.py +218 -30
  56. mindspore/experimental/llm_boost/__init__.py +21 -0
  57. mindspore/{nn/extend/layer → experimental/llm_boost/atb}/__init__.py +4 -8
  58. mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
  59. mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
  60. mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
  61. mindspore/experimental/llm_boost/register.py +129 -0
  62. mindspore/experimental/llm_boost/utils.py +31 -0
  63. mindspore/experimental/optim/adamw.py +85 -0
  64. mindspore/experimental/optim/optimizer.py +3 -0
  65. mindspore/hal/__init__.py +3 -3
  66. mindspore/hal/contiguous_tensors_handle.py +175 -0
  67. mindspore/hal/stream.py +18 -0
  68. mindspore/include/api/model_group.h +13 -1
  69. mindspore/include/api/types.h +10 -10
  70. mindspore/include/dataset/config.h +2 -2
  71. mindspore/include/dataset/constants.h +2 -2
  72. mindspore/include/dataset/execute.h +2 -2
  73. mindspore/include/dataset/vision.h +4 -0
  74. mindspore/log.py +1 -1
  75. mindspore/mindrecord/filewriter.py +68 -51
  76. mindspore/mindspore_backend.dll +0 -0
  77. mindspore/mindspore_common.dll +0 -0
  78. mindspore/mindspore_core.dll +0 -0
  79. mindspore/mindspore_np_dtype.dll +0 -0
  80. mindspore/mindspore_ops.dll +0 -0
  81. mindspore/mint/__init__.py +983 -46
  82. mindspore/mint/distributed/__init__.py +31 -0
  83. mindspore/mint/distributed/distributed.py +254 -0
  84. mindspore/mint/nn/__init__.py +268 -23
  85. mindspore/mint/nn/functional.py +125 -19
  86. mindspore/mint/nn/layer/__init__.py +39 -0
  87. mindspore/mint/nn/layer/activation.py +133 -0
  88. mindspore/mint/nn/layer/normalization.py +477 -0
  89. mindspore/mint/nn/layer/pooling.py +110 -0
  90. mindspore/mint/optim/adamw.py +26 -13
  91. mindspore/mint/special/__init__.py +63 -0
  92. mindspore/multiprocessing/__init__.py +2 -1
  93. mindspore/nn/__init__.py +0 -1
  94. mindspore/nn/cell.py +276 -96
  95. mindspore/nn/layer/activation.py +211 -44
  96. mindspore/nn/layer/basic.py +137 -10
  97. mindspore/nn/layer/embedding.py +137 -2
  98. mindspore/nn/layer/normalization.py +101 -5
  99. mindspore/nn/layer/padding.py +34 -48
  100. mindspore/nn/layer/pooling.py +161 -7
  101. mindspore/nn/layer/transformer.py +3 -3
  102. mindspore/nn/loss/__init__.py +2 -2
  103. mindspore/nn/loss/loss.py +84 -6
  104. mindspore/nn/optim/__init__.py +2 -1
  105. mindspore/nn/optim/adadelta.py +1 -1
  106. mindspore/nn/optim/adam.py +1 -1
  107. mindspore/nn/optim/lamb.py +1 -1
  108. mindspore/nn/optim/tft_wrapper.py +124 -0
  109. mindspore/nn/wrap/cell_wrapper.py +12 -23
  110. mindspore/nn/wrap/grad_reducer.py +5 -5
  111. mindspore/nn/wrap/loss_scale.py +17 -3
  112. mindspore/numpy/__init__.py +1 -1
  113. mindspore/numpy/array_creations.py +65 -68
  114. mindspore/numpy/array_ops.py +64 -60
  115. mindspore/numpy/fft.py +610 -75
  116. mindspore/numpy/logic_ops.py +11 -10
  117. mindspore/numpy/math_ops.py +85 -84
  118. mindspore/numpy/utils_const.py +4 -4
  119. mindspore/opencv_core452.dll +0 -0
  120. mindspore/opencv_imgcodecs452.dll +0 -0
  121. mindspore/opencv_imgproc452.dll +0 -0
  122. mindspore/ops/__init__.py +6 -4
  123. mindspore/ops/_grad_experimental/grad_array_ops.py +0 -11
  124. mindspore/ops/_grad_experimental/grad_comm_ops.py +67 -4
  125. mindspore/ops/_grad_experimental/grad_math_ops.py +0 -22
  126. mindspore/ops/_vmap/vmap_array_ops.py +2 -4
  127. mindspore/ops/_vmap/vmap_math_ops.py +17 -1
  128. mindspore/ops/_vmap/vmap_nn_ops.py +43 -2
  129. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +91 -7
  130. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +2 -0
  131. mindspore/ops/auto_generate/gen_extend_func.py +767 -13
  132. mindspore/ops/auto_generate/gen_ops_def.py +2452 -364
  133. mindspore/ops/auto_generate/gen_ops_prim.py +5442 -1756
  134. mindspore/ops/auto_generate/pyboost_inner_prim.py +176 -56
  135. mindspore/ops/composite/base.py +85 -48
  136. mindspore/ops/composite/multitype_ops/_compile_utils.py +1 -0
  137. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -2
  138. mindspore/ops/function/__init__.py +22 -0
  139. mindspore/ops/function/array_func.py +492 -153
  140. mindspore/ops/function/debug_func.py +113 -1
  141. mindspore/ops/function/fft_func.py +15 -2
  142. mindspore/ops/function/grad/grad_func.py +3 -2
  143. mindspore/ops/function/math_func.py +564 -207
  144. mindspore/ops/function/nn_func.py +817 -383
  145. mindspore/ops/function/other_func.py +3 -2
  146. mindspore/ops/function/random_func.py +402 -12
  147. mindspore/ops/function/reshard_func.py +13 -11
  148. mindspore/ops/function/sparse_unary_func.py +1 -1
  149. mindspore/ops/function/vmap_func.py +3 -2
  150. mindspore/ops/functional.py +24 -14
  151. mindspore/ops/op_info_register.py +3 -3
  152. mindspore/ops/operations/__init__.py +7 -2
  153. mindspore/ops/operations/_grad_ops.py +2 -76
  154. mindspore/ops/operations/_infer_ops.py +1 -1
  155. mindspore/ops/operations/_inner_ops.py +71 -94
  156. mindspore/ops/operations/array_ops.py +14 -146
  157. mindspore/ops/operations/comm_ops.py +63 -53
  158. mindspore/ops/operations/custom_ops.py +83 -19
  159. mindspore/ops/operations/debug_ops.py +42 -10
  160. mindspore/ops/operations/manually_defined/_inner.py +12 -0
  161. mindspore/ops/operations/manually_defined/ops_def.py +273 -20
  162. mindspore/ops/operations/math_ops.py +12 -223
  163. mindspore/ops/operations/nn_ops.py +20 -114
  164. mindspore/ops/operations/other_ops.py +7 -4
  165. mindspore/ops/operations/random_ops.py +46 -1
  166. mindspore/ops/primitive.py +18 -6
  167. mindspore/ops_generate/arg_dtype_cast.py +2 -0
  168. mindspore/ops_generate/gen_aclnn_implement.py +11 -11
  169. mindspore/ops_generate/gen_constants.py +36 -0
  170. mindspore/ops_generate/gen_ops.py +67 -52
  171. mindspore/ops_generate/gen_ops_inner_prim.py +1 -1
  172. mindspore/ops_generate/gen_pyboost_func.py +131 -47
  173. mindspore/ops_generate/op_proto.py +10 -3
  174. mindspore/ops_generate/pyboost_utils.py +14 -1
  175. mindspore/ops_generate/template.py +43 -21
  176. mindspore/parallel/__init__.py +3 -1
  177. mindspore/parallel/_auto_parallel_context.py +31 -9
  178. mindspore/parallel/_cell_wrapper.py +85 -0
  179. mindspore/parallel/_parallel_serialization.py +47 -19
  180. mindspore/parallel/_tensor.py +127 -13
  181. mindspore/parallel/_utils.py +53 -22
  182. mindspore/parallel/algo_parameter_config.py +5 -5
  183. mindspore/parallel/checkpoint_transform.py +46 -39
  184. mindspore/parallel/cluster/process_entity/__init__.py +1 -1
  185. mindspore/parallel/cluster/process_entity/_api.py +31 -23
  186. mindspore/parallel/cluster/process_entity/_utils.py +2 -27
  187. mindspore/parallel/parameter_broadcast.py +3 -4
  188. mindspore/parallel/shard.py +162 -31
  189. mindspore/parallel/transform_safetensors.py +1146 -0
  190. mindspore/profiler/__init__.py +2 -1
  191. mindspore/profiler/common/constant.py +29 -0
  192. mindspore/profiler/common/registry.py +47 -0
  193. mindspore/profiler/common/util.py +28 -0
  194. mindspore/profiler/dynamic_profiler.py +694 -0
  195. mindspore/profiler/envprofiling.py +17 -19
  196. mindspore/profiler/parser/ascend_analysis/constant.py +18 -0
  197. mindspore/profiler/parser/ascend_analysis/file_manager.py +25 -4
  198. mindspore/profiler/parser/ascend_analysis/function_event.py +43 -19
  199. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +31 -26
  200. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +56 -10
  201. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +55 -8
  202. mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
  203. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +27 -20
  204. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +9 -2
  205. mindspore/profiler/parser/ascend_msprof_exporter.py +5 -4
  206. mindspore/profiler/parser/ascend_timeline_generator.py +27 -25
  207. mindspore/profiler/parser/base_timeline_generator.py +19 -25
  208. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +25 -12
  209. mindspore/profiler/parser/framework_parser.py +1 -391
  210. mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
  211. mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
  212. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
  213. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
  214. mindspore/profiler/parser/memory_usage_parser.py +0 -154
  215. mindspore/profiler/parser/profiler_info.py +78 -6
  216. mindspore/profiler/profiler.py +153 -0
  217. mindspore/profiler/profiling.py +285 -413
  218. mindspore/rewrite/__init__.py +1 -2
  219. mindspore/rewrite/common/namespace.py +4 -4
  220. mindspore/rewrite/symbol_tree/symbol_tree.py +3 -3
  221. mindspore/run_check/_check_version.py +39 -104
  222. mindspore/safeguard/rewrite_obfuscation.py +591 -247
  223. mindspore/train/__init__.py +4 -3
  224. mindspore/train/_utils.py +105 -19
  225. mindspore/train/amp.py +171 -53
  226. mindspore/train/callback/__init__.py +2 -2
  227. mindspore/train/callback/_callback.py +4 -4
  228. mindspore/train/callback/_checkpoint.py +97 -31
  229. mindspore/train/callback/_cluster_monitor.py +1 -1
  230. mindspore/train/callback/_flops_collector.py +1 -0
  231. mindspore/train/callback/_loss_monitor.py +3 -3
  232. mindspore/train/callback/_on_request_exit.py +145 -31
  233. mindspore/train/callback/_summary_collector.py +5 -5
  234. mindspore/train/callback/_tft_register.py +375 -0
  235. mindspore/train/dataset_helper.py +15 -3
  236. mindspore/train/metrics/metric.py +3 -3
  237. mindspore/train/metrics/roc.py +4 -4
  238. mindspore/train/mind_ir_pb2.py +44 -39
  239. mindspore/train/model.py +154 -58
  240. mindspore/train/serialization.py +342 -128
  241. mindspore/utils/__init__.py +21 -0
  242. mindspore/utils/utils.py +60 -0
  243. mindspore/version.py +1 -1
  244. {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/METADATA +13 -7
  245. {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/RECORD +248 -242
  246. mindspore/include/c_api/ms/abstract.h +0 -67
  247. mindspore/include/c_api/ms/attribute.h +0 -197
  248. mindspore/include/c_api/ms/base/handle_types.h +0 -43
  249. mindspore/include/c_api/ms/base/macros.h +0 -32
  250. mindspore/include/c_api/ms/base/status.h +0 -33
  251. mindspore/include/c_api/ms/base/types.h +0 -283
  252. mindspore/include/c_api/ms/context.h +0 -102
  253. mindspore/include/c_api/ms/graph.h +0 -160
  254. mindspore/include/c_api/ms/node.h +0 -606
  255. mindspore/include/c_api/ms/tensor.h +0 -161
  256. mindspore/include/c_api/ms/value.h +0 -84
  257. mindspore/mindspore_shared_lib.dll +0 -0
  258. mindspore/nn/extend/basic.py +0 -140
  259. mindspore/nn/extend/embedding.py +0 -143
  260. mindspore/nn/extend/layer/normalization.py +0 -109
  261. mindspore/nn/extend/pooling.py +0 -117
  262. mindspore/nn/layer/embedding_service.py +0 -531
  263. mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +0 -93
  264. mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +0 -66
  265. mindspore/ops/extend/__init__.py +0 -53
  266. mindspore/ops/extend/array_func.py +0 -218
  267. mindspore/ops/extend/math_func.py +0 -76
  268. mindspore/ops/extend/nn_func.py +0 -308
  269. mindspore/ops/silent_check.py +0 -162
  270. mindspore/profiler/parser/msadvisor_analyzer.py +0 -82
  271. mindspore/profiler/parser/msadvisor_parser.py +0 -240
  272. mindspore/train/callback/_mindio_ttp.py +0 -443
  273. {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/WHEEL +0 -0
  274. {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/entry_points.txt +0 -0
  275. {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/top_level.txt +0 -0
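
A quick way to confirm which build is active after installing the new wheel (a minimal sketch; `mindspore.run_check()` loads the backend and runs a trivial op):

>>> import mindspore
>>> print(mindspore.__version__)   # expect '2.4.1' after installing the new wheel
>>> mindspore.run_check()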
@@ -38,9 +38,9 @@ from ..auto_generate import (Add, Addcdiv, Addcmul, ReduceMean, ReduceSum, Reduc
                              Greater, GreaterEqual, Gcd, LogicalNot, LogicalAnd, LogicalOr,
                              LogicalXor, Cos, ACos, Sin, Asin, Abs, Round, Atan, Atanh, Atan2,
                              LinSpace, MatrixDeterminant, LogMatrixDeterminant, Erfinv, Conj,
-                             Real, Complex, Angle, MatrixExp, CholeskyInverse, Trace, Cholesky,
+                             Real, Complex, Angle, MatrixExp, CholeskyInverse, Trace, Cholesky, Cross,
                              FFTWithSize, NextAfter, NanToNum, Eig, Qr, Roll, Maximum, Div, DivMod, CumProd,
-                             CumSum, Less, LessEqual, AssignAdd, IsFinite, IsClose, TanhGrad)
+                             CumSum, Less, LessEqual, AssignAdd, IsFinite, IsClose, TanhGrad, Xlogy, Trunc, Sign)


 def _infer_shape_reduce(x, axis, keep_dims, prim_name):
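
This hunk moves Cross, Xlogy, Trunc and Sign into the auto-generated primitive set; their public entry points under `mindspore.ops` are unchanged. A minimal sketch exercising two of them:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([-1.5, 0.0, 2.7]), mindspore.float32)
>>> ops.Sign()(x)    # element-wise sign: [-1.  0.  1.]
>>> ops.Trunc()(x)   # truncate toward zero: [-1.  0.  2.]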
@@ -136,64 +136,6 @@ class _MathBinaryOp(_BinaryOp):
         real_shape = [dim if cmp_dim > 0 else cmp_dim for dim, cmp_dim in zip(shape_value, cmp_shape)]
         return tuple(real_shape)

-class SilentCheck(Primitive):
-    """
-    Implement SilentCheck on `pre_val`, `min_val`, `max_val`, `result` and
-    update them inplace with given parameters.
-
-    Args:
-        c_min_steps (int): an int determines...
-
-        c_thresh_l1 (float): a float determines...
-
-        c_coeff_l1 (float): a float determines...
-
-        c_thresh_l2 (float): a float determines...
-
-        c_coeff_l2 (float): a float determines...
-
-    Inputs:
-        - **val** (Tensor) - Tensor with dtype float32.
-        - **input_grad** (Parameter) - Tensor with dtype float32.
-        - **pre_val** (Parameter) - Input Parameter with dtype float32.
-        - **min_val** (Parameter) - Input Parameter with dtype float32.
-        - **max_val** (Parameter) - Input Parameter with dtype float32.
-        - **val_counter** (Parameter) - Input Parameter with dtype int32.
-
-    Outputs:
-        Tuple of 5 Tensors, the updated parameters.
-        - **input_grad** (Tensor) - Tensor with dtype float32.
-        - **pre_val** (Tensor) - Tensor with dtype float32.
-        - **min_val** (Tensor) - Tensor with dtype float32.
-        - **max_val** (Tensor) - Tensor with dtype float32.
-        - **result** (Tensor) - Tensor with dtype int32.
-
-    Raises:
-        TypeError: If `val` is not Tensor with dtype float32.
-        TypeError: If `result` is not Tensor with dtype int32.
-        TypeError: If `pre_val`, `min_val`, `max_val`, `input_grad` are not all Parameter type with dtype float32.
-        TypeError: If `c_thresh_l1` or `c_coeff_l1` is not a float number.
-        TypeError: If `c_min_steps` is not an int number.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> from mindspore.ops.operations.math_ops import SilentCheck
-        >>> silent_check = SilentCheck()
-        xxx
-    """
-
-    @prim_attr_register
-    def __init__(self, c_min_steps, c_thresh_l1, c_coeff_l1, c_thresh_l2, c_coeff_l2):
-        """Initialize SilentCheck."""
-        validator.check_value_type("c_min_steps", c_min_steps, [int], self.name)
-        validator.check_value_type("c_thresh_l1", c_thresh_l1, [float], self.name)
-        validator.check_value_type("c_coeff_l1", c_coeff_l1, [float], self.name)
-        validator.check_value_type("c_thresh_l2", c_thresh_l2, [float], self.name)
-        validator.check_value_type("c_coeff_l2", c_coeff_l2, [float], self.name)
-        self.add_prim_attr('side_effect_mem', True)
-

 class _BitwiseBinaryOp(_MathBinaryOp):
     """
@@ -1041,8 +983,8 @@ class Sub(_MathBinaryOp):
     Inputs:
         - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
           a bool or a tensor whose data type is
-          `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-          `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
+          `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+          `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         - **y** (Union[Tensor, number.Number, bool]) - The second input, when the first input is a Tensor,
           the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.

@@ -1246,7 +1188,7 @@ class Histogram(Primitive):
         - **x** (Tensor) - the input tensor, type support list: [float16, float32, int32].

     Outputs:
-        Tensor, 1-D Tensor with type int32.
+        1-D Tensor. If the input is int32, the output returns int32, otherwise it returns float32.

     Raises:
         TypeError: If `x` is not a Tensor.
@@ -1264,7 +1206,7 @@ class Histogram(Primitive):
         >>> op = ops.Histogram(bins=4, min=0.0, max=3.0)
         >>> y = op(x)
         >>> print(y)
-        [0 2 1 0]
+        [0. 2. 1. 0.]
     """

     @prim_attr_register
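
Taken together, the two Histogram hunks record the 2.4.1 behavior change: the output dtype now follows the input (int32 in, int32 out; otherwise float32), which is why the doctest output gains decimal points. A minimal sketch, assuming the documented behavior:

>>> import mindspore
>>> from mindspore import Tensor, ops
>>> hist = ops.Histogram(bins=4, min=0.0, max=3.0)
>>> hist(Tensor([1., 2., 1.], mindspore.float32))   # float32 input -> float32 counts: [0. 2. 1. 0.]
>>> hist(Tensor([1, 2, 1], mindspore.int32))        # int32 input -> int32 counts: [0 2 1 0]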
@@ -1440,8 +1382,8 @@ class DivNoNan(Primitive):
     Inputs:
         - **x1** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
           a bool or a tensor whose data type is
-          `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-          `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
+          `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+          `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         - **x2** (Union[Tensor, number.Number, bool]) - The second input is a number.Number or
           a bool when the first input is a bool or a tensor whose data type is number or bool\_.
           When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -1803,48 +1745,6 @@ class Xdivy(Primitive):
         return None


-class Xlogy(Primitive):
-    r"""
-    Computes the first input tensor multiplied by the logarithm of second input tensor element-wise.
-    Returns zero when `x` is zero.
-
-    Refer to :func:`mindspore.ops.xlogy` for more details.
-
-    Inputs:
-        - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
-          a bool or a tensor whose data type is
-          `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-          `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
-        - **y** (Union[Tensor, number.Number, bool]) - The second input is a number.Number or
-          a bool when the first input is a tensor or a tensor whose data type is number or bool\_.
-          When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
-
-    Outputs:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([-5, 0, 4]), mindspore.float32)
-        >>> y = Tensor(np.array([2, 2, 2]), mindspore.float32)
-        >>> xlogy = ops.Xlogy()
-        >>> output = xlogy(x, y)
-        >>> print(output)
-        [-3.465736  0.        2.7725887]
-    """
-    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize Xlogy."""
-        self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
-
-
 class _LogicBinaryOp(_BinaryOp):
     """
     Define logic binary operators.
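
The Xlogy primitive is deleted from this module because it is now imported from `..auto_generate` (see the first hunk); the functional form is unaffected. A minimal sketch reproducing the removed doctest:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([-5., 0., 4.]), mindspore.float32)
>>> y = Tensor(np.array([2., 2., 2.]), mindspore.float32)
>>> ops.xlogy(x, y)   # x * log(y), zero where x == 0: [-3.465736  0.  2.7725887]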
@@ -2564,54 +2464,17 @@ class NMSWithMask(PrimitiveWithInfer):
         return bboxes_dtype, mstype.int32, mstype.bool_


-class Sign(Primitive):
-    r"""
-    Performs sign on the tensor element-wise.
-
-    .. math::
-        sign(x) = \begin{cases} -1, &if\ x < 0 \cr
-        0, &if\ x = 0 \cr
-        1, &if\ x > 0\end{cases}
-
-    Inputs:
-        - **x** (Tensor) - The input tensor of any dimension.
-
-    Outputs:
-        Tensor, has the same shape and dtype as the `x`.
-
-    Raises:
-        TypeError: If `x` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([[2.0, 0.0, -1.0]]), mindspore.float32)
-        >>> sign = ops.Sign()
-        >>> output = sign(x)
-        >>> print(output)
-        [[ 1.  0. -1.]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        pass
-
-
 class Tan(Primitive):
     r"""
-    Computes tangent of `x` element-wise.
+    Computes tangent of `input` element-wise.

     Refer to :func:`mindspore.ops.tan` for more details.

     Inputs:
-        - **x** (Tensor) - Input tensor of any dimension.
+        - **input** (Tensor) - Input tensor of any dimension.

     Outputs:
-        Tensor, has the same shape as `x`.
+        Tensor, has the same shape as `input`.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -2630,7 +2493,7 @@ class Tan(Primitive):
     @prim_attr_register
     def __init__(self):
         """Initialize Tan"""
-        self.init_prim_io_names(inputs=['x'], outputs=['y'])
+        self.init_prim_io_names(inputs=['input'], outputs=['output'])


 class SquareSumAll(Primitive):
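
Besides relocating Sign, this hunk renames Tan's input from `x` to `input` and its output from `y` to `output`, matching :func:`mindspore.ops.tan`. Positional call sites are unaffected; a minimal sketch:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([0.0, 0.7853982]), mindspore.float32)   # 0 and pi/4
>>> ops.Tan()(input)   # element-wise tangent: approximately [0. 1.]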
@@ -3702,37 +3565,6 @@ class Imag(Primitive):
         self.init_prim_io_names(inputs=['input'], outputs=['output'])


-class Trunc(Primitive):
-    """
-    Returns a new tensor with the truncated integer values of the elements of input.
-
-    Refer to :func:`mindspore.ops.trunc` for more details.
-
-    Inputs:
-        - **input_x** (Tensor) - Input tensor of any dimension.
-
-    Outputs:
-        Tensor, the same shape and data type as `input_x`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([3.4742, 0.5466, -0.8008, -3.9079]), mindspore.float32)
-        >>> output = ops.Trunc()(x)
-        >>> print(output)
-        [ 3.  0. -0. -3.]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize Trunc"""
-        self.init_prim_io_names(inputs=['input'], outputs=['output'])
-
-
 class TridiagonalMatMul(Primitive):
     """
     Return the result of a multiplication of two matrices, where the left one is a Tridiagonal Matrix.
@@ -4294,49 +4126,6 @@ class Polygamma(Primitive):
         self.init_prim_io_names(inputs=['a', 'x'], outputs=['y'])


-class Cross(Primitive):
-    """
-    Returns the cross product of vectors in dimension `dim` of x1 and x2.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Refer to :func:`mindspore.ops.cross` for more details.
-
-    Args:
-        dim (int): Spefcified dim along which to cumpute cross product with. Default: ``-65530`` .
-
-    Inputs:
-        - **x1** (Tensor) - Input Tensor.
-        - **x2** (Tensor) - Another input Tensor, must have the same shape and
-          the same type as `x1`, and the size of their `dim` dimension should be 3.
-
-    Outputs:
-        Tensor, has the same shape and type as inputs.
-
-    Supported Platforms:
-        ``Ascend`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor
-        >>> from mindspore import dtype as mstype
-        >>> from mindspore import ops
-        >>> cross = ops.Cross(dim = 0)
-        >>> x1 = Tensor([1, 2, 3], mstype.int8)
-        >>> x2 = Tensor([1, 2, 3], mstype.int8)
-        >>> output = cross(x1, x2)
-        >>> print(output)
-        [0 0 0]
-    """
-
-    @prim_attr_register
-    def __init__(self, dim=-65530):
-        validator.check_value_type('dim', dim, [int], self.name)
-        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
-
-
 class RaggedRange(Primitive):
     """
     Returns a `RaggedTensor` containing the specified sequences of numbers.
@@ -30,15 +30,16 @@ from mindspore.ops.primitive import Primitive
 from mindspore.ops.primitive import PrimitiveWithInfer
 from mindspore.ops.primitive import PrimitiveWithCheck
 from mindspore.ops.primitive import prim_attr_register
-from ..auto_generate import (CeLU, Flatten, LogSoftmax, ReLU, ReLU6, Dense, Tanh,
+from mindspore.run_check._check_version import AscendEnvChecker
+from ..auto_generate import (CeLU, Flatten, LogSoftmax, LogSoftmaxExt, ReLU, ReLU6, Dense, Tanh,
                              Elu, Sigmoid, Softmax, SoftplusExt, HSwish, HSigmoid, AvgPool, BiasAdd,
-                             NLLLoss, OneHot, GeLU, FastGeLU, PReLU, RmsNorm,
+                             NLLLoss, OneHot, GeLU, FastGeLU, PReLU, RmsNorm, IncreFlashAttention, MSELossExt,
                              GridSampler3D, GridSampler2D, LayerNorm, LayerNormExt, HShrink, AdamWeightDecay, Dropout,
                              ApplyRotaryPosEmb, PagedAttention, PagedAttentionMask, ReshapeAndCache,
                              FlashAttentionScore, Embedding, UpsampleNearest1D, UpsampleNearest2D,
                              UpsampleNearest3D, UpsampleTrilinear3D,
                              UpsampleBilinear2D, UpsampleLinear1D,
-                             BinaryCrossEntropy, BCEWithLogitsLoss)
+                             BinaryCrossEntropy, BCEWithLogitsLoss, SoftShrink)
 from .manually_defined import BatchNorm


@@ -453,7 +454,7 @@ class ReLUV3(Primitive):
     Inputs:
         - **input_x** (Tensor) - Tensor of shape :math:`(N, *)`, where :math:`*` means, any number of
           additional dimensions, data type is
-          `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
+          `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.

     Outputs:
         Tensor of shape :math:`(N, *)`, with the same type and shape as the `input_x`.
@@ -569,8 +570,6 @@ class SeLU(Primitive):
         self.init_prim_io_names(inputs=['input_x'], outputs=['output'])


-
-
 class FusedBatchNorm(Primitive):
     r"""
     The FusedBatchNorm interface is deprecated, please use the BatchNorm interface.
@@ -3075,9 +3074,9 @@ class LSTM(Primitive):
     Args:
         input_size (int): Number of features of input.
         hidden_size (int): Number of features of hidden layer.
-        num_layers (int): Number of layers of stacked LSTM.
-        has_bias (bool): Whether the cell has bias `b_ih` and `b_hh`.
-        bidirectional (bool): Specifies whether it is a bidirectional LSTM.
+        num_layers (int): Number of layers of stacked LSTM, , which is only support `1` on CPU.
+        has_bias (bool): Whether the cell has bias `b_ih` and `b_hh` , which is only support `False` on CPU.
+        bidirectional (bool): Specifies whether it is a bidirectional LSTM, , which is only support `False` on CPU.
         dropout (float): If not 0, append `Dropout` layer on the outputs of each
             LSTM layer except the last layer. The range of dropout is [0.0, 1.0].
         proj_size (int): If `proj_size` > 0, a projection of the corresponding size will be used,
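
The new CPU caveats effectively pin three constructor arguments. A minimal sketch of a CPU-compatible configuration under the documented limits:

>>> from mindspore import ops
>>> lstm = ops.LSTM(input_size=8, hidden_size=16, num_layers=1,
...                 has_bias=False, bidirectional=False, dropout=0.0)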
@@ -3776,6 +3775,7 @@ class AdamNoUpdateParam(Primitive):
     @prim_attr_register
     def __init__(self, use_locking=False, use_nesterov=False):
         """Initialize AdamNoUpdateParam."""
+        self.add_prim_attr('side_effect_mem', True)
         validator.check_value_type("use_locking", use_locking, [bool], self.name)
         validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)

@@ -6376,6 +6376,9 @@ class AvgPool3D(Primitive):
         \frac{1}{d_{ker} * h_{ker} * w_{ker}} \sum_{l=0}^{d_{ker}-1} \sum_{m=0}^{h_{ker}-1} \sum_{n=0}^{w_{ker}-1}
         \text{input}(N_i, C_j, s_0 \times d + l, s_1 \times h + m, s_2 \times w + n)

+    Note:
+        This interface currently does not support Atlas A2 training series products.
+
     Args:
         kernel_size (Union[int, tuple[int]]): The size of kernel used to take the average value,
             is an int number that represents depth, height and width are both kernel_size, or a tuple
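
For context, AvgPool3D still consumes a 5-D (N, C, D, H, W) tensor; the note above only excludes Atlas A2 hardware, not the shape contract. A minimal sketch:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> pool = ops.AvgPool3D(kernel_size=2, strides=1, pad_mode="valid")
>>> x = Tensor(np.ones((1, 2, 4, 4, 4)), mindspore.float32)
>>> pool(x).shape   # (1, 2, 3, 3, 3)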
@@ -7091,7 +7094,7 @@ class CTCLossV2Grad(Primitive):
         zero_infinity (bool): Whether to set infinite loss and correlation gradient to zero. Default: ``False`` .

     Inputs:
-        - **grad_out** (Tenosr) - Gradient renewal codfficient, A tensor for shape (N), where N is batch size.
+        - **grad_out** (Tensor) - Gradient renewal codfficient, A tensor for shape (N), where N is batch size.
         - **log_probs** (Tensor) - A tensor of shape (T, N, C), where T is input length, N is batch size and C is number
           of classes (including blank).
         - **targets** (Tensor) - A tensor of shape (N, S), where S is max target length, means the target sequences.
@@ -7461,43 +7464,6 @@ class Dilation2D(Primitive):
         self.add_prim_attr('dilation', self.dilation)


-class SoftShrink(Primitive):
-    r"""
-    Applies the SoftShrink function element-wise.
-
-    Refer to :func:`mindspore.ops.softshrink` for more details.
-
-    Args:
-        lambd(float, optional): The :math:`\lambda` must be no less than zero. Default: ``0.5`` .
-
-    Inputs:
-        - **input_x** (Tensor) - The input of soft shrink with data type of float16 or float32.
-
-    Outputs:
-        Tensor, has the same shape and data type as `input_x`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([[ 0.5297,  0.7871,  1.1754], [ 0.7836,  0.6218, -1.1542]]), mindspore.float16)
-        >>> softshrink = ops.SoftShrink()
-        >>> output = softshrink(input_x)
-        >>> print(output)
-        [[ 0.02979  0.287    0.676  ]
-         [ 0.2837   0.1216  -0.6543 ]]
-    """
-
-    @prim_attr_register
-    def __init__(self, lambd=0.5):
-        """Initialize SoftShrink"""
-        validator.check_value_type("lambd", lambd, [float], self.name)
-        validator.check_number("lambd", lambd, 0, validator.GE, self.name)
-
-
 class ApplyAdagradDA(Primitive):
     r"""
     Update `var` according to the proximal adagrad scheme.
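
As with Xlogy and Sign in math_ops, SoftShrink is removed here because it now comes from `..auto_generate` (see the import hunk above); the primitive remains callable. A minimal sketch:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.5297, 0.7871, 1.1754]), mindspore.float32)
>>> ops.SoftShrink(lambd=0.5)(x)   # shrink toward zero by lambd: [0.0297 0.2871 0.6754]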
@@ -9591,79 +9557,19 @@ class PromptFlashAttention(Primitive):
                              outputs=["attention_out"])


-class IncreFlashAttention(Primitive):
-    r"""
-    The interface for fully inference.
-
-    B -- Batch size
-
-    S -- Sequence length
-
-    H -- Hidden size
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-        If there is no input parameter and no default value, None needs to be passed.
-
-    Args:
-        - **num_heads** (int) - The number of heads.
-        - **input_layout** (str) - the data layout of the input qkv, support `(BSH)` and `(BNSD)`. Default `BSH`.
-        - **scale_value** (double) - The scale value indicating the scale coefficient, which is used as the scalar of
-          Muls in the calculation. Default: 1.0.
-        - **num_key_value_heads** (int) - head numbers of key/value which are used in GQA algorithm.
-          The value o indicates if the key and value have the same head nums, use numHeads. Default: 0.
-        - **block_size** (int) - Default: 0.
-        - **inner_precise** (int) - Default: 1.
-
-    Inputs:
-        - **query** (Tensor) - The query tensor with data type of float16 or bfloat16.
-          Input tensor of shape :math:`(B, 1, H)` / :math:`(B, N, 1, D)`.
-        - **key** (TensorList) - The key tensor with data type of float16 or bfloat16.
-          Input tensor of shape :math:`(B, S, H)` / :math:`(B, N, S, D)`.
-        - **value** (TensorList) - The value tensor with data type of float16 or bfloat16.
-          Input tensor of shape :math:`(B, S, H)` / :math:`(B, N, S, D)`.
-        - **attn_mask** (Tensor) - The attention mask tensor with data type of float16 or bool.
-          Input tensor of shape :math:`(B, S)` / :math:`(B, 1, S)` / :math:`(B, 1, 1, S)`.
-        - **actual_seq_lengths** (Tensor) - Describe actual sequence length of each input with data type of int.
-        - **pse_shift** (Tensor) - The position encoding tensor with data type of float16 or float32.
-        - **dequant_scale1** (Tensor) - Quantitative parametor, the tensor with data type of uint64.
-        - **quant_scale1** (Tensor) - Quantitative parametor, the tensor with data type of float.
-        - **dequant_scale2** (Tensor) - Quantitative parametor, the tensor with data type of uint64.
-        - **quant_scale2** (Tensor) - Quantitative parametor, the tensor with data type of float.
-        - **quant_offset2** (Tensor) - Quantitative parametor, the tensor with data type of float.
-        - **antiquant_scale** (Tensor) - Quantitative parametor, the tensor with data type of float.
-        - **antiquant_offset** (Tensor) - Quantitative parametor, the tensor with data type of float.
-        - **block_table** (Tensor) - The tensor with data type of float.
-
-    Outputs:
-        - **attention_out** (Tensor) - Input tensor of shape :math:`(B, 1, H)` / :math:`(B, N, 1, D)`.
-
-    Supported Platforms:
-        ``Ascend``
-    """
-
-    @prim_attr_register
-    def __init__(self, num_heads, input_layout="BSH", scale_value=1.0, num_key_value_heads=0, block_size=0,
-                 inner_precise=1):
-        """Initialize IncreFlashAttention."""
-        validator.check_value_type('num_heads', num_heads, [int], self.name)
-        validator.check_value_type('input_layout', input_layout, [str], self.name)
-        validator.check_value_type('scale_value', scale_value, [float], self.name)
-        validator.check_value_type('num_key_value_heads', num_key_value_heads, [int], self.name)
-        validator.check_value_type('block_size', block_size, [int], self.name)
-        validator.check_value_type('inner_precise', inner_precise, [int], self.name)
-        self.init_prim_io_names(inputs=["query", "key", "value", "attn_mask", "actual_seq_lengths", "pse_shift",
-                                        "dequant_scale1", "quant_scale1", "dequant_scale2", "quant_scale2",
-                                        "quant_offset2", "antiquant_scale", "antiquant_offset", "block_table"],
-                                outputs=["attention_out"])
-
-
 class AllFinite(Primitive):
     r"""
     Check all gradients is finite.
     """
+
     @prim_attr_register
     def __init__(self):
         """Initialize"""
         self.init_prim_io_names(inputs=['gradients'],
                                 outputs=["is_finite"])
+        if context.get_context("device_target") == "Ascend":
+            checker = AscendEnvChecker(None)
+            if not checker.check_custom_version():
+                raise RuntimeError(
+                    "The version of Ascend AI software package installed "
+                    "in the current environment does not support AllFinite.")
@@ -300,10 +300,10 @@ class SampleDistortedBoundingBoxV2(Primitive):

     @prim_attr_register
     def __init__(self, seed=0, seed2=0, \
-                 aspect_ratio_range=(0.75, 1.33), \
-                 area_range=(0.05, 1.0), \
-                 max_attempts=100, \
-                 use_image_if_no_bounding_boxes=False):
+                aspect_ratio_range=(0.75, 1.33), \
+                area_range=(0.05, 1.0), \
+                max_attempts=100, \
+                use_image_if_no_bounding_boxes=False):
         validator.check_is_int(seed, "seed", self.name)
         validator.check_is_int(seed2, "seed2", self.name)
         validator.check_value_type("aspect_ratio_range", aspect_ratio_range, [list, tuple], self.name)
@@ -584,6 +584,9 @@ class StopGradient(Primitive):
         pass


+stop_gradient_ = StopGradient()
+
+
 class ConfusionMatrix(PrimitiveWithInfer):
     r"""
     Calculates the confusion matrix from labels and predictions.
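
The new module-level `stop_gradient_` singleton backs the functional entry point; user code normally goes through `ops.stop_gradient`. A minimal sketch:

>>> from mindspore import Tensor, ops
>>> x = Tensor([1.0, 2.0])
>>> y = ops.stop_gradient(x)   # y equals x but is cut out of gradient computation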
@@ -89,6 +89,10 @@ class TruncatedNormal(Primitive):
     - Using the Philox algorithm to scramble seed and seed2 to obtain random seed so that the user doesn't need
       to worry about which seed is more important.

+    .. warning::
+        The Ascend backend does not support the reproducibility of random numbers, so
+        the `seed` and `seed2` parameter have no effect.
+
     Args:
         seed (int, optional): The operator-level random seed, used to generate random numbers,
             must be non-negative. Default: ``0`` .
@@ -153,6 +157,10 @@ class StandardNormal(Primitive):
     - Using the Philox algorithm to scramble seed and seed2 to obtain random seed so that the user doesn't need
       to worry about which seed is more important.

+    .. warning::
+        The Ascend backend does not support the reproducibility of random numbers, so
+        the `seed` and `seed2` parameter have no effect.
+
     Args:
         seed (int, optional): The operator-level random seed, used to generate random numbers,
             must be non-negative. Default: ``0`` .
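
The same warning is stamped onto every random operator in this batch: on Ascend, `seed`/`seed2` do not make results reproducible, while CPU and GPU seeding behaves as before. A minimal sketch:

>>> from mindspore import ops
>>> stdnormal = ops.StandardNormal(seed=2, seed2=4)
>>> out = stdnormal((2, 3))   # reproducible on CPU/GPU; not reproducible on Ascend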
@@ -204,6 +212,10 @@ class StandardLaplace(Primitive):
     - Using the Philox algorithm to scramble seed and seed2 to obtain random seed so that the user doesn't need
       to worry about which seed is more important.

+    .. warning::
+        The Ascend backend does not support the reproducibility of random numbers, so
+        the `seed` and `seed2` parameter have no effect.
+
     Args:
         seed (int, optional): The operator-level random seed, used to generate random numbers,
             must be non-negative. Default: ``0`` .
@@ -367,6 +379,10 @@ class Gamma(PrimitiveWithInfer):
     - Using the Philox algorithm to scramble seed and seed2 to obtain random seed so that the user doesn't need
       to worry about which seed is more important.

+    .. warning::
+        The Ascend backend does not support the reproducibility of random numbers, so
+        the `seed` and `seed2` parameter have no effect.
+
     Args:
         seed (int, optional): The operator-level random seed, used to generate random numbers,
             must be non-negative. Default: ``0`` .
@@ -450,6 +466,10 @@ class ParameterizedTruncatedNormal(Primitive):
     - Using the Philox algorithm to scramble seed and seed2 to obtain random seed so that the user doesn't need
       to worry about which seed is more important.

+    .. warning::
+        The Ascend backend does not support the reproducibility of random numbers, so
+        the `seed` and `seed2` parameter have no effect.
+
     Args:
         seed (int, optional): The operator-level random seed, used to generate random numbers,
             must be non-negative. Default: ``0`` .
@@ -672,6 +692,10 @@ class UniformInt(Primitive):
     - Using the Philox algorithm to scramble seed and seed2 to obtain random seed so that the user doesn't need
       to worry about which seed is more important.

+    .. warning::
+        The Ascend backend does not support the reproducibility of random numbers, so
+        the `seed` and `seed2` parameter have no effect.
+
     Args:
         seed (int, optional): The operator-level random seed, used to generate random numbers,
             must be non-negative. Default: ``0`` .
@@ -737,6 +761,10 @@ class UniformReal(Primitive):
     - GPU: int32, int64.
     - CPU: int16, int32, int64.

+    .. warning::
+        The Ascend backend does not support the reproducibility of random numbers, so
+        the `seed` and `seed2` parameter have no effect.
+
     Args:
         seed (int, optional): The operator-level random seed, used to generate random numbers,
             must be non-negative. Default: ``0`` .
@@ -837,6 +865,10 @@ class RandomCategorical(PrimitiveWithInfer):
     r"""
     Generates random samples from a given categorical distribution tensor.

+    .. warning::
+        The Ascend backend does not support the reproducibility of random numbers, so
+        the `seed` parameter has no effect.
+
     Args:
         dtype (mindspore.dtype): The type of output. Its value must be one of mstype.int16,
             mstype.int32 and mstype.int64. Default: ``mstype.int64`` .
@@ -903,6 +935,10 @@ class Multinomial(Primitive):
     - Using the Philox algorithm to scramble seed and seed2 to obtain random seed so that the user doesn't need
       to worry about which seed is more important.

+    .. warning::
+        The Ascend backend does not support the reproducibility of random numbers, so
+        the `seed` and `seed2` parameter have no effect.
+
     Args:
         seed (int, optional): The operator-level random seed, used to generate random numbers,
             must be non-negative. Default: ``0`` .
@@ -1012,6 +1048,11 @@ class UniformCandidateSampler(Primitive):

     Refer to :func:`mindspore.ops.uniform_candidate_sampler` for more details.

+    .. warning::
+        - The Ascend backend does not support the reproducibility of random numbers, so
+          the `seed` parameter has no effect.
+        - The Ascend backend does not support dynamic shape scenarios currently.
+
     Args:
         num_true (int): The number of target classes in each training example.
         num_sampled (int): The number of classes to randomly sample. The sampled_candidates will have a shape
@@ -1026,7 +1067,7 @@ class UniformCandidateSampler(Primitive):

     Inputs:
         - **true_classes** (Tensor) - A Tensor. The target classes with a Tensor shape of
-          :math:`(batch\_size, num\_true)`.
+          :math:`(batch\_size, num\_true)`. The value range of the elements must be :math:`[0, range\_max)`.

     Outputs:
         - **sampled_candidates** (Tensor) - The sampled_candidates is independent of the true classes.
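
The second hunk tightens the `true_classes` contract: every element must lie in [0, range_max). A minimal sketch respecting that range:

>>> import mindspore
>>> from mindspore import Tensor, ops
>>> sampler = ops.UniformCandidateSampler(num_true=1, num_sampled=3, unique=False, range_max=4)
>>> true_classes = Tensor([[1], [3]], mindspore.int64)   # all values in [0, 4)
>>> sampled, true_expected, sampled_expected = sampler(true_classes)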
@@ -1086,6 +1127,10 @@ class LogUniformCandidateSampler(Primitive):

     Refer to :func:`mindspore.ops.log_uniform_candidate_sampler` for more details.

+    .. warning::
+        The Ascend backend does not support the reproducibility of random numbers, so
+        the `seed` parameter has no effect.
+
     Args:
         num_true (int, optional): The number of target classes per training example. Default: ``1`` .
         num_sampled (int, optional): The number of classes to randomly sample. Default: ``5`` .