mindspore-2.3.0-cp310-cp310-win_amd64.whl → mindspore-2.4.0-cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (308)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +3 -1
  5. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +50 -9
  9. mindspore/_extends/parse/compile_config.py +41 -0
  10. mindspore/_extends/parse/parser.py +9 -7
  11. mindspore/_extends/parse/standard_method.py +52 -14
  12. mindspore/_extends/pijit/pijit_func_white_list.py +350 -24
  13. mindspore/amp.py +24 -10
  14. mindspore/atlprov.dll +0 -0
  15. mindspore/avcodec-59.dll +0 -0
  16. mindspore/avdevice-59.dll +0 -0
  17. mindspore/avfilter-8.dll +0 -0
  18. mindspore/avformat-59.dll +0 -0
  19. mindspore/avutil-57.dll +0 -0
  20. mindspore/c1.dll +0 -0
  21. mindspore/c1xx.dll +0 -0
  22. mindspore/c2.dll +0 -0
  23. mindspore/common/__init__.py +6 -4
  24. mindspore/common/_pijit_context.py +190 -0
  25. mindspore/common/_register_for_tensor.py +2 -1
  26. mindspore/common/_tensor_overload.py +139 -0
  27. mindspore/common/api.py +102 -87
  28. mindspore/common/dump.py +5 -6
  29. mindspore/common/generator.py +1 -7
  30. mindspore/common/hook_handle.py +14 -26
  31. mindspore/common/mindir_util.py +2 -2
  32. mindspore/common/parameter.py +46 -13
  33. mindspore/common/recompute.py +39 -9
  34. mindspore/common/sparse_tensor.py +7 -3
  35. mindspore/common/tensor.py +209 -29
  36. mindspore/communication/__init__.py +1 -1
  37. mindspore/communication/_comm_helper.py +38 -3
  38. mindspore/communication/comm_func.py +310 -55
  39. mindspore/communication/management.py +14 -14
  40. mindspore/context.py +123 -22
  41. mindspore/dataset/__init__.py +1 -1
  42. mindspore/dataset/audio/__init__.py +1 -1
  43. mindspore/dataset/core/config.py +7 -0
  44. mindspore/dataset/core/validator_helpers.py +7 -0
  45. mindspore/dataset/engine/cache_client.py +1 -1
  46. mindspore/dataset/engine/datasets.py +72 -44
  47. mindspore/dataset/engine/datasets_audio.py +7 -7
  48. mindspore/dataset/engine/datasets_standard_format.py +53 -3
  49. mindspore/dataset/engine/datasets_text.py +20 -20
  50. mindspore/dataset/engine/datasets_user_defined.py +174 -104
  51. mindspore/dataset/engine/datasets_vision.py +33 -33
  52. mindspore/dataset/engine/iterators.py +29 -0
  53. mindspore/dataset/engine/obs/util.py +7 -0
  54. mindspore/dataset/engine/queue.py +114 -60
  55. mindspore/dataset/engine/serializer_deserializer.py +2 -2
  56. mindspore/dataset/engine/validators.py +34 -14
  57. mindspore/dataset/text/__init__.py +1 -4
  58. mindspore/dataset/transforms/__init__.py +0 -3
  59. mindspore/dataset/utils/line_reader.py +2 -0
  60. mindspore/dataset/vision/__init__.py +1 -4
  61. mindspore/dataset/vision/utils.py +1 -1
  62. mindspore/dataset/vision/validators.py +2 -1
  63. mindspore/dnnl.dll +0 -0
  64. mindspore/dpcmi.dll +0 -0
  65. mindspore/{nn/extend → experimental/es}/__init__.py +4 -11
  66. mindspore/experimental/es/embedding_service.py +883 -0
  67. mindspore/{nn/layer → experimental/es}/embedding_service_layer.py +218 -30
  68. mindspore/experimental/llm_boost/__init__.py +21 -0
  69. mindspore/{nn/extend/layer → experimental/llm_boost/atb}/__init__.py +4 -8
  70. mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
  71. mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
  72. mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
  73. mindspore/experimental/llm_boost/register.py +129 -0
  74. mindspore/experimental/llm_boost/utils.py +31 -0
  75. mindspore/experimental/optim/adamw.py +85 -0
  76. mindspore/experimental/optim/optimizer.py +3 -0
  77. mindspore/hal/__init__.py +3 -3
  78. mindspore/hal/contiguous_tensors_handle.py +175 -0
  79. mindspore/hal/stream.py +18 -0
  80. mindspore/include/api/model_group.h +13 -1
  81. mindspore/include/api/types.h +10 -10
  82. mindspore/include/dataset/config.h +2 -2
  83. mindspore/include/dataset/constants.h +2 -2
  84. mindspore/include/dataset/execute.h +2 -2
  85. mindspore/include/dataset/vision.h +4 -0
  86. mindspore/jpeg62.dll +0 -0
  87. mindspore/log.py +1 -1
  88. mindspore/mindrecord/filewriter.py +68 -51
  89. mindspore/mindspore_backend.dll +0 -0
  90. mindspore/mindspore_common.dll +0 -0
  91. mindspore/mindspore_core.dll +0 -0
  92. mindspore/mindspore_glog.dll +0 -0
  93. mindspore/mindspore_np_dtype.dll +0 -0
  94. mindspore/mindspore_ops.dll +0 -0
  95. mindspore/mint/__init__.py +495 -46
  96. mindspore/mint/distributed/__init__.py +31 -0
  97. mindspore/mint/distributed/distributed.py +254 -0
  98. mindspore/mint/nn/__init__.py +266 -21
  99. mindspore/mint/nn/functional.py +125 -19
  100. mindspore/mint/nn/layer/__init__.py +39 -0
  101. mindspore/mint/nn/layer/activation.py +133 -0
  102. mindspore/mint/nn/layer/normalization.py +477 -0
  103. mindspore/mint/nn/layer/pooling.py +110 -0
  104. mindspore/mint/optim/adamw.py +28 -7
  105. mindspore/mint/special/__init__.py +63 -0
  106. mindspore/msobj140.dll +0 -0
  107. mindspore/mspdb140.dll +0 -0
  108. mindspore/mspdbcore.dll +0 -0
  109. mindspore/mspdbst.dll +0 -0
  110. mindspore/mspft140.dll +0 -0
  111. mindspore/msvcdis140.dll +0 -0
  112. mindspore/msvcp140_1.dll +0 -0
  113. mindspore/msvcp140_2.dll +0 -0
  114. mindspore/msvcp140_atomic_wait.dll +0 -0
  115. mindspore/msvcp140_codecvt_ids.dll +0 -0
  116. mindspore/multiprocessing/__init__.py +2 -1
  117. mindspore/nn/__init__.py +0 -1
  118. mindspore/nn/cell.py +275 -93
  119. mindspore/nn/layer/activation.py +211 -44
  120. mindspore/nn/layer/basic.py +113 -3
  121. mindspore/nn/layer/embedding.py +120 -2
  122. mindspore/nn/layer/normalization.py +101 -5
  123. mindspore/nn/layer/padding.py +34 -48
  124. mindspore/nn/layer/pooling.py +161 -7
  125. mindspore/nn/layer/transformer.py +3 -3
  126. mindspore/nn/loss/__init__.py +2 -2
  127. mindspore/nn/loss/loss.py +84 -6
  128. mindspore/nn/optim/__init__.py +2 -1
  129. mindspore/nn/optim/adadelta.py +1 -1
  130. mindspore/nn/optim/adam.py +1 -1
  131. mindspore/nn/optim/lamb.py +1 -1
  132. mindspore/nn/optim/tft_wrapper.py +127 -0
  133. mindspore/nn/wrap/cell_wrapper.py +12 -23
  134. mindspore/nn/wrap/grad_reducer.py +5 -5
  135. mindspore/nn/wrap/loss_scale.py +17 -3
  136. mindspore/numpy/__init__.py +1 -1
  137. mindspore/numpy/array_creations.py +65 -68
  138. mindspore/numpy/array_ops.py +64 -60
  139. mindspore/numpy/fft.py +610 -75
  140. mindspore/numpy/logic_ops.py +11 -10
  141. mindspore/numpy/math_ops.py +85 -84
  142. mindspore/numpy/utils_const.py +4 -4
  143. mindspore/opencv_core452.dll +0 -0
  144. mindspore/opencv_imgcodecs452.dll +0 -0
  145. mindspore/opencv_imgproc452.dll +0 -0
  146. mindspore/ops/__init__.py +6 -4
  147. mindspore/ops/_grad_experimental/grad_comm_ops.py +47 -3
  148. mindspore/ops/_grad_experimental/grad_math_ops.py +0 -22
  149. mindspore/ops/_vmap/vmap_array_ops.py +2 -4
  150. mindspore/ops/_vmap/vmap_math_ops.py +17 -1
  151. mindspore/ops/_vmap/vmap_nn_ops.py +43 -2
  152. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +85 -7
  153. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +2 -0
  154. mindspore/ops/auto_generate/gen_extend_func.py +734 -13
  155. mindspore/ops/auto_generate/gen_ops_def.py +2420 -381
  156. mindspore/ops/auto_generate/gen_ops_prim.py +5196 -1659
  157. mindspore/ops/auto_generate/pyboost_inner_prim.py +176 -56
  158. mindspore/ops/composite/base.py +85 -48
  159. mindspore/ops/composite/multitype_ops/_compile_utils.py +1 -0
  160. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -2
  161. mindspore/ops/function/__init__.py +22 -0
  162. mindspore/ops/function/array_func.py +490 -153
  163. mindspore/ops/function/debug_func.py +113 -1
  164. mindspore/ops/function/fft_func.py +15 -2
  165. mindspore/ops/function/grad/grad_func.py +3 -2
  166. mindspore/ops/function/math_func.py +558 -207
  167. mindspore/ops/function/nn_func.py +817 -383
  168. mindspore/ops/function/other_func.py +3 -2
  169. mindspore/ops/function/random_func.py +184 -8
  170. mindspore/ops/function/reshard_func.py +13 -11
  171. mindspore/ops/function/sparse_unary_func.py +1 -1
  172. mindspore/ops/function/vmap_func.py +3 -2
  173. mindspore/ops/functional.py +24 -14
  174. mindspore/ops/op_info_register.py +3 -3
  175. mindspore/ops/operations/__init__.py +6 -1
  176. mindspore/ops/operations/_grad_ops.py +2 -76
  177. mindspore/ops/operations/_infer_ops.py +1 -1
  178. mindspore/ops/operations/_inner_ops.py +71 -94
  179. mindspore/ops/operations/array_ops.py +12 -146
  180. mindspore/ops/operations/comm_ops.py +42 -53
  181. mindspore/ops/operations/custom_ops.py +83 -19
  182. mindspore/ops/operations/debug_ops.py +42 -10
  183. mindspore/ops/operations/manually_defined/_inner.py +12 -0
  184. mindspore/ops/operations/manually_defined/ops_def.py +265 -10
  185. mindspore/ops/operations/math_ops.py +12 -223
  186. mindspore/ops/operations/nn_ops.py +20 -114
  187. mindspore/ops/operations/other_ops.py +7 -4
  188. mindspore/ops/operations/random_ops.py +46 -1
  189. mindspore/ops/primitive.py +18 -6
  190. mindspore/ops_generate/arg_dtype_cast.py +2 -0
  191. mindspore/ops_generate/gen_aclnn_implement.py +11 -11
  192. mindspore/ops_generate/gen_constants.py +36 -0
  193. mindspore/ops_generate/gen_ops.py +67 -52
  194. mindspore/ops_generate/gen_ops_inner_prim.py +1 -1
  195. mindspore/ops_generate/gen_pyboost_func.py +131 -47
  196. mindspore/ops_generate/op_proto.py +10 -3
  197. mindspore/ops_generate/pyboost_utils.py +14 -1
  198. mindspore/ops_generate/template.py +43 -21
  199. mindspore/parallel/__init__.py +3 -1
  200. mindspore/parallel/_auto_parallel_context.py +28 -8
  201. mindspore/parallel/_cell_wrapper.py +83 -0
  202. mindspore/parallel/_parallel_serialization.py +47 -19
  203. mindspore/parallel/_tensor.py +81 -11
  204. mindspore/parallel/_utils.py +13 -1
  205. mindspore/parallel/algo_parameter_config.py +5 -5
  206. mindspore/parallel/checkpoint_transform.py +46 -39
  207. mindspore/parallel/cluster/process_entity/__init__.py +1 -1
  208. mindspore/parallel/cluster/process_entity/_api.py +31 -23
  209. mindspore/parallel/cluster/process_entity/_utils.py +2 -27
  210. mindspore/parallel/parameter_broadcast.py +3 -4
  211. mindspore/parallel/shard.py +162 -31
  212. mindspore/parallel/transform_safetensors.py +993 -0
  213. mindspore/pgodb140.dll +0 -0
  214. mindspore/pgort140.dll +0 -0
  215. mindspore/profiler/__init__.py +2 -1
  216. mindspore/profiler/common/constant.py +29 -0
  217. mindspore/profiler/common/registry.py +47 -0
  218. mindspore/profiler/common/util.py +28 -0
  219. mindspore/profiler/dynamic_profiler.py +694 -0
  220. mindspore/profiler/envprofiling.py +17 -19
  221. mindspore/profiler/parser/ascend_analysis/constant.py +18 -0
  222. mindspore/profiler/parser/ascend_analysis/file_manager.py +25 -4
  223. mindspore/profiler/parser/ascend_analysis/function_event.py +43 -19
  224. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +31 -26
  225. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +56 -10
  226. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +55 -8
  227. mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
  228. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +27 -20
  229. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +9 -2
  230. mindspore/profiler/parser/ascend_msprof_exporter.py +5 -4
  231. mindspore/profiler/parser/ascend_timeline_generator.py +27 -25
  232. mindspore/profiler/parser/base_timeline_generator.py +19 -25
  233. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +25 -12
  234. mindspore/profiler/parser/framework_parser.py +1 -391
  235. mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
  236. mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
  237. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
  238. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
  239. mindspore/profiler/parser/memory_usage_parser.py +0 -154
  240. mindspore/profiler/parser/profiler_info.py +78 -6
  241. mindspore/profiler/profiler.py +153 -0
  242. mindspore/profiler/profiling.py +280 -412
  243. mindspore/rewrite/__init__.py +1 -2
  244. mindspore/rewrite/common/namespace.py +4 -4
  245. mindspore/rewrite/symbol_tree/symbol_tree.py +3 -3
  246. mindspore/run_check/_check_version.py +36 -103
  247. mindspore/safeguard/rewrite_obfuscation.py +591 -247
  248. mindspore/swresample-4.dll +0 -0
  249. mindspore/swscale-6.dll +0 -0
  250. mindspore/tbbmalloc.dll +0 -0
  251. mindspore/tinyxml2.dll +0 -0
  252. mindspore/train/__init__.py +4 -3
  253. mindspore/train/_utils.py +28 -2
  254. mindspore/train/amp.py +171 -53
  255. mindspore/train/callback/__init__.py +2 -2
  256. mindspore/train/callback/_callback.py +4 -4
  257. mindspore/train/callback/_checkpoint.py +85 -22
  258. mindspore/train/callback/_cluster_monitor.py +1 -1
  259. mindspore/train/callback/_flops_collector.py +1 -0
  260. mindspore/train/callback/_loss_monitor.py +3 -3
  261. mindspore/train/callback/_on_request_exit.py +134 -31
  262. mindspore/train/callback/_summary_collector.py +5 -5
  263. mindspore/train/callback/_tft_register.py +352 -0
  264. mindspore/train/dataset_helper.py +7 -3
  265. mindspore/train/metrics/metric.py +3 -3
  266. mindspore/train/metrics/roc.py +4 -4
  267. mindspore/train/mind_ir_pb2.py +44 -39
  268. mindspore/train/model.py +134 -58
  269. mindspore/train/serialization.py +336 -112
  270. mindspore/turbojpeg.dll +0 -0
  271. mindspore/utils/__init__.py +21 -0
  272. mindspore/utils/utils.py +60 -0
  273. mindspore/vcmeta.dll +0 -0
  274. mindspore/vcruntime140.dll +0 -0
  275. mindspore/vcruntime140_1.dll +0 -0
  276. mindspore/version.py +1 -1
  277. {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/METADATA +6 -2
  278. {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/RECORD +281 -275
  279. mindspore/include/c_api/ms/abstract.h +0 -67
  280. mindspore/include/c_api/ms/attribute.h +0 -197
  281. mindspore/include/c_api/ms/base/handle_types.h +0 -43
  282. mindspore/include/c_api/ms/base/macros.h +0 -32
  283. mindspore/include/c_api/ms/base/status.h +0 -33
  284. mindspore/include/c_api/ms/base/types.h +0 -283
  285. mindspore/include/c_api/ms/context.h +0 -102
  286. mindspore/include/c_api/ms/graph.h +0 -160
  287. mindspore/include/c_api/ms/node.h +0 -606
  288. mindspore/include/c_api/ms/tensor.h +0 -161
  289. mindspore/include/c_api/ms/value.h +0 -84
  290. mindspore/mindspore_shared_lib.dll +0 -0
  291. mindspore/nn/extend/basic.py +0 -140
  292. mindspore/nn/extend/embedding.py +0 -143
  293. mindspore/nn/extend/layer/normalization.py +0 -109
  294. mindspore/nn/extend/pooling.py +0 -117
  295. mindspore/nn/layer/embedding_service.py +0 -531
  296. mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +0 -93
  297. mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +0 -66
  298. mindspore/ops/extend/__init__.py +0 -53
  299. mindspore/ops/extend/array_func.py +0 -218
  300. mindspore/ops/extend/math_func.py +0 -76
  301. mindspore/ops/extend/nn_func.py +0 -308
  302. mindspore/ops/silent_check.py +0 -162
  303. mindspore/profiler/parser/msadvisor_analyzer.py +0 -82
  304. mindspore/profiler/parser/msadvisor_parser.py +0 -240
  305. mindspore/train/callback/_mindio_ttp.py +0 -443
  306. {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/WHEEL +0 -0
  307. {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/entry_points.txt +0 -0
  308. {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/top_level.txt +0 -0
@@ -18,6 +18,7 @@ from __future__ import division
 
 import numbers
 import math
+import types
 import numpy as np
 from mindspore.ops import signature as sig
 from mindspore.ops.primitive import Primitive, prim_attr_register, prim_arg_register, PrimitiveWithInfer
@@ -937,6 +938,10 @@ class Tile(Primitive):
 
     Refer to :func:`mindspore.ops.tile` for more details.
 
+    Note:
+        On Ascend, the number of `dims` should not exceed 8; repeating more than 4 dimensions
+        simultaneously is currently not supported.
+
     Inputs:
         - **input** (Tensor) - The tensor whose elements need to be repeated. Set the shape of input tensor as
           :math:`(x_1, x_2, ..., x_S)` .
@@ -1025,6 +1030,10 @@ def tile(input, dims):
     output tensor has `input.shape[i] * dims[i]` elements, and the values of `input`
     are replicated `dims[i]` times along the i'th dimension.
 
+    Note:
+        On Ascend, the number of `dims` should not exceed 8; repeating more than 4 dimensions
+        simultaneously is currently not supported.
+
     Args:
         input (Tensor): The tensor whose elements need to be repeated. Set the shape of input tensor as
             :math:`(x_1, x_2, ..., x_S)` .
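
The note above caps `dims` at 8 entries on Ascend, with at most 4 dimensions repeated at once. A minimal sketch of a call that stays inside both limits (illustrative values, not taken from the diff):

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
# len(dims) == 2 <= 8, and only 2 dimensions are repeated simultaneously,
# so this stays within the documented Ascend limits.
y = ops.tile(x, (2, 3))
print(y.shape)  # (4, 6)
```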
@@ -1127,16 +1136,16 @@ class Cast(Primitive):
     taken into account. As long as the real part is non-zero, it returns True; otherwise, it returns False.
 
     Inputs:
-        - **input_x** (Union[Tensor, Number]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
+        - **input** (Union[Tensor, Number]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
           The tensor to be cast.
-        - **type** (dtype.Number) - The valid data type of the output tensor. Only constant value is allowed.
+        - **dtype** (dtype.Number) - The valid data type of the output tensor. Only constant value is allowed.
 
     Outputs:
-        Tensor, the shape of tensor is the same as `input_x`, :math:`(x_1, x_2, ..., x_R)`.
+        Tensor, the shape of tensor is the same as `input`, :math:`(x_1, x_2, ..., x_R)`.
 
     Raises:
-        TypeError: If `input_x` is neither Tensor nor Number.
-        TypeError: If `type` is not a Number.
+        TypeError: If `input` is neither Tensor nor Number.
+        TypeError: If `dtype` is not a Number.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -1146,10 +1155,10 @@ class Cast(Primitive):
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
         >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
-        >>> input_x = Tensor(input_np)
-        >>> type_dst = mindspore.int32
+        >>> input = Tensor(input_np)
+        >>> dtype = mindspore.int32
         >>> cast = ops.Cast()
-        >>> output = cast(input_x, type_dst)
+        >>> output = cast(input, dtype)
         >>> print(output.dtype)
         Int32
         >>> print(output.shape)
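
The rename only touches the documented parameter names (`input_x`/`type` → `input`/`dtype`); a positional call is unaffected, as this hedged sketch illustrates:

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.ones((2, 3)), mindspore.float32)
cast = ops.Cast()
# Arguments are passed positionally, so code written against the 2.3.0
# parameter names keeps working unchanged.
out = cast(x, mindspore.int32)
print(out.dtype)  # Int32
```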
@@ -1187,7 +1196,7 @@ def to_sequence(val):
     to_sequence
     """
     if isinstance(val, (tuple, list)):
-        return val
+        return tuple(val)
     return (val,)
 
 
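This is a small behavioral change: list inputs are now normalized to tuples instead of being returned as-is. A standalone sketch of the new semantics, mirroring the hunk above:

```python
def to_sequence(val):
    # As of 2.4.0, list inputs are converted, so the result is always a tuple.
    if isinstance(val, (tuple, list)):
        return tuple(val)
    return (val,)

print(to_sequence([1, 2]))  # (1, 2) -- was [1, 2] in 2.3.0
print(to_sequence(3))       # (3,)
```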
@@ -1891,7 +1900,7 @@ def flash_attention_score(query, key, value, head_num, real_shift=None, drop_mas
     H2 -- Hidden size of key and value, which equals to N2 * D.
 
     .. warning::
-        This is an experimental API that is subject to change or deletion. Only support on Atlas training series.
+        This is an experimental API that is subject to change or deletion. Only support on Atlas A2 training series.
 
     Args:
         query (Tensor[float16, bfloat16]): The query tensor. Input tensor of shape :math:`(B, S1, H1)`,
@@ -2014,3 +2023,249 @@ def flash_attention_score(query, key, value, head_num, real_shift=None, drop_mas
                                               inner_precise, input_layout, sparse_mode)
     return rank_op(query, key, value, real_shift, drop_mask, padding_mask, attn_mask, prefix, actual_seq_qlen,
                    actual_seq_kvlen)[3]
+
+
+class WhileLoop(Primitive):
+    """
+    Provide a useful op for reducing the compile time of a while loop.
+    The execution logic of the WhileLoop operator can be roughly represented by the following code:
+
+    .. code-block:: python
+
+        def WhileLoop(cond_func, loop_func, init_val):
+            while cond_func(init_val):
+                init_val = loop_func(init_val)
+            return init_val
+
+    The current WhileLoop operator has the following syntactic limitations:
+
+    - Using a side-effect function as `loop_func` is currently not supported,
+      such as operations that modify parameters, global variables, etc.
+    - The return value of `loop_func` having a different type or shape
+      from `init_val` is currently not supported.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Inputs:
+        - **cond_func** (Function) - The condition function.
+        - **loop_func** (Function) - The loop function, which takes one argument and
+          returns a value of the same type as its input argument.
+        - **init_val** (Union[Tensor, number, str, bool, list, tuple, dict]) - The initial value.
+
+    Outputs:
+        Union[Tensor, number, str, bool, list, tuple, dict], the final result of the while loop,
+        with the same type and shape as the input `init_val` .
+
+    Raises:
+        TypeError: If `cond_func` is not a function.
+        TypeError: If `loop_func` is not a function.
+        ValueError: If `loop_func` cannot take `init_val` as input or has a different
+            output type or shape from `init_val` .
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> from mindspore import ops
+        >>> def loop_while_fun(init_val):
+        ...     val = init_val
+        ...     val = val + 1
+        ...     return val
+        ...
+        >>> init_state = 10
+        >>> while_loop = ops.WhileLoop()
+        >>> result = while_loop(lambda x: x < 100, loop_while_fun, init_state)
+        >>> print(result)
+        100
+    """
+
+    @prim_attr_register
+    def __init__(self):
+        """Initialize WhileLoop."""
+
+    def __call__(self, cond_func, loop_func, init_val):
+        validator.check_value_type("cond_func", cond_func,
+                                   [types.FunctionType, types.MethodType], "WhileLoop")
+        validator.check_value_type("loop_func", loop_func,
+                                   [types.FunctionType, types.MethodType], "WhileLoop")
+        val = init_val
+        try:
+            while cond_func(val):
+                val = loop_func(val)
+        except Exception as e:
+            raise ValueError("Invalid loop_func, please check input arguments and "
+                             "return value, error info: {}".format(e))
+        return val
+
+
+class Scan(Primitive):
+    """
+    Scan a function over an array while the processing of the current element
+    depends on the execution result of the previous element.
+    The execution logic of the Scan operator can be roughly represented by the following code:
+
+    .. code-block:: python
+
+        def Scan(loop_func, init, xs, length=None):
+            if xs is None:
+                xs = [None] * length
+            carry = init
+            ys = []
+            for x in xs:
+                carry, y = loop_func(carry, x)
+                ys.append(y)
+            return carry, ys
+
+    The current Scan operator has the following syntactic limitations:
+
+    - Using a side-effect function as `loop_func` is currently not supported,
+      such as operations that modify parameters, global variables, etc.
+    - The first element of the return value of `loop_func` having a different
+      type or shape from `init` is currently not supported.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Inputs:
+        - **loop_func** (Function) - The loop function.
+        - **init** (Union[Tensor, number, str, bool, list, tuple, dict]) - An initial loop carry value.
+        - **xs** (Union[tuple, list, None]) - The value over which to scan.
+        - **length** (Union[int, None], optional) - The size of `xs`. Default: ``None`` .
+        - **unroll** (bool, optional) - Whether to perform loop unrolling at compile time.
+          Default: ``True`` .
+
+    Outputs:
+        Tuple(Union[Tensor, number, str, bool, list, tuple, dict], list). Output of the scan loop,
+        a tuple with two elements: the first element has the same type and shape as the `init` argument,
+        and the second is a list containing the result of each loop iteration.
+
+    Raises:
+        TypeError: If `loop_func` is not a function.
+        TypeError: If `xs` is not a tuple, a list or None.
+        TypeError: If `length` is not an int or None.
+        TypeError: If `unroll` is not a bool.
+        ValueError: If `loop_func` cannot take `init` and an element of `xs` as inputs.
+        ValueError: If the return value of `loop_func` is not a tuple with two elements,
+            or the first element of the tuple has a different type or shape from `init` .
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> from mindspore import ops
+        >>> def cumsum(res, el):
+        ...     res = res + el
+        ...     return res, res
+        ...
+        >>> a = [1, 2, 3, 4]
+        >>> result_init = 0
+        >>> scan_op = ops.Scan()
+        >>> result = scan_op(cumsum, result_init, a)
+        >>> print(result == (10, [1, 3, 6, 10]))
+        True
+    """
+
+    @prim_attr_register
+    def __init__(self):
+        """Initialize Scan."""
+
+    def __call__(self, loop_func, init, xs, length=None, unroll=True):
+        validator.check_value_type("loop_func", loop_func,
+                                   [types.FunctionType, types.MethodType], "Scan")
+        validator.check_value_type("xs", xs, [list, tuple, None], "Scan")
+        if xs is None:
+            validator.check_value_type("length", length, [int], "Scan")
+            xs = [None] * length
+        carry = init
+        length = len(xs)
+        if not length:
+            return init, []
+        try:
+            carry, y = loop_func(carry, xs[0])
+            ys = [y]
+            i = 1
+            while i < length:
+                carry, y = loop_func(carry, xs[i])
+                ys.append(y)
+                i = i + 1
+        except Exception as e:
+            raise ValueError("Invalid loop_func, please check input arguments and "
+                             "return value, error info: {}".format(e))
+        return carry, ys
+
+
+class ForiLoop(Primitive):
+    """
+    Provide a useful op for looping from lower to upper.
+    The execution logic of the ForiLoop operator can be roughly represented by the following code:
+
+    .. code-block:: python
+
+        def ForiLoop(lower, upper, loop_func, init_val):
+            for i in range(lower, upper):
+                init_val = loop_func(i, init_val)
+            return init_val
+
+    The current ForiLoop operator has the following syntactic limitations:
+
+    - Using a side-effect function as `loop_func` is currently not supported,
+      such as operations that modify parameters, global variables, etc.
+    - The return value of `loop_func` having a different type or shape
+      from `init_val` is currently not supported.
+    - Negative numbers or custom increments are currently not supported.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Inputs:
+        - **lower** (Union[int, Tensor]) - The start index of the loop.
+        - **upper** (Union[int, Tensor]) - The end index of the loop.
+        - **loop_func** (Function) - The loop function, which takes two arguments.
+        - **init_val** (Union[Tensor, number, str, bool, list, tuple, dict]) - The initial value.
+        - **unroll** (bool, optional) - Whether to unroll the loop at compile time,
+          only valid when the number of loop iterations is determined. Default: ``True`` .
+
+    Outputs:
+        Union[Tensor, number, str, bool, list, tuple, dict], the final result of the loop,
+        with the same type and shape as the input `init_val` .
+
+    Raises:
+        TypeError: If `lower` is not an int or a Tensor.
+        TypeError: If `upper` is not an int or a Tensor.
+        TypeError: If `loop_func` is not a function.
+        ValueError: If `loop_func` cannot take the index and `init_val` as arguments, or if the
+            output it produces has a different type or shape from `init_val` .
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> from mindspore import ops
+        >>> def cumsum(index, res):
+        ...     return index + res
+        ...
+        >>> result_init = 0
+        >>> fori_loop = ops.ForiLoop()
+        >>> result = fori_loop(0, 4, cumsum, result_init)
+        >>> print(result)
+        6
+    """
+
+    @prim_attr_register
+    def __init__(self):
+        """Initialize ForiLoop."""
+
+    def __call__(self, lower, upper, loop_func, init_val, unroll=True):
+        validator.check_value_type("lower", lower, [int, Tensor], "ForiLoop")
+        validator.check_value_type("upper", upper, [int, Tensor], "ForiLoop")
+        validator.check_value_type("loop_func", loop_func,
+                                   [types.FunctionType, types.MethodType], "ForiLoop")
+        val = init_val
+        try:
+            for i in range(lower, upper):
+                val = loop_func(i, val)
+        except Exception as e:
+            raise ValueError("Invalid loop_func, please check input arguments and "
+                             "return value, error info: {}".format(e))
+        return val
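
Beyond the docstring examples, Scan also accepts `xs=None` together with `length`, in which case the body sees `None` on each iteration (per the reference semantics in the docstring above). A hedged sketch of that path:

```python
from mindspore import ops

def step(carry, _):
    # The scanned element is None here; only the carry drives the loop.
    return carry + 1, carry

scan_op = ops.Scan()
final, history = scan_op(step, 0, None, 4)
print(final, history)  # 4 [0, 1, 2, 3]
```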
@@ -38,9 +38,9 @@ from ..auto_generate import (Add, Addcdiv, Addcmul, ReduceMean, ReduceSum, Reduc
                              Greater, GreaterEqual, Gcd, LogicalNot, LogicalAnd, LogicalOr,
                              LogicalXor, Cos, ACos, Sin, Asin, Abs, Round, Atan, Atanh, Atan2,
                              LinSpace, MatrixDeterminant, LogMatrixDeterminant, Erfinv, Conj,
-                             Real, Complex, Angle, MatrixExp, CholeskyInverse, Trace, Cholesky,
+                             Real, Complex, Angle, MatrixExp, CholeskyInverse, Trace, Cholesky, Cross,
                              FFTWithSize, NextAfter, NanToNum, Eig, Qr, Roll, Maximum, Div, DivMod, CumProd,
-                             CumSum, Less, LessEqual, AssignAdd, IsFinite, IsClose, TanhGrad)
+                             CumSum, Less, LessEqual, AssignAdd, IsFinite, IsClose, TanhGrad, Xlogy, Trunc, Sign)
 
 
 def _infer_shape_reduce(x, axis, keep_dims, prim_name):
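
The import change pairs with the class removals later in this diff: the handwritten Xlogy, Trunc, Sign and Cross primitives are deleted below, and the names are now re-exported from `..auto_generate`. Assuming the public `ops` namespace re-export is unchanged, user code is unaffected:

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

# ops.Sign now resolves to the auto-generated primitive rather than the
# handwritten class removed further down in this file.
x = Tensor(np.array([[2.0, 0.0, -1.0]]), mindspore.float32)
print(ops.Sign()(x))  # [[ 1.  0. -1.]]
```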
@@ -136,64 +136,6 @@ class _MathBinaryOp(_BinaryOp):
         real_shape = [dim if cmp_dim > 0 else cmp_dim for dim, cmp_dim in zip(shape_value, cmp_shape)]
         return tuple(real_shape)
 
-class SilentCheck(Primitive):
-    """
-    Implement SilentCheck on `pre_val`, `min_val`, `max_val`, `result` and
-    update them inplace with given parameters.
-
-    Args:
-        c_min_steps (int): an int determines...
-
-        c_thresh_l1 (float): a float determines...
-
-        c_coeff_l1 (float): a float determines...
-
-        c_thresh_l2 (float): a float determines...
-
-        c_coeff_l2 (float): a float determines...
-
-    Inputs:
-        - **val** (Tensor) - Tensor with dtype float32.
-        - **input_grad** (Parameter) - Tensor with dtype float32.
-        - **pre_val** (Parameter) - Input Parameter with dtype float32.
-        - **min_val** (Parameter) - Input Parameter with dtype float32.
-        - **max_val** (Parameter) - Input Parameter with dtype float32.
-        - **val_counter** (Parameter) - Input Parameter with dtype int32.
-
-    Outputs:
-        Tuple of 5 Tensors, the updated parameters.
-        - **input_grad** (Tensor) - Tensor with dtype float32.
-        - **pre_val** (Tensor) - Tensor with dtype float32.
-        - **min_val** (Tensor) - Tensor with dtype float32.
-        - **max_val** (Tensor) - Tensor with dtype float32.
-        - **result** (Tensor) - Tensor with dtype int32.
-
-    Raises:
-        TypeError: If `val` is not Tensor with dtype float32.
-        TypeError: If `result` is not Tensor with dtype int32.
-        TypeError: If `pre_val`, `min_val`, `max_val`, `input_grad` are not all Parameter type with dtype float32.
-        TypeError: If `c_thresh_l1` or `c_coeff_l1` is not a float number.
-        TypeError: If `c_min_steps` is not an int number.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> from mindspore.ops.operations.math_ops import SilentCheck
-        >>> silent_check = SilentCheck()
-        xxx
-    """
-
-    @prim_attr_register
-    def __init__(self, c_min_steps, c_thresh_l1, c_coeff_l1, c_thresh_l2, c_coeff_l2):
-        """Initialize SilentCheck."""
-        validator.check_value_type("c_min_steps", c_min_steps, [int], self.name)
-        validator.check_value_type("c_thresh_l1", c_thresh_l1, [float], self.name)
-        validator.check_value_type("c_coeff_l1", c_coeff_l1, [float], self.name)
-        validator.check_value_type("c_thresh_l2", c_thresh_l2, [float], self.name)
-        validator.check_value_type("c_coeff_l2", c_coeff_l2, [float], self.name)
-        self.add_prim_attr('side_effect_mem', True)
-
 
 class _BitwiseBinaryOp(_MathBinaryOp):
     """
@@ -1041,8 +983,8 @@ class Sub(_MathBinaryOp):
     Inputs:
         - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
           a bool or a tensor whose data type is
-          `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-          `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
+          `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+          `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         - **y** (Union[Tensor, number.Number, bool]) - The second input, when the first input is a Tensor,
           the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
 
@@ -1246,7 +1188,7 @@ class Histogram(Primitive):
         - **x** (Tensor) - the input tensor, type support list: [float16, float32, int32].
 
     Outputs:
-        Tensor, 1-D Tensor with type int32.
+        1-D Tensor. If the input is int32, the output returns int32, otherwise it returns float32.
 
     Raises:
         TypeError: If `x` is not a Tensor.
@@ -1264,7 +1206,7 @@ class Histogram(Primitive):
         >>> op = ops.Histogram(bins=4, min=0.0, max=3.0)
         >>> y = op(x)
         >>> print(y)
-        [0 2 1 0]
+        [0. 2. 1. 0.]
     """
 
     @prim_attr_register
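
The corrected example shows the float32 path; by the updated output rule, an int32 input should instead produce an int32 histogram. A sketch of that complementary case:

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

# int32 input -> int32 output per the revised docstring; float16/float32
# inputs produce float32.
x = Tensor(np.array([1, 2, 1]), mindspore.int32)
y = ops.Histogram(bins=4, min=0.0, max=3.0)(x)
print(y.dtype)  # Int32
```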
@@ -1440,8 +1382,8 @@ class DivNoNan(Primitive):
     Inputs:
         - **x1** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
           a bool or a tensor whose data type is
-          `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-          `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
+          `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
+          `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         - **x2** (Union[Tensor, number.Number, bool]) - The second input is a number.Number or
           a bool when the first input is a bool or a tensor whose data type is number or bool\_.
           When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
@@ -1803,48 +1745,6 @@ class Xdivy(Primitive):
         return None
 
 
-class Xlogy(Primitive):
-    r"""
-    Computes the first input tensor multiplied by the logarithm of second input tensor element-wise.
-    Returns zero when `x` is zero.
-
-    Refer to :func:`mindspore.ops.xlogy` for more details.
-
-    Inputs:
-        - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
-          a bool or a tensor whose data type is
-          `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
-          `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
-        - **y** (Union[Tensor, number.Number, bool]) - The second input is a number.Number or
-          a bool when the first input is a tensor or a tensor whose data type is number or bool\_.
-          When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
-
-    Outputs:
-        Tensor, the shape is the same as the one after broadcasting,
-        and the data type is the one with higher precision or higher digits among the two inputs.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([-5, 0, 4]), mindspore.float32)
-        >>> y = Tensor(np.array([2, 2, 2]), mindspore.float32)
-        >>> xlogy = ops.Xlogy()
-        >>> output = xlogy(x, y)
-        >>> print(output)
-        [-3.465736 0. 2.7725887]
-    """
-    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize Xlogy."""
-        self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
-
-
 class _LogicBinaryOp(_BinaryOp):
     """
     Define logic binary operators.
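
The removed example still applies through the functional API, which the deleted docstring referenced via :func:`mindspore.ops.xlogy`; only the handwritten primitive class is gone:

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

# Same values as the removed Xlogy example, via the functional form.
x = Tensor(np.array([-5, 0, 4]), mindspore.float32)
y = Tensor(np.array([2, 2, 2]), mindspore.float32)
print(ops.xlogy(x, y))  # [-3.465736  0.  2.7725887]
```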
@@ -2564,54 +2464,17 @@ class NMSWithMask(PrimitiveWithInfer):
         return bboxes_dtype, mstype.int32, mstype.bool_
 
 
-class Sign(Primitive):
-    r"""
-    Performs sign on the tensor element-wise.
-
-    .. math::
-        sign(x) = \begin{cases} -1, &if\ x < 0 \cr
-        0, &if\ x = 0 \cr
-        1, &if\ x > 0\end{cases}
-
-    Inputs:
-        - **x** (Tensor) - The input tensor of any dimension.
-
-    Outputs:
-        Tensor, has the same shape and dtype as the `x`.
-
-    Raises:
-        TypeError: If `x` is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([[2.0, 0.0, -1.0]]), mindspore.float32)
-        >>> sign = ops.Sign()
-        >>> output = sign(x)
-        >>> print(output)
-        [[ 1. 0. -1.]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        pass
-
-
 class Tan(Primitive):
     r"""
-    Computes tangent of `x` element-wise.
+    Computes tangent of `input` element-wise.
 
     Refer to :func:`mindspore.ops.tan` for more details.
 
     Inputs:
-        - **x** (Tensor) - Input tensor of any dimension.
+        - **input** (Tensor) - Input tensor of any dimension.
 
     Outputs:
-        Tensor, has the same shape as `x`.
+        Tensor, has the same shape as `input`.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -2630,7 +2493,7 @@ class Tan(Primitive):
     @prim_attr_register
     def __init__(self):
         """Initialize Tan"""
-        self.init_prim_io_names(inputs=['x'], outputs=['y'])
+        self.init_prim_io_names(inputs=['input'], outputs=['output'])
 
 
 class SquareSumAll(Primitive):
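
As with Cast above, the Tan rename (`x` → `input`, `y` → `output`) only changes the documented names; positional calls are untouched:

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
# The single tensor argument is positional, so the rename is transparent.
print(ops.Tan()(x))  # [-1.5574077  0.  1.5574077]
```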
@@ -3702,37 +3565,6 @@ class Imag(Primitive):
         self.init_prim_io_names(inputs=['input'], outputs=['output'])
 
 
-class Trunc(Primitive):
-    """
-    Returns a new tensor with the truncated integer values of the elements of input.
-
-    Refer to :func:`mindspore.ops.trunc` for more details.
-
-    Inputs:
-        - **input_x** (Tensor) - Input tensor of any dimension.
-
-    Outputs:
-        Tensor, the same shape and data type as `input_x`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([3.4742, 0.5466, -0.8008, -3.9079]), mindspore.float32)
-        >>> output = ops.Trunc()(x)
-        >>> print(output)
-        [ 3. 0. -0. -3.]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize Trunc"""
-        self.init_prim_io_names(inputs=['input'], outputs=['output'])
-
-
 class TridiagonalMatMul(Primitive):
     """
     Return the result of a multiplication of two matrices, where the left one is a Tridiagonal Matrix.
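
As with Xlogy, only the handwritten class is removed; `Trunc` is now imported from `..auto_generate` (see the import hunk above), and the functional form reproduces the deleted example:

```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([3.4742, 0.5466, -0.8008, -3.9079]), mindspore.float32)
print(ops.trunc(x))  # [ 3.  0. -0. -3.]
```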
@@ -4294,49 +4126,6 @@ class Polygamma(Primitive):
         self.init_prim_io_names(inputs=['a', 'x'], outputs=['y'])
 
 
-class Cross(Primitive):
-    """
-    Returns the cross product of vectors in dimension `dim` of x1 and x2.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Refer to :func:`mindspore.ops.cross` for more details.
-
-    Args:
-        dim (int): Spefcified dim along which to cumpute cross product with. Default: ``-65530`` .
-
-    Inputs:
-        - **x1** (Tensor) - Input Tensor.
-        - **x2** (Tensor) - Another input Tensor, must have the same shape and
-          the same type as `x1`, and the size of their `dim` dimension should be 3.
-
-    Outputs:
-        Tensor, has the same shape and type as inputs.
-
-    Supported Platforms:
-        ``Ascend`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor
-        >>> from mindspore import dtype as mstype
-        >>> from mindspore import ops
-        >>> cross = ops.Cross(dim = 0)
-        >>> x1 = Tensor([1, 2, 3], mstype.int8)
-        >>> x2 = Tensor([1, 2, 3], mstype.int8)
-        >>> output = cross(x1, x2)
-        >>> print(output)
-        [0 0 0]
-    """
-
-    @prim_attr_register
-    def __init__(self, dim=-65530):
-        validator.check_value_type('dim', dim, [int], self.name)
-        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
-
-
 class RaggedRange(Primitive):
     """
     Returns a `RaggedTensor` containing the specified sequences of numbers.
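
Cross follows the same pattern: the handwritten primitive is deleted in favor of the `..auto_generate` re-export, and :func:`mindspore.ops.cross` remains the user-facing entry point. A short sketch with illustrative values:

```python
import numpy as np
from mindspore import Tensor, ops
from mindspore import dtype as mstype

x1 = Tensor(np.array([1.0, 2.0, 3.0]), mstype.float32)
x2 = Tensor(np.array([4.0, 5.0, 6.0]), mstype.float32)
# (1, 2, 3) x (4, 5, 6) = (-3, 6, -3)
print(ops.cross(x1, x2, dim=0))  # [-3.  6. -3.]
```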