mindspore-2.4.0-cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (1406)
  1. mindspore/.commit_id +1 -0
  2. mindspore/ConcurrencyCheck.dll +0 -0
  3. mindspore/CppBuildInsights.dll +0 -0
  4. mindspore/CppCoreCheck.dll +0 -0
  5. mindspore/EnumIndex.dll +0 -0
  6. mindspore/EspXEngine.dll +0 -0
  7. mindspore/HResultCheck.dll +0 -0
  8. mindspore/KernelTraceControl.dll +0 -0
  9. mindspore/LocalESPC.dll +0 -0
  10. mindspore/Microsoft.Diagnostics.Tracing.EventSource.dll +0 -0
  11. mindspore/Microsoft.VisualStudio.RemoteControl.dll +0 -0
  12. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  13. mindspore/Microsoft.VisualStudio.Utilities.Internal.dll +0 -0
  14. mindspore/Newtonsoft.Json.dll +0 -0
  15. mindspore/System.Runtime.CompilerServices.Unsafe.dll +0 -0
  16. mindspore/VariantClear.dll +0 -0
  17. mindspore/__init__.py +53 -0
  18. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  19. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  20. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  21. mindspore/_check_jit_forbidden_api.py +106 -0
  22. mindspore/_checkparam.py +1419 -0
  23. mindspore/_extends/__init__.py +23 -0
  24. mindspore/_extends/builtin_operations.py +224 -0
  25. mindspore/_extends/graph_kernel/__init__.py +17 -0
  26. mindspore/_extends/graph_kernel/model/__init__.py +19 -0
  27. mindspore/_extends/graph_kernel/model/graph_parallel.py +311 -0
  28. mindspore/_extends/graph_kernel/model/graph_split.py +1348 -0
  29. mindspore/_extends/graph_kernel/model/model.py +553 -0
  30. mindspore/_extends/graph_kernel/model/model_builder.py +216 -0
  31. mindspore/_extends/graph_kernel/parallel_estimate.py +60 -0
  32. mindspore/_extends/graph_kernel/splitter.py +140 -0
  33. mindspore/_extends/graph_kernel/utils.py +28 -0
  34. mindspore/_extends/parallel_compile/__init__.py +19 -0
  35. mindspore/_extends/parallel_compile/akg_compiler/__init__.py +19 -0
  36. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +269 -0
  37. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +529 -0
  38. mindspore/_extends/parallel_compile/akg_compiler/compiler.py +56 -0
  39. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
  40. mindspore/_extends/parallel_compile/akg_compiler/get_file_path.py +36 -0
  41. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +556 -0
  42. mindspore/_extends/parallel_compile/akg_compiler/util.py +159 -0
  43. mindspore/_extends/parse/__init__.py +49 -0
  44. mindspore/_extends/parse/compile_config.py +299 -0
  45. mindspore/_extends/parse/namespace.py +136 -0
  46. mindspore/_extends/parse/parser.py +1448 -0
  47. mindspore/_extends/parse/resources.py +213 -0
  48. mindspore/_extends/parse/standard_method.py +4475 -0
  49. mindspore/_extends/parse/trope.py +97 -0
  50. mindspore/_extends/pijit/__init__.py +23 -0
  51. mindspore/_extends/pijit/pijit_func_white_list.py +669 -0
  52. mindspore/_extends/remote/__init__.py +19 -0
  53. mindspore/_extends/remote/kernel_build_server.py +199 -0
  54. mindspore/_extends/remote/kernel_build_server_akg.py +55 -0
  55. mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
  56. mindspore/_extends/remote/kernel_build_server_ascend.py +75 -0
  57. mindspore/_extends/utils.py +68 -0
  58. mindspore/_install_custom.py +43 -0
  59. mindspore/_profiler.py +30 -0
  60. mindspore/amp.py +433 -0
  61. mindspore/atlprov.dll +0 -0
  62. mindspore/avcodec-59.dll +0 -0
  63. mindspore/avdevice-59.dll +0 -0
  64. mindspore/avfilter-8.dll +0 -0
  65. mindspore/avformat-59.dll +0 -0
  66. mindspore/avutil-57.dll +0 -0
  67. mindspore/boost/__init__.py +42 -0
  68. mindspore/boost/adasum.py +319 -0
  69. mindspore/boost/base.py +535 -0
  70. mindspore/boost/boost.py +400 -0
  71. mindspore/boost/boost_cell_wrapper.py +790 -0
  72. mindspore/boost/dim_reduce.py +323 -0
  73. mindspore/boost/grad_accumulation.py +79 -0
  74. mindspore/boost/grad_freeze.py +382 -0
  75. mindspore/boost/group_loss_scale_manager.py +166 -0
  76. mindspore/boost/less_batch_normalization.py +174 -0
  77. mindspore/c1.dll +0 -0
  78. mindspore/c1xx.dll +0 -0
  79. mindspore/c2.dll +0 -0
  80. mindspore/cfgpersist.dll +0 -0
  81. mindspore/clang_rt.asan_dbg_dynamic-x86_64.dll +0 -0
  82. mindspore/clang_rt.asan_dynamic-x86_64.dll +0 -0
  83. mindspore/common/__init__.py +86 -0
  84. mindspore/common/_auto_dynamic.py +68 -0
  85. mindspore/common/_decorator.py +50 -0
  86. mindspore/common/_jit_fallback_utils.py +110 -0
  87. mindspore/common/_monad.py +25 -0
  88. mindspore/common/_pijit_context.py +190 -0
  89. mindspore/common/_register_for_adapter.py +74 -0
  90. mindspore/common/_register_for_recompute.py +48 -0
  91. mindspore/common/_register_for_tensor.py +46 -0
  92. mindspore/common/_stub_tensor.py +210 -0
  93. mindspore/common/_tensor_overload.py +139 -0
  94. mindspore/common/_utils.py +122 -0
  95. mindspore/common/api.py +2064 -0
  96. mindspore/common/auto_dynamic_shape.py +507 -0
  97. mindspore/common/dtype.py +422 -0
  98. mindspore/common/dump.py +130 -0
  99. mindspore/common/file_system.py +48 -0
  100. mindspore/common/generator.py +254 -0
  101. mindspore/common/hook_handle.py +143 -0
  102. mindspore/common/initializer.py +880 -0
  103. mindspore/common/jit_config.py +98 -0
  104. mindspore/common/lazy_inline.py +240 -0
  105. mindspore/common/mindir_util.py +111 -0
  106. mindspore/common/mutable.py +234 -0
  107. mindspore/common/no_inline.py +54 -0
  108. mindspore/common/np_dtype.py +25 -0
  109. mindspore/common/parameter.py +1081 -0
  110. mindspore/common/recompute.py +292 -0
  111. mindspore/common/seed.py +260 -0
  112. mindspore/common/sparse_tensor.py +1175 -0
  113. mindspore/common/symbol.py +122 -0
  114. mindspore/common/tensor.py +5039 -0
  115. mindspore/communication/__init__.py +37 -0
  116. mindspore/communication/_comm_helper.py +501 -0
  117. mindspore/communication/_hccl_management.py +297 -0
  118. mindspore/communication/comm_func.py +1395 -0
  119. mindspore/communication/management.py +673 -0
  120. mindspore/config/op_info.config +533 -0
  121. mindspore/context.py +2077 -0
  122. mindspore/d3dcompiler_47.dll +0 -0
  123. mindspore/dataset/__init__.py +90 -0
  124. mindspore/dataset/audio/__init__.py +61 -0
  125. mindspore/dataset/audio/transforms.py +3690 -0
  126. mindspore/dataset/audio/utils.py +386 -0
  127. mindspore/dataset/audio/validators.py +1172 -0
  128. mindspore/dataset/callback/__init__.py +20 -0
  129. mindspore/dataset/callback/ds_callback.py +368 -0
  130. mindspore/dataset/callback/validators.py +32 -0
  131. mindspore/dataset/core/__init__.py +13 -0
  132. mindspore/dataset/core/config.py +1095 -0
  133. mindspore/dataset/core/datatypes.py +101 -0
  134. mindspore/dataset/core/py_util_helpers.py +65 -0
  135. mindspore/dataset/core/validator_helpers.py +781 -0
  136. mindspore/dataset/debug/__init__.py +21 -0
  137. mindspore/dataset/debug/debug_hook.py +97 -0
  138. mindspore/dataset/debug/pre_defined_hook.py +67 -0
  139. mindspore/dataset/engine/__init__.py +124 -0
  140. mindspore/dataset/engine/cache_admin.py +47 -0
  141. mindspore/dataset/engine/cache_client.py +129 -0
  142. mindspore/dataset/engine/datasets.py +4582 -0
  143. mindspore/dataset/engine/datasets_audio.py +911 -0
  144. mindspore/dataset/engine/datasets_standard_format.py +543 -0
  145. mindspore/dataset/engine/datasets_text.py +2161 -0
  146. mindspore/dataset/engine/datasets_user_defined.py +1184 -0
  147. mindspore/dataset/engine/datasets_vision.py +4816 -0
  148. mindspore/dataset/engine/iterators.py +371 -0
  149. mindspore/dataset/engine/obs/__init__.py +23 -0
  150. mindspore/dataset/engine/obs/config_loader.py +68 -0
  151. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +508 -0
  152. mindspore/dataset/engine/obs/util.py +482 -0
  153. mindspore/dataset/engine/offload.py +596 -0
  154. mindspore/dataset/engine/queue.py +304 -0
  155. mindspore/dataset/engine/samplers.py +895 -0
  156. mindspore/dataset/engine/serializer_deserializer.py +159 -0
  157. mindspore/dataset/engine/validators.py +2895 -0
  158. mindspore/dataset/text/__init__.py +51 -0
  159. mindspore/dataset/text/transforms.py +1703 -0
  160. mindspore/dataset/text/utils.py +715 -0
  161. mindspore/dataset/text/validators.py +642 -0
  162. mindspore/dataset/transforms/__init__.py +45 -0
  163. mindspore/dataset/transforms/c_transforms.py +638 -0
  164. mindspore/dataset/transforms/py_transforms.py +393 -0
  165. mindspore/dataset/transforms/py_transforms_util.py +255 -0
  166. mindspore/dataset/transforms/transforms.py +1260 -0
  167. mindspore/dataset/transforms/validators.py +410 -0
  168. mindspore/dataset/utils/__init__.py +19 -0
  169. mindspore/dataset/utils/browse_dataset.py +190 -0
  170. mindspore/dataset/utils/line_reader.py +126 -0
  171. mindspore/dataset/vision/__init__.py +65 -0
  172. mindspore/dataset/vision/c_transforms.py +2641 -0
  173. mindspore/dataset/vision/py_transforms.py +2120 -0
  174. mindspore/dataset/vision/py_transforms_util.py +1660 -0
  175. mindspore/dataset/vision/transforms.py +7295 -0
  176. mindspore/dataset/vision/utils.py +863 -0
  177. mindspore/dataset/vision/validators.py +1483 -0
  178. mindspore/default_config.py +2 -0
  179. mindspore/dnnl.dll +0 -0
  180. mindspore/dpcmi.dll +0 -0
  181. mindspore/experimental/__init__.py +20 -0
  182. mindspore/experimental/es/__init__.py +22 -0
  183. mindspore/experimental/es/embedding_service.py +883 -0
  184. mindspore/experimental/es/embedding_service_layer.py +581 -0
  185. mindspore/experimental/llm_boost/__init__.py +21 -0
  186. mindspore/experimental/llm_boost/atb/__init__.py +23 -0
  187. mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
  188. mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
  189. mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
  190. mindspore/experimental/llm_boost/register.py +129 -0
  191. mindspore/experimental/llm_boost/utils.py +31 -0
  192. mindspore/experimental/map_parameter.py +309 -0
  193. mindspore/experimental/optim/__init__.py +40 -0
  194. mindspore/experimental/optim/adadelta.py +161 -0
  195. mindspore/experimental/optim/adagrad.py +168 -0
  196. mindspore/experimental/optim/adam.py +193 -0
  197. mindspore/experimental/optim/adamax.py +170 -0
  198. mindspore/experimental/optim/adamw.py +290 -0
  199. mindspore/experimental/optim/asgd.py +153 -0
  200. mindspore/experimental/optim/lr_scheduler.py +1371 -0
  201. mindspore/experimental/optim/nadam.py +157 -0
  202. mindspore/experimental/optim/optimizer.py +262 -0
  203. mindspore/experimental/optim/radam.py +194 -0
  204. mindspore/experimental/optim/rmsprop.py +154 -0
  205. mindspore/experimental/optim/rprop.py +164 -0
  206. mindspore/experimental/optim/sgd.py +156 -0
  207. mindspore/hal/__init__.py +40 -0
  208. mindspore/hal/_ascend.py +57 -0
  209. mindspore/hal/_base.py +57 -0
  210. mindspore/hal/_cpu.py +56 -0
  211. mindspore/hal/_gpu.py +57 -0
  212. mindspore/hal/contiguous_tensors_handle.py +175 -0
  213. mindspore/hal/device.py +356 -0
  214. mindspore/hal/event.py +179 -0
  215. mindspore/hal/memory.py +326 -0
  216. mindspore/hal/stream.py +357 -0
  217. mindspore/include/OWNERS +7 -0
  218. mindspore/include/api/allocator.h +97 -0
  219. mindspore/include/api/callback/callback.h +93 -0
  220. mindspore/include/api/callback/ckpt_saver.h +41 -0
  221. mindspore/include/api/callback/loss_monitor.h +33 -0
  222. mindspore/include/api/callback/lr_scheduler.h +51 -0
  223. mindspore/include/api/callback/time_monitor.h +34 -0
  224. mindspore/include/api/callback/train_accuracy.h +37 -0
  225. mindspore/include/api/cell.h +90 -0
  226. mindspore/include/api/cfg.h +82 -0
  227. mindspore/include/api/context.h +602 -0
  228. mindspore/include/api/data_type.h +47 -0
  229. mindspore/include/api/delegate.h +178 -0
  230. mindspore/include/api/delegate_api.h +75 -0
  231. mindspore/include/api/dual_abi_helper.h +208 -0
  232. mindspore/include/api/format.h +28 -0
  233. mindspore/include/api/graph.h +46 -0
  234. mindspore/include/api/kernel.h +58 -0
  235. mindspore/include/api/kernel_api.h +168 -0
  236. mindspore/include/api/metrics/accuracy.h +36 -0
  237. mindspore/include/api/metrics/metrics.h +41 -0
  238. mindspore/include/api/model.h +438 -0
  239. mindspore/include/api/model_group.h +91 -0
  240. mindspore/include/api/model_parallel_runner.h +168 -0
  241. mindspore/include/api/serialization.h +185 -0
  242. mindspore/include/api/status.h +192 -0
  243. mindspore/include/api/types.h +431 -0
  244. mindspore/include/api/visible.h +41 -0
  245. mindspore/include/c_api/context_c.h +179 -0
  246. mindspore/include/c_api/data_type_c.h +52 -0
  247. mindspore/include/c_api/format_c.h +46 -0
  248. mindspore/include/c_api/model_c.h +347 -0
  249. mindspore/include/c_api/status_c.h +79 -0
  250. mindspore/include/c_api/tensor_c.h +146 -0
  251. mindspore/include/c_api/types_c.h +67 -0
  252. mindspore/include/dataset/config.h +163 -0
  253. mindspore/include/dataset/constants.h +363 -0
  254. mindspore/include/dataset/execute.h +196 -0
  255. mindspore/include/dataset/text.h +1092 -0
  256. mindspore/include/dataset/transforms.h +638 -0
  257. mindspore/include/dataset/vision.h +2129 -0
  258. mindspore/include/dataset/vision_ascend.h +206 -0
  259. mindspore/include/dataset/vision_lite.h +625 -0
  260. mindspore/jpeg62.dll +0 -0
  261. mindspore/log.py +633 -0
  262. mindspore/mindrecord/__init__.py +43 -0
  263. mindspore/mindrecord/common/__init__.py +17 -0
  264. mindspore/mindrecord/common/constant.py +20 -0
  265. mindspore/mindrecord/common/enums.py +44 -0
  266. mindspore/mindrecord/common/exceptions.py +311 -0
  267. mindspore/mindrecord/config.py +809 -0
  268. mindspore/mindrecord/filereader.py +174 -0
  269. mindspore/mindrecord/filewriter.py +722 -0
  270. mindspore/mindrecord/mindpage.py +210 -0
  271. mindspore/mindrecord/shardheader.py +141 -0
  272. mindspore/mindrecord/shardindexgenerator.py +74 -0
  273. mindspore/mindrecord/shardreader.py +117 -0
  274. mindspore/mindrecord/shardsegment.py +128 -0
  275. mindspore/mindrecord/shardutils.py +185 -0
  276. mindspore/mindrecord/shardwriter.py +237 -0
  277. mindspore/mindrecord/tools/__init__.py +17 -0
  278. mindspore/mindrecord/tools/cifar10.py +140 -0
  279. mindspore/mindrecord/tools/cifar100.py +153 -0
  280. mindspore/mindrecord/tools/cifar100_to_mr.py +185 -0
  281. mindspore/mindrecord/tools/cifar10_to_mr.py +177 -0
  282. mindspore/mindrecord/tools/csv_to_mr.py +200 -0
  283. mindspore/mindrecord/tools/imagenet_to_mr.py +206 -0
  284. mindspore/mindrecord/tools/mnist_to_mr.py +259 -0
  285. mindspore/mindrecord/tools/tfrecord_to_mr.py +360 -0
  286. mindspore/mindspore_backend.dll +0 -0
  287. mindspore/mindspore_common.dll +0 -0
  288. mindspore/mindspore_core.dll +0 -0
  289. mindspore/mindspore_glog.dll +0 -0
  290. mindspore/mindspore_np_dtype.dll +0 -0
  291. mindspore/mindspore_ops.dll +0 -0
  292. mindspore/mint/__init__.py +1586 -0
  293. mindspore/mint/distributed/__init__.py +31 -0
  294. mindspore/mint/distributed/distributed.py +254 -0
  295. mindspore/mint/linalg/__init__.py +22 -0
  296. mindspore/mint/nn/__init__.py +757 -0
  297. mindspore/mint/nn/functional.py +679 -0
  298. mindspore/mint/nn/layer/__init__.py +39 -0
  299. mindspore/mint/nn/layer/activation.py +133 -0
  300. mindspore/mint/nn/layer/normalization.py +477 -0
  301. mindspore/mint/nn/layer/pooling.py +110 -0
  302. mindspore/mint/optim/__init__.py +24 -0
  303. mindspore/mint/optim/adamw.py +206 -0
  304. mindspore/mint/special/__init__.py +63 -0
  305. mindspore/msobj140.dll +0 -0
  306. mindspore/mspdb140.dll +0 -0
  307. mindspore/mspdbcore.dll +0 -0
  308. mindspore/mspdbst.dll +0 -0
  309. mindspore/mspft140.dll +0 -0
  310. mindspore/msvcdis140.dll +0 -0
  311. mindspore/msvcp140.dll +0 -0
  312. mindspore/msvcp140_1.dll +0 -0
  313. mindspore/msvcp140_2.dll +0 -0
  314. mindspore/msvcp140_atomic_wait.dll +0 -0
  315. mindspore/msvcp140_codecvt_ids.dll +0 -0
  316. mindspore/multiprocessing/__init__.py +73 -0
  317. mindspore/nn/__init__.py +47 -0
  318. mindspore/nn/cell.py +2787 -0
  319. mindspore/nn/dynamic_lr.py +482 -0
  320. mindspore/nn/grad/__init__.py +21 -0
  321. mindspore/nn/grad/cell_grad.py +196 -0
  322. mindspore/nn/layer/__init__.py +63 -0
  323. mindspore/nn/layer/activation.py +1822 -0
  324. mindspore/nn/layer/basic.py +1629 -0
  325. mindspore/nn/layer/channel_shuffle.py +90 -0
  326. mindspore/nn/layer/combined.py +248 -0
  327. mindspore/nn/layer/container.py +734 -0
  328. mindspore/nn/layer/conv.py +1505 -0
  329. mindspore/nn/layer/dense.py +204 -0
  330. mindspore/nn/layer/embedding.py +869 -0
  331. mindspore/nn/layer/image.py +661 -0
  332. mindspore/nn/layer/math.py +1069 -0
  333. mindspore/nn/layer/normalization.py +1273 -0
  334. mindspore/nn/layer/padding.py +880 -0
  335. mindspore/nn/layer/pooling.py +2302 -0
  336. mindspore/nn/layer/rnn_cells.py +388 -0
  337. mindspore/nn/layer/rnns.py +849 -0
  338. mindspore/nn/layer/thor_layer.py +963 -0
  339. mindspore/nn/layer/timedistributed.py +155 -0
  340. mindspore/nn/layer/transformer.py +823 -0
  341. mindspore/nn/learning_rate_schedule.py +512 -0
  342. mindspore/nn/loss/__init__.py +36 -0
  343. mindspore/nn/loss/loss.py +2924 -0
  344. mindspore/nn/metrics.py +53 -0
  345. mindspore/nn/optim/__init__.py +45 -0
  346. mindspore/nn/optim/_dist_optimizer_registry.py +111 -0
  347. mindspore/nn/optim/ada_grad.py +217 -0
  348. mindspore/nn/optim/adadelta.py +206 -0
  349. mindspore/nn/optim/adafactor.py +448 -0
  350. mindspore/nn/optim/adam.py +1297 -0
  351. mindspore/nn/optim/adamax.py +220 -0
  352. mindspore/nn/optim/adasum.py +548 -0
  353. mindspore/nn/optim/asgd.py +216 -0
  354. mindspore/nn/optim/ftrl.py +401 -0
  355. mindspore/nn/optim/lamb.py +296 -0
  356. mindspore/nn/optim/lars.py +202 -0
  357. mindspore/nn/optim/lazyadam.py +533 -0
  358. mindspore/nn/optim/momentum.py +239 -0
  359. mindspore/nn/optim/optimizer.py +1034 -0
  360. mindspore/nn/optim/proximal_ada_grad.py +242 -0
  361. mindspore/nn/optim/rmsprop.py +264 -0
  362. mindspore/nn/optim/rprop.py +251 -0
  363. mindspore/nn/optim/sgd.py +237 -0
  364. mindspore/nn/optim/tft_wrapper.py +127 -0
  365. mindspore/nn/optim/thor.py +1310 -0
  366. mindspore/nn/probability/__init__.py +22 -0
  367. mindspore/nn/probability/bijector/__init__.py +35 -0
  368. mindspore/nn/probability/bijector/bijector.py +337 -0
  369. mindspore/nn/probability/bijector/exp.py +65 -0
  370. mindspore/nn/probability/bijector/gumbel_cdf.py +144 -0
  371. mindspore/nn/probability/bijector/invert.py +126 -0
  372. mindspore/nn/probability/bijector/power_transform.py +196 -0
  373. mindspore/nn/probability/bijector/scalar_affine.py +167 -0
  374. mindspore/nn/probability/bijector/softplus.py +189 -0
  375. mindspore/nn/probability/bnn_layers/__init__.py +29 -0
  376. mindspore/nn/probability/bnn_layers/_util.py +46 -0
  377. mindspore/nn/probability/bnn_layers/bnn_cell_wrapper.py +112 -0
  378. mindspore/nn/probability/bnn_layers/conv_variational.py +267 -0
  379. mindspore/nn/probability/bnn_layers/dense_variational.py +302 -0
  380. mindspore/nn/probability/bnn_layers/layer_distribution.py +123 -0
  381. mindspore/nn/probability/distribution/__init__.py +56 -0
  382. mindspore/nn/probability/distribution/_utils/__init__.py +34 -0
  383. mindspore/nn/probability/distribution/_utils/custom_ops.py +96 -0
  384. mindspore/nn/probability/distribution/_utils/utils.py +362 -0
  385. mindspore/nn/probability/distribution/bernoulli.py +334 -0
  386. mindspore/nn/probability/distribution/beta.py +391 -0
  387. mindspore/nn/probability/distribution/categorical.py +435 -0
  388. mindspore/nn/probability/distribution/cauchy.py +383 -0
  389. mindspore/nn/probability/distribution/distribution.py +827 -0
  390. mindspore/nn/probability/distribution/exponential.py +350 -0
  391. mindspore/nn/probability/distribution/gamma.py +391 -0
  392. mindspore/nn/probability/distribution/geometric.py +335 -0
  393. mindspore/nn/probability/distribution/gumbel.py +257 -0
  394. mindspore/nn/probability/distribution/half_normal.py +133 -0
  395. mindspore/nn/probability/distribution/laplace.py +128 -0
  396. mindspore/nn/probability/distribution/log_normal.py +272 -0
  397. mindspore/nn/probability/distribution/logistic.py +379 -0
  398. mindspore/nn/probability/distribution/normal.py +336 -0
  399. mindspore/nn/probability/distribution/poisson.py +288 -0
  400. mindspore/nn/probability/distribution/student_t.py +149 -0
  401. mindspore/nn/probability/distribution/transformed_distribution.py +235 -0
  402. mindspore/nn/probability/distribution/uniform.py +375 -0
  403. mindspore/nn/reinforcement/__init__.py +24 -0
  404. mindspore/nn/reinforcement/_batch_read_write.py +142 -0
  405. mindspore/nn/reinforcement/_tensors_queue.py +152 -0
  406. mindspore/nn/reinforcement/tensor_array.py +145 -0
  407. mindspore/nn/sparse/__init__.py +23 -0
  408. mindspore/nn/sparse/sparse.py +147 -0
  409. mindspore/nn/wrap/__init__.py +49 -0
  410. mindspore/nn/wrap/cell_wrapper.py +968 -0
  411. mindspore/nn/wrap/grad_reducer.py +608 -0
  412. mindspore/nn/wrap/loss_scale.py +694 -0
  413. mindspore/numpy/__init__.py +121 -0
  414. mindspore/numpy/array_creations.py +2731 -0
  415. mindspore/numpy/array_ops.py +2629 -0
  416. mindspore/numpy/dtypes.py +185 -0
  417. mindspore/numpy/fft.py +966 -0
  418. mindspore/numpy/logic_ops.py +936 -0
  419. mindspore/numpy/math_ops.py +5911 -0
  420. mindspore/numpy/utils.py +214 -0
  421. mindspore/numpy/utils_const.py +565 -0
  422. mindspore/opencv_core452.dll +0 -0
  423. mindspore/opencv_imgcodecs452.dll +0 -0
  424. mindspore/opencv_imgproc452.dll +0 -0
  425. mindspore/ops/__init__.py +56 -0
  426. mindspore/ops/_constants.py +30 -0
  427. mindspore/ops/_grad_experimental/__init__.py +31 -0
  428. mindspore/ops/_grad_experimental/grad_array_ops.py +830 -0
  429. mindspore/ops/_grad_experimental/grad_base.py +143 -0
  430. mindspore/ops/_grad_experimental/grad_comm_ops.py +714 -0
  431. mindspore/ops/_grad_experimental/grad_debug_ops.py +31 -0
  432. mindspore/ops/_grad_experimental/grad_implementations.py +203 -0
  433. mindspore/ops/_grad_experimental/grad_inner_ops.py +79 -0
  434. mindspore/ops/_grad_experimental/grad_math_ops.py +802 -0
  435. mindspore/ops/_grad_experimental/grad_nn_ops.py +231 -0
  436. mindspore/ops/_grad_experimental/grad_quant_ops.py +238 -0
  437. mindspore/ops/_grad_experimental/grad_sparse.py +342 -0
  438. mindspore/ops/_grad_experimental/grad_sparse_ops.py +399 -0
  439. mindspore/ops/_grad_experimental/taylor_rule.py +220 -0
  440. mindspore/ops/_op_impl/__init__.py +23 -0
  441. mindspore/ops/_op_impl/_custom_op/__init__.py +39 -0
  442. mindspore/ops/_op_impl/_custom_op/_basic.py +158 -0
  443. mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +279 -0
  444. mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +156 -0
  445. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +109 -0
  446. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +125 -0
  447. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +105 -0
  448. mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +124 -0
  449. mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +116 -0
  450. mindspore/ops/_op_impl/_custom_op/correction_mul.py +89 -0
  451. mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +196 -0
  452. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +366 -0
  453. mindspore/ops/_op_impl/_custom_op/dsd_impl.py +162 -0
  454. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +136 -0
  455. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +206 -0
  456. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +88 -0
  457. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +128 -0
  458. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +199 -0
  459. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +88 -0
  460. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +156 -0
  461. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +184 -0
  462. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +143 -0
  463. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +169 -0
  464. mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +548 -0
  465. mindspore/ops/_op_impl/_custom_op/img2col_impl.py +881 -0
  466. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +278 -0
  467. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +200 -0
  468. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +334 -0
  469. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +255 -0
  470. mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +222 -0
  471. mindspore/ops/_op_impl/_custom_op/matmul_dds_grad_impl.py +644 -0
  472. mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +488 -0
  473. mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +87 -0
  474. mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +129 -0
  475. mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +121 -0
  476. mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +352 -0
  477. mindspore/ops/_op_impl/aicpu/__init__.py +441 -0
  478. mindspore/ops/_op_impl/aicpu/abs.py +36 -0
  479. mindspore/ops/_op_impl/aicpu/acos.py +32 -0
  480. mindspore/ops/_op_impl/aicpu/acos_grad.py +33 -0
  481. mindspore/ops/_op_impl/aicpu/acosh.py +34 -0
  482. mindspore/ops/_op_impl/aicpu/acosh_grad.py +35 -0
  483. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d.py +34 -0
  484. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
  485. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d.py +39 -0
  486. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d_grad.py +39 -0
  487. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d.py +37 -0
  488. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d_grad.py +37 -0
  489. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d.py +42 -0
  490. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d_grad.py +152 -0
  491. mindspore/ops/_op_impl/aicpu/add.py +43 -0
  492. mindspore/ops/_op_impl/aicpu/add_n.py +41 -0
  493. mindspore/ops/_op_impl/aicpu/add_v2.py +40 -0
  494. mindspore/ops/_op_impl/aicpu/addcdiv.py +41 -0
  495. mindspore/ops/_op_impl/aicpu/addcmul.py +47 -0
  496. mindspore/ops/_op_impl/aicpu/adjust_contrastv2.py +32 -0
  497. mindspore/ops/_op_impl/aicpu/adjust_hue.py +31 -0
  498. mindspore/ops/_op_impl/aicpu/adjust_saturation.py +32 -0
  499. mindspore/ops/_op_impl/aicpu/affine_grid.py +33 -0
  500. mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
  501. mindspore/ops/_op_impl/aicpu/angle.py +31 -0
  502. mindspore/ops/_op_impl/aicpu/arg_max.py +75 -0
  503. mindspore/ops/_op_impl/aicpu/arg_min.py +75 -0
  504. mindspore/ops/_op_impl/aicpu/argmax_with_value.py +43 -0
  505. mindspore/ops/_op_impl/aicpu/argmin_with_value.py +43 -0
  506. mindspore/ops/_op_impl/aicpu/asin.py +32 -0
  507. mindspore/ops/_op_impl/aicpu/asin_grad.py +33 -0
  508. mindspore/ops/_op_impl/aicpu/asinh.py +34 -0
  509. mindspore/ops/_op_impl/aicpu/asinh_grad.py +35 -0
  510. mindspore/ops/_op_impl/aicpu/atanh.py +34 -0
  511. mindspore/ops/_op_impl/aicpu/avgpool_grad_v1.py +37 -0
  512. mindspore/ops/_op_impl/aicpu/avgpool_v1.py +36 -0
  513. mindspore/ops/_op_impl/aicpu/bartlett_window.py +36 -0
  514. mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -0
  515. mindspore/ops/_op_impl/aicpu/batch_norm_grad_grad.py +49 -0
  516. mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
  517. mindspore/ops/_op_impl/aicpu/bessel_i0.py +31 -0
  518. mindspore/ops/_op_impl/aicpu/betainc.py +31 -0
  519. mindspore/ops/_op_impl/aicpu/bias_add.py +44 -0
  520. mindspore/ops/_op_impl/aicpu/bias_add_grad.py +42 -0
  521. mindspore/ops/_op_impl/aicpu/bincount.py +33 -0
  522. mindspore/ops/_op_impl/aicpu/blackman_window.py +36 -0
  523. mindspore/ops/_op_impl/aicpu/broadcast_to.py +58 -0
  524. mindspore/ops/_op_impl/aicpu/bucketize.py +34 -0
  525. mindspore/ops/_op_impl/aicpu/cache_swap_table.py +102 -0
  526. mindspore/ops/_op_impl/aicpu/cast.py +225 -0
  527. mindspore/ops/_op_impl/aicpu/cauchy.py +33 -0
  528. mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
  529. mindspore/ops/_op_impl/aicpu/check_numerics.py +33 -0
  530. mindspore/ops/_op_impl/aicpu/cholesky.py +32 -0
  531. mindspore/ops/_op_impl/aicpu/cholesky_inverse.py +31 -0
  532. mindspore/ops/_op_impl/aicpu/cholesky_solve.py +33 -0
  533. mindspore/ops/_op_impl/aicpu/choleskygrad.py +32 -0
  534. mindspore/ops/_op_impl/aicpu/coalesce.py +37 -0
  535. mindspore/ops/_op_impl/aicpu/col2im.py +38 -0
  536. mindspore/ops/_op_impl/aicpu/combined_non_max_suppression.py +42 -0
  537. mindspore/ops/_op_impl/aicpu/compare_and_bitpack.py +37 -0
  538. mindspore/ops/_op_impl/aicpu/complex.py +32 -0
  539. mindspore/ops/_op_impl/aicpu/complex_abs.py +31 -0
  540. mindspore/ops/_op_impl/aicpu/compute_accidental_hits.py +44 -0
  541. mindspore/ops/_op_impl/aicpu/concat.py +57 -0
  542. mindspore/ops/_op_impl/aicpu/concat_offset.py +42 -0
  543. mindspore/ops/_op_impl/aicpu/concat_offset_v1.py +31 -0
  544. mindspore/ops/_op_impl/aicpu/conj.py +42 -0
  545. mindspore/ops/_op_impl/aicpu/conjugate_transpose.py +58 -0
  546. mindspore/ops/_op_impl/aicpu/cos.py +34 -0
  547. mindspore/ops/_op_impl/aicpu/cosh.py +34 -0
  548. mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
  549. mindspore/ops/_op_impl/aicpu/crop_and_resize.py +69 -0
  550. mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_boxes.py +68 -0
  551. mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_image.py +38 -0
  552. mindspore/ops/_op_impl/aicpu/cross.py +42 -0
  553. mindspore/ops/_op_impl/aicpu/csr_sparse_matrix_to_dense.py +48 -0
  554. mindspore/ops/_op_impl/aicpu/csr_sparse_matrix_to_sparse_tensor.py +51 -0
  555. mindspore/ops/_op_impl/aicpu/ctc_greedy_decoder.py +35 -0
  556. mindspore/ops/_op_impl/aicpu/ctc_loss_v2.py +43 -0
  557. mindspore/ops/_op_impl/aicpu/ctc_loss_v2_grad.py +45 -0
  558. mindspore/ops/_op_impl/aicpu/ctcloss.py +38 -0
  559. mindspore/ops/_op_impl/aicpu/cummax.py +41 -0
  560. mindspore/ops/_op_impl/aicpu/cumprod.py +58 -0
  561. mindspore/ops/_op_impl/aicpu/cumsum.py +58 -0
  562. mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +36 -0
  563. mindspore/ops/_op_impl/aicpu/data_format_vec_permute.py +32 -0
  564. mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
  565. mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
  566. mindspore/ops/_op_impl/aicpu/dense_to_csr_sparse_matrix.py +49 -0
  567. mindspore/ops/_op_impl/aicpu/dense_to_dense_set_operation.py +45 -0
  568. mindspore/ops/_op_impl/aicpu/dense_to_sparse_set_operation.py +48 -0
  569. mindspore/ops/_op_impl/aicpu/depth_to_space.py +44 -0
  570. mindspore/ops/_op_impl/aicpu/diag.py +36 -0
  571. mindspore/ops/_op_impl/aicpu/diag_part.py +36 -0
  572. mindspore/ops/_op_impl/aicpu/diagonal.py +35 -0
  573. mindspore/ops/_op_impl/aicpu/digamma.py +31 -0
  574. mindspore/ops/_op_impl/aicpu/div.py +41 -0
  575. mindspore/ops/_op_impl/aicpu/div_no_nan.py +35 -0
  576. mindspore/ops/_op_impl/aicpu/dropout2d.py +42 -0
  577. mindspore/ops/_op_impl/aicpu/dropout3d.py +42 -0
  578. mindspore/ops/_op_impl/aicpu/dropout_genmask.py +41 -0
  579. mindspore/ops/_op_impl/aicpu/dropout_genmask_v3.py +32 -0
  580. mindspore/ops/_op_impl/aicpu/dynamic_stitch.py +42 -0
  581. mindspore/ops/_op_impl/aicpu/edit_distance.py +56 -0
  582. mindspore/ops/_op_impl/aicpu/eig.py +35 -0
  583. mindspore/ops/_op_impl/aicpu/embedding_lookup.py +102 -0
  584. mindspore/ops/_op_impl/aicpu/end_of_sequence.py +30 -0
  585. mindspore/ops/_op_impl/aicpu/environ_create.py +28 -0
  586. mindspore/ops/_op_impl/aicpu/environ_destroy_all.py +28 -0
  587. mindspore/ops/_op_impl/aicpu/environ_get.py +41 -0
  588. mindspore/ops/_op_impl/aicpu/environ_set.py +40 -0
  589. mindspore/ops/_op_impl/aicpu/eps.py +32 -0
  590. mindspore/ops/_op_impl/aicpu/equal.py +41 -0
  591. mindspore/ops/_op_impl/aicpu/exp.py +37 -0
  592. mindspore/ops/_op_impl/aicpu/expand.py +45 -0
  593. mindspore/ops/_op_impl/aicpu/expand_dims.py +42 -0
  594. mindspore/ops/_op_impl/aicpu/expm1.py +34 -0
  595. mindspore/ops/_op_impl/aicpu/extract_glimpse.py +35 -0
  596. mindspore/ops/_op_impl/aicpu/eye.py +44 -0
  597. mindspore/ops/_op_impl/aicpu/fft_with_size.py +47 -0
  598. mindspore/ops/_op_impl/aicpu/fill_diagonal.py +39 -0
  599. mindspore/ops/_op_impl/aicpu/fill_v2.py +58 -0
  600. mindspore/ops/_op_impl/aicpu/flatten.py +43 -0
  601. mindspore/ops/_op_impl/aicpu/floor_div.py +38 -0
  602. mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
  603. mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
  604. mindspore/ops/_op_impl/aicpu/fractional_avg_pool.py +41 -0
  605. mindspore/ops/_op_impl/aicpu/fractional_avg_pool_grad.py +41 -0
  606. mindspore/ops/_op_impl/aicpu/fractional_max_pool.py +41 -0
  607. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_grad_with_fixed_ksize.py +43 -0
  608. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +65 -0
  609. mindspore/ops/_op_impl/aicpu/fractional_max_pool_grad.py +42 -0
  610. mindspore/ops/_op_impl/aicpu/fractional_max_pool_grad_with_fixed_ksize.py +42 -0
  611. mindspore/ops/_op_impl/aicpu/fractional_max_pool_with_fixed_ksize.py +49 -0
  612. mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
  613. mindspore/ops/_op_impl/aicpu/fused_sparse_adam.py +46 -0
  614. mindspore/ops/_op_impl/aicpu/fused_sparse_ftrl.py +41 -0
  615. mindspore/ops/_op_impl/aicpu/fused_sparse_lazy_adam.py +46 -0
  616. mindspore/ops/_op_impl/aicpu/fused_sparse_proximal_adagrad.py +39 -0
  617. mindspore/ops/_op_impl/aicpu/gamma.py +38 -0
  618. mindspore/ops/_op_impl/aicpu/gather.py +46 -0
  619. mindspore/ops/_op_impl/aicpu/gather_d.py +79 -0
  620. mindspore/ops/_op_impl/aicpu/gather_d_grad_v2.py +79 -0
  621. mindspore/ops/_op_impl/aicpu/gather_grad.py +54 -0
  622. mindspore/ops/_op_impl/aicpu/gather_nd.py +56 -0
  623. mindspore/ops/_op_impl/aicpu/gcd.py +32 -0
  624. mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +38 -0
  625. mindspore/ops/_op_impl/aicpu/geqrf.py +32 -0
  626. mindspore/ops/_op_impl/aicpu/get_next.py +39 -0
  627. mindspore/ops/_op_impl/aicpu/glu.py +33 -0
  628. mindspore/ops/_op_impl/aicpu/glu_grad.py +34 -0
  629. mindspore/ops/_op_impl/aicpu/greater.py +41 -0
  630. mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
  631. mindspore/ops/_op_impl/aicpu/grid_sampler_2d.py +35 -0
  632. mindspore/ops/_op_impl/aicpu/grid_sampler_2d_grad.py +38 -0
  633. mindspore/ops/_op_impl/aicpu/grid_sampler_3d.py +34 -0
  634. mindspore/ops/_op_impl/aicpu/grid_sampler_3d_grad.py +38 -0
  635. mindspore/ops/_op_impl/aicpu/hamming_window.py +57 -0
  636. mindspore/ops/_op_impl/aicpu/hard_sigmoid.py +32 -0
  637. mindspore/ops/_op_impl/aicpu/hard_sigmoid_grad.py +33 -0
  638. mindspore/ops/_op_impl/aicpu/heaviside.py +40 -0
  639. mindspore/ops/_op_impl/aicpu/histogram.py +35 -0
  640. mindspore/ops/_op_impl/aicpu/hsv_to_rgb.py +32 -0
  641. mindspore/ops/_op_impl/aicpu/hypot.py +32 -0
  642. mindspore/ops/_op_impl/aicpu/identity.py +42 -0
  643. mindspore/ops/_op_impl/aicpu/identity_n.py +41 -0
  644. mindspore/ops/_op_impl/aicpu/igamma.py +30 -0
  645. mindspore/ops/_op_impl/aicpu/igammac.py +30 -0
  646. mindspore/ops/_op_impl/aicpu/igammagrada.py +30 -0
  647. mindspore/ops/_op_impl/aicpu/im2col.py +43 -0
  648. mindspore/ops/_op_impl/aicpu/imag.py +31 -0
  649. mindspore/ops/_op_impl/aicpu/index_fill.py +54 -0
  650. mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
  651. mindspore/ops/_op_impl/aicpu/init_data_set_queue.py +27 -0
  652. mindspore/ops/_op_impl/aicpu/inplace_index_add.py +39 -0
  653. mindspore/ops/_op_impl/aicpu/instance_norm_v2.py +41 -0
  654. mindspore/ops/_op_impl/aicpu/instance_norm_v2_grad.py +44 -0
  655. mindspore/ops/_op_impl/aicpu/is_finite.py +40 -0
  656. mindspore/ops/_op_impl/aicpu/is_inf.py +31 -0
  657. mindspore/ops/_op_impl/aicpu/is_nan.py +31 -0
  658. mindspore/ops/_op_impl/aicpu/kldivloss.py +34 -0
  659. mindspore/ops/_op_impl/aicpu/kldivlossgrad.py +35 -0
  660. mindspore/ops/_op_impl/aicpu/layer_norm_grad_grad.py +47 -0
  661. mindspore/ops/_op_impl/aicpu/lcm.py +32 -0
  662. mindspore/ops/_op_impl/aicpu/left_shift.py +38 -0
  663. mindspore/ops/_op_impl/aicpu/less.py +41 -0
  664. mindspore/ops/_op_impl/aicpu/less_equal.py +41 -0
  665. mindspore/ops/_op_impl/aicpu/lgamma.py +33 -0
  666. mindspore/ops/_op_impl/aicpu/linear_sum_assignment.py +57 -0
  667. mindspore/ops/_op_impl/aicpu/linspace.py +33 -0
  668. mindspore/ops/_op_impl/aicpu/list_diff.py +50 -0
  669. mindspore/ops/_op_impl/aicpu/log.py +37 -0
  670. mindspore/ops/_op_impl/aicpu/log1p.py +34 -0
  671. mindspore/ops/_op_impl/aicpu/log_matrix_determinant.py +31 -0
  672. mindspore/ops/_op_impl/aicpu/log_normal_reverse.py +33 -0
  673. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +37 -0
  674. mindspore/ops/_op_impl/aicpu/logical_xor.py +30 -0
  675. mindspore/ops/_op_impl/aicpu/logit.py +33 -0
  676. mindspore/ops/_op_impl/aicpu/logit_grad.py +34 -0
  677. mindspore/ops/_op_impl/aicpu/logspace.py +36 -0
  678. mindspore/ops/_op_impl/aicpu/lower_bound.py +47 -0
  679. mindspore/ops/_op_impl/aicpu/lstsq.py +34 -0
  680. mindspore/ops/_op_impl/aicpu/lu.py +39 -0
  681. mindspore/ops/_op_impl/aicpu/lu_solve.py +32 -0
  682. mindspore/ops/_op_impl/aicpu/lu_unpack.py +114 -0
  683. mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +49 -0
  684. mindspore/ops/_op_impl/aicpu/masked_fill.py +42 -0
  685. mindspore/ops/_op_impl/aicpu/masked_scatter.py +40 -0
  686. mindspore/ops/_op_impl/aicpu/masked_select.py +31 -0
  687. mindspore/ops/_op_impl/aicpu/masked_select_grad.py +35 -0
  688. mindspore/ops/_op_impl/aicpu/matmul.py +39 -0
  689. mindspore/ops/_op_impl/aicpu/matrix_band_part.py +59 -0
  690. mindspore/ops/_op_impl/aicpu/matrix_determinant.py +30 -0
  691. mindspore/ops/_op_impl/aicpu/matrix_diag_part_v3.py +54 -0
  692. mindspore/ops/_op_impl/aicpu/matrix_diag_v3.py +56 -0
  693. mindspore/ops/_op_impl/aicpu/matrix_exp.py +34 -0
  694. mindspore/ops/_op_impl/aicpu/matrix_inverse.py +31 -0
  695. mindspore/ops/_op_impl/aicpu/matrix_logarithm.py +31 -0
  696. mindspore/ops/_op_impl/aicpu/matrix_power.py +37 -0
  697. mindspore/ops/_op_impl/aicpu/matrix_set_diag_v3.py +54 -0
  698. mindspore/ops/_op_impl/aicpu/matrix_solve.py +35 -0
  699. mindspore/ops/_op_impl/aicpu/matrix_solve_ls.py +36 -0
  700. mindspore/ops/_op_impl/aicpu/matrix_triangular_solve.py +36 -0
  701. mindspore/ops/_op_impl/aicpu/max_pool3d_grad_with_argmax.py +60 -0
  702. mindspore/ops/_op_impl/aicpu/max_pool3d_with_argmax.py +59 -0
  703. mindspore/ops/_op_impl/aicpu/max_unpool2d.py +57 -0
  704. mindspore/ops/_op_impl/aicpu/max_unpool2d_grad.py +58 -0
  705. mindspore/ops/_op_impl/aicpu/max_unpool3d.py +57 -0
  706. mindspore/ops/_op_impl/aicpu/max_unpool3d_grad.py +58 -0
  707. mindspore/ops/_op_impl/aicpu/maximum_grad_grad.py +40 -0
  708. mindspore/ops/_op_impl/aicpu/maxpool_grad_v1.py +46 -0
  709. mindspore/ops/_op_impl/aicpu/maxpool_v1.py +42 -0
  710. mindspore/ops/_op_impl/aicpu/median.py +39 -0
  711. mindspore/ops/_op_impl/aicpu/median_grad.py +45 -0
  712. mindspore/ops/_op_impl/aicpu/meshgrid.py +41 -0
  713. mindspore/ops/_op_impl/aicpu/minimum_grad_grad.py +40 -0
  714. mindspore/ops/_op_impl/aicpu/mirror_pad.py +50 -0
  715. mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +48 -0
  716. mindspore/ops/_op_impl/aicpu/mul.py +43 -0
  717. mindspore/ops/_op_impl/aicpu/mul_no_nan.py +42 -0
  718. mindspore/ops/_op_impl/aicpu/multi_margin_loss.py +37 -0
  719. mindspore/ops/_op_impl/aicpu/multi_margin_loss_grad.py +41 -0
  720. mindspore/ops/_op_impl/aicpu/multilabel_margin_loss_grad.py +37 -0
  721. mindspore/ops/_op_impl/aicpu/multinomial.py +47 -0
  722. mindspore/ops/_op_impl/aicpu/multinomial_with_replacement.py +35 -0
  723. mindspore/ops/_op_impl/aicpu/mvlgamma.py +32 -0
  724. mindspore/ops/_op_impl/aicpu/mvlgamma_grad.py +33 -0
  725. mindspore/ops/_op_impl/aicpu/nan_to_num.py +34 -0
  726. mindspore/ops/_op_impl/aicpu/neg.py +36 -0
  727. mindspore/ops/_op_impl/aicpu/nextafter.py +32 -0
  728. mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
  729. mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
  730. mindspore/ops/_op_impl/aicpu/no_repeat_ngram.py +34 -0
  731. mindspore/ops/_op_impl/aicpu/non_deterministic_ints.py +33 -0
  732. mindspore/ops/_op_impl/aicpu/non_max_suppression.py +36 -0
  733. mindspore/ops/_op_impl/aicpu/non_max_suppression_with_overlaps.py +35 -0
  734. mindspore/ops/_op_impl/aicpu/non_zero.py +43 -0
  735. mindspore/ops/_op_impl/aicpu/not_equal.py +39 -0
  736. mindspore/ops/_op_impl/aicpu/nth_element.py +39 -0
  737. mindspore/ops/_op_impl/aicpu/nuclear_norm.py +33 -0
  738. mindspore/ops/_op_impl/aicpu/one_hot.py +116 -0
  739. mindspore/ops/_op_impl/aicpu/ones_like.py +39 -0
  740. mindspore/ops/_op_impl/aicpu/orgqr.py +34 -0
  741. mindspore/ops/_op_impl/aicpu/pad_and_shift.py +33 -0
  742. mindspore/ops/_op_impl/aicpu/pad_v3.py +61 -0
  743. mindspore/ops/_op_impl/aicpu/pad_v3_grad.py +59 -0
  744. mindspore/ops/_op_impl/aicpu/padding.py +41 -0
  745. mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +54 -0
  746. mindspore/ops/_op_impl/aicpu/pdist_grad.py +33 -0
  747. mindspore/ops/_op_impl/aicpu/poisson.py +37 -0
  748. mindspore/ops/_op_impl/aicpu/polar.py +32 -0
  749. mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
  750. mindspore/ops/_op_impl/aicpu/pow.py +39 -0
  751. mindspore/ops/_op_impl/aicpu/print_tensor.py +39 -0
  752. mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +113 -0
  753. mindspore/ops/_op_impl/aicpu/qr.py +36 -0
  754. mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
  755. mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
  756. mindspore/ops/_op_impl/aicpu/ragged_range.py +49 -0
  757. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
  758. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_tensor.py +74 -0
  759. mindspore/ops/_op_impl/aicpu/random_categorical.py +68 -0
  760. mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +36 -0
  761. mindspore/ops/_op_impl/aicpu/random_gamma.py +38 -0
  762. mindspore/ops/_op_impl/aicpu/random_poisson.py +134 -0
  763. mindspore/ops/_op_impl/aicpu/random_shuffle.py +47 -0
  764. mindspore/ops/_op_impl/aicpu/randperm.py +38 -0
  765. mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
  766. mindspore/ops/_op_impl/aicpu/range.py +36 -0
  767. mindspore/ops/_op_impl/aicpu/range_v2.py +35 -0
  768. mindspore/ops/_op_impl/aicpu/real.py +31 -0
  769. mindspore/ops/_op_impl/aicpu/real_div.py +40 -0
  770. mindspore/ops/_op_impl/aicpu/reciprocal.py +34 -0
  771. mindspore/ops/_op_impl/aicpu/reciprocal_grad.py +35 -0
  772. mindspore/ops/_op_impl/aicpu/reduce_mean.py +57 -0
  773. mindspore/ops/_op_impl/aicpu/reduce_prod.py +57 -0
  774. mindspore/ops/_op_impl/aicpu/reduce_sum.py +57 -0
  775. mindspore/ops/_op_impl/aicpu/relu_grad_v3.py +41 -0
  776. mindspore/ops/_op_impl/aicpu/relu_v3.py +38 -0
  777. mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +96 -0
  778. mindspore/ops/_op_impl/aicpu/reshape.py +42 -0
  779. mindspore/ops/_op_impl/aicpu/resize_area.py +40 -0
  780. mindspore/ops/_op_impl/aicpu/resize_bicubic.py +20 -0
  781. mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +19 -0
  782. mindspore/ops/_op_impl/aicpu/resize_bilinear.py +32 -0
  783. mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py +32 -0
  784. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2.py +36 -0
  785. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2_grad.py +35 -0
  786. mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
  787. mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
  788. mindspore/ops/_op_impl/aicpu/reverse_sequence.py +55 -0
  789. mindspore/ops/_op_impl/aicpu/reversev2.py +54 -0
  790. mindspore/ops/_op_impl/aicpu/rgb_to_hsv.py +32 -0
  791. mindspore/ops/_op_impl/aicpu/right_shift.py +38 -0
  792. mindspore/ops/_op_impl/aicpu/rnnt_loss.py +35 -0
  793. mindspore/ops/_op_impl/aicpu/round.py +34 -0
  794. mindspore/ops/_op_impl/aicpu/rsqrt.py +33 -0
  795. mindspore/ops/_op_impl/aicpu/rsqrt_grad.py +36 -0
  796. mindspore/ops/_op_impl/aicpu/sample_distorted_bounding_box_v2.py +49 -0
  797. mindspore/ops/_op_impl/aicpu/scale_and_translate.py +52 -0
  798. mindspore/ops/_op_impl/aicpu/scale_and_translate_grad.py +36 -0
  799. mindspore/ops/_op_impl/aicpu/scatter.py +79 -0
  800. mindspore/ops/_op_impl/aicpu/scatter_add_with_axis.py +53 -0
  801. mindspore/ops/_op_impl/aicpu/scatter_elements.py +39 -0
  802. mindspore/ops/_op_impl/aicpu/scatter_nd.py +59 -0
  803. mindspore/ops/_op_impl/aicpu/scatter_nd_max.py +54 -0
  804. mindspore/ops/_op_impl/aicpu/scatter_nd_min.py +54 -0
  805. mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +59 -0
  806. mindspore/ops/_op_impl/aicpu/search_sorted.py +44 -0
  807. mindspore/ops/_op_impl/aicpu/segment_max.py +52 -0
  808. mindspore/ops/_op_impl/aicpu/segment_mean.py +56 -0
  809. mindspore/ops/_op_impl/aicpu/segment_min.py +52 -0
  810. mindspore/ops/_op_impl/aicpu/segment_prod.py +56 -0
  811. mindspore/ops/_op_impl/aicpu/segment_sum.py +56 -0
  812. mindspore/ops/_op_impl/aicpu/select.py +45 -0
  813. mindspore/ops/_op_impl/aicpu/self_adjoint_eig.py +34 -0
  814. mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
  815. mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
  816. mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
  817. mindspore/ops/_op_impl/aicpu/sequence_concat.py +40 -0
  818. mindspore/ops/_op_impl/aicpu/sequence_stack.py +40 -0
  819. mindspore/ops/_op_impl/aicpu/set_size.py +38 -0
  820. mindspore/ops/_op_impl/aicpu/sign.py +36 -0
  821. mindspore/ops/_op_impl/aicpu/sin.py +34 -0
  822. mindspore/ops/_op_impl/aicpu/sinc.py +43 -0
  823. mindspore/ops/_op_impl/aicpu/sinh.py +34 -0
  824. mindspore/ops/_op_impl/aicpu/slice.py +59 -0
  825. mindspore/ops/_op_impl/aicpu/slice_grad.py +76 -0
  826. mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
  827. mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
  828. mindspore/ops/_op_impl/aicpu/sort.py +39 -0
  829. mindspore/ops/_op_impl/aicpu/space_to_depth.py +44 -0
  830. mindspore/ops/_op_impl/aicpu/sparse_addmm.py +87 -0
  831. mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +80 -0
  832. mindspore/ops/_op_impl/aicpu/sparse_apply_centered_rms_prop.py +105 -0
  833. mindspore/ops/_op_impl/aicpu/sparse_apply_momentum.py +80 -0
  834. mindspore/ops/_op_impl/aicpu/sparse_apply_proximal_gradient_descent.py +79 -0
  835. mindspore/ops/_op_impl/aicpu/sparse_concat.py +59 -0
  836. mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
  837. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_add.py +58 -0
  838. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_div.py +58 -0
  839. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_mul.py +58 -0
  840. mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows.py +63 -0
  841. mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows_grad.py +45 -0
  842. mindspore/ops/_op_impl/aicpu/sparse_matrix_mat_mul.py +56 -0
  843. mindspore/ops/_op_impl/aicpu/sparse_matrix_nnz.py +81 -0
  844. mindspore/ops/_op_impl/aicpu/sparse_matrix_transpose.py +116 -0
  845. mindspore/ops/_op_impl/aicpu/sparse_reorder.py +56 -0
  846. mindspore/ops/_op_impl/aicpu/sparse_reshape.py +34 -0
  847. mindspore/ops/_op_impl/aicpu/sparse_segment_mean_grad.py +36 -0
  848. mindspore/ops/_op_impl/aicpu/sparse_segment_mean_with_num_segments.py +44 -0
  849. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n.py +43 -0
  850. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n_grad.py +38 -0
  851. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n_with_num_segments.py +44 -0
  852. mindspore/ops/_op_impl/aicpu/sparse_segment_sum.py +49 -0
  853. mindspore/ops/_op_impl/aicpu/sparse_segment_sum_with_num_segments.py +68 -0
  854. mindspore/ops/_op_impl/aicpu/sparse_slice.py +63 -0
  855. mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +61 -0
  856. mindspore/ops/_op_impl/aicpu/sparse_softmax.py +33 -0
  857. mindspore/ops/_op_impl/aicpu/sparse_softmax_cross_entropy_with_logits_v2.py +35 -0
  858. mindspore/ops/_op_impl/aicpu/sparse_sparse_maximum.py +53 -0
  859. mindspore/ops/_op_impl/aicpu/sparse_sparse_minimum.py +53 -0
  860. mindspore/ops/_op_impl/aicpu/sparse_tensor_dense_add.py +84 -0
  861. mindspore/ops/_op_impl/aicpu/sparse_tensor_dense_mat_mul.py +190 -0
  862. mindspore/ops/_op_impl/aicpu/sparse_tensor_to_csr_sparse_matrix.py +51 -0
  863. mindspore/ops/_op_impl/aicpu/sparse_to_dense_v2.py +73 -0
  864. mindspore/ops/_op_impl/aicpu/split.py +45 -0
  865. mindspore/ops/_op_impl/aicpu/sqrt.py +34 -0
  866. mindspore/ops/_op_impl/aicpu/sqrt_grad.py +35 -0
  867. mindspore/ops/_op_impl/aicpu/square.py +35 -0
  868. mindspore/ops/_op_impl/aicpu/squared_difference.py +37 -0
  869. mindspore/ops/_op_impl/aicpu/squeeze.py +42 -0
  870. mindspore/ops/_op_impl/aicpu/sspaddmm.py +97 -0
  871. mindspore/ops/_op_impl/aicpu/stack.py +45 -0
  872. mindspore/ops/_op_impl/aicpu/stack_push_pop.py +87 -0
  873. mindspore/ops/_op_impl/aicpu/standard_laplace.py +34 -0
  874. mindspore/ops/_op_impl/aicpu/standard_normal.py +34 -0
  875. mindspore/ops/_op_impl/aicpu/stateless_dropout_genmask.py +37 -0
  876. mindspore/ops/_op_impl/aicpu/stft.py +70 -0
  877. mindspore/ops/_op_impl/aicpu/strided_slice.py +43 -0
  878. mindspore/ops/_op_impl/aicpu/strided_slice_grad.py +50 -0
  879. mindspore/ops/_op_impl/aicpu/sub.py +41 -0
  880. mindspore/ops/_op_impl/aicpu/sub_and_filter.py +36 -0
  881. mindspore/ops/_op_impl/aicpu/tan.py +34 -0
  882. mindspore/ops/_op_impl/aicpu/tanh.py +34 -0
  883. mindspore/ops/_op_impl/aicpu/tanh_grad.py +35 -0
  884. mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
  885. mindspore/ops/_op_impl/aicpu/tile.py +56 -0
  886. mindspore/ops/_op_impl/aicpu/topk.py +34 -0
  887. mindspore/ops/_op_impl/aicpu/trace.py +40 -0
  888. mindspore/ops/_op_impl/aicpu/tracegrad.py +41 -0
  889. mindspore/ops/_op_impl/aicpu/trans_data.py +35 -0
  890. mindspore/ops/_op_impl/aicpu/transpose.py +58 -0
  891. mindspore/ops/_op_impl/aicpu/tridiagonal_matmul.py +42 -0
  892. mindspore/ops/_op_impl/aicpu/tridiagonal_solve.py +35 -0
  893. mindspore/ops/_op_impl/aicpu/tril.py +42 -0
  894. mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
  895. mindspore/ops/_op_impl/aicpu/triplet_margin_loss.py +62 -0
  896. mindspore/ops/_op_impl/aicpu/triu.py +43 -0
  897. mindspore/ops/_op_impl/aicpu/triu_indices.py +34 -0
  898. mindspore/ops/_op_impl/aicpu/truncated_normal.py +39 -0
  899. mindspore/ops/_op_impl/aicpu/uniform.py +36 -0
  900. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +41 -0
  901. mindspore/ops/_op_impl/aicpu/uniform_int.py +36 -0
  902. mindspore/ops/_op_impl/aicpu/uniform_real.py +33 -0
  903. mindspore/ops/_op_impl/aicpu/unique.py +31 -0
  904. mindspore/ops/_op_impl/aicpu/unique_consecutive.py +47 -0
  905. mindspore/ops/_op_impl/aicpu/unique_with_pad.py +32 -0
  906. mindspore/ops/_op_impl/aicpu/unravel_index.py +32 -0
  907. mindspore/ops/_op_impl/aicpu/unsorted_segment_prod.py +53 -0
  908. mindspore/ops/_op_impl/aicpu/unsorted_segment_sum.py +57 -0
  909. mindspore/ops/_op_impl/aicpu/unstack.py +45 -0
  910. mindspore/ops/_op_impl/aicpu/update_cache.py +44 -0
  911. mindspore/ops/_op_impl/aicpu/upper_bound.py +47 -0
  912. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d.py +42 -0
  913. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d_grad.py +49 -0
  914. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d.py +40 -0
  915. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d_grad.py +50 -0
  916. mindspore/ops/_op_impl/aicpu/xdivy.py +35 -0
  917. mindspore/ops/_op_impl/aicpu/xlogy.py +33 -0
  918. mindspore/ops/_op_impl/aicpu/zeros_like.py +42 -0
  919. mindspore/ops/_op_impl/aicpu/zeta.py +31 -0
  920. mindspore/ops/_op_impl/akg/__init__.py +19 -0
  921. mindspore/ops/_op_impl/akg/ascend/__init__.py +48 -0
  922. mindspore/ops/_op_impl/akg/ascend/abs.py +35 -0
  923. mindspore/ops/_op_impl/akg/ascend/add.py +42 -0
  924. mindspore/ops/_op_impl/akg/ascend/add_n.py +37 -0
  925. mindspore/ops/_op_impl/akg/ascend/batchmatmul.py +33 -0
  926. mindspore/ops/_op_impl/akg/ascend/cast.py +46 -0
  927. mindspore/ops/_op_impl/akg/ascend/equal.py +35 -0
  928. mindspore/ops/_op_impl/akg/ascend/exp.py +35 -0
  929. mindspore/ops/_op_impl/akg/ascend/expand_dims.py +33 -0
  930. mindspore/ops/_op_impl/akg/ascend/greater.py +34 -0
  931. mindspore/ops/_op_impl/akg/ascend/greater_equal.py +35 -0
  932. mindspore/ops/_op_impl/akg/ascend/less.py +31 -0
  933. mindspore/ops/_op_impl/akg/ascend/less_equal.py +35 -0
  934. mindspore/ops/_op_impl/akg/ascend/load_im2col.py +33 -0
  935. mindspore/ops/_op_impl/akg/ascend/log.py +34 -0
  936. mindspore/ops/_op_impl/akg/ascend/maximum.py +36 -0
  937. mindspore/ops/_op_impl/akg/ascend/minimum.py +39 -0
  938. mindspore/ops/_op_impl/akg/ascend/mul.py +41 -0
  939. mindspore/ops/_op_impl/akg/ascend/neg.py +37 -0
  940. mindspore/ops/_op_impl/akg/ascend/pow.py +35 -0
  941. mindspore/ops/_op_impl/akg/ascend/prod_force_se_a.py +33 -0
  942. mindspore/ops/_op_impl/akg/ascend/real_div.py +36 -0
  943. mindspore/ops/_op_impl/akg/ascend/reciprocal.py +32 -0
  944. mindspore/ops/_op_impl/akg/ascend/reduce_max.py +32 -0
  945. mindspore/ops/_op_impl/akg/ascend/reduce_min.py +32 -0
  946. mindspore/ops/_op_impl/akg/ascend/reduce_sum.py +37 -0
  947. mindspore/ops/_op_impl/akg/ascend/rsqrt.py +35 -0
  948. mindspore/ops/_op_impl/akg/ascend/select.py +37 -0
  949. mindspore/ops/_op_impl/akg/ascend/sqrt.py +35 -0
  950. mindspore/ops/_op_impl/akg/ascend/square.py +35 -0
  951. mindspore/ops/_op_impl/akg/ascend/sub.py +42 -0
  952. mindspore/ops/_op_impl/akg/cpu/__init__.py +23 -0
  953. mindspore/ops/_op_impl/akg/cpu/coo2csr.py +29 -0
  954. mindspore/ops/_op_impl/akg/cpu/csr2coo.py +29 -0
  955. mindspore/ops/_op_impl/akg/cpu/csr_gather.py +33 -0
  956. mindspore/ops/_op_impl/akg/cpu/csr_mm.py +34 -0
  957. mindspore/ops/_op_impl/akg/cpu/csr_mul.py +33 -0
  958. mindspore/ops/_op_impl/akg/cpu/csr_mv.py +33 -0
  959. mindspore/ops/_op_impl/akg/cpu/csr_reduce_sum.py +31 -0
  960. mindspore/ops/_op_impl/akg/gpu/__init__.py +24 -0
  961. mindspore/ops/_op_impl/akg/gpu/coo2csr.py +29 -0
  962. mindspore/ops/_op_impl/akg/gpu/csr2coo.py +29 -0
  963. mindspore/ops/_op_impl/akg/gpu/csr_div.py +36 -0
  964. mindspore/ops/_op_impl/akg/gpu/csr_gather.py +33 -0
  965. mindspore/ops/_op_impl/akg/gpu/csr_mm.py +37 -0
  966. mindspore/ops/_op_impl/akg/gpu/csr_mul.py +36 -0
  967. mindspore/ops/_op_impl/akg/gpu/csr_mv.py +36 -0
  968. mindspore/ops/_op_impl/akg/gpu/csr_reduce_sum.py +33 -0
  969. mindspore/ops/_op_impl/cpu/__init__.py +78 -0
  970. mindspore/ops/_op_impl/cpu/adam.py +49 -0
  971. mindspore/ops/_op_impl/cpu/adam_weight_decay.py +47 -0
  972. mindspore/ops/_op_impl/cpu/arg_max.py +30 -0
  973. mindspore/ops/_op_impl/cpu/arg_max_with_value.py +31 -0
  974. mindspore/ops/_op_impl/cpu/arg_min_with_value.py +31 -0
  975. mindspore/ops/_op_impl/cpu/buffer_append.py +28 -0
  976. mindspore/ops/_op_impl/cpu/buffer_get.py +28 -0
  977. mindspore/ops/_op_impl/cpu/buffer_sample.py +28 -0
  978. mindspore/ops/_op_impl/cpu/cast.py +171 -0
  979. mindspore/ops/_op_impl/cpu/concat_offset.py +38 -0
  980. mindspore/ops/_op_impl/cpu/conv2d.py +30 -0
  981. mindspore/ops/_op_impl/cpu/conv3d.py +30 -0
  982. mindspore/ops/_op_impl/cpu/div.py +32 -0
  983. mindspore/ops/_op_impl/cpu/dropout.py +31 -0
  984. mindspore/ops/_op_impl/cpu/dropout_grad.py +30 -0
  985. mindspore/ops/_op_impl/cpu/dynamic_shape.py +42 -0
  986. mindspore/ops/_op_impl/cpu/dynamic_stitch.py +41 -0
  987. mindspore/ops/_op_impl/cpu/equal_count.py +30 -0
  988. mindspore/ops/_op_impl/cpu/gather_d.py +49 -0
  989. mindspore/ops/_op_impl/cpu/gather_d_grad.py +38 -0
  990. mindspore/ops/_op_impl/cpu/gather_d_grad_v2.py +40 -0
  991. mindspore/ops/_op_impl/cpu/gather_v2.py +40 -0
  992. mindspore/ops/_op_impl/cpu/hsigmoid.py +33 -0
  993. mindspore/ops/_op_impl/cpu/hsigmoid_grad.py +34 -0
  994. mindspore/ops/_op_impl/cpu/hswish.py +32 -0
  995. mindspore/ops/_op_impl/cpu/hswish_grad.py +33 -0
  996. mindspore/ops/_op_impl/cpu/identity_n.py +40 -0
  997. mindspore/ops/_op_impl/cpu/is_finite.py +39 -0
  998. mindspore/ops/_op_impl/cpu/l2loss.py +30 -0
  999. mindspore/ops/_op_impl/cpu/layer_norm.py +36 -0
  1000. mindspore/ops/_op_impl/cpu/layer_norm_grad.py +38 -0
  1001. mindspore/ops/_op_impl/cpu/maximum.py +35 -0
  1002. mindspore/ops/_op_impl/cpu/maximum_grad.py +47 -0
  1003. mindspore/ops/_op_impl/cpu/minimum.py +40 -0
  1004. mindspore/ops/_op_impl/cpu/minimum_grad.py +51 -0
  1005. mindspore/ops/_op_impl/cpu/mirror_pad.py +36 -0
  1006. mindspore/ops/_op_impl/cpu/mirror_pad_grad.py +36 -0
  1007. mindspore/ops/_op_impl/cpu/mul.py +32 -0
  1008. mindspore/ops/_op_impl/cpu/one_hot.py +31 -0
  1009. mindspore/ops/_op_impl/cpu/pad.py +32 -0
  1010. mindspore/ops/_op_impl/cpu/pow.py +32 -0
  1011. mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +42 -0
  1012. mindspore/ops/_op_impl/cpu/pyexecute.py +29 -0
  1013. mindspore/ops/_op_impl/cpu/pyfunc.py +29 -0
  1014. mindspore/ops/_op_impl/cpu/range.py +34 -0
  1015. mindspore/ops/_op_impl/cpu/real_div.py +33 -0
  1016. mindspore/ops/_op_impl/cpu/reduce_all.py +29 -0
  1017. mindspore/ops/_op_impl/cpu/reduce_any.py +29 -0
  1018. mindspore/ops/_op_impl/cpu/reduce_max.py +32 -0
  1019. mindspore/ops/_op_impl/cpu/reduce_mean.py +40 -0
  1020. mindspore/ops/_op_impl/cpu/reduce_min.py +32 -0
  1021. mindspore/ops/_op_impl/cpu/reduce_prod.py +40 -0
  1022. mindspore/ops/_op_impl/cpu/reduce_std.py +31 -0
  1023. mindspore/ops/_op_impl/cpu/reduce_sum.py +41 -0
  1024. mindspore/ops/_op_impl/cpu/space_to_batch_nd.py +38 -0
  1025. mindspore/ops/_op_impl/cpu/sparse_slice.py +62 -0
  1026. mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +60 -0
  1027. mindspore/ops/_op_impl/cpu/split.py +34 -0
  1028. mindspore/ops/_op_impl/cpu/sspaddmm.py +95 -0
  1029. mindspore/ops/_op_impl/cpu/stack.py +38 -0
  1030. mindspore/ops/_op_impl/cpu/sub.py +32 -0
  1031. mindspore/ops/_op_impl/cpu/tensor_copy_slices.py +41 -0
  1032. mindspore/ops/_op_impl/cpu/tile.py +37 -0
  1033. mindspore/ops/_op_impl/cpu/top_k.py +31 -0
  1034. mindspore/ops/_op_impl/cpu/transpose.py +39 -0
  1035. mindspore/ops/_primitive_cache.py +90 -0
  1036. mindspore/ops/_register_for_op.py +73 -0
  1037. mindspore/ops/_utils/__init__.py +20 -0
  1038. mindspore/ops/_utils/utils.py +147 -0
  1039. mindspore/ops/_vmap/__init__.py +25 -0
  1040. mindspore/ops/_vmap/vmap_array_ops.py +2149 -0
  1041. mindspore/ops/_vmap/vmap_base.py +533 -0
  1042. mindspore/ops/_vmap/vmap_convolution_ops.py +441 -0
  1043. mindspore/ops/_vmap/vmap_debug_ops.py +50 -0
  1044. mindspore/ops/_vmap/vmap_grad_math_ops.py +274 -0
  1045. mindspore/ops/_vmap/vmap_grad_nn_ops.py +806 -0
  1046. mindspore/ops/_vmap/vmap_image_ops.py +194 -0
  1047. mindspore/ops/_vmap/vmap_math_ops.py +993 -0
  1048. mindspore/ops/_vmap/vmap_nn_ops.py +2250 -0
  1049. mindspore/ops/_vmap/vmap_other_ops.py +105 -0
  1050. mindspore/ops/_vmap/vmap_random_ops.py +122 -0
  1051. mindspore/ops/_vmap/vmap_sparse_ops.py +89 -0
  1052. mindspore/ops/auto_generate/__init__.py +31 -0
  1053. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +309 -0
  1054. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +252 -0
  1055. mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
  1056. mindspore/ops/auto_generate/gen_extend_func.py +1701 -0
  1057. mindspore/ops/auto_generate/gen_ops_def.py +8482 -0
  1058. mindspore/ops/auto_generate/gen_ops_prim.py +16704 -0
  1059. mindspore/ops/auto_generate/pyboost_inner_prim.py +549 -0
  1060. mindspore/ops/composite/__init__.py +71 -0
  1061. mindspore/ops/composite/base.py +1318 -0
  1062. mindspore/ops/composite/env_ops.py +41 -0
  1063. mindspore/ops/composite/math_ops.py +125 -0
  1064. mindspore/ops/composite/multitype_ops/__init__.py +77 -0
  1065. mindspore/ops/composite/multitype_ops/_compile_utils.py +1459 -0
  1066. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +897 -0
  1067. mindspore/ops/composite/multitype_ops/add_impl.py +606 -0
  1068. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +56 -0
  1069. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +56 -0
  1070. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +56 -0
  1071. mindspore/ops/composite/multitype_ops/div_impl.py +189 -0
  1072. mindspore/ops/composite/multitype_ops/equal_impl.py +335 -0
  1073. mindspore/ops/composite/multitype_ops/floordiv_impl.py +88 -0
  1074. mindspore/ops/composite/multitype_ops/getitem_impl.py +400 -0
  1075. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +109 -0
  1076. mindspore/ops/composite/multitype_ops/greater_impl.py +110 -0
  1077. mindspore/ops/composite/multitype_ops/in_impl.py +196 -0
  1078. mindspore/ops/composite/multitype_ops/left_shift_impl.py +37 -0
  1079. mindspore/ops/composite/multitype_ops/less_equal_impl.py +111 -0
  1080. mindspore/ops/composite/multitype_ops/less_impl.py +112 -0
  1081. mindspore/ops/composite/multitype_ops/logic_not_impl.py +113 -0
  1082. mindspore/ops/composite/multitype_ops/logical_and_impl.py +60 -0
  1083. mindspore/ops/composite/multitype_ops/logical_or_impl.py +61 -0
  1084. mindspore/ops/composite/multitype_ops/mod_impl.py +86 -0
  1085. mindspore/ops/composite/multitype_ops/mul_impl.py +294 -0
  1086. mindspore/ops/composite/multitype_ops/negative_impl.py +79 -0
  1087. mindspore/ops/composite/multitype_ops/not_equal_impl.py +290 -0
  1088. mindspore/ops/composite/multitype_ops/not_in_impl.py +196 -0
  1089. mindspore/ops/composite/multitype_ops/ones_like_impl.py +96 -0
  1090. mindspore/ops/composite/multitype_ops/pow_impl.py +87 -0
  1091. mindspore/ops/composite/multitype_ops/right_shift_impl.py +37 -0
  1092. mindspore/ops/composite/multitype_ops/setitem_impl.py +884 -0
  1093. mindspore/ops/composite/multitype_ops/sub_impl.py +116 -0
  1094. mindspore/ops/composite/multitype_ops/uadd_impl.py +29 -0
  1095. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +228 -0
  1096. mindspore/ops/deprecated.py +315 -0
  1097. mindspore/ops/function/__init__.py +782 -0
  1098. mindspore/ops/function/array_func.py +7226 -0
  1099. mindspore/ops/function/clip_func.py +384 -0
  1100. mindspore/ops/function/debug_func.py +181 -0
  1101. mindspore/ops/function/fft_func.py +44 -0
  1102. mindspore/ops/function/grad/__init__.py +34 -0
  1103. mindspore/ops/function/grad/grad_func.py +1425 -0
  1104. mindspore/ops/function/image_func.py +292 -0
  1105. mindspore/ops/function/linalg_func.py +416 -0
  1106. mindspore/ops/function/math_func.py +12228 -0
  1107. mindspore/ops/function/nn_func.py +8609 -0
  1108. mindspore/ops/function/other_func.py +115 -0
  1109. mindspore/ops/function/parameter_func.py +134 -0
  1110. mindspore/ops/function/random_func.py +1715 -0
  1111. mindspore/ops/function/reshard_func.py +104 -0
  1112. mindspore/ops/function/sparse_func.py +884 -0
  1113. mindspore/ops/function/sparse_unary_func.py +2422 -0
  1114. mindspore/ops/function/spectral_func.py +150 -0
  1115. mindspore/ops/function/vmap_func.py +117 -0
  1116. mindspore/ops/functional.py +464 -0
  1117. mindspore/ops/op_info_register.py +1572 -0
  1118. mindspore/ops/operations/__init__.py +722 -0
  1119. mindspore/ops/operations/_csr_ops.py +403 -0
  1120. mindspore/ops/operations/_custom_grad.py +181 -0
  1121. mindspore/ops/operations/_embedding_cache_ops.py +307 -0
  1122. mindspore/ops/operations/_grad_ops.py +2978 -0
  1123. mindspore/ops/operations/_infer_ops.py +19 -0
  1124. mindspore/ops/operations/_inner_ops.py +2544 -0
  1125. mindspore/ops/operations/_map_tensor_ops.py +112 -0
  1126. mindspore/ops/operations/_ms_kernel.py +601 -0
  1127. mindspore/ops/operations/_ocr_ops.py +379 -0
  1128. mindspore/ops/operations/_opaque_predicate_registry.py +41 -0
  1129. mindspore/ops/operations/_pyfunc_registry.py +58 -0
  1130. mindspore/ops/operations/_quant_ops.py +1844 -0
  1131. mindspore/ops/operations/_rl_inner_ops.py +1231 -0
  1132. mindspore/ops/operations/_scalar_ops.py +106 -0
  1133. mindspore/ops/operations/_sequence_ops.py +1155 -0
  1134. mindspore/ops/operations/_sparse_grad_ops.py +56 -0
  1135. mindspore/ops/operations/_tensor_array.py +359 -0
  1136. mindspore/ops/operations/_thor_ops.py +807 -0
  1137. mindspore/ops/operations/array_ops.py +6124 -0
  1138. mindspore/ops/operations/comm_ops.py +1985 -0
  1139. mindspore/ops/operations/control_ops.py +127 -0
  1140. mindspore/ops/operations/custom_ops.py +1129 -0
  1141. mindspore/ops/operations/debug_ops.py +678 -0
  1142. mindspore/ops/operations/image_ops.py +1041 -0
  1143. mindspore/ops/operations/inner_ops.py +697 -0
  1144. mindspore/ops/operations/linalg_ops.py +95 -0
  1145. mindspore/ops/operations/manually_defined/__init__.py +24 -0
  1146. mindspore/ops/operations/manually_defined/_inner.py +73 -0
  1147. mindspore/ops/operations/manually_defined/ops_def.py +2271 -0
  1148. mindspore/ops/operations/math_ops.py +5095 -0
  1149. mindspore/ops/operations/nn_ops.py +9575 -0
  1150. mindspore/ops/operations/other_ops.py +874 -0
  1151. mindspore/ops/operations/random_ops.py +1288 -0
  1152. mindspore/ops/operations/reshard_ops.py +53 -0
  1153. mindspore/ops/operations/rl_ops.py +288 -0
  1154. mindspore/ops/operations/sparse_ops.py +2753 -0
  1155. mindspore/ops/operations/spectral_ops.py +111 -0
  1156. mindspore/ops/primitive.py +1046 -0
  1157. mindspore/ops/signature.py +54 -0
  1158. mindspore/ops/vm_impl_registry.py +91 -0
  1159. mindspore/ops_generate/__init__.py +27 -0
  1160. mindspore/ops_generate/arg_dtype_cast.py +252 -0
  1161. mindspore/ops_generate/arg_handler.py +197 -0
  1162. mindspore/ops_generate/gen_aclnn_implement.py +263 -0
  1163. mindspore/ops_generate/gen_constants.py +36 -0
  1164. mindspore/ops_generate/gen_ops.py +1099 -0
  1165. mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
  1166. mindspore/ops_generate/gen_pyboost_func.py +1052 -0
  1167. mindspore/ops_generate/gen_utils.py +209 -0
  1168. mindspore/ops_generate/op_proto.py +145 -0
  1169. mindspore/ops_generate/pyboost_utils.py +367 -0
  1170. mindspore/ops_generate/template.py +261 -0
  1171. mindspore/parallel/__init__.py +30 -0
  1172. mindspore/parallel/_auto_parallel_context.py +1486 -0
  1173. mindspore/parallel/_cell_wrapper.py +174 -0
  1174. mindspore/parallel/_cost_model_context.py +700 -0
  1175. mindspore/parallel/_dp_allreduce_fusion.py +159 -0
  1176. mindspore/parallel/_offload_context.py +275 -0
  1177. mindspore/parallel/_parallel_serialization.py +561 -0
  1178. mindspore/parallel/_ps_context.py +242 -0
  1179. mindspore/parallel/_recovery_context.py +110 -0
  1180. mindspore/parallel/_tensor.py +730 -0
  1181. mindspore/parallel/_transformer/__init__.py +35 -0
  1182. mindspore/parallel/_transformer/layers.py +765 -0
  1183. mindspore/parallel/_transformer/loss.py +251 -0
  1184. mindspore/parallel/_transformer/moe.py +693 -0
  1185. mindspore/parallel/_transformer/op_parallel_config.py +222 -0
  1186. mindspore/parallel/_transformer/transformer.py +3119 -0
  1187. mindspore/parallel/_utils.py +612 -0
  1188. mindspore/parallel/algo_parameter_config.py +400 -0
  1189. mindspore/parallel/checkpoint_transform.py +650 -0
  1190. mindspore/parallel/cluster/__init__.py +15 -0
  1191. mindspore/parallel/cluster/process_entity/__init__.py +18 -0
  1192. mindspore/parallel/cluster/process_entity/_api.py +352 -0
  1193. mindspore/parallel/cluster/process_entity/_utils.py +101 -0
  1194. mindspore/parallel/cluster/run.py +136 -0
  1195. mindspore/parallel/mpi/__init__.py +14 -0
  1196. mindspore/parallel/mpi/_mpi_config.py +116 -0
  1197. mindspore/parallel/parameter_broadcast.py +151 -0
  1198. mindspore/parallel/shard.py +481 -0
  1199. mindspore/parallel/transform_safetensors.py +993 -0
  1200. mindspore/perf_msvcbuildinsights.dll +0 -0
  1201. mindspore/pgodb140.dll +0 -0
  1202. mindspore/pgort140.dll +0 -0
  1203. mindspore/profiler/__init__.py +28 -0
  1204. mindspore/profiler/common/__init__.py +14 -0
  1205. mindspore/profiler/common/constant.py +29 -0
  1206. mindspore/profiler/common/exceptions/__init__.py +14 -0
  1207. mindspore/profiler/common/exceptions/error_code.py +83 -0
  1208. mindspore/profiler/common/exceptions/exceptions.py +286 -0
  1209. mindspore/profiler/common/process_pool.py +41 -0
  1210. mindspore/profiler/common/registry.py +47 -0
  1211. mindspore/profiler/common/singleton.py +28 -0
  1212. mindspore/profiler/common/struct_type.py +118 -0
  1213. mindspore/profiler/common/util.py +472 -0
  1214. mindspore/profiler/common/validator/__init__.py +14 -0
  1215. mindspore/profiler/common/validator/validate_path.py +84 -0
  1216. mindspore/profiler/dynamic_profiler.py +694 -0
  1217. mindspore/profiler/envprofiling.py +254 -0
  1218. mindspore/profiler/parser/__init__.py +14 -0
  1219. mindspore/profiler/parser/aicpu_data_parser.py +272 -0
  1220. mindspore/profiler/parser/ascend_analysis/__init__.py +14 -0
  1221. mindspore/profiler/parser/ascend_analysis/constant.py +71 -0
  1222. mindspore/profiler/parser/ascend_analysis/file_manager.py +180 -0
  1223. mindspore/profiler/parser/ascend_analysis/function_event.py +185 -0
  1224. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +136 -0
  1225. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +131 -0
  1226. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +104 -0
  1227. mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
  1228. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +123 -0
  1229. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
  1230. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +75 -0
  1231. mindspore/profiler/parser/ascend_cluster_generator.py +116 -0
  1232. mindspore/profiler/parser/ascend_communicate_generator.py +314 -0
  1233. mindspore/profiler/parser/ascend_flops_generator.py +116 -0
  1234. mindspore/profiler/parser/ascend_fpbp_generator.py +82 -0
  1235. mindspore/profiler/parser/ascend_hccl_generator.py +271 -0
  1236. mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
  1237. mindspore/profiler/parser/ascend_memory_generator.py +185 -0
  1238. mindspore/profiler/parser/ascend_msprof_exporter.py +282 -0
  1239. mindspore/profiler/parser/ascend_msprof_generator.py +187 -0
  1240. mindspore/profiler/parser/ascend_op_generator.py +334 -0
  1241. mindspore/profiler/parser/ascend_steptrace_generator.py +94 -0
  1242. mindspore/profiler/parser/ascend_timeline_generator.py +545 -0
  1243. mindspore/profiler/parser/base_timeline_generator.py +483 -0
  1244. mindspore/profiler/parser/container.py +229 -0
  1245. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +697 -0
  1246. mindspore/profiler/parser/flops_parser.py +531 -0
  1247. mindspore/profiler/parser/framework_enum.py +111 -0
  1248. mindspore/profiler/parser/framework_parser.py +464 -0
  1249. mindspore/profiler/parser/framework_struct.py +61 -0
  1250. mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
  1251. mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
  1252. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
  1253. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
  1254. mindspore/profiler/parser/hccl_parser.py +573 -0
  1255. mindspore/profiler/parser/hwts_log_parser.py +122 -0
  1256. mindspore/profiler/parser/integrator.py +526 -0
  1257. mindspore/profiler/parser/memory_usage_parser.py +277 -0
  1258. mindspore/profiler/parser/minddata_analyzer.py +800 -0
  1259. mindspore/profiler/parser/minddata_parser.py +186 -0
  1260. mindspore/profiler/parser/minddata_pipeline_parser.py +299 -0
  1261. mindspore/profiler/parser/op_intermediate_parser.py +149 -0
  1262. mindspore/profiler/parser/optime_parser.py +250 -0
  1263. mindspore/profiler/parser/profiler_info.py +213 -0
  1264. mindspore/profiler/parser/step_trace_parser.py +666 -0
  1265. mindspore/profiler/profiler.py +153 -0
  1266. mindspore/profiler/profiling.py +1922 -0
  1267. mindspore/rewrite/__init__.py +28 -0
  1268. mindspore/rewrite/api/__init__.py +17 -0
  1269. mindspore/rewrite/api/node.py +519 -0
  1270. mindspore/rewrite/api/node_type.py +53 -0
  1271. mindspore/rewrite/api/pattern_engine.py +490 -0
  1272. mindspore/rewrite/api/scoped_value.py +181 -0
  1273. mindspore/rewrite/api/symbol_tree.py +497 -0
  1274. mindspore/rewrite/ast_helpers/__init__.py +25 -0
  1275. mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
  1276. mindspore/rewrite/ast_helpers/ast_finder.py +404 -0
  1277. mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
  1278. mindspore/rewrite/ast_helpers/ast_modifier.py +605 -0
  1279. mindspore/rewrite/ast_helpers/ast_replacer.py +79 -0
  1280. mindspore/rewrite/common/__init__.py +19 -0
  1281. mindspore/rewrite/common/config.py +24 -0
  1282. mindspore/rewrite/common/error_log.py +39 -0
  1283. mindspore/rewrite/common/event.py +28 -0
  1284. mindspore/rewrite/common/namer.py +271 -0
  1285. mindspore/rewrite/common/namespace.py +118 -0
  1286. mindspore/rewrite/common/observable.py +44 -0
  1287. mindspore/rewrite/common/observer.py +54 -0
  1288. mindspore/rewrite/node/__init__.py +22 -0
  1289. mindspore/rewrite/node/call_function.py +95 -0
  1290. mindspore/rewrite/node/cell_container.py +139 -0
  1291. mindspore/rewrite/node/control_flow.py +113 -0
  1292. mindspore/rewrite/node/node.py +1428 -0
  1293. mindspore/rewrite/node/node_manager.py +283 -0
  1294. mindspore/rewrite/node/node_topological_manager.py +223 -0
  1295. mindspore/rewrite/parsers/__init__.py +29 -0
  1296. mindspore/rewrite/parsers/arguments_parser.py +63 -0
  1297. mindspore/rewrite/parsers/assign_parser.py +852 -0
  1298. mindspore/rewrite/parsers/attribute_parser.py +57 -0
  1299. mindspore/rewrite/parsers/class_def_parser.py +289 -0
  1300. mindspore/rewrite/parsers/constant_parser.py +104 -0
  1301. mindspore/rewrite/parsers/container_parser.py +88 -0
  1302. mindspore/rewrite/parsers/expr_parser.py +55 -0
  1303. mindspore/rewrite/parsers/for_parser.py +61 -0
  1304. mindspore/rewrite/parsers/function_def_parser.py +84 -0
  1305. mindspore/rewrite/parsers/if_parser.py +85 -0
  1306. mindspore/rewrite/parsers/module_parser.py +117 -0
  1307. mindspore/rewrite/parsers/parser.py +43 -0
  1308. mindspore/rewrite/parsers/parser_register.py +86 -0
  1309. mindspore/rewrite/parsers/return_parser.py +37 -0
  1310. mindspore/rewrite/parsers/while_parser.py +59 -0
  1311. mindspore/rewrite/sparsify/__init__.py +0 -0
  1312. mindspore/rewrite/sparsify/sparse_transformer.py +457 -0
  1313. mindspore/rewrite/sparsify/sparsify.py +112 -0
  1314. mindspore/rewrite/sparsify/utils.py +179 -0
  1315. mindspore/rewrite/symbol_tree/__init__.py +20 -0
  1316. mindspore/rewrite/symbol_tree/symbol_tree.py +1819 -0
  1317. mindspore/rewrite/symbol_tree/symbol_tree_builder.py +76 -0
  1318. mindspore/rewrite/symbol_tree/symbol_tree_dumper.py +142 -0
  1319. mindspore/run_check/__init__.py +20 -0
  1320. mindspore/run_check/_check_version.py +507 -0
  1321. mindspore/run_check/run_check.py +66 -0
  1322. mindspore/safeguard/__init__.py +18 -0
  1323. mindspore/safeguard/rewrite_obfuscation.py +875 -0
  1324. mindspore/swresample-4.dll +0 -0
  1325. mindspore/swscale-6.dll +0 -0
  1326. mindspore/tbbmalloc.dll +0 -0
  1327. mindspore/tinyxml2.dll +0 -0
  1328. mindspore/train/__init__.py +48 -0
  1329. mindspore/train/_utils.py +465 -0
  1330. mindspore/train/amp.py +935 -0
  1331. mindspore/train/anf_ir_pb2.py +1517 -0
  1332. mindspore/train/callback/__init__.py +44 -0
  1333. mindspore/train/callback/_backup_and_restore.py +117 -0
  1334. mindspore/train/callback/_callback.py +613 -0
  1335. mindspore/train/callback/_checkpoint.py +814 -0
  1336. mindspore/train/callback/_cluster_monitor.py +201 -0
  1337. mindspore/train/callback/_dataset_graph.py +150 -0
  1338. mindspore/train/callback/_early_stop.py +239 -0
  1339. mindspore/train/callback/_flops_collector.py +239 -0
  1340. mindspore/train/callback/_history.py +92 -0
  1341. mindspore/train/callback/_lambda_callback.py +80 -0
  1342. mindspore/train/callback/_landscape.py +1049 -0
  1343. mindspore/train/callback/_loss_monitor.py +107 -0
  1344. mindspore/train/callback/_lr_scheduler_callback.py +76 -0
  1345. mindspore/train/callback/_on_request_exit.py +298 -0
  1346. mindspore/train/callback/_reduce_lr_on_plateau.py +226 -0
  1347. mindspore/train/callback/_summary_collector.py +1184 -0
  1348. mindspore/train/callback/_tft_register.py +352 -0
  1349. mindspore/train/callback/_time_monitor.py +141 -0
  1350. mindspore/train/checkpoint_pb2.py +233 -0
  1351. mindspore/train/data_sink.py +219 -0
  1352. mindspore/train/dataset_helper.py +692 -0
  1353. mindspore/train/lineage_pb2.py +1260 -0
  1354. mindspore/train/loss_scale_manager.py +213 -0
  1355. mindspore/train/memory_profiling_pb2.py +298 -0
  1356. mindspore/train/metrics/__init__.py +175 -0
  1357. mindspore/train/metrics/accuracy.py +133 -0
  1358. mindspore/train/metrics/auc.py +129 -0
  1359. mindspore/train/metrics/bleu_score.py +170 -0
  1360. mindspore/train/metrics/confusion_matrix.py +700 -0
  1361. mindspore/train/metrics/cosine_similarity.py +109 -0
  1362. mindspore/train/metrics/dice.py +116 -0
  1363. mindspore/train/metrics/error.py +175 -0
  1364. mindspore/train/metrics/fbeta.py +167 -0
  1365. mindspore/train/metrics/hausdorff_distance.py +333 -0
  1366. mindspore/train/metrics/loss.py +97 -0
  1367. mindspore/train/metrics/mean_surface_distance.py +189 -0
  1368. mindspore/train/metrics/metric.py +373 -0
  1369. mindspore/train/metrics/occlusion_sensitivity.py +225 -0
  1370. mindspore/train/metrics/perplexity.py +133 -0
  1371. mindspore/train/metrics/precision.py +160 -0
  1372. mindspore/train/metrics/recall.py +159 -0
  1373. mindspore/train/metrics/roc.py +223 -0
  1374. mindspore/train/metrics/root_mean_square_surface_distance.py +191 -0
  1375. mindspore/train/metrics/topk.py +167 -0
  1376. mindspore/train/mind_ir_pb2.py +1908 -0
  1377. mindspore/train/model.py +2252 -0
  1378. mindspore/train/node_strategy_pb2.py +653 -0
  1379. mindspore/train/print_pb2.py +184 -0
  1380. mindspore/train/profiling_parallel_pb2.py +151 -0
  1381. mindspore/train/serialization.py +3325 -0
  1382. mindspore/train/summary/__init__.py +23 -0
  1383. mindspore/train/summary/_lineage_adapter.py +41 -0
  1384. mindspore/train/summary/_summary_adapter.py +496 -0
  1385. mindspore/train/summary/_writer_pool.py +207 -0
  1386. mindspore/train/summary/enums.py +56 -0
  1387. mindspore/train/summary/summary_record.py +581 -0
  1388. mindspore/train/summary/writer.py +167 -0
  1389. mindspore/train/summary_pb2.py +1165 -0
  1390. mindspore/train/train_thor/__init__.py +20 -0
  1391. mindspore/train/train_thor/convert_utils.py +268 -0
  1392. mindspore/train/train_thor/dataset_helper.py +192 -0
  1393. mindspore/train/train_thor/model_thor.py +257 -0
  1394. mindspore/turbojpeg.dll +0 -0
  1395. mindspore/utils/__init__.py +21 -0
  1396. mindspore/utils/utils.py +60 -0
  1397. mindspore/vcmeta.dll +0 -0
  1398. mindspore/vcomp140.dll +0 -0
  1399. mindspore/vcruntime140.dll +0 -0
  1400. mindspore/vcruntime140_1.dll +0 -0
  1401. mindspore/version.py +1 -0
  1402. mindspore-2.4.0.dist-info/METADATA +352 -0
  1403. mindspore-2.4.0.dist-info/RECORD +1406 -0
  1404. mindspore-2.4.0.dist-info/WHEEL +5 -0
  1405. mindspore-2.4.0.dist-info/entry_points.txt +3 -0
  1406. mindspore-2.4.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2753 @@
1
+ # coding: utf-8
2
+
3
+ # Copyright 2020-2022 Huawei Technologies Co., Ltd
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ # ============================================================================
18
+
19
+ """Operators for sparse operators."""
20
+
21
+ from mindspore import _checkparam as validator
22
+ from mindspore.common import dtype as mstype
23
+ from mindspore.ops import signature as sig
24
+ from mindspore.ops.primitive import prim_attr_register, Primitive
25
+
26
+
27
+ class SparseDenseCwiseAdd(Primitive):
28
+ """
29
+ The dense tensor is broadcast to the shape of the SparseTensor, and then the two operands are
30
+ added element-wise at the positions given by `x1_indices`; the result is output as a 1-D tensor.
31
+ Note: only the dense tensor can be broadcast to the SparseTensor.
32
+
33
+ Inputs:
34
+ - **x1_indices** (Tensor) - A 2-D Tensor, an N x R matrix with the indices of non-empty values in a SparseTensor,
35
+ possibly not in canonical ordering. Supports int64; each element value should be a non-negative number.
36
+ The shape is :math:`(N, R)`.
37
+ - **x1_values** (Tensor) - A 1-D Tensor, N non-empty values corresponding to `x1_indices`.
38
+ The shape should be :math:`(N,)`.
39
+ - **x1_shape**(Tensor) - A Tensor of type int64. 1-D. Shape of the input SparseTensor.
40
+ - **x2** (Tensor) - A R-D tensor, must have the same type as `x1_values`. The dense tensor operand.
41
+
42
+ Returns:
43
+ Tensor, a new instance of SparseDenseCwiseAdd. The dtype is same as `x1_values`, and the shape is same with
44
+ the shape of `x1_values`.
45
+
46
+ Raises:
47
+ TypeError: If the dtype of `x1_indices` and dtype of `x1_shape` is not int64.
48
+ TypeError: If the dtype of `x1_values` and dtype of `x2` is not same.
49
+ ValueError: If the dims of `x1_indices` is not 2.
50
+ ValueError: If the dims of `x1_values` is not 1.
51
+ ValueError: If the dims of `x1_shape` is not 1.
52
+ ValueError: If the dense tensor cannot be broadcast to the SparseTensor: the trailing axes of `x2` and the
53
+ sparse tensor must either have the same size, or the trailing axes of `x2` must have size 1.
54
+ ValueError: If shape[0] of `x1_indices` is not equal to shape[0] of `x1_values`.
55
+ ValueError: If shape[1] of `x1_indices` is not equal to shape[0] of `x1_shape`.
56
+ ValueError: If any index in `x1_indices` is out of bounds.
57
+
58
+
59
+ Supported Platforms:
60
+ ``Ascend`` ``GPU`` ``CPU``
61
+
62
+ Examples:
63
+ >>> from mindspore import Tensor
64
+ >>> from mindspore.common import dtype as ms
65
+ >>> from mindspore.ops.operations import sparse_ops as ops
66
+ >>> x1_indices = Tensor([[0, 0], [2, 2]], dtype=ms.int64)
67
+ >>> x1_values = Tensor([1, 2], dtype=ms.int32)
68
+ >>> x1_shape = Tensor([3, 3], dtype=ms.int64)
69
+ >>> x2=Tensor([1,2,3],dtype=ms.int32)
70
+ >>> sparse_dense_cwise_add = ops.SparseDenseCwiseAdd()
71
+ >>> y = sparse_dense_cwise_add(x1_indices, x1_values, x1_shape, x2)
72
+ >>> print(y)
73
+ [2 5]
74
+ """
75
+
76
+ @prim_attr_register
77
+ def __init__(self):
78
+ """Initialize SparseDenseCwiseAdd."""
79
+ self.init_prim_io_names(
80
+ inputs=['x1_indices', 'x1_values', 'x1_shape', 'x2'], outputs=['y'])
81
+
82
+
83
+ class SparseDenseCwiseMul(Primitive):
84
+ """
85
+ The dense tensor is broadcast to the shape of the SparseTensor, and then the two operands are
86
+ multiplied element-wise at the positions given by `x1_indices`; the result is output as a 1-D tensor.
87
+ Note: only the dense tensor can be broadcast to the SparseTensor.
88
+
89
+ Inputs:
90
+ - **x1_indices** (Tensor) - A 2-D Tensor, an N x R matrix with the indices of non-empty values in a SparseTensor,
91
+ possibly not in canonical ordering. Supports int64; each element value should be a non-negative number.
92
+ The shape is :math:`(N, R)`.
93
+ - **x1_values** (Tensor) - A 1-D Tensor, N non-empty values corresponding to `x1_indices`.
94
+ The shape should be :math:`(N,)`.
95
+ - **x1_shape**(Tensor) - A Tensor of type int64. 1-D. Shape of the input SparseTensor.
96
+ - **x2** (Tensor) - A R-D tensor, must have the same type as `x1_values`. The dense tensor operand.
97
+
98
+ Returns:
99
+ Tensor, a new instance of SparseDenseCwiseMul. The dtype is same as `x1_values`, and the shape is same with the
100
+ shape of `x1_values`.
101
+
102
+ Raises:
103
+ TypeError: If the dtype of `x1_indices` and dtype of `x1_shape` is not int64.
104
+ TypeError: If the dtype of `x1_values` and dtype of `x2` is not same.
105
+ ValueError: If the dims of `x1_indices` is not 2.
106
+ ValueError: If the dims of `x1_values` is not 1.
107
+ ValueError: If the dims of `x1_shape` is not 1.
108
+ ValueError: If the dense tensor cannot be broadcast to the SparseTensor: the trailing axes of `x2` and the
109
+ sparse tensor must either have the same size, or the trailing axes of `x2` must have size 1.
110
+ ValueError: If shape[0] of `x1_indices` is not equal to shape[0] of `x1_values`.
111
+ ValueError: If shape[1] of `x1_indices` is not equal to shape[0] of `x1_shape`.
112
+ ValueError: If any index in `x1_indices` is out of bounds.
113
+
114
+ Supported Platforms:
115
+ ``Ascend`` ``GPU`` ``CPU``
116
+
117
+ Examples:
118
+ >>> from mindspore import Tensor
119
+ >>> from mindspore.common import dtype as ms
120
+ >>> from mindspore.ops.operations import sparse_ops as ops
121
+ >>> x1_indices = Tensor([[0, 0], [2, 2]], dtype=ms.int64)
122
+ >>> x1_values = Tensor([1, 2], dtype=ms.int32)
123
+ >>> x1_shape = Tensor([3, 3], dtype=ms.int64)
124
+ >>> x2=Tensor([1,2,3],dtype=ms.int32)
125
+ >>> sparse_dense_cwise_mul = ops.SparseDenseCwiseMul()
126
+ >>> y = sparse_dense_cwise_mul(x1_indices, x1_values, x1_shape, x2)
127
+ >>> print(y)
128
+ [1 6]
129
+ """
130
+
131
+ @prim_attr_register
132
+ def __init__(self):
133
+ """Initialize SparseDenseCwiseMul."""
134
+ self.init_prim_io_names(
135
+ inputs=['x1_indices', 'x1_values', 'x1_shape', 'x2'], outputs=['y'])
136
+
137
+
138
+ class SparseDenseCwiseDiv(Primitive):
139
+ """
140
+ The dense tensor is broadcast to the shape of the SparseTensor, and then the sparse values are divided
141
+ element-wise by the corresponding dense elements selected by `x1_indices`; the result is output as a 1-D tensor.
142
+ Note: only the dense tensor can be broadcast to the SparseTensor.
143
+
144
+ Inputs:
145
+ - **x1_indices** (Tensor) - A 2-D Tensor, an N x R matrix with the indices of non-empty values in a SparseTensor,
146
+ possibly not in canonical ordering. Supports int64; each element value should be a non-negative number.
147
+ The shape is :math:`(N, R)`.
148
+ - **x1_values** (Tensor) - A 1-D Tensor, N non-empty values corresponding to `x1_indices`.
149
+ The shape should be :math:`(N,)`.
150
+ - **x1_shape**(Tensor) - A Tensor of type int64. 1-D. Shape of the input SparseTensor.
151
+ - **x2** (Tensor) - A R-D tensor, must have the same type as `x1_values`. The dense tensor operand.
152
+
153
+ Returns:
154
+ Tensor, a new instance of SparseDenseCwiseDiv. The dtype is same as `x1_values`, and the shape is same with
155
+ the shape of `x1_values`.
156
+
157
+ Raises:
158
+ TypeError: If the dtype of `x1_indices` and dtype of `x1_shape` is not int64.
159
+ TypeError: If the dtype of `x1_values` and dtype of `x2` is not same.
160
+ ValueError: If the dims of `x1_indices` is not 2.
161
+ ValueError: If the dims of `x1_values` is not 1.
162
+ ValueError: If the dims of `x1_shape` is not 1.
163
+ ValueError: If the dense tensor cannot be broadcast to the SparseTensor: the trailing axes of `x2` and the
164
+ sparse tensor must either have the same size, or the trailing axes of `x2` must have size 1.
165
+ ValueError: If shape[0] of `x1_indices` is not equal to shape[0] of `x1_values`.
166
+ ValueError: If shape[1] of `x1_indices` is not equal to shape[0] of `x1_shape`.
167
+ ValueError: If any index in `x1_indices` is out of bounds.
168
+
169
+ Supported Platforms:
170
+ ``Ascend`` ``GPU``
171
+
172
+ Examples:
173
+ >>> from mindspore import Tensor
174
+ >>> from mindspore.common import dtype as ms
175
+ >>> from mindspore.ops.operations import sparse_ops as ops
176
+ >>> x1_indices = Tensor([[0, 0], [2, 2]], dtype=ms.int64)
177
+ >>> x1_values = Tensor([4, 2], dtype=ms.int32)
178
+ >>> x1_shape = Tensor([3, 3], dtype=ms.int64)
179
+ >>> x2=Tensor([1,2,2],dtype=ms.int32)
180
+ >>> sparse_dense_cwise_div = ops.SparseDenseCwiseDiv()
181
+ >>> y = sparse_dense_cwise_div(x1_indices, x1_values, x1_shape, x2)
182
+ >>> print(y)
183
+ [4 1]
184
+ """
185
+
186
+ @prim_attr_register
187
+ def __init__(self):
188
+ """Initialize SparseDenseCwiseDiv."""
189
+ self.init_prim_io_names(
190
+ inputs=['x1_indices', 'x1_values', 'x1_shape', 'x2'], outputs=['y'])
191
+
192
+
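The three SparseDenseCwise* primitives above share the same gather-then-apply pattern. The following NumPy-only sketch (not part of the diff; the helper name `sparse_dense_cwise` is invented for illustration) shows that pattern: broadcast the dense operand to the sparse tensor's dense shape, gather it at `x1_indices`, then combine it with `x1_values`.

import numpy as np

def sparse_dense_cwise(x1_indices, x1_values, x1_shape, x2, op):
    # Broadcast the dense operand to the sparse tensor's dense shape ...
    dense = np.broadcast_to(x2, tuple(x1_shape))
    # ... gather its values at the non-empty positions ...
    gathered = dense[tuple(x1_indices.T)]
    # ... and combine them with the stored sparse values.
    return op(x1_values, gathered)

x1_indices = np.array([[0, 0], [2, 2]])
x1_values = np.array([1, 2])
x2 = np.array([1, 2, 3])
print(sparse_dense_cwise(x1_indices, x1_values, (3, 3), x2, np.add))       # [2 5]
print(sparse_dense_cwise(x1_indices, x1_values, (3, 3), x2, np.multiply))  # [1 6]

The printed values match the docstring examples of SparseDenseCwiseAdd and SparseDenseCwiseMul above.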
193
+ class SparseSlice(Primitive):
194
+ r"""
195
+ Slices a SparseTensor based on the `start` and `size`.
196
+
197
+ Inputs:
198
+ - **indices** (Tensor) - A 2D Tensor of shape :math:`(N, R)`, the indices of the SparseTensor.
199
+ Support int64, each element value should be a non-negative int number.
200
+ The shape is :math:`(N, R)`.
201
+ - **values** (Tensor) - A 1D Tensor, represents the value corresponding to the position in the `indices`.
202
+ The shape should be :math:`(N,)`.
203
+ - **shape** (Tensor) - A 1D Tensor of type int64 which specifies the shape of sparsetensor,
204
+ represent sparse tensor shape. The shape should be :math:`(R,)`.
205
+ - **start** (Tensor) - A 1D Tensor of type int64, represents the start of the slice.
206
+ The shape should be :math:`(R,)`.
207
+ - **size** (Tensor) - A 1D Tensor of type int64, represents the size of the slice.
208
+ The shape should be :math:`(R,)`.
209
+
210
+ Outputs:
211
+ A `SparseTensor` object resulting from slicing.
212
+
213
+ - \*y_indices: A Tensor of type int64.
214
+ - \*y_values: A Tensor. Has the same type as `values`.
215
+ - \*y_shape: A Tensor of type int64. Has the same size as `size`.
216
+
217
+ Raises:
218
+ TypeError: If the dtype of `indices`, `shape`, `start` or `size` is not int64.
219
+ ValueError: If `indices` is not a 2-D tensor.
220
+ ValueError: If `values`, `start`, `shape` or `size` is not a 1-D tensor.
221
+ ValueError: If the number of `indices` does not correspond to the number of `values`.
222
+ ValueError: If the shape of `indices[1]` does not correspond to `shape`.
223
+ ValueError: If the shape of `shape` does not correspond to `start`.
224
+ ValueError: If the shape of `shape` does not correspond to `size`.
225
+
226
+ Supported Platforms:
227
+
228
+
229
+ Examples:
230
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> indices = Tensor(np.array([[0, 1], [1, 2], [1, 3], [2, 2]]).astype(np.int64))
231
+ >>> values = Tensor(np.array([1, 2, 3, 4]).astype(np.int64))
232
+ >>> shape = Tensor(np.array([3, 4]).astype(np.int64))
233
+ >>> start = Tensor(np.array([0, 1]).astype(np.int64))
234
+ >>> size = Tensor(np.array([2, 3]).astype(np.int64))
235
+ >>> sparseslice = ops.SparseSlice()
236
+ >>> output = sparseslice(indices, values, shape, start, size)
237
+ >>> print(output[0])
238
+ [[0 0]
239
+ [1 1]
240
+ [1 2]]
241
+ >>> print(output[1])
242
+ [1 2 3]
243
+ >>> print(output[2])
244
+ [2 3]
245
+ """
246
+
247
+ @prim_attr_register
248
+ def __init__(self):
249
+ """Initialize SparseSlice."""
250
+ self.init_prim_io_names(inputs=['indices', 'values', 'shape', 'start', 'size'],
251
+ outputs=['y_indices', 'y_values', 'y_shape'])
252
+
253
+
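For intuition, here is a hedged NumPy sketch of the slicing semantics described above (the function name `sparse_slice` is illustrative, not the kernel): keep the entries whose indices fall inside the `[start, start + size)` window, re-base them to the slice origin, and clip the output shape at the tensor boundary.

import numpy as np

def sparse_slice(indices, values, shape, start, size):
    # Keep only entries whose indices lie inside the [start, start + size) window.
    keep = np.all((indices >= start) & (indices < start + size), axis=1)
    y_indices = indices[keep] - start              # re-base indices to the slice origin
    y_values = values[keep]
    y_shape = np.minimum(size, shape - start)      # the slice may be clipped by the boundary
    return y_indices, y_values, y_shape

indices = np.array([[0, 1], [1, 2], [1, 3], [2, 2]])
values = np.array([1, 2, 3, 4])
y_indices, y_values, y_shape = sparse_slice(
    indices, values, np.array([3, 4]), np.array([0, 1]), np.array([2, 3]))
print(y_indices)  # [[0 0] [1 1] [1 2]]
print(y_values)   # [1 2 3]
print(y_shape)    # [2 3]

The three printed tensors match the SparseSlice docstring example above.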
254
+ class SparseSparseMaximum(Primitive):
255
+ """
256
+ Returns a sparse tensor representing the element-wise maximum of two input sparse tensors.
257
+
258
+ Inputs:
259
+ - **x1_indices** - A 2-D Tensor, type int64, represents the position of the element in the x1 sparse tensor.
260
+ each element value should be a non-negative int number. the shape of which should be :math:`(n1, m,)`.
261
+ - **x1_values** - A 1-D Tensor, represents the value corresponding to the position in the `indices`.
262
+ the shape of which should be :math:`(n1,)`.
263
+ - **x1_shape** - A 1-D Tensor, type int64, which specifies the shape of x1 sparse tensor.
264
+ the shape of which should be :math:`(m,)`.
265
+ - **x2_indices** - A 2-D Tensor, type int64, represents the position of the element in the x2 sparse tensor.
266
+ each element value should be a non-negative int number. the shape of which should be :math:`(n2, m,)`.
267
+ - **x2_values** - A 1-D Tensor, represents the value corresponding to the position in the `indices`.
268
+ the shape of which should be :math:`(n2,)`.
269
+ - **x2_shape** - A 1-D Tensor, type int64, which specifies the shape of x2 sparse tensor.
270
+ the shape of which should be :math:`(m,)`.
271
+
272
+ Returns:
273
+ - **y_indices** - A 2-D Tensor, type int64. It represents the position of the element-wise max of
274
+ two input tensors.
275
+ - **y_values** - A 1-D Tensor. It represents the value corresponding to the position
276
+ in the `y_indices`. Has the same type as x1_values.
277
+
278
+ Raises:
279
+ TypeError: If the dtype of `x1_indices`, `x1_shape`, `x2_indices` or `x2_shape` isn't int64.
280
+ TypeError: If the dtype of `x1_values` or `x2_values` isn't supported.
281
+ TypeError: If the dtype of `x1_values` and `x2_values` isn't the same.
282
+ TypeError: If any input is not a Tensor.
283
+ ValueError: If x1_indices.shape[0] and x1_values.shape[0] aren't the same.
284
+ ValueError: If x2_indices.shape[0] and x2_values.shape[0] aren't the same.
285
+ ValueError: If x1_indices.shape[1] and x1_shape.shape[0] aren't the same.
286
+ ValueError: If x2_indices.shape[1] and x2_shape.shape[0] aren't the same.
287
+ ValueError: If the `x1_shape` and `x2_shape` mismatch with each other.
288
+
289
+ Supported Platforms:
290
+ ``Ascend`` ``GPU`` ``CPU``
291
+
292
+ Examples:
293
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor, ops
+ >>> x1_indices = Tensor([[0, 1], [1, 2]])
294
+ >>> x1_values = Tensor([1, 2], dtype=ms.float32)
295
+ >>> x1_shape = Tensor([3, 3])
296
+ >>> x2_indices = Tensor([[0, 1], [1, 1]])
297
+ >>> x2_values = Tensor([3, 4], dtype=ms.float32)
298
+ >>> x2_shape = Tensor([3, 3])
299
+ >>> SparseSparseMaximum = ops.SparseSparseMaximum()
300
+ >>> y_indices, y_values = SparseSparseMaximum(x1_indices, x1_values, x1_shape, x2_indices, x2_values, x2_shape)
301
+ >>> print(y_indices)
302
+ [[0. 1.]
303
+ [1. 1.]
304
+ [1. 2.]]
305
+ >>> print(y_values)
306
+ [3. 4. 2.]
307
+ """
308
+
309
+ @prim_attr_register
310
+ def __init__(self):
311
+ """Initialize SparseSparseMaximum."""
312
+ self.init_prim_io_names(inputs=['x1_indices', 'x1_values', 'x1_shape', 'x2_indices', 'x2_values', 'x2_shape'],
313
+ outputs=['y_indices', 'y_values'])
314
+
315
+
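A small Python/NumPy sketch of the element-wise maximum over the union of the two index sets (entries missing from one operand behave like zero); `sparse_sparse_maximum` is an invented name and the sketch mirrors the docstring example rather than the real kernel.

import numpy as np

def sparse_sparse_maximum(x1_indices, x1_values, x2_indices, x2_values):
    # Union of both index sets; absent entries behave like zero.
    d1 = {tuple(i): v for i, v in zip(x1_indices.tolist(), x1_values)}
    d2 = {tuple(i): v for i, v in zip(x2_indices.tolist(), x2_values)}
    keys = sorted(set(d1) | set(d2))            # canonical row-major order
    y_values = np.array([max(d1.get(k, 0), d2.get(k, 0)) for k in keys])
    return np.array(keys), y_values

y_indices, y_values = sparse_sparse_maximum(
    np.array([[0, 1], [1, 2]]), np.array([1.0, 2.0]),
    np.array([[0, 1], [1, 1]]), np.array([3.0, 4.0]))
print(y_indices)  # [[0 1] [1 1] [1 2]]
print(y_values)   # [3. 4. 2.]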
316
+ class SetSize(Primitive):
317
+ """
318
+ Number of unique elements along last dimension of input set.
319
+
320
+ Args:
321
+ validate_indices (bool): If ``True`` , the indices are checked to make sure they are sorted in
+ lexicographic order and contain no repeats. Default: ``True`` .
322
+
323
+ Inputs:
324
+ - **set_indices** (Tensor) - A 2-D Tensor, represents the position of the element in the sparse tensor.
325
+ Support int64, each element value should be a non-negative int number. The shape is :math:`(n, 2)`.
326
+ - **set_values** (Tensor) - A 1-D Tensor, represents the value corresponding to the position in
327
+ the `set_indices`. Support int8, int16, int32, int64, uint8, uint16, string, the shape should
328
+ be :math:`(n,)`.
329
+ - **set_shape** (Tensor) - A 1-D Tensor, represents the shape of a SparseTensor,
330
+ Support int64, the shape should be :math:`(n,)`.
331
+
332
+ Outputs:
333
+ Tensor. The dtype is int32, and the shape is set_shape[0:-1].
334
+
335
+ Raises:
336
+ TypeError: If the type of inputs is not Tensor.
337
+ TypeError: If the type of `set_values` is not one of the following dtype: int8, int16, uint8, uint16,
338
+ int32, int64.
339
+ TypeError: If the type of `validate_indices` is not bool, or the dtype of `set_indices` and `set_shape`
340
+ is not int64.
341
+ ValueError: If the shape of `set_shape`, shape of `set_indices` and shape of `set_values` don't meet the
342
+ parameter description.
343
+
344
+ Supported Platforms:
345
+ ``Ascend`` ``CPU``
346
+
347
+ Examples:
348
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops
+ >>> set_indices = Tensor(np.array([[0, 1], [1, 2]]).astype(np.int64))
349
+ >>> set_values = Tensor(np.array([1, 2]).astype(np.int64))
350
+ >>> set_shape = Tensor(np.array([3, 4]).astype(np.int64))
351
+ >>> setsize = ops.SetSize()
352
+ >>> out = setsize(set_indices, set_values, set_shape)
353
+ >>> print(out)
354
+ [1 1 0]
355
+ """
356
+
357
+ @prim_attr_register
358
+ def __init__(self, validate_indices=True):
359
+ """Initialize SetSize."""
360
+ self.validate_indices = validate_indices
361
+ validator.check_bool(validate_indices, "validate_indices", self.name)
362
+ self.init_prim_io_names(inputs=['set_indices', 'set_values', 'set_shape'],
363
+ outputs=['size'])
364
+ self.add_prim_attr("validate_indices", self.validate_indices)
365
+ self.add_prim_attr("max_length", 1000)
366
+
367
+
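To make the "number of unique elements along the last dimension" concrete, here is a minimal NumPy sketch for the 2-D case (one set per row); `set_size_2d` is an invented helper, not the operator.

import numpy as np

def set_size_2d(set_indices, set_values, set_shape):
    # One set per row of the (rows, cols) sparse tensor; count distinct values per row.
    rows = int(set_shape[0])
    return np.array([len(set(set_values[set_indices[:, 0] == r].tolist()))
                     for r in range(rows)], dtype=np.int32)

set_indices = np.array([[0, 1], [1, 2]])
set_values = np.array([1, 2])
set_shape = np.array([3, 4])
print(set_size_2d(set_indices, set_values, set_shape))  # [1 1 0]

The output matches the SetSize docstring example above.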
368
+ class SparseReorder(Primitive):
369
+ """
370
+ Reorders a SparseTensor into the canonical, row-major ordering.
371
+
372
+ Inputs:
373
+ - **indices** (Tensor) - A 2-D Tensor, represents the position of the element in the sparse tensor.
374
+ Support int64, each element value should be a non-negative int number.The shape is :math:`(n, d)`.
375
+ - **values** (Tensor) - A 1-D Tensor, represents the value corresponding to the position in the `indices`.
376
+ The shape should be :math:`(n,)`.
377
+ - **shape** (Tensor) - A 1-D Tensor, represents the shape corresponding to the position in the `indices`.
378
+ Support int64, each element value should be a non-negative int number.The shape should be :math:`(d,)`.
379
+ Outputs:
380
+ - **y_indices** (Tensor) - Has the same type as "indices".
381
+ - **y_values** (Tensor) - Has the same type as "values" .
382
+
383
+ Raises:
384
+ TypeError: If `indices` or `shape` is not tensor or its dtype is not int64.
385
+ TypeError: If `values` is not tensor or its dtype is incorrect.
386
+ ValueError: If the index exceeds the bounds. (Raise RuntimeError if on GPU Platform)
387
+ ValueError: If the size of `indices` tensor shape is not equal to 2.
388
+ ValueError: If the size of `values` or `shape` tensor shape is not equal to 1.
389
+ ValueError: If the first dimension length of `values` is not equal to the first dimension length of `indices`.
390
+ ValueError: If the first dimension length of `shape` is not equal to the second dimension length of `indices`.
391
+
392
+ Supported Platforms:
393
+ ``GPU`` ``CPU``
394
+
395
+ Examples:
396
+ >>> import mindspore.common.dtype as ms
397
+ >>> from mindspore import Tensor
398
+ >>> import mindspore.ops.operations.sparse_ops as op
399
+ >>> indices = Tensor([[2, 1], [0, 1]], dtype=ms.int64)
400
+ >>> values = Tensor([1, 2], dtype=ms.int16)
401
+ >>> shape = Tensor([3,3], dtype=ms.int64)
402
+ >>> sparse_reorder = op.SparseReorder()
403
+ >>> y_indices,y_values = sparse_reorder(indices, values, shape)
404
+ >>> print(y_indices)
405
+ [[0 1]
406
+ [2 1]]
407
+ >>> print(y_values)
408
+ [2 1]
409
+ """
410
+
411
+ @prim_attr_register
412
+ def __init__(self):
413
+ """Initialize SparseReorder."""
414
+ self.init_prim_io_names(inputs=['indices', 'values', 'shape'], outputs=['y_indices', 'y_values'])
415
+
416
+
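The canonical, row-major ordering is simply a lexicographic sort of the COO index tuples. A hedged NumPy sketch (the helper name is illustrative): np.lexsort treats its last key as the primary one, so the index columns are passed in reverse.

import numpy as np

def sparse_reorder(indices, values):
    # Lexicographic (row-major) sort of the COO entries; lexsort's last key is primary.
    order = np.lexsort(indices.T[::-1])
    return indices[order], values[order]

indices = np.array([[2, 1], [0, 1]])
values = np.array([1, 2], dtype=np.int16)
y_indices, y_values = sparse_reorder(indices, values)
print(y_indices)  # [[0 1] [2 1]]
print(y_values)   # [2 1]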
417
+ class SparseToDense(Primitive):
418
+ """
419
+ Converts a sparse representation into a dense tensor.
420
+
421
+ Inputs:
422
+ - **indices** (Tensor) - A 2-D Tensor, represents the position of the element in the sparse tensor.
423
+ Support int32, int64, each element value should be a non-negative int number. The shape is :math:`(n, 2)`.
424
+ - **values** (Tensor) - A 1-D Tensor, represents the value corresponding to the position in the `indices`.
425
+ The shape should be :math:`(n,)`.
426
+ - **sparse_shape** (tuple(int)) - A positive int tuple which specifies the shape of sparse tensor,
427
+ should have 2 elements, represent sparse tensor shape is :math:`(N, C)`.
428
+
429
+ Outputs:
430
+ Tensor, converted from sparse tensor. The dtype is same as `values`, and the shape is `sparse_shape`.
431
+
432
+ Raises:
433
+ TypeError: If the dtype of `indices` is neither int32 nor int64.
434
+ ValueError: If `sparse_shape`, shape of `indices` and shape of `values` don't meet the parameter description.
435
+
436
+ Supported Platforms:
437
+ ``CPU``
438
+
439
+ Examples:
440
+ >>> import mindspore
441
+ >>> from mindspore import Tensor, ops
442
+ >>> indices = Tensor([[0, 1], [1, 2]])
443
+ >>> values = Tensor([1, 2], dtype=mindspore.float32)
444
+ >>> sparse_shape = (3, 4)
445
+ >>> sparse_to_dense = ops.SparseToDense()
446
+ >>> out = sparse_to_dense(indices, values, sparse_shape)
447
+ >>> print(out)
448
+ [[0. 1. 0. 0.]
449
+ [0. 0. 2. 0.]
450
+ [0. 0. 0. 0.]]
451
+ """
452
+
453
+ @prim_attr_register
454
+ def __init__(self):
455
+ """Initialize SparseToDense."""
456
+ self.init_prim_io_names(
457
+ inputs=['indices', 'values', 'dense_shape'], outputs=['output'])
458
+
459
+
460
+ class SparseToDenseV2(Primitive):
461
+ """
462
+ Converts a sparse representation into a dense tensor.
463
+
464
+ Args:
465
+ validate_indices (bool): If true, indices are checked to make sure they are sorted in
466
+ lexicographic order and that there are no repeats. Default: ``True`` .
467
+
468
+ Inputs:
469
+ - **indices** (Tensor) - A 0D, 1D, or 2D Tensor of type int32 or int64, represents the position
470
+ of the element in the sparse tensor.
471
+ - **output_shape** (Tensor) - A 1D Tensor of the same type as `indices`, represents the shape
472
+ of the dense output tensor.
473
+ - **values** (Tensor) - A 1D Tensor, represents the value corresponding to the position in the `indices`
474
+ or a scalar value to be used for all indices.
475
+ - **default_value** (Tensor) - A 0-D Tensor of the same type as `values`, the scalar value to
476
+ set for indices not specified in `indices`.
477
+
478
+ Returns:
479
+ Tensor, converted from sparse tensor. The dtype is same as `values`, and the shape is `output_shape`.
480
+
481
+ Raises:
482
+ TypeError: If the dtype of `indices` is neither int32 nor int64.
483
+ TypeError: If the dtype of `output_shape` is neither int32 nor int64.
484
+ ValueError: If the shape of `output_shape`, shape of `indices`,
485
+ shape of `default_value` and shape of `values` don't meet the parameter description.
486
+ ValueError: If any element of `output_shape` is not > 0.
487
+ ValueError: If shape[0] of `indices` doesn't match the number of elements in `values`.
488
+
489
+ Supported Platforms:
490
+ ``Ascend`` ``GPU`` ``CPU``
491
+
492
+ Examples:
493
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor, ops
+ >>> indices = Tensor([[0, 1], [1, 2]], dtype=ms.int32)
494
+ >>> output_shape = Tensor([3, 4], dtype=ms.int32)
495
+ >>> values = Tensor([1, 2], dtype=ms.float32)
496
+ >>> default_value = Tensor(0, dtype=ms.float32)
497
+ >>> sparse_to_dense_v2 = ops.SparseToDenseV2()
498
+ >>> out = sparse_to_dense_v2(indices, output_shape, values, default_value)
499
+ >>> print(out)
500
+ [[0. 1. 0. 0.]
501
+ [0. 0. 2. 0.]
502
+ [0. 0. 0. 0.]]
503
+ """
504
+
505
+ @prim_attr_register
506
+ def __init__(self, validate_indices=True):
507
+ """Initialize SparseToDenseV2."""
508
+ self.add_prim_attr("max_length", 1000000)
509
+ self.validate_indices = validate_indices
510
+ validator.check_value_type('validate_indices', validate_indices, [bool], self.name)
511
+ self.add_prim_attr("validate_indices", self.validate_indices)
512
+ self.init_prim_io_names(
513
+ inputs=['indices', 'output_shape', 'values', 'default_value'], outputs=['output'])
514
+
515
+
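A brief NumPy sketch of the densification with a fill value described above (an illustrative helper, not the operator): start from a tensor filled with `default_value`, then scatter `values` at `indices`.

import numpy as np

def sparse_to_dense_v2(indices, output_shape, values, default_value):
    # Fill the output with default_value, then scatter the sparse values in.
    out = np.full(tuple(output_shape), default_value, dtype=values.dtype)
    out[tuple(indices.T)] = values
    return out

indices = np.array([[0, 1], [1, 2]])
out = sparse_to_dense_v2(indices, (3, 4), np.array([1.0, 2.0], dtype=np.float32), 0.0)
print(out)  # rows: [0. 1. 0. 0.], [0. 0. 2. 0.], [0. 0. 0. 0.]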
516
+ class SparseSoftmax(Primitive):
517
+ """
518
+ Similar to softmax but with the catch that the implicitly zero elements do not participate.
519
+
520
+ Inputs:
521
+ - **indices** (Tensor) - A 2-D Tensor, represents the position of the element in the sparse tensor.
522
+ Support int64, each element value should be a non-negative int number. The shape is :math:`(n, m)`.
523
+ - **values** (Tensor) - A 1-D Tensor, represents the value corresponding to the position in the `indices`.
524
+ The shape should be :math:`(n,)`.
525
+ - **shape** (Tensor) - A 1-D Tensor, represents the shape of sparse tensor,
526
+ should have 2 or more than 2 elements, represent sparse tensor shape is :math:`(N, ... , C)`.
527
+
528
+ Returns:
529
+ Tensor, calculated from sparse tensor. The dtype is same as `values`, and the shape is same as `values`.
530
+
531
+ Raises:
532
+ TypeError: If the dtype of `indices` or `shape` is not int64.
533
+ TypeError: If the dtype of `values` is neither float32 nor float64.
534
+ ValueError: If the shape[0] of indices isn't equal to size of values.
535
+ ValueError: If the shape[1] of indices isn't equal to size of shape.
536
+ ValueError: If the indices is not 2D.
537
+ ValueError: If the values is not 1D.
538
+ ValueError: If the shape is not 1D.
539
+ ValueError: If the size of shape < 2.
540
+
541
+ Supported Platforms:
542
+ ``Ascend`` ``GPU`` ``CPU``
543
+
544
+ Examples:
545
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor, ops
+ >>> indices = Tensor([[0,0], [0,3], [1,2], [1,5], [2,0], [2,5]])
546
+ >>> values = Tensor([1.0 ,2.0 ,3.0 ,4.0 ,5.0 ,6.0 ], dtype=ms.float64)
547
+ >>> shape = Tensor([6, 6])
548
+ >>> sparsesoftmax = ops.SparseSoftmax()
549
+ >>> out = sparsesoftmax(indices, values, shape)
550
+ >>> print(out)
551
+ [0.26894142 0.73105858 0.26894142 0.73105858 0.26894142 0.73105858]
552
+ """
553
+
554
+ @prim_attr_register
555
+ def __init__(self):
556
+ """Initialize SparseSoftmax."""
557
+ self.init_prim_io_names(inputs=['indices', 'values', 'shape'], outputs=['output'])
558
+
559
+
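The "implicitly zero elements do not participate" rule means softmax is taken per row over only the stored values. A NumPy sketch for the 2-D case follows (the helper is illustrative and only covers two-dimensional indices).

import numpy as np

def sparse_softmax_2d(indices, values):
    # Softmax over the stored values of each row; implicit zeros are simply absent.
    out = np.empty_like(values, dtype=np.float64)
    for r in np.unique(indices[:, 0]):
        mask = indices[:, 0] == r
        e = np.exp(values[mask] - values[mask].max())   # numerically stabilised softmax
        out[mask] = e / e.sum()
    return out

indices = np.array([[0, 0], [0, 3], [1, 2], [1, 5], [2, 0], [2, 5]])
values = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
print(sparse_softmax_2d(indices, values))
# [0.26894142 0.73105858 0.26894142 0.73105858 0.26894142 0.73105858]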
560
+ class SparseTensorDenseAdd(Primitive):
561
+ """
562
+ Add a sparse tensor and a dense tensor to get a dense tensor.
563
+
564
+ Inputs:
565
+ - **x1_indices** (Tensor) - A 2-D Tensor, represents the position of the element in the sparse tensor.
566
+ Support int32, int64, each element value should be a non-negative int number. The shape is :math:`(n, ndim)`.
567
+ - **x1_values** (Tensor) - A 1-D Tensor, represents the value corresponding to the position in the `indices`.
568
+ The shape should be :math:`(n,)`.
569
+ - **x1_shape** (tuple(int)) - A positive int tuple which specifies the shape of sparse tensor,
570
+ should have ndim elements, represent sparse tensor shape is :math:`(ndim,)`.
571
+ - **x2** (Tensor) - A dense Tensor, the dtype is same as `values`.
572
+
573
+ Outputs:
574
+ Tensor, add result of sparse tensor and dense tensor. The dtype is same as `values`,
575
+ and the shape is `x1_shape`.
576
+
577
+ Raises:
578
+ TypeError: If the dtype of `x1_indices` and `x1_shape` is neither int32 nor int64.
579
+ ValueError: If `x1_shape`, shape of `x1_indices`, shape of `x1_values` and shape
580
+ of `x2` don't meet the parameter description.
581
+
582
+ Supported Platforms:
583
+ ``GPU`` ``CPU``
584
+
585
+ Examples:
586
+ >>> from mindspore import Tensor
587
+ >>> from mindspore import ops
588
+ >>> from mindspore import dtype as mstype
589
+ >>> x1_indices = Tensor([[0, 0], [0, 1]], dtype=mstype.int64)
590
+ >>> x1_values = Tensor([1, 1], dtype=mstype.float32)
591
+ >>> x1_shape = Tensor([3, 3], dtype=mstype.int64)
592
+ >>> x2= Tensor([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=mstype.float32)
593
+ >>> sparse_tensor_dense_add = ops.SparseTensorDenseAdd()
594
+ >>> out = sparse_tensor_dense_add(x1_indices, x1_values, x1_shape, x2)
595
+ >>> print(out)
596
+ [[2. 2. 1.]
597
+ [1. 1. 1.]
598
+ [1. 1. 1.]]
599
+ """
600
+
601
+ @prim_attr_register
602
+ def __init__(self):
603
+ """Initialize SparseTensorDenseAdd."""
604
+ self.init_prim_io_names(
605
+ inputs=['x1_indices', 'x1_values', 'x1_shape', 'x2'], outputs=['y'])
606
+
607
+
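A compact NumPy sketch of the scatter-add semantics above (illustrative, not the kernel): add each sparse value into the matching position of a copy of the dense operand.

import numpy as np

def sparse_tensor_dense_add(x1_indices, x1_values, x2):
    # Scatter-add each sparse value into the matching position of the dense operand.
    out = x2.copy()
    np.add.at(out, tuple(x1_indices.T), x1_values)
    return out

x1_indices = np.array([[0, 0], [0, 1]])
x1_values = np.array([1.0, 1.0])
x2 = np.ones((3, 3), dtype=np.float64)
print(sparse_tensor_dense_add(x1_indices, x1_values, x2))
# [[2. 2. 1.] [1. 1. 1.] [1. 1. 1.]]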
608
+ class SparseTensorDenseMatmul(Primitive):
609
+ """
610
+ Multiplies sparse matrix `A` by dense matrix `B`.
611
+ The rank of sparse matrix and dense matrix must be equal to `2`.
612
+
613
+ Args:
614
+ adjoint_st (bool): If ``True`` , sparse tensor is transposed before multiplication. Default: ``False`` .
615
+ adjoint_dt (bool): If ``True`` , dense tensor is transposed before multiplication. Default: ``False`` .
616
+
617
+ Inputs:
618
+ - **indices** (Tensor) - A 2-D Tensor, represents the position of the element in the sparse tensor.
619
+ Support int32, int64, each element value should be a non-negative int number. The shape is :math:`(n, 2)`.
620
+ - **values** (Tensor) - A 1-D Tensor, represents the value corresponding to the position in the `indices`.
621
+ Support float16, float32, float64, int32, int64, complex64, complex128. The shape should be :math:`(n,)`.
622
+ - **sparse_shape** (tuple(int) or (Tensor)) - A positive int tuple or tensor which specifies the shape of
623
+ sparse tensor, and only constant value is allowed when sparse_shape is a tensor, should have 2 elements,
624
+ represent sparse tensor shape is :math:`(N, C)`.
625
+ - **dense** (Tensor) - A 2-D Tensor, the dtype is same as `values`.
626
+ If `adjoint_st` is False and `adjoint_dt` is False, the shape must be :math:`(C, M)`.
627
+ If `adjoint_st` is False and `adjoint_dt` is True, the shape must be :math:`(M, C)`.
628
+ If `adjoint_st` is True and `adjoint_dt` is False, the shape must be :math:`(N, M)`.
629
+ If `adjoint_st` is True and `adjoint_dt` is True, the shape must be :math:`(M, N)`.
630
+
631
+ Outputs:
632
+ Tensor, the dtype is the same as `values`.
633
+ If `adjoint_st` is False, the shape is :math:`(N, M)`.
634
+ If `adjoint_st` is True, the shape is :math:`(C, M)`.
635
+
636
+ Raises:
637
+ TypeError: If the type of `adjoint_st` or `adjoint_dt` is not bool, or the dtype of `indices`,
638
+ dtype of `values` and dtype of `dense` don't meet the parameter description.
639
+ ValueError: If `sparse_shape`, shape of `indices`, shape of `values`,
640
+ and shape of `dense` don't meet the parameter description.
641
+
642
+ Supported Platforms:
643
+ ``GPU`` ``CPU``
644
+
645
+ Examples:
646
+ >>> import mindspore
647
+ >>> from mindspore import Tensor
648
+ >>> from mindspore.ops import operations as ops
649
+ >>> from mindspore import dtype as mstype
650
+ >>> indices = Tensor([[0, 1], [1, 2]], dtype=mindspore.int32)
651
+ >>> values = Tensor([1, 2], dtype=mindspore.float32)
652
+ >>> sparse_shape = (3, 4)
653
+ >>> dense = Tensor([[1, 1], [2, 2], [3, 3], [4, 4]], dtype=mindspore.float32)
654
+ >>> sparse_dense_matmul = ops.SparseTensorDenseMatmul()
655
+ >>> out = sparse_dense_matmul(indices, values, sparse_shape, dense)
656
+ >>> print(out)
657
+ [[2. 2.]
658
+ [6. 6.]
659
+ [0. 0.]]
660
+ """
661
+
662
+ @prim_attr_register
663
+ def __init__(self, adjoint_st=False, adjoint_dt=False):
664
+ """Initialize SparseTensorDenseMatmul"""
665
+ self.adjoint_st = adjoint_st
666
+ self.adjoint_dt = adjoint_dt
667
+ self.init_prim_io_names(inputs=['indices', 'values', 'sparse_shape', 'dense'],
668
+ outputs=['output'])
669
+ self.add_prim_attr('adjoint_a', self.adjoint_st)
670
+ self.add_prim_attr('adjoint_b', self.adjoint_dt)
671
+ validator.check_value_type("adjoint_st", adjoint_st, [bool], self.name)
672
+ validator.check_value_type("adjoint_dt", adjoint_dt, [bool], self.name)
673
+
674
+
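A rough NumPy sketch of the shape rules above (illustrative only; for real-valued inputs the adjoint reduces to a plain transpose): densify the sparse operand, apply the optional transposes, and multiply.

import numpy as np

def sparse_dense_matmul(indices, values, sparse_shape, dense,
                        adjoint_st=False, adjoint_dt=False):
    # Densify the sparse operand, then apply the optional transposes and matmul.
    a = np.zeros(sparse_shape, dtype=values.dtype)
    a[tuple(indices.T)] = values
    a = a.T if adjoint_st else a        # for complex dtypes the adjoint would also conjugate
    b = dense.T if adjoint_dt else dense
    return a @ b

indices = np.array([[0, 1], [1, 2]])
values = np.array([1.0, 2.0], dtype=np.float32)
dense = np.array([[1, 1], [2, 2], [3, 3], [4, 4]], dtype=np.float32)
print(sparse_dense_matmul(indices, values, (3, 4), dense))
# [[2. 2.] [6. 6.] [0. 0.]]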
675
+ class CSRSparseMatrixToSparseTensor(Primitive):
676
+ """
677
+ Converts a CSR sparse matrix (maybe batched) to its sparse tensor form.
678
+
679
+ Inputs:
680
+ - **x_dense_shape** (Tensor) - A 1-D Tensor. It represents the dense form shape of
681
+ the input CSR sparse matrix, the shape of which should be :math:`(2,)` or :math:`(3,)`.
682
+ - **x_batch_pointers** (Tensor) - A 1-D Tensor. Supposing the input CSR sparse matrix is of
683
+ batch size `n`, it should have shape :math:`(n+1,)`, while the `i`-th element of which stores
684
+ acummulated counts of non-zero values of the first `i - 1` batches.
685
+ - **x_row_pointers** (Tensor) - A 1-D Tensor. Supposing the input CSR sparse matrix is of
686
+ batch size `n` and row number `m`, it can be divided into `n` parts, each part of length
687
+ `m + 1`. The `i`-th element of each :math:`(m+1,)` vector stores accumulated counts of
688
+ non-zero values of the first `i - 1` rows in the corresponding batch.
689
+ - **x_col_indices** (Tensor) - A 1-D Tensor. It represents column indices of the non-zero values
690
+ in the input CSR sparse matrix.
691
+ - **x_values** (Tensor) - A 1-D Tensor. It represents all the non-zero values in the
692
+ input CSR sparse matrix.
693
+
694
+ Outputs:
695
+ - **indices** (Tensor) - A 2-D Tensor. It represents the position of the non-zero element
696
+ in the sparse tensor.
697
+ - **values** (Tensor) - A 1-D Tensor. It represents the value corresponding to the position
698
+ in the `indices`, the shape of which should be :math:`(N,)`.
699
+ - **dense_shape** (Tensor) - A 1-D Tensor. It represents the dense form shape of
700
+ the sparse tensor. Its shape should be :math:`(2,)` or :math:`(3,)`.
701
+
702
+ Raises:
703
+ TypeError: If the dtype of `x_dense_shape` or `x_batch_pointers` or `x_row_pointers` or
704
+ `x_col_indices` is not int32 or int64.
705
+ TypeError: If the dtype of `x_values` is not float32, float64, complex64 or complex128.
706
+ ValueError: If `x_dense_shape` or `x_batch_pointers` or `x_row_pointers` or `x_col_indices` or
707
+ `x_values` is not a 1-D tensor.
708
+ ValueError: If shape[0] of `x_dense_shape` is not 2 or 3.
709
+ ValueError: If shape of `x_col_indices` is not corresponding to shape of `x_values`.
710
+
711
+ Supported Platforms:
712
+ ``Ascend`` ``GPU`` ``CPU``
713
+
714
+ Examples:
715
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> from mindspore.ops.operations.sparse_ops import CSRSparseMatrixToSparseTensor
716
+ >>> x_dense_shape = Tensor(np.array([2, 2, 4]).astype(np.int64))
717
+ >>> x_batch_pointers = Tensor(np.array([0, 3, 6]).astype(np.int64))
718
+ >>> x_row_pointers = Tensor(np.array([0, 1, 3, 0, 1, 3]).astype(np.int64))
719
+ >>> x_col_indices = Tensor(np.array([1, 2, 3, 1, 2, 3]).astype(np.int64))
720
+ >>> x_values = Tensor(np.array([1, 4, 3, 1, 4, 3]).astype(np.float32))
721
+ >>> csr_sparse_matrix_to_sparse_tensor = CSRSparseMatrixToSparseTensor()
722
+ >>> out = csr_sparse_matrix_to_sparse_tensor(x_dense_shape, x_batch_pointers, x_row_pointers,
723
+ ... x_col_indices, x_values)
724
+ >>> print(out[0])
725
+ [[0 0 1]
726
+ [0 1 2]
727
+ [0 1 3]
728
+ [1 0 1]
729
+ [1 1 2]
730
+ [1 1 3]]
731
+ >>> print(out[1])
732
+ [1. 4. 3. 1. 4. 3.]
733
+ >>> print(out[2])
734
+ [2 2 4]
735
+ """
736
+
737
+ @prim_attr_register
738
+ def __init__(self):
739
+ """Initialize CSRSparseMatrixToSparseTensor."""
740
+ self.init_prim_io_names(inputs=['x_dense_shape', 'x_batch_pointers', 'x_row_pointers',
741
+ 'x_col_indices', 'x_values'],
742
+ outputs=['indices', 'values', 'dense_shape'])
743
+
744
+
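+ # Editor's illustrative sketch (hypothetical helper, not part of the original file): how the COO
+ # `indices` output of CSRSparseMatrixToSparseTensor can be recovered from the CSR pointers with
+ # plain NumPy, assuming the five inputs form a valid (possibly batched) CSR matrix.
+ def _csr_to_coo_indices_sketch(dense_shape, batch_pointers, row_pointers, col_indices):
+     import numpy as np  # assumed available; used only by this sketch
+     num_batches = len(batch_pointers) - 1
+     num_rows = int(dense_shape[-2])
+     coo = []
+     for b in range(num_batches):
+         # The row pointers of batch b are stored back to back, each block of length num_rows + 1.
+         rp = row_pointers[b * (num_rows + 1):(b + 1) * (num_rows + 1)]
+         for row in range(num_rows):
+             for pos in range(int(rp[row]), int(rp[row + 1])):
+                 col = int(col_indices[int(batch_pointers[b]) + pos])
+                 coo.append((b, row, col) if len(dense_shape) == 3 else (row, col))
+     # With the docstring example this returns [[0 0 1] [0 1 2] [0 1 3] [1 0 1] [1 1 2] [1 1 3]].
+     return np.array(coo)
+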
745
+ class DenseToCSRSparseMatrix(Primitive):
746
+ """
747
+ Converts a dense matrix (possibly batched) to its CSR sparse form.
748
+
749
+ .. warning::
750
+ This is an experimental API that is subject to change or deletion.
751
+
752
+ Inputs:
753
+ - **dense_input** (Tensor) - A 2-D or 3-D Tensor. It represents the input dense matrix.
754
+ - **indices** (Tensor) - A 2-D Tensor. It represents indices of all the nonzero elements.
755
+
756
+ Outputs:
757
+ - **y_dense_shape** (Tensor) - A 1-D Tensor. It represents the dense form shape of
758
+ the output CSR sparse matrix, the shape of which should be :math:`(2,)` or :math:`(3,)`.
759
+ - **y_batch_pointers** (Tensor) - A 1-D Tensor. Supposing the output CSR sparse matrix is of
760
+ batch size `n`, it should have shape :math:`(n+1,)`, while the `i`-th element of which stores
761
+ accumulated counts of nonzero values of the first `i - 1` batches.
762
+ - **y_row_pointers** (Tensor) - A 1-D Tensor. Supposing the output CSR sparse matrix is of
763
+ batch size `n` and row number `m`, it can be divided into `n` parts, each part of length
764
+ `m + 1`. The `i`-th element of each :math:`(m+1,)` vector stores accumulated counts of
765
+ nonzero values of the first `i - 1` rows in the corresponding batch.
766
+ - **y_col_indices** (Tensor) - A 1-D Tensor. It represents column indices of the nonzero values
767
+ in the output CSR sparse matrix.
768
+ - **y_values** (Tensor) - A 1-D Tensor. It represents all the nonzero values in the
769
+ output CSR sparse matrix.
770
+
771
+ Raises:
772
+ TypeError: If the dtype of `indices` is not int32 or int64.
773
+ TypeError: If the dtype of `dense_input` is not float32, float64, complex64 or complex128.
774
+ ValueError: If either of the inputs is not a tensor.
775
+ ValueError: If rank of `dense_input` is not 2 or 3.
776
+ ValueError: If rank of `indices` is not 2.
777
+ ValueError: If shape[1] of `indices` and rank of `dense_input` are not the same.
778
+
779
+ Supported Platforms:
780
+
781
+
782
+ Examples:
783
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> from mindspore.ops.operations.sparse_ops import DenseToCSRSparseMatrix
+ >>> x = Tensor([[[1., 0.], [0., 2.]]], dtype=mindspore.float32)
784
+ >>> indices = Tensor([[0, 0, 0], [0, 1, 1]], dtype=mindspore.int32)
785
+ >>> dense_to_csr = DenseToCSRSparseMatrix()
786
+ >>> out = dense_to_csr(x, indices)
787
+ >>> print(out[0])
788
+ [1 2 2]
789
+ >>> print(out[1])
790
+ [0 2]
791
+ >>> print(out[2])
792
+ [0 1 2]
793
+ >>> print(out[3])
794
+ [0 1]
795
+ >>> print(out[4])
796
+ [1. 2.]
797
+ """
798
+
799
+ @prim_attr_register
800
+ def __init__(self):
801
+ """Initialize DenseToCSRSparseMatrix"""
802
+ self.init_prim_io_names(
803
+ inputs=['dense_input', 'indices'],
804
+ outputs=['y_dense_shape', 'y_batch_pointers', 'y_row_pointers', 'y_col_indices', 'y_values'])
805
+
806
+
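+ # Editor's illustrative sketch (hypothetical helper, not part of the original file): a NumPy
+ # reference for the five CSR outputs documented above. Unlike the primitive, this sketch derives
+ # the nonzero positions itself instead of taking `indices` as an input.
+ def _dense_to_csr_sketch(dense):
+     import numpy as np  # assumed available; used only by this sketch
+     dense = np.asarray(dense)
+     batched = dense.reshape((-1,) + dense.shape[-2:])  # a 2-D input is treated as one batch
+     batch_pointers, row_pointers, col_indices, values = [0], [], [], []
+     for mat in batched:
+         row_counts = np.count_nonzero(mat, axis=1)
+         row_pointers.extend(np.concatenate(([0], np.cumsum(row_counts))))
+         rows, cols = np.nonzero(mat)
+         col_indices.extend(cols)
+         values.extend(mat[rows, cols])
+         batch_pointers.append(batch_pointers[-1] + len(cols))
+     # With the docstring example this yields [1 2 2], [0 2], [0 1 2], [0 1], [1. 2.].
+     return (np.array(dense.shape), np.array(batch_pointers),
+             np.array(row_pointers), np.array(col_indices), np.array(values))
+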
807
+ class DenseToDenseSetOperation(Primitive):
808
+ """
809
+ Applies a set operation along the last dimension of two `Tensor` inputs.
810
+ Iterates over groups in `x1` and `x2`, applies the chosen set operation to each group,
811
+ and outputs the result as a `SparseTensor`. A "group" is a collection of values
812
+ with the same first n-1 dimensions in `x1` and `x2`.
813
+
814
+ Args:
815
+ set_operation (str): The type of set operation, case insensitive. Default: ``"a-b"`` .
816
+ "a-b": Get the difference set of x1 to x2.
817
+ "b-a": Get the difference set of x2 to x1.
818
+ "intersection": Get the intersection set of x2 to x1.
819
+ "union": Get the union set of x2 to x1.
820
+ validate_indices (bool): Optional attributes for DenseToDenseSetOperation. Default: ``True`` .
821
+
822
+ Inputs:
823
+ - **x1** (Tensor) - The input tensor `x1` with rank `n`. 1st `n-1` dimensions must be the same as `x2`.
824
+ Dimension `n` contains values in a set, duplicates are allowed but ignored.
825
+ - **x2** (Tensor) - The input tensor `x2` with rank `n`. 1st `n-1` dimensions must be the same as `x1`.
826
+ Dimension `n` contains values in a set, duplicates are allowed but ignored.
827
+
828
+ Outputs:
829
+ - **y_indices** (Tensor) - A 2-D Tensor of type int64, represents the position of the element
830
+ in the sparse tensor.
831
+ - **y_values** (Tensor) - A 1-D Tensor, represents the value corresponding to the position
832
+ in the `y_indices`. The dtype is same as input.
833
+ - **y_shape** (Tensor) - A 1-D Tensor of type int64, represents the shape of sparse tensor.
834
+ `y_shape[0...n-1]` is the same as the 1st `n-1` dimensions of `x1` and `x2`,
835
+ `y_shape[n]` is the max result set size across all `0...n-1` dimensions.
836
+
837
+ Raises:
838
+ TypeError: If input `x1` or `x2` is not Tensor.
839
+ TypeError: If the type of `x1` is not the same as `x2`.
840
+ ValueError: If the group shape of `x1` or `x2` mismatch with each other.
841
+ ValueError: If the rank of `x1` or `x2` is less than 2.
842
+ ValueError: If the value of attr set_operation is not a valid value.
843
+
844
+ Supported Platforms:
845
+ ``Ascend`` ``CPU``
846
+
847
+ Examples:
848
+ >>> from mindspore import Tensor
+ >>> from mindspore import dtype as mstype
+ >>> from mindspore.ops.operations.sparse_ops import DenseToDenseSetOperation
+ >>> x1 = Tensor([[2, 2, 0], [2, 2, 1], [0, 2, 2]], dtype=mstype.int32)
849
+ >>> x2 = Tensor([[2, 2, 1], [0, 2, 0], [0, 1, 1]], dtype=mstype.int32)
850
+ >>> dtod = DenseToDenseSetOperation(set_operation="a-b", validate_indices=True)
851
+ >>> res = dtod(x1, x2)
852
+ >>> print(res[0])
853
+ [[0 0]
854
+ [1 0]
855
+ [2 0]]
856
+ >>> print(res[1])
857
+ [0 1 2]
858
+ >>> print(res[2])
859
+ [3 1]
860
+ """
861
+
862
+ @prim_attr_register
863
+ def __init__(self, set_operation="a-b", validate_indices=True):
864
+ """Initialize DenseToDenseSetOperation."""
865
+ self.init_prim_io_names(inputs=['x1', 'x2'], outputs=[
866
+ 'y_indices', 'y_values', 'y_shape'])
867
+ validator.check_value_type(
868
+ "set_operation", set_operation, [str], self.name)
869
+ validator.check_value_type(
870
+ "validate_indices", validate_indices, [bool], self.name)
871
+
872
+
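+ # Editor's illustrative sketch (hypothetical helper, not part of the original file): the "a-b"
+ # case of DenseToDenseSetOperation for rank-2 inputs, written with plain Python sets. It returns
+ # the (y_indices, y_values, y_shape) triple in the layout documented above.
+ def _dense_set_difference_sketch(x1, x2):
+     import numpy as np  # assumed available; used only by this sketch
+     row_sets = [sorted(set(a) - set(b)) for a, b in zip(np.asarray(x1).tolist(), np.asarray(x2).tolist())]
+     y_indices = np.array([(r, c) for r, vals in enumerate(row_sets) for c in range(len(vals))])
+     y_values = np.array([v for vals in row_sets for v in vals])
+     y_shape = np.array([len(row_sets), max(len(vals) for vals in row_sets)])
+     # With the docstring example this yields indices [[0 0] [1 0] [2 0]], values [0 1 2], shape [3 1].
+     return y_indices, y_values, y_shape
+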
873
+ class Sspaddmm(Primitive):
874
+ r"""
875
+ Matrix multiplies a sparse tensor `x2` with a dense tensor `x3`, then adds the sparse tensor `x1`.
876
+ If `x1_shape` is :math:`(s0, s1)`, `x2_shape` should be :math:`(s0, s2)`, and `x3_shape` should be :math:`(s2, s1)`.
877
+
878
+ .. warning::
879
+ This is an experimental API that is subject to change or deletion.
880
+
881
+ .. math::
882
+ out = \beta * x1 + \alpha * (x2 @ x3)
883
+
884
+ Inputs:
885
+ - **x1_indices** (Tensor) - A 2-D Tensor, represents the position of the element in the sparse tensor.
886
+ Support int32, int64. The shape is :math:`(2, n)`. If `x1_shape` is :math:`(s0, s1)`, the row index
887
+ values of `x1_indices` should be non-negative integers less than `s0`, and the col index values
888
+ should be non-negative integers less than `s1`.
889
+ - **x1_values** (Tensor) - A 1-D Tensor, represents the value corresponding to the position in
890
+ the `x1_indices`. Support float32, float64, int8, int16, int32, int64, uint8. The dtype should be the same as
891
+ `x2_values` and `x3_dense`. The shape should be :math:`(n,)`.
892
+ - **x1_shape** (Tensor) - A 1-D Tensor, specifies the shape of sparse tensor. Support int32, int64,
893
+ have 2 positive int elements, shape is :math:`(2,)`. The dtype should be the same as `x1_indices`.
894
+ - **x2_indices** (Tensor) - A 2-D Tensor, represents the position of the element in the sparse tensor.
895
+ Support int32, int64. The shape is :math:`(2, n)`. If `x2_shape` is :math:`(s0, s2)`, the row index
896
+ values of `x2_indices` should be non-negative integers less than `s0`, and the col index values
897
+ should be non-negative integers less than `s2`.
898
+ - **x2_values** (Tensor) - A 1-D Tensor, represents the value corresponding to the position in the `x2_indices`.
899
+ Support float32, float64, int8, int16, int32, int64, uint8. The dtype should be the same as `x1_values`
900
+ and `x3_dense`. The shape should be :math:`(n,)`.
901
+ - **x2_shape** (Tensor) - A 1-D Tensor, specifies the shape of sparse tensor. Support int32,int64,
902
+ have 2 positive int elements, shape is :math:`(2,)`. The dtype is same as `x2_indices`.
903
+ - **x3_dense** (Tensor) - A 2-D Tensor, the dtype should be the same as `x1_values` and `x2_values`.
904
+ - **alpha** (Tensor) - A 0-D or 1-D Tensor, the weight of :math:`x2 @ x3`. If `alpha` is a 0-D tensor,
905
+ its shape is :math:`()`; if it is a 1-D tensor, its shape is :math:`(1,)`. Support uint8, uint16, uint32, uint64,
906
+ int8, int16, int32, int64, float16, float32, float64. If the dtype of `alpha` is not the same as the expected
907
+ output dtype, the value of `alpha` must be convertible to that dtype without overflow.
908
+ - **beta** (Tensor) - A 0-D or 1-D Tensor, the weight of `x1`. If `beta` is a 0-D tensor,
909
+ its shape is :math:`()`; if it is a 1-D tensor, its shape is :math:`(1,)`. Support uint8, uint16, uint32, uint64,
910
+ int8, int16, int32, int64, float16, float32, float64. If the dtype of `x1_values` is an integer type (int8,
911
+ int16, int32, int64 or uint8), the dtype of `beta` does not support float16, float32 or float64.
912
+
913
+ Outputs:
914
+ - **y_indices** (Tensor) - A 2-D Tensor, represents the position of the element in the sparse tensor.
915
+ The dtype is int64, each element value should be a non-negative int number. The shape is :math:`(2, n)`.
916
+ - **y_values** (Tensor) - A 1-D Tensor, represents the value corresponding to the position in the `y_indices`.
917
+ The dtype is the same as `x1_values` . The shape should be :math:`(n,)`.
918
+ - **y_shape** (Tensor) - A 1-D Tensor, A positive int tuple which specifies the shape of sparse tensor.
919
+ The dtype is int64, and the values are the same as `x1_shape`.
920
+
921
+ Raises:
922
+ TypeError: If dtype of `x1_indices`, `x1_shape` is not the same and neither int32 nor int64.
923
+ TypeError: If dtype of `x2_indices`, `x2_shape` is not the same and not int32 or int64.
924
+ TypeError: If type of `x1_values`, `x2_values`, `x3_dense` is not the same.
925
+ TypeError: If dtype of `x1_values`, `x2_values`, `x3_dense` is not uint8, int8, int16, int32, int64, float32,
926
+ float64.
927
+ ValueError: If shape of `x1_indices`, `x2_indices` is not (2, n).
928
+ ValueError: If shape of `x1_values`, `x2_values` is not (n,).
929
+ ValueError: If dim0 size of `x1_values` is not the same with dim1 size of `x1_indices`.
930
+ ValueError: If dim0 size of `x2_values` is not the same with dim1 size of `x2_indices`.
931
+ ValueError: If shape of `x1_shape` or shape of `x2_shape` is not (2,).
932
+ ValueError: If dim of `x3_dense` is not 2D.
933
+ ValueError: If the dtype of `alpha` is not the same as the dtype of `x2_values`, and the value of `alpha`
934
+ overflows when converted to the dtype of `x2_values`.
935
+ TypeError: If dtype of `alpha`, `beta` is not uint8, uint16, uint32, uint64, int8, int16, int32, int64,
936
+ float16, float32, float64.
937
+ TypeError: If the dtype of `x1_values` is an integer type while the dtype of `beta` is float16,
938
+ float32 or float64.
939
+ ValueError: If the shape of `alpha`, `beta` is not () or (1,).
940
+
941
+ Supported Platforms:
942
+ ``Ascend`` ``GPU`` ``CPU``
943
+
944
+ Examples:
945
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> from mindspore import dtype as mstype
+ >>> from mindspore.ops.operations.sparse_ops import Sspaddmm
+ >>> x1_indices = Tensor(np.array([[0, 1], [0, 1]]), mstype.int64)
946
+ >>> x1_values = Tensor(np.array([1, 2]), mstype.int32)
947
+ >>> x1_shape = Tensor(np.array([3, 3]), mstype.int64)
948
+ >>> x2_indices = Tensor(np.array([[0, 1], [2, 2]]), mstype.int64)
949
+ >>> x2_values = Tensor(np.array([3, 4]), mstype.int32)
950
+ >>> x2_shape = Tensor(np.array([3, 3]), mstype.int64)
951
+ >>> x3_dense = Tensor(np.array([[1, 2, 3], [1, 3, 2], [3, 2, 1]]), mstype.int32)
952
+ >>> alpha = Tensor(np.array(1), mstype.int32)
953
+ >>> beta = Tensor(np.array(1), mstype.int32)
954
+ >>> sspaddmm = Sspaddmm()
955
+ >>> out_indices, out_values, out_shapes = sspaddmm(x1_indices, x1_values, x1_shape,
956
+ ... x2_indices, x2_values, x2_shape, x3_dense, alpha, beta)
957
+ >>> print(out_indices)
958
+ [[0 1 0 0 0 1 1 1]
959
+ [0 1 0 1 2 0 1 2]]
960
+ >>> print(out_values)
961
+ [ 1 2 9 6 3 12 8 4]
962
+ >>> print(out_shapes)
963
+ [3 3]
964
+ """
965
+ __mindspore_signature__ = (
966
+ sig.make_sig('x1_indices', dtype=sig.sig_dtype.T1),
967
+ sig.make_sig('x1_values', dtype=sig.sig_dtype.T),
968
+ sig.make_sig('x1_shape', dtype=sig.sig_dtype.T2),
969
+ sig.make_sig('x2_indices', dtype=sig.sig_dtype.T3),
970
+ sig.make_sig('x2_values', dtype=sig.sig_dtype.T),
971
+ sig.make_sig('x2_shape', dtype=sig.sig_dtype.T4),
972
+ sig.make_sig('x3_dense', dtype=sig.sig_dtype.T),
973
+ sig.make_sig('alpha', dtype=sig.sig_dtype.T),
974
+ sig.make_sig('beta', dtype=sig.sig_dtype.T)
975
+ )
976
+
977
+ @prim_attr_register
978
+ def __init__(self):
979
+ """Initialize Sspaddmm."""
980
+ self.init_prim_io_names(inputs=['x1_indices', 'x1_values', 'x1_shape', 'x2_indices', 'x2_values', 'x2_shape',
981
+ 'x3_dense', 'alpha', 'beta'], outputs=['y_indices', 'y_values', 'y_shape'])
982
+
983
+
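+ # Editor's illustrative sketch (hypothetical helper, not part of the original file): a dense NumPy
+ # check of the formula documented above, out = beta * x1 + alpha * (x2 @ x3). The primitive itself
+ # returns the same result in (uncoalesced) COO form; here both sparse operands are densified first.
+ def _sspaddmm_dense_sketch(x1_indices, x1_values, x1_shape, x2_indices, x2_values, x2_shape,
+                            x3_dense, alpha, beta):
+     import numpy as np  # assumed available; used only by this sketch
+     x1 = np.zeros(tuple(np.asarray(x1_shape)))
+     x1[tuple(np.asarray(x1_indices))] = x1_values   # indices are laid out as (2, n): rows, cols
+     x2 = np.zeros(tuple(np.asarray(x2_shape)))
+     x2[tuple(np.asarray(x2_indices))] = x2_values
+     # With the docstring example this gives [[10, 6, 3], [12, 10, 4], [0, 0, 0]], i.e. the
+     # densified form of the COO output shown in the example.
+     return beta * x1 + alpha * (x2 @ np.asarray(x3_dense))
+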
984
+ class SparseAddmm(Primitive):
985
+ """
986
+ Multiplies sparse matrix `x1` by dense matrix `x2` * `alpha` and adds dense matrix `x3` * `beta`.
987
+ The rank of the sparse matrix and the dense matrices must be equal to `2`. The sparse matrix `x1` is formulated by `x1_indices`,
988
+ `x1_values` and `x1_shape`.
989
+
990
+ Inputs:
991
+ - **x1_indices** (Tensor) - A 2-D Tensor, represents the position of the element in the sparse tensor.
992
+ Support int32, int64, each element value should be a non-negative int number. The shape is :math:`(N, 2)`.
993
+ - **x1_values** (Tensor) - A 1-D Tensor, represents the value corresponding to the position in the `indices`.
994
+ Support float32, float64, int8, int16, int32, int64, uint8, uint16, uint32, uint64.
995
+ The shape should be :math:`(N,)`.
996
+ - **x1_shape** (Tensor) - A positive int tuple which specifies the shape of sparse tensor.
997
+ Support int32, int64. It should have 2 elements, representing a sparse tensor of shape :math:`(Q, P)`.
998
+ - **x2** (Tensor) - A 2-D Dense Tensor, the dtype is same as `values`. The shape should be :math:`(P, M)`.
999
+ - **x3** (Tensor) - A 2-D Dense Tensor, the dtype is same as `values`. The shape should be :math:`(Q, M)`.
1000
+ - **alpha** (Tensor) - A 1-D Tensor, the dtype is same as `values`. The shape should be :math:`(1,)`.
1001
+ - **beta** (Tensor) - A 1-D Tensor, the dtype is same as `values`. The shape should be :math:`(1,)`.
1002
+
1003
+ Outputs:
1004
+ Tensor, the dtype is the same as `x1_values`. The shape is the same as `x3`.
1005
+
1006
+ Raises:
1007
+ TypeError: If the dtype of `x1_indices`, `x1_values`, `x2` or `x3` doesn't meet the parameter
1008
+ description.
1009
+ ValueError: If shape of `x1_indices`, shape of `x1_values`, shape of `alpha`,
1010
+ and shape of `beta` don't meet the parameter description.
1011
+ RuntimeError: If `x1_shape`, shape of `x2`, shape of `x3` don't meet the parameter description.
1012
+
1013
+ Supported Platforms:
1014
+ ``GPU`` ``CPU``
1015
+
1016
+ Examples:
1017
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> from mindspore.ops.operations.sparse_ops import SparseAddmm
+ >>> indices = Tensor([[0, 1], [1, 2]], dtype=ms.int32)
1018
+ >>> values = Tensor([1, 2], dtype=ms.float32)
1019
+ >>> sparse_shape = Tensor([3, 4], dtype=ms.int32)
1020
+ >>> x2_dense = Tensor([[1,1], [2,2], [3,3], [4,4]], dtype=ms.float32)
1021
+ >>> x3_dense = Tensor([[2,2], [6,6], [0,0]], dtype=ms.float32)
1022
+ >>> alpha = Tensor([1], dtype=ms.float32)
1023
+ >>> beta = Tensor([1], dtype=ms.float32)
1024
+ >>> sparse_addmm = SparseAddmm()
1025
+ >>> out = sparse_addmm(indices, values, sparse_shape, x2_dense, x3_dense, alpha, beta)
1026
+ >>> print(out)
1027
+ [[4 4]
1028
+ [12 12]
1029
+ [0 0]]
1030
+ """
1031
+
1032
+ @prim_attr_register
1033
+ def __init__(self):
1034
+ """Initialize SparseAddmm"""
1035
+ self.init_prim_io_names(inputs=['indices', 'values', 'sparse_shape', 'x2_dense', 'x3_dense', 'alpha', 'beta'],
1036
+ outputs=['output'])
1037
+
1038
+
1039
+ class SparseConcat(Primitive):
1040
+ """
1041
+ Concatenates the input SparseTensors (COO format) along the specified dimension.
1042
+
1043
+ Args:
1044
+ concat_dim (Scalar): Decides the dimension to concatenate along.
1045
+ The value must be in range [-rank, rank), where rank is the number of dimensions in each input
1046
+ SparseTensor. Support int32, int64. Default: ``0`` .
1047
+
1048
+ Inputs:
1049
+ - **sp_input_indices** (Tensor) - A list of Tensors holding the indices of the COOTensors
1050
+ to be concatenated. Support int64.
1051
+ - **sp_input_values** (Tensor) - A list of Tensors holding the values of the COOTensors
1052
+ to be concatenated.
1053
+ - **sp_input_shape** (Tensor) - A list of Tensors holding the shapes of the COOTensors
1054
+ to be concatenated. Support int64.
1055
+
1056
+ Outputs:
1057
+ - **output_indices** (Tensor) - The indices of the output COOTensor, i.e. the result of
1058
+ concatenating the input SparseTensors along the specified dimension.
1059
+ - **output_values** (Tensor) - The values of the output COOTensor, i.e. the result of
1060
+ concatenating the input SparseTensors along the specified dimension.
1061
+ - **output_shape** (Tensor) - The shape of the output COOTensor, i.e. the result of
1062
+ concatenating the input SparseTensors along the specified dimension.
1063
+
1064
+ Raises:
1065
+ ValueError: If only one sparse tensor is given as input.
1066
+ Error: If input axis value is not in range [-rank, rank).
1067
+
1068
+ Supported Platforms:
1069
+ ``Ascend`` ``CPU``
1070
+
1071
+ Examples:
1072
+ >>> from mindspore import Tensor
+ >>> from mindspore import dtype as mstype
+ >>> from mindspore.ops.operations.sparse_ops import SparseConcat
+ >>> indices0 = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
1073
+ >>> values0 = Tensor([1, 2], dtype=mstype.int32)
1074
+ >>> shape0 = Tensor([3, 4], dtype=mstype.int64)
1075
+ >>> indices1 = Tensor([[0, 0], [1, 1]], dtype=mstype.int64)
1076
+ >>> values1 = Tensor([3, 4], dtype=mstype.int32)
1077
+ >>> shape1 = Tensor([3, 4], dtype=mstype.int64)
1078
+ >>> sparse_concat = SparseConcat(0)
1079
+ >>> out = sparse_concat((indices0, indices1), (values0, values1), (shape0, shape1))
1080
+ >>> print(out)
1081
+ (Tensor(shape=[4, 2], dtype=Int64, value=
1082
+ [[0, 1],
1083
+ [1, 2],
1084
+ [3, 0],
1085
+ [4, 1]]), Tensor(shape=[4], dtype=Int32, value= [1, 2, 3, 4]), Tensor(shape=[2], dtype=Int64, value= [6, 4]))
1086
+ """
1087
+
1088
+ @prim_attr_register
1089
+ def __init__(self, concat_dim=0):
1090
+ """Initialize SparseConcat."""
1091
+ self.init_prim_io_names(inputs=['sp_input_indices', 'sp_input_values', 'sp_input_shapes'],
1092
+ outputs=['output_indices', 'output_values', 'output_shape'])
1093
+ validator.check_value_type("concat_dim", concat_dim, [int], self.name)
1094
+
1095
+
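+ # Editor's illustrative sketch (hypothetical helper, not part of the original file): concatenating
+ # COO tensors along dimension 0 with NumPy, which is what SparseConcat(concat_dim=0) computes:
+ # later inputs have their row index shifted by the accumulated size of the earlier inputs.
+ def _coo_concat_dim0_sketch(indices_list, values_list, shapes_list):
+     import numpy as np  # assumed available; used only by this sketch
+     out_indices, out_values, offset = [], [], 0
+     for idx, vals, shp in zip(indices_list, values_list, shapes_list):
+         idx = np.array(idx, copy=True)
+         idx[:, 0] += offset
+         out_indices.append(idx)
+         out_values.append(np.asarray(vals))
+         offset += int(shp[0])
+     out_shape = np.array(shapes_list[0], copy=True)
+     out_shape[0] = offset
+     # With the docstring example: indices [[0 1] [1 2] [3 0] [4 1]], values [1 2 3 4], shape [6 4].
+     return np.concatenate(out_indices), np.concatenate(out_values), out_shape
+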
1096
+ class SparseSegmentSum(Primitive):
1097
+ """
1098
+ Computes the sum along sparse segments of a tensor.
1099
+
1100
+ Inputs:
1101
+ - **x** (Tensor) - A tensor of the first input of SparseSegmentSum.
1102
+ - **indices** (Tensor) - 1-D Tensor of type int32 or int64 with indices into `x`.
1103
+ Has same rank as `segment_ids`. The shape should be :math:`(N,)`.
1104
+ - **segment_ids** (Tensor) - 1-D Tensor of type int32 or int64 with indices into the output `y`. Values
1105
+ should be sorted and can be repeated. The shape should be :math:`(N,)`.
1106
+
1107
+ Outputs:
1108
+ A Tensor. Has the same type as `x` .
1109
+ Has same shape as `x`, except for dimension 0 which is the number of segments.
1110
+
1111
+ Raises:
1112
+ TypeError: If `x` or `indices` or `segment_ids` is not a tensor.
1113
+ TypeError: If the dtype of `indices` and `segment_ids` is not int32 or int64.
1114
+ ValueError: If dimension size of `x` less than 1.
1115
+ ValueError: If any of `indices` and `segment_ids` is not a 1-D tensor.
1116
+ ValueError: If shape[0] of `indices` is not corresponding to shape[0] of `segment_ids`.
1117
+ ValueError: If indices in `segment_ids` are not contiguous or do not start from 0.
1118
+ ValueError: If `segment_ids` is not sorted.
1119
+ ValueError: If `indices` is out of range of x's first dimension.
1120
+
1121
+ Supported Platforms:
1122
+ ``Ascend`` ``GPU`` ``CPU``
1123
+
1124
+ Examples:
1125
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> from mindspore.ops.operations.sparse_ops import SparseSegmentSum
+ >>> x = Tensor([[0, 1, 2], [1, 2, 3], [3, 6, 7]], dtype=ms.float32)
1126
+ >>> indices = Tensor([0, 1, 2], dtype=ms.int32)
1127
+ >>> segment_ids = Tensor([0, 1, 1], dtype=ms.int32)
1128
+ >>> sparse_segment_sum = SparseSegmentSum()
1129
+ >>> out = sparse_segment_sum(x, indices, segment_ids)
1130
+ >>> print(out)
1131
+ [[ 0. 1. 2.]
1132
+ [ 4. 8. 10.]]
1133
+ """
1134
+ __mindspore_signature__ = (
1135
+ sig.make_sig('x', dtype=sig.sig_dtype.T1),
1136
+ sig.make_sig('indices', dtype=sig.sig_dtype.T),
1137
+ sig.make_sig('segment_ids', dtype=sig.sig_dtype.T)
1138
+ )
1139
+
1140
+ @prim_attr_register
1141
+ def __init__(self):
1142
+ """Initialize SparseSegmentSum"""
1143
+ self.init_prim_io_names(inputs=['x', 'indices', 'segment_ids'], outputs=['y'])
1144
+
1145
+
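+ # Editor's illustrative sketch (hypothetical helper, not part of the original file): a NumPy
+ # reference for SparseSegmentSum, which also covers the SparseSegmentSumWithNumSegments variant
+ # documented next when `num_segments` is passed explicitly.
+ def _sparse_segment_sum_sketch(x, indices, segment_ids, num_segments=None):
+     import numpy as np  # assumed available; used only by this sketch
+     x, indices, segment_ids = np.asarray(x), np.asarray(indices), np.asarray(segment_ids)
+     n = int(segment_ids.max()) + 1 if num_segments is None else int(num_segments)
+     out = np.zeros((n,) + x.shape[1:], dtype=x.dtype)
+     np.add.at(out, segment_ids, x[indices])   # gather the selected rows, then scatter-add per segment
+     # Docstring example: x[[0]] -> segment 0 and x[[1, 2]] -> segment 1, giving [[0 1 2] [4 8 10]].
+     return out
+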
1146
+ class SparseSegmentSumWithNumSegments(Primitive):
1147
+ """
1148
+ Computes the sum along sparse segments of a tensor, but allows missing ids in `segment_ids`.
1149
+
1150
+ Inputs:
1151
+ - **x** (Tensor) - A Tensor of the first input of SparseSegmentSumWithNumSegments.
1152
+ - **indices** (Tensor) - 1-D Tensor with indices into `x`. Must be one of the following types: int32, int64.
1153
+ Has same rank as `segment_ids`. The shape should be :math:`(N,)`.
1154
+ - **segment_ids** (Tensor) - 1-D Tensor with indices into the output `y`. Must be one of the following types:
1155
+ int32, int64. Values should be sorted and can be repeated. The shape should be :math:`(N,)`.
1156
+ - **num_segments** (Tensor) - Num_segments indicates the size of the output.
1157
+ It should be bigger than the largest id of `segment_ids`.
1158
+
1159
+ Outputs:
1160
+ A Tensor. Has the same type as `x` .
1161
+ Has same shape as `x`, except for dimension 0 which is the value of `num_segments`.
1162
+
1163
+ Raises:
1164
+ TypeError: If `x` or `indices` or `segment_ids` or `num_segments` is not a tensor.
1165
+ TypeError: If the dtype of `indices` and `segment_ids` and `num_segments` is not int32 or int64.
1166
+ ValueError: If dimension size of `x` less than 1.
1167
+ ValueError: If any of `indices` and `segment_ids` is not a 1-D tensor.
1168
+ ValueError: If rank of `num_segments` is bigger than 1.
1169
+ ValueError: If the number of elements of `num_segments` is not 1.
1170
+ ValueError: If shape[0] of `indices` is not corresponding to shape[0] of `segment_ids`.
1171
+ ValueError: If `segment_ids` is not sorted.
1172
+ ValueError: If the last number of `segment_ids` is bigger than or equal to `num_segments`.
1173
+ ValueError: If `indices` is out of range of x's first dimension.
1174
+
1175
+ Supported Platforms:
1176
+ ``Ascend`` ``GPU`` ``CPU``
1177
+
1178
+ Examples:
1179
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> from mindspore.ops.operations.sparse_ops import SparseSegmentSumWithNumSegments
+ >>> x = Tensor([[0, 1, 0, 0], [0, 1, 1, 0], [1, 0, 1, 0]], dtype=ms.float16)
1180
+ >>> indices = Tensor([0, 2, 1], dtype=ms.int32)
1181
+ >>> segment_ids = Tensor([0, 0, 2], dtype=ms.int32)
1182
+ >>> num_segments = Tensor([4], dtype=ms.int32)
1183
+ >>> sparse_segment_sum_with_num_segments = SparseSegmentSumWithNumSegments()
1184
+ >>> output = sparse_segment_sum_with_num_segments(x, indices, segment_ids, num_segments)
1185
+ >>> print(output)
1186
+ [[1. 1. 1. 0.]
1187
+ [0. 0. 0. 0.]
1188
+ [0. 1. 1. 0.]
1189
+ [0. 0. 0. 0.]]
1190
+ """
1191
+ __mindspore_signature__ = (
1192
+ sig.make_sig('x', dtype=sig.sig_dtype.T1),
1193
+ sig.make_sig('indices', dtype=sig.sig_dtype.T),
1194
+ sig.make_sig('segment_ids', dtype=sig.sig_dtype.T),
1195
+ sig.make_sig('num_segments', dtype=sig.sig_dtype.T)
1196
+ )
1197
+
1198
+ @prim_attr_register
1199
+ def __init__(self):
1200
+ """Initialize SparseSegmentSumWithNumSegments"""
1201
+ self.init_prim_io_names(inputs=['x', 'indices', 'segment_ids', 'num_segments'], outputs=['y'])
1202
+
1203
+
1204
+ class SparseSegmentSqrtN(Primitive):
1205
+ """
1206
+ Computes the sum along sparse segments of a tensor divided by the sqrt of N.
1207
+ N is the size of the segment being reduced.
1208
+
1209
+ Inputs:
1210
+ - **x** (Tensor) - A tensor. Its rank must be greater than or equal to one.
1211
+ - **indices** (Tensor) - 1-D Tensor with indices into `x`. Must be one of the following types: int32, int64.
1212
+ Has same rank as segment_ids. The shape should be :math:`(N,)`.
1213
+ - **segment_ids** (Tensor) - 1-D Tensor with indices into the output `y`. Must be one of the following
1214
+ types: int32, int64. Values should be sorted and can be repeated. The shape should be :math:`(N,)`.
1215
+
1216
+ Outputs:
1217
+ A Tensor. Has the same type as `x` .
1218
+ Has same shape as `x`, except for dimension 0 which is the number of segments.
1219
+
1220
+ Raises:
1221
+ TypeError: If `x` or `indices` or `segment_ids` is not a tensor.
1222
+ TypeError: If the dtype of `x` is not any of the following data types: {float16, float32, float64}.
1223
+ TypeError: If the dtype of `indices` is not int32 or int64.
1224
+ TypeError: If the dtype of `segment_ids` is not int32 or int64.
1225
+ ValueError: If dimension size of `x` is less than 1.
1226
+ ValueError: If any of `indices` and `segment_ids` is not a 1-D tensor.
1227
+ ValueError: If shape[0] of `indices` is not corresponding to shape[0] of `segment_ids`.
1228
+ ValueError: If indices in `segment_ids` are not contiguous or do not start from 0.
1229
+ ValueError: If `segment_ids` is not sorted.
1230
+ ValueError: If `indices` is out of range of x's first dimension.
1231
+
1232
+ Supported Platforms:
1233
+ ``Ascend`` ``GPU`` ``CPU``
1234
+
1235
+ Examples:
1236
+ >>> import numpy as np
+ >>> from mindspore import Tensor
+ >>> from mindspore.ops.operations.sparse_ops import SparseSegmentSqrtN
+ >>> x = Tensor(np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12]]).astype(np.float32))
1237
+ >>> indices = Tensor(np.array([0,1,2]).astype(np.int32))
1238
+ >>> segment_ids = Tensor(np.array([0,1,2]).astype(np.int32))
1239
+ >>> sparse_segment_sqrt_n = SparseSegmentSqrtN()
1240
+ >>> output = sparse_segment_sqrt_n(x, indices, segment_ids)
1241
+ >>> print(output)
1242
+ [[ 1. 2. 3. 4.]
1243
+ [ 5. 6. 7. 8.]
1244
+ [ 9. 10. 11. 12.]]
1245
+ """
1246
+ __mindspore_signature__ = (
1247
+ sig.make_sig('x', dtype=sig.sig_dtype.T1),
1248
+ sig.make_sig('indices', dtype=sig.sig_dtype.T),
1249
+ sig.make_sig('segment_ids', dtype=sig.sig_dtype.T)
1250
+ )
1251
+
1252
+ @prim_attr_register
1253
+ def __init__(self):
1254
+ """Initialize SparseSegmentSqrtN"""
1255
+ self.init_prim_io_names(
1256
+ inputs=['x', 'indices', 'segment_ids'], outputs=['y'])
1257
+
1258
+
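+ # Editor's illustrative sketch (hypothetical helper, not part of the original file): a NumPy
+ # reference for SparseSegmentSqrtN (and, with an explicit `num_segments`, for the
+ # SparseSegmentSqrtNWithNumSegments variant documented next): sum per segment, then divide each
+ # segment by the square root of its size.
+ def _sparse_segment_sqrt_n_sketch(x, indices, segment_ids, num_segments=None):
+     import numpy as np  # assumed available; used only by this sketch
+     x, indices, segment_ids = np.asarray(x), np.asarray(indices), np.asarray(segment_ids)
+     n = int(segment_ids.max()) + 1 if num_segments is None else int(num_segments)
+     out = np.zeros((n,) + x.shape[1:], dtype=np.float64)
+     np.add.at(out, segment_ids, x[indices])
+     counts = np.bincount(segment_ids, minlength=n)
+     scale = np.sqrt(np.maximum(counts, 1)).reshape((-1,) + (1,) * (out.ndim - 1))
+     return out / scale
+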
1259
+ class SparseSegmentSqrtNWithNumSegments(Primitive):
1260
+ """
1261
+ Computes the sum along sparse segments of a tensor divided by the sqrt of N.
1262
+ N is the size of the segment being reduced.
1263
+ Like SparseSegmentSqrtN, but allows missing ids in segment_ids.
1264
+ If an id is missing, the output tensor at that position will be zeroed.
1265
+
1266
+ Inputs:
1267
+ - **x** (Tensor) - A Tensor. Its rank must be greater than or equal to one.
1268
+ - **indices** (Tensor) - 1-D Tensor with indices into `x`. Must be one of the following types: int32, int64.
1269
+ Has same rank as segment_ids. The shape should be :math:`(N,)`.
1270
+ - **segment_ids** (Tensor) - 1-D Tensor with indices into the output `y`. Must be one of the following
1271
+ types: int32, int64. Values should be sorted and can be repeated. The shape should be :math:`(N,)`.
1272
+ - **num_segments** (Tensor) - Num_segments indicates the size of the output.
1273
+ It should be bigger than the largest id of `segment_ids`.
1274
+
1275
+ Outputs:
1276
+ A Tensor. Has the same type as `x` .
1277
+ Has same shape as `x`, except for dimension 0 which is the value of `num_segments`.
1278
+
1279
+ Raises:
1280
+ TypeError: If `x` or `indices` or `segment_ids` or `num_segments` is not a tensor.
1281
+ TypeError: If the dtype of `x` is not any of the following data types: {float16, float32, float64}.
1282
+ TypeError: If the dtype of `indices` and `segment_ids` and `num_segments` is not int32 or int64.
1283
+ TypeError: If dtype of `segment_ids` and `indices` mismatch.
1284
+ TypeError: If dtype of `num_segments` and `indices` mismatch.
1285
+ ValueError: If dimension size of `x` is less than 1.
1286
+ ValueError: If any of `indices` and `segment_ids` is not a 1-D tensor.
1287
+ ValueError: If rank of `num_segments` is bigger than 1.
1288
+ ValueError: If the number of elements of `num_segments` is not 1.
1289
+ ValueError: If the first dimension of `indices` is not equal to the first dimension of `segment_ids`.
1290
+ ValueError: If `segment_ids` is not sorted.
1291
+ ValueError: If the largest id of `segment_ids` is bigger than or equal to `num_segments`.
1292
+ ValueError: If `indices` is out of range of x's first dimension.
1293
+
1294
+ Supported Platforms:
1295
+ ``Ascend`` ``GPU`` ``CPU``
1296
+
1297
+ Examples:
1298
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> from mindspore.ops.operations.sparse_ops import SparseSegmentSqrtNWithNumSegments
+ >>> x = Tensor([[0, 1, 0, 0], [0, 1, 1, 0], [1, 0, 1, 0]], dtype=ms.float16)
1299
+ >>> indices = Tensor([0, 2, 1], dtype=ms.int32)
1300
+ >>> segment_ids = Tensor([0, 1, 2], dtype=ms.int32)
1301
+ >>> num_segments = Tensor([4], dtype=ms.int32)
1302
+ >>> sparse_segment_sqrt_n_with_num_segments = SparseSegmentSqrtNWithNumSegments()
1303
+ >>> output = sparse_segment_sqrt_n_with_num_segments(x, indices, segment_ids, num_segments)
1304
+ >>> print(output)
1305
+ [[0. 1. 0. 0.]
1306
+ [1. 0. 1. 0.]
1307
+ [0. 1. 1. 0.]
1308
+ [0. 0. 0. 0.]]
1309
+ """
1310
+
1311
+ @prim_attr_register
1312
+ def __init__(self):
1313
+ """Initialize SparseSegmentSqrtNWithNumSegments"""
1314
+ self.init_prim_io_names(
1315
+ inputs=['x', 'indices', 'segment_ids', 'num_segments'], outputs=['y'])
1316
+
1317
+
1318
+ class SparseMatrixNNZ(Primitive):
1319
+ r"""
1320
+ Counts the number of non-zero elements in a sparse matrix or a batch of sparse matrices.
1321
+ If the sparse matrix input contains a batch dimension, the output dimension will be the same as the batch dimension.
1322
+
1323
+ Note:
1324
+ It is assumed that all the inputs can form a legal CSR sparse matrix, otherwise this operator won't work.
1325
+
1326
+ Inputs:
1327
+ - **x_dense_shape** (Tensor) - A 1-D Tensor. It represents the dense form shape of
1328
+ the input CSR sparse matrix, the shape of which should be :math:`(2,)` or :math:`(3,)`.
1329
+ - **x_batch_pointers** (Tensor) - A 1-D Tensor. Supposing the input CSR sparse matrix is of
1330
+ batch size `n`, it should have shape :math:`(n+1,)`, while the `i`-th element of which stores
1331
+ accumulated counts of nonzero values of the first `i - 1` batches.
1332
+ - **x_row_pointers** (Tensor) - A 1-D Tensor. Supposing the input CSR sparse matrix is of
1333
+ batch size `n` and row number `m`, it can be divided into `n` parts, each part of length
1334
+ `m + 1`. The `i`-th element of each :math:`(m+1,)` vector stores accumulated counts of
1335
+ nonzero values of the first `i - 1` rows in the corresponding batch.
1336
+ - **x_col_indices** (Tensor) - A 1-D Tensor. It represents column indices of the nonzero values
1337
+ in the input CSR sparse matrix.
1338
+ - **x_values** (Tensor) - A 1-D Tensor. It represents all the nonzero values in the
1339
+ input CSR sparse matrix.
1340
+
1341
+ Outputs:
1342
+ Tensor, the dtype is int32.
1343
+ If there are n batch within input sparse matrix, the shape is :math:`(n,)`.
1344
+
1345
+ Raises:
1346
+ TypeError: If the dtype of `x_dense_shape`, `x_batch_pointers`, `x_row_pointers` or `x_col_indices`
1347
+ is not int32 or int64, or the dtypes of above inputs are not the same.
1348
+ TypeError: If the dtype of `x_values` is not supported.
1349
+ TypeError: If any of the inputs is not a tensor.
1350
+ ValueError: If any of the inputs is not 1-D.
1351
+ ValueError: If `x_values` and `x_col_indices` have different length.
1352
+ ValueError: If shape[0] of `x_dense_shape` is not 2 or 3.
1353
+
1354
+ Supported Platforms:
1355
+ ``Ascend`` ``GPU`` ``CPU``
1356
+
1357
+ Examples:
1358
+ >>> from mindspore import Tensor
+ >>> from mindspore import dtype as mstype
+ >>> from mindspore.ops.operations.sparse_ops import SparseMatrixNNZ
+ >>> dense_shape = Tensor([2,3], dtype=mstype.int32)
1359
+ >>> batch_pointers = Tensor([0,1], dtype=mstype.int32)
1360
+ >>> row_pointers = Tensor([0,1,1], dtype=mstype.int32)
1361
+ >>> col_indices = Tensor([0], dtype=mstype.int32)
1362
+ >>> values = Tensor([99], dtype=mstype.float32)
1363
+ >>> sparse_matrix_nnz = SparseMatrixNNZ()
1364
+ >>> out = sparse_matrix_nnz(dense_shape, batch_pointers, row_pointers, col_indices, values)
1365
+ >>> print(out)
1366
+ [1]
1367
+ """
1368
+
1369
+ @prim_attr_register
1370
+ def __init__(self):
1371
+ """Initialize SparseMatrixNNZ"""
1372
+ self.init_prim_io_names(
1373
+ inputs=['x_dense_shape', 'x_batch_pointers', 'x_row_pointers', 'x_col_indices', 'x_values'], outputs=['y'])
1374
+
1375
+
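+ # Editor's illustrative sketch (hypothetical helper, not part of the original file): the per-batch
+ # non-zero count returned by SparseMatrixNNZ is simply the difference of consecutive batch pointers.
+ def _sparse_matrix_nnz_sketch(batch_pointers):
+     import numpy as np  # assumed available; used only by this sketch
+     # Docstring example: batch_pointers [0, 1] -> [1].
+     return np.diff(np.asarray(batch_pointers)).astype(np.int32)
+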
1376
+ class SparseFillEmptyRows(Primitive):
1377
+ r"""
1378
+ Fills the empty rows of the input 2D SparseTensor with a default value.
1379
+
1380
+ Inputs:
1381
+ - **indices** (Tensor) - A 2-D Tensor, represents the position of the element in the sparse tensor.
1382
+ Support int64, each element value should be a non-negative int number. The shape is :math:`(n, 2)`.
1383
+ - **values** (Tensor) - A 1-D Tensor, represents the value corresponding to the position in the `indices`.
1384
+ The shape should be :math:`(n,)`.
1385
+ - **dense_shape** (Tensor) - A 1-D Tensor with only two elements, represents the shape of SparseTensor.
1386
+ Support int64.
1387
+ - **default_value** (Tensor) - A 0-D Tensor of the same type as `values`, scalar value to
1388
+ fill the blank lines in the input 2D SparseTensor.
1389
+
1390
+ Outputs:
1391
+ - **output_indices** (Tensor) - A 2-D Tensor, represents the position of the element in the sparse tensor
1392
+ after being filled. Support int64, each element value should be a non-negative int number.
1393
+ The shape is :math:`(m, 2)`, because of being filled, m>=n.
1394
+ - **output_values** (Tensor) - A 1-D Tensor. It represents the value corresponding to the position
1395
+ in the `output_indices`, the shape of which should be :math:`(m,)`, because of being filled, m>=n.
1396
+ - **empty_row_indicator** (Tensor) - A 1-D Tensor. It indicates whether each row is empty.
1397
+ Support bool. The shape is :math:`(dense\_shape[0],)`.
1398
+ - **reverse_index_map** (Tensor) - A 1-D Tensor. It maps each entry of the input `indices` to its
1399
+ position in `output_indices`. The dtype is int64 and the shape is :math:`(n,)`.
1400
+
1401
+ Raises:
1402
+ TypeError: If the dtype of `indices` is not int64.
1403
+ TypeError: If the dtype of `dense_shape` is not int64.
1404
+ TypeError: If the dtype of `values` and the dtype of `default_value` are not same.
1405
+ ValueError: If `dense_shape`, shape of `indices` and shape of `values` don't meet the parameter description.
1406
+
1407
+ Supported Platforms:
1408
+ ``Ascend`` ``GPU`` ``CPU``
1409
+
1410
+ Examples:
1411
+ >>> from mindspore import Tensor
+ >>> from mindspore import dtype as mstype
+ >>> from mindspore.ops.operations.sparse_ops import SparseFillEmptyRows
+ >>> indices = Tensor([[1, 0]], dtype=mstype.int64)
1412
+ >>> values = Tensor([4], dtype=mstype.float32)
1413
+ >>> dense_shape = Tensor([2, 3], dtype=mstype.int64)
1414
+ >>> default_value = Tensor(5, dtype=mstype.float32)
1415
+ >>> sparsefillemptyrows = SparseFillEmptyRows()
1416
+ >>> out = sparsefillemptyrows(indices, values, dense_shape, default_value)
1417
+ >>> print(out[0])
1418
+ Tensor(shape=[2, 2], dtype=Int64, value=
1419
+ [[0, 0],
1420
+ [1, 0]])
1421
+ >>> print(out[1])
1422
+ Tensor(shape=[2], dtype=Float32, value= [ 5.00000000e+00, 4.00000000e+00])
1423
+ >>> print(out[2])
1424
+ Tensor(shape=[2], dtype=Bool, value= [ True, False])
1425
+ >>> print(out[3])
1426
+ Tensor(shape=[1], dtype=Int64, value= [1])
1427
+ """
1428
+ @prim_attr_register
1429
+ def __init__(self):
1430
+ """Initialize SparseFillEmptyRows."""
1431
+ self.init_prim_io_names(inputs=['indices', 'values', 'dense_shape', 'default_value'],
1432
+ outputs=['output_indices', 'output_values', 'empty_row_indicator', 'reverse_index_map'])
1433
+
1434
+
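+ # Editor's illustrative sketch (hypothetical helper, not part of the original file): a NumPy
+ # reference for SparseFillEmptyRows on a 2-D COO tensor. Every row without an entry receives a
+ # single (row, 0) -> default_value entry, and the outputs are reported in row-major order.
+ def _sparse_fill_empty_rows_sketch(indices, values, dense_shape, default_value):
+     import numpy as np  # assumed available; used only by this sketch
+     indices, values = np.asarray(indices), np.asarray(values)
+     num_rows = int(dense_shape[0])
+     present = set(indices[:, 0].tolist())
+     empty_row_indicator = np.array([r not in present for r in range(num_rows)])
+     fill = np.array([[r, 0] for r in range(num_rows) if empty_row_indicator[r]],
+                     dtype=indices.dtype).reshape(-1, 2)
+     out_indices = np.concatenate([indices, fill])
+     out_values = np.concatenate([values, np.full(len(fill), default_value, dtype=values.dtype)])
+     order = np.lexsort((out_indices[:, 1], out_indices[:, 0]))  # sort by row, then column
+     reverse_index_map = np.argsort(order)[:len(values)]         # where each original entry landed
+     # Docstring example: indices [[0 0] [1 0]], values [5. 4.], indicator [True False], map [1].
+     return out_indices[order], out_values[order], empty_row_indicator, reverse_index_map
+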
1435
+ class SparseSegmentMeanWithNumSegments(Primitive):
1436
+ """
1437
+ Computes the mean along sparse segments of a tensor. Missing ids in `segment_ids` are allowed.
1438
+
1439
+ Inputs:
1440
+ - **x** (Tensor) - A Tensor of the first input of SparseSegmentMeanWithNumSegments.
1441
+ - **indices** (Tensor) - 1-D Tensor with indices into `x`. Must be one of the following
1442
+ types: int32, int64. Has same rank as `segment_ids`. The shape should be :math:`(N,)`.
1443
+ - **segment_ids** (Tensor) - 1-D Tensor with indices into the output `y`. Must be one of the
1444
+ following types: int32, int64. Values should be sorted and can be repeated. The shape should
1445
+ be :math:`(N,)`.
1446
+ - **num_segments** (Tensor) - Num_segments indicates the size of the output.
1447
+ It should be bigger than the largest id of `segment_ids`.
1448
+
1449
+ Outputs:
1450
+ A Tensor. Has the same type as `x` .
1451
+ Has same shape as `x`, except for dimension 0 which is the value of `num_segments`.
1452
+
1453
+ Raises:
1454
+ TypeError: If `x` or `indices` or `segment_ids` or `num_segments` is not a tensor.
1455
+ TypeError: If dtype of `x` is not in [float16, float32, float64].
1456
+ TypeError: If dtype of `indices` is not int32 or int64.
1457
+ TypeError: If dtype of `segment_ids` and `indices` mismatch.
1458
+ TypeError: If dtype of `num_segments` and `indices` mismatch.
1459
+ ValueError: If rank of `x` is less than 1.
1460
+ ValueError: If rank of `indices` or `segment_ids` is not 1.
1461
+ ValueError: If rank of `num_segments` is bigger than 1.
1462
+ ValueError: If the number of elements of `num_segments` is not 1.
1463
+ ValueError: If the first dimension of `indices` is not equal to the first dimension of `segment_ids`.
1464
+ ValueError: If `segment_ids` is not sorted.
1465
+ ValueError: If the largest id of `segment_ids` is bigger than or equal to `num_segments`.
1466
+ ValueError: If `indices` is out of range of x's first dimension.
1467
+
1468
+ Supported Platforms:
1469
+ ``GPU`` ``CPU``
1470
+
1471
+ Examples:
1472
+ >>> from mindspore import Tensor
1473
+ >>> import mindspore as ms
1474
+ >>> import mindspore.ops.operations.sparse_ops as ops
1475
+ >>> x = Tensor([[0, 2, 0, 0], [0, 1, 1, 0], [2, 0, 2, 0]], dtype=ms.float16)
1476
+ >>> indices = Tensor([0, 2, 1], dtype=ms.int32)
1477
+ >>> segment_ids = Tensor([0, 0, 2], dtype=ms.int32)
1478
+ >>> num_segments = Tensor([4], dtype=ms.int32)
1479
+ >>> sparse_segment_mean_with_num_segments = ops.SparseSegmentMeanWithNumSegments()
1480
+ >>> output = sparse_segment_mean_with_num_segments(x, indices, segment_ids, num_segments)
1481
+ >>> print(output)
1482
+ [[1. 1. 1. 0.]
1483
+ [0. 0. 0. 0.]
1484
+ [0. 1. 1. 0.]
1485
+ [0. 0. 0. 0.]]
1486
+ """
1487
+
1488
+ @prim_attr_register
1489
+ def __init__(self):
1490
+ """Initialize SparseSegmentMeanWithNumSegments"""
1491
+ self.init_prim_io_names(inputs=['x', 'indices', 'segment_ids', 'num_segments'], outputs=['y'])
1492
+
1493
+
1494
+ class SparseAdd(Primitive):
1495
+ """
1496
+ Computes the sum of a COOTensor and another COOTensor.
1497
+
1498
+ Inputs:
1499
+ - **x1_indices** (Tensor) - represents the first COOTensor's indices.
1500
+ - **x1_values** (Tensor) - represents the first COOTensor's values.
1501
+ - **x1_shape** (Tensor) - represents the first COOTensor's dense shape.
1502
+ - **x2_indices** (Tensor) - represents the second COOTensor's indices.
1503
+ - **x2_values** (Tensor) - represents the second COOTensor's values.
1504
+ - **x2_shape** (Tensor) - represents the second COOTensor's dense shape.
1505
+ - **thresh** (Tensor) - A 0-D Tensor, represents the magnitude threshold that determines whether an output
1506
+ value/index pair takes space. Its dtype should match that of the values if they are real.
1507
+ If an output value's magnitude is less than `thresh`, that entry is dropped from the output.
1508
+
1509
+ Outputs:
1510
+ - **sum_indices** (Tensor) - this is the indices of the sum.
1511
+ - **sum_values** (Tensor) - this is the values of the sum.
1512
+ - **sum_shape** (Tensor) - this is the shape of the sum.
1513
+
1514
+ Raises:
1515
+ ValueError: If (x1_indices/x2_indices)'s dim is not equal to 2.
1516
+ ValueError: If (x1_values/x2_values)'s dim is not equal to 1.
1517
+ ValueError: If (x1_shape/x2_shape)'s dim is not equal to 1.
1518
+ ValueError: If thresh's dim is not equal to 0.
1519
+ TypeError: If (x1_indices/x2_indices)'s type is not equal to int64.
1520
+ TypeError: If (x1_shape/x2_shape)'s type is not equal to int64.
1521
+ ValueError: If (x1_indices/x2_indices)'s length is not equal to
1522
+ (x1_values/x2_values)'s length.
1523
+ TypeError: If (x1_values/x2_values)'s type is not equal to any of
1524
+ (int8/int16/int32/int64/float32/float64/complex64/complex128).
1525
+ TypeError: If thresh's type is not equal to any of
1526
+ (int8/int16/int32/int64/float32/float64).
1527
+ TypeError: If x1_indices's type is not equal to x2_indices's type.
1528
+ TypeError: If x1_values's type is not equal to x2_values's type.
1529
+ TypeError: If x1_shape's type is not equal to x2_shape's type.
1530
+ TypeError: If (x1_values/x2_values)'s type is not matched with thresh's type.
1531
+
1532
+ Supported Platforms:
1533
+ ``GPU`` ``CPU``
1534
+
1535
+ Examples:
1536
+ >>> from mindspore import Tensor
1537
+ >>> from mindspore import dtype as mstype
1538
+ >>> from mindspore.ops.operations.sparse_ops import SparseAdd
1539
+ >>> indics0 = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
1540
+ >>> values0 = Tensor([1, 2], dtype=mstype.int32)
1541
+ >>> shape0 = Tensor([3, 4], dtype=mstype.int64)
1542
+ >>> indics1 = Tensor([[0, 0], [1, 1]], dtype=mstype.int64)
1543
+ >>> values1 = Tensor([3, 4], dtype=mstype.int32)
1544
+ >>> shape1 = Tensor([3, 4], dtype=mstype.int64)
1545
+ >>> thres = Tensor(0, dtype=mstype.int32)
1546
+ >>> sparse_add = SparseAdd()
1547
+ >>> out = sparse_add(indics0, values0, shape0, indics1, values1, shape1, thres)
1548
+ >>> print(out)
1549
+ (Tensor(shape=[4, 2], dtype=Int64, value=[[0, 0], [0, 1], [1, 1], [1, 2]]),
1550
+ Tensor(shape=[4], dtype=Int32, value=[3, 1, 4, 2]),
1551
+ Tensor(shape=[2], dtype=Int64, value=[3, 4]))
1552
+ """
1553
+
1554
+ @prim_attr_register
1555
+ def __init__(self):
1556
+ self.init_prim_io_names(
1557
+ inputs=["x1_indices", "x1_values", "x1_shape",
1558
+ "x2_indices", "x2_values", "x2_shape", "thresh"],
1559
+ outputs=["sum_indices", "sum_values", "sum_shape"])
1560
+
1561
+
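+ # Editor's illustrative sketch (hypothetical helper, not part of the original file): a plain-Python
+ # reference for SparseAdd. Both COO operands are accumulated into a dict keyed by position, and
+ # entries whose magnitude falls below `thresh` are dropped, as described above.
+ def _coo_add_sketch(x1_indices, x1_values, x2_indices, x2_values, shape, thresh):
+     import numpy as np  # assumed available; used only by this sketch
+     acc = {}
+     pairs = list(zip(np.asarray(x1_indices).tolist(), np.asarray(x1_values).tolist()))
+     pairs += list(zip(np.asarray(x2_indices).tolist(), np.asarray(x2_values).tolist()))
+     for pos, val in pairs:
+         acc[tuple(pos)] = acc.get(tuple(pos), 0) + val
+     kept = sorted((pos, val) for pos, val in acc.items() if abs(val) >= thresh)
+     # Docstring example: indices [[0 0] [0 1] [1 1] [1 2]], values [3 1 4 2], shape [3 4].
+     return np.array([p for p, _ in kept]), np.array([v for _, v in kept]), np.asarray(shape)
+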
1562
+ class SparseMatrixSoftmax(Primitive):
1563
+ """
1564
+ Calculates the softmax of a CSR sparse matrix.
1565
+
1566
+ .. warning::
1567
+ This is an experimental API that is subject to change or deletion.
1568
+
1569
+ Args:
1570
+ dtype (dtype.Number): The data type of the output. Only float32 and float64 are supported, and only a constant value is allowed.
1571
+
1572
+ Inputs:
1573
+ - **x_dense_shape** (Tensor) - A 1-D Tensor. The dense form shape of the input CSR sparse matrix.
1574
+ - **x_batch_pointers** (Tensor) - A 1-D Tensor. The accumulated counts of nonzero values in each batch of the input.
1575
+ - **x_row_pointers** (Tensor) - A 1-D Tensor. The accumulated counts of nonzero values up to each row of the input.
1576
+ - **x_col_indices** (Tensor) - A 1-D Tensor. The column indices of the nonzero values in the input.
1577
+ - **x_values** (Tensor) - A 1-D Tensor. The nonzero values of the input CSR sparse matrix.
1578
+
1579
+ Outputs:
1580
+ - **y_dense_shape** (Tensor) - A 1-D Tensor. The dense form shape of the output CSR sparse matrix.
1581
+ - **y_batch_pointers** (Tensor) - A 1-D Tensor. The accumulated counts of nonzero values in each batch of the output.
1582
+ - **y_row_pointers** (Tensor) - A 1-D Tensor. The accumulated counts of nonzero values up to each row of the output.
1583
+ - **y_col_indices** (Tensor) - A 1-D Tensor. The column indices of the nonzero values in the output.
1584
+ - **y_values** (Tensor) - A 1-D Tensor. The nonzero values of the output CSR sparse matrix, i.e. the softmax result.
1585
+
1586
+ Supported Platforms:
1587
+ ``GPU`` ``CPU``
1588
+
1589
+ Examples:
1590
+ >>> import mindspore as ms
1591
+ >>> import mindspore.common.dtype as mstype
1592
+ >>> from mindspore import Tensor, CSRTensor
1593
+ >>> from mindspore.ops.operations.sparse_ops import SparseMatrixSoftmax
1594
+ >>> logits_indptr = Tensor([0, 4, 6], dtype=mstype.int32)
1595
+ >>> logits_indices = Tensor([0, 2, 3, 4, 3, 4], dtype=mstype.int32)
1596
+ >>> logits_values = Tensor([1, 2, 3, 4, 1, 2], dtype=mstype.float32)
1597
+ >>> shape = (2, 6)
1598
+ >>> logits = CSRTensor(logits_indptr, logits_indices, logits_values, shape)
1599
+ >>> net = SparseMatrixSoftmax(mstype.float32)
1600
+ >>> logits_pointers =Tensor(logits.values.shape[0], mstype.int32)
1601
+ >>> out = net(Tensor(logits.shape, dtype=mstype.int32), logits_pointers,
1602
+ ... logits.indptr, logits.indices, logits.values)
1603
+ >>> print(out)
1604
+ (Tensor(shape=[2], dtype=Int32, value= [2, 6]),
1605
+ Tensor(shape=[], dtype=Int32, value= 6),
1606
+ Tensor(shape=[3], dtype=Int32, value= [0, 4, 6]),
1607
+ Tensor(shape=[6], dtype=Int32, value= [0, 2, 3, 4, 3, 4]),
1608
+ Tensor(shape=[6], dtype=Float32, value= [ 3.20586003e-02, 8.71443152e-02,
1609
+ 2.36882806e-01, 6.43914223e-01, 2.68941432e-01, 7.31058598e-01]))
1610
+ """
1611
+
1612
+ @prim_attr_register
1613
+ def __init__(self, dtype):
1614
+ '''Initialize for SparseMatrixSoftmax'''
1615
+ if not isinstance(dtype, (type(mstype.float32), type(mstype.single), type(mstype.float64),
1616
+ type(mstype.double))):
1617
+ raise TypeError(
1618
+ f"Only float32 and float64 type data are supported, but got {dtype}")
1619
+ self.add_prim_attr("dtype", dtype)
1620
+ self.init_prim_io_names(inputs=['x_dense_shape', 'x_batch_pointers', 'x_row_pointers',
1621
+ 'x_col_indices', 'x_values'],
1622
+ outputs=['y_dense_shape', 'y_batch_pointers', 'y_row_pointers', 'y_col_indices',
1623
+ 'y_values'])
1624
+
1625
+
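+ # Editor's illustrative sketch (hypothetical helper, not part of the original file): what
+ # SparseMatrixSoftmax computes on the values of a single (non-batched) CSR matrix; the sparsity
+ # structure (shape, pointers, column indices) passes through unchanged.
+ def _csr_softmax_sketch(row_pointers, values):
+     import numpy as np  # assumed available; used only by this sketch
+     values = np.asarray(values, dtype=np.float64)
+     out = values.copy()
+     for start, end in zip(row_pointers[:-1], row_pointers[1:]):
+         row = values[int(start):int(end)]
+         if row.size:
+             e = np.exp(row - row.max())              # numerically stable row-wise softmax
+             out[int(start):int(end)] = e / e.sum()
+     # Docstring example: softmax([1, 2, 3, 4]) and softmax([1, 2]) computed independently per row.
+     return out
+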
1626
+ class CSRSparseMatrixToDense(Primitive):
1627
+ """
1628
+ Converts a CSR sparse matrix (possibly batched) to its dense form.
1629
+
1630
+ Note:
1631
+ It is assumed that all the inputs can form a legal CSR sparse matrix, otherwise this operator won't work.
1632
+
1633
+ Inputs:
1634
+ - **x_dense_shape** (Tensor) - A 1-D Tensor. It represents the dense form shape of
1635
+ the input CSR sparse matrix, the shape of which should be :math:`(2,)` or :math:`(3,)`.
1636
+ - **x_batch_pointers** (Tensor) - A 1-D Tensor. Supposing the input CSR sparse matrix is of
1637
+ batch size `n`, it should have shape :math:`(n+1,)`, while the `i`-th element of which stores
1638
+ accumulated counts of nonzero values of the first `i - 1` batches.
1639
+ - **x_row_pointers** (Tensor) - A 1-D Tensor. Supposing the input CSR sparse matrix is of
1640
+ batch size `n` and row number `m`, it can be divided into `n` parts, each part of length
1641
+ `m + 1`. The `i`-th element of each :math:`(m+1,)` vector stores accumulated counts of
1642
+ nonzero values of the first `i - 1` rows in the corresponding batch.
1643
+ - **x_col_indices** (Tensor) - A 1-D Tensor. It represents column indices of the nonzero values
1644
+ in the input CSR sparse matrix.
1645
+ - **x_values** (Tensor) - A 1-D Tensor. It represents all the nonzero values in the
1646
+ input CSR sparse matrix.
1647
+
1648
+ Outputs:
1649
+ Tensor, which is the dense form of the input CSR sparse matrix.
1650
+ Its dtype is the same as `x_values`.
1651
+
1652
+ Raises:
1653
+ TypeError: If the dtype of `x_dense_shape`, `x_batch_pointers`, `x_row_pointers` or `x_col_indices`
1654
+ is not int32 or int64, or the dtypes of above inputs are not the same.
1655
+ TypeError: If the dtype of `x_values` is not float32, float64, complex64 or complex128.
1656
+ TypeError: If any of the inputs is not a tensor.
1657
+ ValueError: If any of the inputs is not 1-D.
1658
+ ValueError: If shape[0] of `x_dense_shape` is not 2 or 3.
1659
+
1660
+ Supported Platforms:
1661
+ ``Ascend`` ``CPU``
1662
+
1663
+ Examples:
1664
+ >>> import mindspore
+ >>> from mindspore import Tensor
+ >>> from mindspore.ops.operations.sparse_ops import CSRSparseMatrixToDense
+ >>> dense_shape = Tensor([2, 2], dtype=mindspore.int32)
1665
+ >>> batch_pointers = Tensor([0, 1], dtype=mindspore.int32)
1666
+ >>> row_pointers = Tensor([0, 1, 1], dtype=mindspore.int32)
1667
+ >>> col_indices = Tensor([1], dtype=mindspore.int32)
1668
+ >>> values = Tensor([1.], dtype=mindspore.float32)
1669
+ >>> csr_to_dense = CSRSparseMatrixToDense()
1670
+ >>> out = csr_to_dense(dense_shape, batch_pointers, row_pointers, col_indices, values)
1671
+ >>> print(out)
1672
+ [[0. 1.]
1673
+ [0. 0.]]
1674
+ """
1675
+
1676
+ @prim_attr_register
1677
+ def __init__(self):
1678
+ """Initialize CSRSparseMatrixToDense"""
1679
+ self.init_prim_io_names(
1680
+ inputs=['x_dense_shape', 'x_batch_pointers',
1681
+ 'x_row_pointers', 'x_col_indices', 'x_values'],
1682
+ outputs=['y'])
1683
+
1684
+
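+ # Editor's illustrative sketch (hypothetical helper, not part of the original file): the
+ # single-matrix (non-batched) case of CSRSparseMatrixToDense expressed with SciPy, assuming
+ # scipy is available. The batched case simply repeats this per batch.
+ def _csr_to_dense_sketch(dense_shape, row_pointers, col_indices, values):
+     from scipy.sparse import csr_matrix  # assumed available; used only by this sketch
+     # Docstring example: shape (2, 2), indptr [0, 1, 1], indices [1], data [1.] -> [[0. 1.] [0. 0.]].
+     return csr_matrix((values, col_indices, row_pointers), shape=tuple(dense_shape)).toarray()
+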
1685
+ class SparseMatrixTranspose(Primitive):
1686
+ r"""
1687
+ Returns the transpose of a sparse matrix or a batch of sparse matrices.
1688
+ If the sparse matrix input contains a batch dimension, the output keeps the same batch dimension.
1689
+ The rank of the sparse matrix input must be equal to `2` or `3`.
1690
+
1691
+ Note:
1692
+ It is assumed that all the inputs can form a legal CSR sparse matrix, otherwise this operator is not defined.
1693
+
1694
+ Args:
1695
+ conjugate (bool): If ``True`` , the output sparse tensor is conjugated . Default: ``False`` .
1696
+
1697
+ Inputs:
1698
+ - **dense_shape** (Tensor) - A 1-D Tensor, represents the shape of input sparse matrix under dense status.
1699
+ Support int32, int64. The shape is :math:`(2,)` or :math:`(3,)`.
1700
+ - **batch_pointers** (Tensor) - A 1-D Tensor, represents the non-zero elements number in each batch.
1701
+ Support int32, int64, takes on values: :math:`(0, nnz[0], nnz[0] + nnz[1], ..., total\_nnz)`.
1702
+ If there are `n` batch within input sparse matrix, the shape is :math:`(n+1)`.
1703
+ - **row_pointers** (Tensor) - A 1-D Tensor, represents the non-zero elements of each row.
1704
+ Support int32, int64, takes on values:
1705
+ :math:`(0, num\_rows\{b\}[0], num\_rows\{b\}[0] + num\_rows\{b\}[1], ..., nnz[b])`,
1706
+ for :math:`b = 0, ..., n - 1`.
1707
+ If there are `n` batch within input sparse matrix and dense shape is :math:`(rows,cols)`,
1708
+ the shape is :math:`((rows + 1) * n)`.
1709
+ Note: num_rows{0}[0] means the non-zero elements number in the first row of first sparse matrix.
1710
+ - **col_indices** (Tensor) - A 1-D Tensor, represents the column values for the given row and column index.
1711
+ Support int32, int64. The shape is :math:`(M)`,
1712
+ where `M` is the number of non-zero elements in all input sparse matrix.
1713
+ - **values** (Tensor) - A 1-D Tensor, represents the actual values for the given row and column index.
1714
+ Support BasicType. The shape is :math:`(M)`, where `M` is the number of non-zero elements in all
1715
+ input sparse matrix.
1716
+
1717
+ Outputs:
1718
+ - **dense_shape** (Tensor) - A 1-D Tensor, represents the shape of output sparse matrix under dense status.
1719
+ Support int32, int64. The shape is the same as the input sparse matrix.
1720
+ - **batch_pointers** (Tensor) - A 1-D Tensor, which is the same as the input sparse matrix's batch_pointers.
1721
+ - **row_pointers** (Tensor) - A 1-D Tensor, represents the non-zero elements of each row of output sparse
1722
+ matrix. Support int32, int64, takes on values:
1723
+ :math:`(0, num\_rows\{b\}[0], num\_rows\{b\}[0] + num\_rows\{b\}[1], ..., nnz[b])`,
1724
+ for :math:`b = 0, ..., n - 1`.
1725
+ If there are `n` batch within output sparse matrix and dense shape is :math:`(rows,cols)`,
1726
+ the shape is :math:`((rows + 1) * n)`.
1727
+ Note: num_rows{0}[0] means the non-zero elements number in the first row of first sparse matrix.
1728
+ - **col_indices** (Tensor) - A 1-D Tensor, represents the column values for the given row and column index.
1729
+ Support int32, int64. The shape is :math:`(M)`,
1730
+ where `M` is the number of non-zero elements in all input sparse matrix.
1731
+ - **values** (Tensor) - A 1-D Tensor, which is the same as the input sparse matrix's values.
1732
+
1733
+ Raises:
1734
+ TypeError: If dtype of `values` doesn't meet the parameter description.
1735
+ TypeError: The data type of `dense_shape, batch_pointers, row_pointers, col_indices` is not int32 or int64.
1736
+ ValueError: If rank of `dense_shape` is not 2 or 3.
1737
+ TypeError: If the input data does not form a valid CSR sparse matrix.
1738
+
1739
+ Supported Platforms:
1740
+ ``Ascend`` ``CPU``
1741
+
1742
+ Examples:
1743
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> from mindspore.ops.operations.sparse_ops import SparseMatrixTranspose
1744
+ >>> dense_shape = Tensor([2,3], dtype=ms.int32)
1745
+ >>> batch_pointers = Tensor([0,1], dtype=ms.int32)
1746
+ >>> row_pointers = Tensor([0,1,1], dtype=ms.int32)
1747
+ >>> col_indices = Tensor([0], dtype=ms.int32)
1748
+ >>> values = Tensor([99], dtype=ms.float32)
1749
+ >>> sparse_matrix_transpose = SparseMatrixTranspose()
1750
+ >>> output = sparse_matrix_transpose(dense_shape, batch_pointers, row_pointers, col_indices, values)
1751
+ >>> print(output[0])
1752
+ [3 2]
1753
+ >>> print(output[1])
1754
+ [0 1]
1755
+ >>> print(output[2])
1756
+ [0 1 1 1]
1757
+ >>> print(output[3])
1758
+ [0]
1759
+ >>> print(output[4])
1760
+ [99.]
1761
+ """
1762
+
1763
+ @prim_attr_register
1764
+ def __init__(self, conjugate=False):
1765
+ """Initialize SparseMatrixTranspose"""
1766
+ validator.check_value_type("conjugate", conjugate, [bool], self.name)
1767
+ self.add_prim_attr("max_length", 100000000)
1768
+ self.init_prim_io_names(inputs=['x_dense_shape', 'x_batch_pointers', 'x_row_pointers',
1769
+ 'x_col_indices', 'x_values'],
1770
+ outputs=['y_dense_shape', 'y_batch_pointers', 'y_row_pointers',
1771
+ 'y_col_indices', 'y_values'])
1772
+
1773
+
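+ # Editor's illustrative sketch (hypothetical helper, not part of the original file): the
+ # single-matrix (non-batched) case of SparseMatrixTranspose via SciPy, assuming scipy is
+ # available: build the CSR matrix, transpose it, and read the CSR components back out.
+ def _csr_transpose_sketch(dense_shape, row_pointers, col_indices, values):
+     from scipy.sparse import csr_matrix  # assumed available; used only by this sketch
+     t = csr_matrix((values, col_indices, row_pointers), shape=tuple(dense_shape)).transpose().tocsr()
+     # Docstring example: shape (3, 2), row pointers [0 1 1 1], col indices [0], values [99.].
+     return t.shape, t.indptr, t.indices, t.data
+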
1774
+ class SparseSparseMinimum(Primitive):
1775
+ r"""
1776
+ Returns the element-wise min of two SparseTensors.
1777
+
1778
+ Inputs:
1779
+ - **x1_indices** (Tensor) - A 2-D Tensor. It represents the position of the non-zero element
1780
+ in the first sparse tensor.
1781
+ - **x1_values** (Tensor) - A 1-D Tensor. It represents the value corresponding to the position
1782
+ in the `x1_indices`, the shape of which should be :math:`(N,)`.
1783
+ - **x1_shape** (Tensor) - A 1-D Tensor. It represents the shape of the input sparse tensor,
1784
+ the shape of which should be :math:`(N,)`.
1785
+ - **x2_indices** (Tensor) - A 2-D Tensor. It represents the position of the non-zero element
1786
+ in the second sparse tensor.
1787
+ - **x2_values** (Tensor) - A 1-D Tensor. It represents the value corresponding to the position
1788
+ in the `x2_indices`, the shape of which should be :math:`(N,)`.
1789
+ - **x2_shape** (Tensor) - A 1-D Tensor. It represents the shape of the input sparse tensor,
1790
+ the shape of which should be :math:`(N,)`.
1791
+
1792
+ Outputs:
1793
+ - **y_indices** (Tensor) - A 2-D Tensor. It represents the position of the element-wise min of
1794
+ two input tensors.
1795
+ - **y_values** (Tensor) - A 1-D Tensor. It represents the value corresponding to the position
1796
+ in the `y_indices`.
1797
+
1798
+ Raises:
1799
+ TypeError: The dtype of `x1_indices`, `x1_shape`, `x2_indices` or `x2_shape` is wrong.
1800
+ TypeError: The dtype of `x1_values` or `x2_values` is wrong.
1801
+ TypeError: If `x1_indices`, `x1_values`, `x1_shape`, `x2_indices`, `x2_values`, `x2_shape`
1802
+ is not a tensor.
1803
+ TypeError: If `x1_indices` is not a 2-D tensor.
1804
+ TypeError: If `x2_indices` is not a 2-D tensor.
1805
+ ValueError: If any of `x1_values` and `x1_shape` is not a 1-D tensor.
1806
+ ValueError: If shape[0] of `x1_indices` does not correspond to shape[0] of `x1_values`.
1807
+ ValueError: If shape[1] of `x1_indices` does not correspond to shape[0] of `x1_shape`.
1808
+ ValueError: If any of `x2_values` and `x2_shape` is not a 1-D tensor.
1809
+ ValueError: If shape[0] of `x2_indices` does not correspond to shape[0] of `x2_values`.
1810
+ ValueError: If shape[1] of `x2_indices` does not correspond to shape[0] of `x2_shape`.
1811
+ ValueError: If shape[0] of `x1_shape` does not correspond to shape[0] of `x2_shape`.
1812
+
1813
+ Supported Platforms:
1814
+ ``GPU`` ``CPU``
1815
+
1816
+ Examples:
1817
+ >>> from mindspore.ops.operations.sparse_ops import SparseSparseMinimum
1818
+ >>> x1_indices = Tensor(np.array([[0, 0, 0], [0, 1, 0], [0, 1, 1]]).astype(np.int64))
1819
+ >>> x1_values = Tensor([1, 2, 3], dtype=mstype.float32)
1820
+ >>> x1_shape = Tensor(np.array([2, 2, 2]).astype(np.int64))
1821
+ >>> x2_indices = Tensor(np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]]).astype(np.int64))
1822
+ >>> x2_values = Tensor([2, 4, 5], dtype=mstype.float32)
1823
+ >>> x2_shape = Tensor(np.array([2, 2, 2]).astype(np.int64))
1824
+ >>> sparse_sparse_minimum = ops.SparseSparseMinimum()
1825
+ >>> out = sparse_sparse_minimum(x1_indices, x1_values, x1_shape, x2_indices, x2_values, x2_shape)
1826
+ >>> print(out[0])
1827
+ [[0 0 0]
1828
+ [0 1 0]
1829
+ [0 1 1]
1830
+ [1 0 0]]
1831
+ >>> print(out[1])
1832
+ [1. 2. 0. 0.]
1833
+ """
1834
+
1835
+ @prim_attr_register
1836
+ def __init__(self):
1837
+ """Initialize SparseSparseMinimum."""
1838
+ self.init_prim_io_names(inputs=['x1_indices', 'x1_values', 'x1_shape', 'x2_indices', 'x2_values', 'x2_shape'],
1839
+ outputs=['y_indices', 'y_values'])
1840
+
1841
+
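+ # Editorial sketch, not part of sparse_ops.py: the element-wise minimum in the example above
+ # can be reproduced densely with numpy (assumed available, illustration only).
+ import numpy as np
+
+ def _to_dense(indices, values, shape):
+     d = np.zeros(shape, dtype=np.float32)
+     d[tuple(np.array(indices).T)] = values
+     return d
+
+ d1 = _to_dense([[0, 0, 0], [0, 1, 0], [0, 1, 1]], [1, 2, 3], (2, 2, 2))
+ d2 = _to_dense([[0, 0, 0], [0, 1, 0], [1, 0, 0]], [2, 4, 5], (2, 2, 2))
+ m = np.minimum(d1, d2)
+ # positions from the union of both index sets, in row-major order
+ union = np.array([[0, 0, 0], [0, 1, 0], [0, 1, 1], [1, 0, 0]])
+ print(m[tuple(union.T)])  # [1. 2. 0. 0.] -> y_values in the example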
1842
+ class SparseTensorToCSRSparseMatrix(Primitive):
1843
+ """
1844
+ Converts a sparse tensor to its CSR sparse matrix (possibly batched) form.
1845
+
1846
+ Inputs:
1847
+ - **x_indices** (Tensor) - A 2-D Tensor. It represents the position of the non-zero element
1848
+ in the sparse tensor. Support int32, int64.
1849
+ - **x_values** (Tensor) - A 1-D Tensor. It represents the value corresponding to the position
1850
+ in the `x_indices`, the shape of which should be :math:`(N,)`.
1851
+ - **x_dense_shape** (Tensor) - A 1-D Tensor. It represents the dense form shape of
1852
+ the input sparse tensor. Its shape should be :math:`(2,)` or :math:`(3,)`. Support int32, int64.
1853
+ Outputs:
1854
+ - **y_dense_shape** (Tensor) - A 1-D Tensor. It represents the dense form shape of
1855
+ the output CSR sparse matrix, the shape of which should be :math:`(2,)` or :math:`(3,)`.
1856
+ - **y_batch_pointers** (Tensor) - A 1-D Tensor. Supposing the output CSR sparse matrix is of
1857
+ batch size `n`, it should have shape :math:`(n+1,)`, where the `i`-th element stores the
1858
+ accumulated count of non-zero values of the first `i - 1` batches.
1859
+ - **y_row_pointers** (Tensor) - A 1-D Tensor. Supposing the output CSR sparse matrix is of
1860
+ batch size `n` and row number `m`, it can be divided into `n` parts, each part of length
1861
+ `m + 1`. The `i`-th element of each :math:`(m+1,)` vector stores the accumulated count of
1862
+ non-zero values of the first `i - 1` rows in the corresponding batch.
1863
+ - **y_col_indices** (Tensor) - A 1-D Tensor. It represents column indices of the non-zero values
1864
+ in the output CSR sparse matrix.
1865
+ - **y_values** (Tensor) - A 1-D Tensor. It represents all the non-zero values in the
1866
+ output CSR sparse matrix.
1867
+
1868
+ Raises:
1869
+ TypeError: If the dtype of `x_indices` or `x_dense_shape` is not int32 or int64.
1870
+ TypeError: If the dtype of `x_values` is not one of: float32, float64, complex64 or complex128.
1871
+ ValueError: If `x_indices` or `x_values` or `x_dense_shape` is not a tensor.
1872
+ ValueError: If any of `x_values` and `x_dense_shape` is not a 1-D tensor.
1873
+ ValueError: If `x_indices` is not a 2-D tensor.
1874
+ ValueError: If shape[0] of `x_indices` does not correspond to shape[0] of `x_values`.
1875
+ ValueError: If shape[1] of `x_indices` does not correspond to shape[0] of `x_dense_shape`.
1876
+
1877
+ Supported Platforms:
1878
+ ``Ascend`` ``CPU``
1879
+
1880
+ Examples:
1881
+ >>> from mindspore.ops.operations.sparse_ops import SparseTensorToCSRSparseMatrix
1882
+ >>> x_indices = Tensor(np.array([[0, 0, 1], [0, 1, 2], [0, 1, 3], [1, 0, 1], [1, 1, 2],\
1883
+ [1, 1, 3]]).astype(np.int64))
1884
+ >>> x_values = Tensor(np.array([1, 4, 3, 1, 4, 3]).astype(np.float32))
1885
+ >>> x_dense_shape = Tensor(np.array([2, 2, 4]).astype(np.int64))
1886
+ >>> sparse_tensor_to_csr_sparse_matrix = SparseTensorToCSRSparseMatrix()
1887
+ >>> out = sparse_tensor_to_csr_sparse_matrix(x_indices, x_values, x_dense_shape)
1888
+ >>> print(out[0])
1889
+ [2 2 4]
1890
+ >>> print(out[1])
1891
+ [0 3 6]
1892
+ >>> print(out[2])
1893
+ [0 1 3 0 1 3]
1894
+ >>> print(out[3])
1895
+ [1 2 3 1 2 3]
1896
+ >>> print(out[4])
1897
+ [1. 4. 3. 1. 4. 3.]
1898
+ """
1899
+
1900
+ @prim_attr_register
1901
+ def __init__(self):
1902
+ """Initialize SparseTensorToCSRSparseMatrix."""
1903
+ self.init_prim_io_names(
1904
+ inputs=['x_indices', 'x_values', 'x_dense_shape'],
1905
+ outputs=['y_dense_shape', 'y_batch_pointers', 'y_row_pointers', 'y_col_indices', 'y_values'])
1906
+
1907
+
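+ # Editorial sketch, not part of sparse_ops.py: for the batched example above, the CSR
+ # components can be rebuilt batch by batch with scipy.sparse (scipy/numpy assumed
+ # available, illustration only).
+ import numpy as np
+ from scipy.sparse import csr_matrix
+
+ coo_indices = np.array([[0, 0, 1], [0, 1, 2], [0, 1, 3], [1, 0, 1], [1, 1, 2], [1, 1, 3]])
+ coo_values = np.array([1, 4, 3, 1, 4, 3], dtype=np.float32)
+ dense_shape = (2, 2, 4)  # (batch, rows, cols)
+
+ row_ptrs, col_idx, batch_ptrs = [], [], [0]
+ for b in range(dense_shape[0]):
+     sel = coo_indices[:, 0] == b
+     m = csr_matrix((coo_values[sel], (coo_indices[sel, 1], coo_indices[sel, 2])),
+                    shape=dense_shape[1:])
+     row_ptrs.extend(m.indptr.tolist())     # (rows + 1) pointers per batch
+     col_idx.extend(m.indices.tolist())
+     batch_ptrs.append(batch_ptrs[-1] + int(m.nnz))
+ print(batch_ptrs)  # [0, 3, 6]          -> y_batch_pointers
+ print(row_ptrs)    # [0, 1, 3, 0, 1, 3] -> y_row_pointers
+ print(col_idx)     # [1, 2, 3, 1, 2, 3] -> y_col_indices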
1908
+ class SparseMatrixSparseMatMul(Primitive):
1909
+ r"""
1910
+ Performs a matrix multiplication of a sparse matrix x1 with a sparse matrix x2; returns a sparse matrix x1*x2.
1911
+ Each matrix may be transposed or adjointed (conjugated and transposed),
1912
+ according to the Boolean parameters transpose_a, adjoint_a, transpose_b and adjoint_b.
1913
+ At most one of transpose_a or adjoint_a may be ``True`` . Similarly, at most one of transpose_b or adjoint_b may
1914
+ be ``True`` .
1915
+
1916
+ Args:
1917
+ transpose_a (bool): If ``True`` , sparse tensor x1 is transposed before multiplication. Default: ``False`` .
1918
+ transpose_b (bool): If ``True`` , sparse tensor x2 is transposed before multiplication. Default: ``False`` .
1919
+ adjoint_a (bool): If ``True`` , sparse tensor x1 is adjointed before multiplication. Default: ``False`` .
1920
+ adjoint_b (bool): If ``True`` , sparse tensor x2 is adjointed before multiplication. Default: ``False`` .
1921
+
1922
+ Inputs:
1923
+ - **x1_dense_shape** (Tensor) - A 1-D Tensor, represents the shape of input sparse matrix x1 under dense status.
1924
+ Support int32, int64. The shape is :math:`(2)` or :math:`(3)`.
1925
+ - **x1_batch_pointers** (Tensor) - A 1-D Tensor, represents the number of non-zero elements in each batch.
1926
+ Support int32, int64, takes on values: :math:`(0, nnz[0], nnz[0] + nnz[1], ..., total\_nnz)`.
1927
+ If there are `n` batch within input sparse matrix x1, the shape is :math:`(n+1)`.
1928
+ - **x1_row_pointers** (Tensor) - A 1-D Tensor, represents the non-zero elements of each row.
1929
+ Support int32, int64, takes on values:
1930
+ :math:`(0, num\_rows\{b\}[0], num\_rows\{b\}[0] + num\_rows\{b\}[1], ..., nnz[b])`,
1931
+ for :math:`b = 0, ..., n - 1`.
1932
+ If there are `n` batch within input sparse matrix x1 and dense shape is :math:`(rows,cols)`,
1933
+ the shape is :math:`((rows + 1) * n)`.
1934
+ Note: num_rows{0}[0] means the number of non-zero elements in the first row of the first batch of sparse matrix x1.
1935
+ - **x1_col_indices** (Tensor) - A 1-D Tensor, represents the column values for the given row and column index.
1936
+ Support int32, int64. The shape is :math:`(M)`,
1937
+ where `M` is the number of non-zero elements in input sparse matrix x1.
1938
+ - **x1_values** (Tensor) - A 1-D Tensor, represents the actual values for the given row and column index.
1939
+ Support float32, double, complex64, complex128.
1940
+ The shape is :math:`(M)`, where `M` is the number of non-zero elements in input sparse matrix x1.
1941
+
1942
+ - **x2_dense_shape** (Tensor) - A 1-D Tensor, represents the shape of input sparse matrix x2 under dense status.
1943
+ Support int32, int64. The shape is :math:`(2)` or :math:`(3)`.
1944
+ - **x2_batch_pointers** (Tensor) - A 1-D Tensor, represents the number of non-zero elements in each batch.
1945
+ Support int32, int64, takes on values: :math:`(0, nnz[0], nnz[0] + nnz[1], ..., total\_nnz)`.
1946
+ If there are `n` batch within input sparse matrix x2, the shape is :math:`(n+1)`.
1947
+ - **x2_row_pointers** (Tensor) - A 1-D Tensor, represents the non-zero elements of each row.
1948
+ Support int32, int64, takes on values:
1949
+ :math:`(0, num\_rows\{b\}[0], num\_rows\{b\}[0] + num\_rows\{b\}[1], ..., nnz[b])`,
1950
+ for :math:`b = 0, ..., n - 1`.
1951
+ If there are `n` batch within input sparse matrix x2 and dense shape is :math:`(rows,cols)`,
1952
+ the shape is :math:`((rows + 1) * n)`.
1953
+ Note: num_rows{0}[0] means the number of non-zero elements in the first row of sparse matrix x2.
1954
+ - **x2_col_indices** (Tensor) - A 1-D Tensor, represents the column values for the given row and column index.
1955
+ Support int32, int64. The shape is :math:`(M)`,
1956
+ where `M` is the number of non-zero elements in input sparse matrix x2.
1957
+ - **x2_values** (Tensor) - A 1-D Tensor, represents the actual values for the given row and column index.
1958
+ Support float32, double, complex64, complex128.
1959
+ The shape is :math:`(M)`, where `M` is the number of non-zero elements in input sparse matrix x2.
1960
+
1961
+ Outputs:
1962
+ - **y_dense_shape** (Tensor) - A 1-D Tensor, represents the shape of output sparse matrix y under dense status.
1963
+ Support int32, int64. The shape is :math:`(2)` or :math:`(3)`.
1964
+ - **y_batch_pointers** (Tensor) - A 1-D Tensor, represents the number of non-zero elements in each batch.
1965
+ Support int32, int64, takes on values: :math:`(0, nnz[0], nnz[0] + nnz[1], ..., total\_nnz)`.
1966
+ If there are `n` batch within output sparse matrix y, the shape is :math:`(n+1)`.
1967
+ - **y_row_pointers** (Tensor) - A 1-D Tensor, represents the non-zero elements of each row.
1968
+ Support int32, int64, takes on values:
1969
+ :math:`(0, num\_rows\{b\}[0], num\_rows\{b\}[0] + num\_rows\{b\}[1], ..., nnz[b])`,
1970
+ for :math:`b = 0, ..., n - 1`.
1971
+ If there are `n` batch within output sparse matrix y and dense shape is :math:`(rows,cols)`,
1972
+ the shape is :math:`((rows + 1) * n)`.
1973
+ Note: num_rows{0}[0] means the number of non-zero elements in the first row of sparse matrix y.
1974
+ - **y_col_indices** (Tensor) - A 1-D Tensor, represents the column values for the given row and column index.
1975
+ Support int32, int64. The shape is :math:`(M)`,
1976
+ where `M` is the number of non-zero elements in output sparse matrix y.
1977
+ - **y_values** (Tensor) - A 1-D Tensor, represents the actual values for the given row and column index.
1978
+ Support float32, double, complex64, complex128.
1979
+ The shape is :math:`(M)`, where `M` is the number of non-zero elements in output sparse matrix y.
1980
+
1981
+ Raises:
1982
+ TypeError: If any dtype of `x1_dense_shape`, `x1_batch_pointers`, `x1_row_pointers`, `x1_col_indices`,
1983
+ `x1_values` or `x2_dense_shape`, `x2_batch_pointers`, `x2_row_pointers`, `x2_col_indices`,
1984
+ `x2_values` doesn't meet the parameter description.
1985
+ ValueError: If rank of `x1_dense_shape` or `x2_dense_shape` is not 2 or 3.
1986
+
1987
+ Supported Platforms:
1988
+
1989
+
1990
+ Examples:
1991
+ >>> from mindspore.ops.operations.sparse_ops import SparseMatrixSparseMatMul
1992
+ >>> x1_dense_shape = Tensor([4, 5], dtype=mindspore.int32)
1993
+ >>> x1_batch_pointers = Tensor([0, 4], dtype=mindspore.int32)
1994
+ >>> x1_row_pointers = Tensor([0, 1, 1, 3, 4], dtype=mindspore.int32)
1995
+ >>> x1_col_indices = Tensor([0, 3, 4, 0], dtype=mindspore.int32)
1996
+ >>> x1_values = Tensor([1.0, 5.0, -1.0, -2.0], dtype=mindspore.float32)
1997
+ >>> x2_dense_shape = Tensor([5, 3], dtype=mindspore.int32)
1998
+ >>> x2_batch_pointers = Tensor([0, 3], dtype=mindspore.int32)
1999
+ >>> x2_row_pointers = Tensor([0, 1, 1, 3, 3, 3], dtype=mindspore.int32)
2000
+ >>> x2_col_indices = Tensor([0, 0, 1], dtype=mindspore.int32)
2001
+ >>> x2_values = Tensor([2.0, 7.0, 8.0], dtype=mindspore.float32)
2002
+ >>> sparse_matrix_sparse_mat_mul = SparseMatrixSparseMatMul()
2003
+ >>> out_dense_shape, out_batch_pointers, out_row_pointers, out_col_indices, out_values = \
2004
+ ... sparse_matrix_sparse_mat_mul(x1_dense_shape, x1_batch_pointers, x1_row_pointers, x1_col_indices, x1_values,
2005
+ ... x2_dense_shape, x2_batch_pointers, x2_row_pointers, x2_col_indices, x2_values)
2006
+ >>> print(out_dense_shape)
2007
+ [4 3]
2008
+ >>> print(out_batch_pointers)
2009
+ [0 2]
2010
+ >>> print(out_row_pointers)
2011
+ [0 1 1 1 2]
2012
+ >>> print(out_col_indices)
2013
+ [0 0]
2014
+ >>> print(out_values)
2015
+ [ 2. -4.]
2016
+ """
2017
+
2018
+ @prim_attr_register
2019
+ def __init__(self, transpose_a=False, transpose_b=False, adjoint_a=False, adjoint_b=False):
2020
+ """Initialize SparseMatrixSparseMatMul"""
2021
+ validator.check_value_type(
2022
+ "transpose_a", transpose_a, [bool], self.name)
2023
+ validator.check_value_type(
2024
+ "transpose_b", transpose_b, [bool], self.name)
2025
+ validator.check_value_type("adjoint_a", adjoint_b, [bool], self.name)
2026
+ validator.check_value_type("adjoint_b", adjoint_b, [bool], self.name)
2027
+ self.init_prim_io_names(
2028
+ inputs=['x1_dense_shape', 'x1_batch_pointers', 'x1_row_pointers', 'x1_col_indices', 'x1_values',
2029
+ 'x2_dense_shape', 'x2_batch_pointers', 'x2_row_pointers', 'x2_col_indices', 'x2_values'],
2030
+ outputs=['y_dense_shape', 'y_batch_pointers', 'y_row_pointers', 'y_col_indices', 'y_values'])
2031
+
2032
+
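+ # Editorial sketch, not part of sparse_ops.py: the single-batch example above can be
+ # cross-checked against scipy's CSR-times-CSR product (scipy assumed available,
+ # illustration only).
+ from scipy.sparse import csr_matrix
+
+ a = csr_matrix(([1.0, 5.0, -1.0, -2.0], [0, 3, 4, 0], [0, 1, 1, 3, 4]), shape=(4, 5))
+ b = csr_matrix(([2.0, 7.0, 8.0], [0, 0, 1], [0, 1, 1, 3, 3, 3]), shape=(5, 3))
+ c = (a @ b).tocsr()
+ print(c.indptr)   # [0 1 1 1 2] -> y_row_pointers
+ print(c.indices)  # [0 0]       -> y_col_indices
+ print(c.data)     # [ 2. -4.]   -> y_values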
2033
+ class SparseMatrixMatMul(Primitive):
2034
+ r"""
2035
+ Performs a matrix multiplication of a sparse matrix x1 with a dense matrix x2; returns a dense matrix x1*x2.
2036
+ Each matrix may be transposed or adjointed (conjugated and transposed)
2037
+ according to the Boolean parameters transpose_x1, adjoint_x1, transpose_x2 and adjoint_x2.
2038
+ At most one of transpose_x1 or adjoint_x1 may be ``True`` .
2039
+ Similarly, at most one of transpose_x2 or adjoint_x2 may be ``True`` .
2040
+
2041
+ Note:
2042
+ It is assumed that all the inputs can form a legal CSR sparse matrix, otherwise this operator is not defined.
2043
+
2044
+ Args:
2045
+ transpose_x1 (bool): If ``True`` , sparse tensor x1 is transposed before multiplication. Default: ``False`` .
2046
+ transpose_x2 (bool): If ``True`` , dense tensor x2 is transposed before multiplication. Default: ``False`` .
2047
+ adjoint_x1 (bool): If ``True`` , sparse tensor x1 is adjointed before multiplication. Default: ``False`` .
2048
+ adjoint_x2 (bool): If ``True`` , dense tensor x2 is adjointed before multiplication. Default: ``False`` .
2049
+ transpose_output (bool): If ``True`` , output x1*x2 is transposed. Default: ``False`` .
2050
+ conjugate_output (bool): If ``True`` , output x1*x2 is conjugated. Default: ``False`` .
2051
+
2052
+ Inputs:
2053
+ - **x1_dense_shape** (Tensor) - A 1-D Tensor. It represents the dense form shape of
2054
+ the input CSR sparse matrix x1, the shape of which should be :math:`(2,)` or :math:`(3,)`.
2055
+ - **x1_batch_pointers** (Tensor) - A 1-D Tensor. Supposing the input CSR sparse matrix x1 is of
2056
+ batch size `n`, it should have shape :math:`(n+1,)`, where the `i`-th element stores the
2057
+ accumulated count of nonzero values of the first `i - 1` batches.
2058
+ - **x1_row_pointers** (Tensor) - A 1-D Tensor. Supposing the input CSR sparse matrix x1 is of
2059
+ batch size `n` and row number `m`, it can be divided into `n` parts, each part of length
2060
+ `m + 1`. The `i`-th element of each :math:`(m+1,)` vector stores the accumulated count of
2061
+ nonzero values of the first `i - 1` rows in the corresponding batch.
2062
+ - **x1_col_indices** (Tensor) - A 1-D Tensor. It represents column indices of the nonzero values
2063
+ in the input CSR sparse matrix x1.
2064
+ - **x1_values** (Tensor) - A 1-D Tensor. It represents all the nonzero values
2065
+ in the input CSR sparse matrix x1. Support float32, float64, complex64, complex128.
2066
+ - **x2_dense** (Tensor) - A 2-D or 3-D Tensor, represents the input dense matrix x2.
2067
+ Its dtype is the same as `x1_values`.
2068
+
2069
+ Outputs:
2070
+ Tensor, which represents the output dense matrix y.
2071
+ Its dtype is the same as `x1_values`.
2072
+
2073
+ Raises:
2074
+ TypeError: If the dtype of `x1_dense_shape`, `x1_batch_pointers`, `x1_row_pointers` or `x1_col_indices`
2075
+ is not int32 or int64, or the dtypes of above inputs are not the same.
2076
+ TypeError: If the dtype of `x1_values` or `x2_dense` is not supported.
2077
+ ValueError: If shape[0] of `x1_dense_shape` or the dimension of `x2_dense` is not 2 or 3.
2078
+ ValueError: If shape[0]-1 of `x1_batch_pointers` and shape[0] of `x2_dense` are not the same.
2079
+
2080
+ Supported Platforms:
2081
+ ``CPU``
2082
+
2083
+ Examples:
2084
+ >>> x1_dense_shape = Tensor([4, 5], dtype=ms.int32)
2085
+ >>> x1_batch_pointers = Tensor([0, 4], dtype=ms.int32)
2086
+ >>> x1_row_pointers = Tensor([0, 1, 1, 3, 4], dtype=ms.int32)
2087
+ >>> x1_col_indices = Tensor([0, 3, 4, 0], dtype=ms.int32)
2088
+ >>> x1_values = Tensor([1.0, 5.0, -1.0, -2.0], dtype=ms.float32)
2089
+ >>> x2_dense = Tensor([[2.0, 0.8, 1.0],[ 2.9, 3.2, 0.0],[7.0, 4.6, 0.2],[3.5, 4.9, 1.4],[4.0, 3.7, 6.9]],
2090
+ ... dtype=ms.float32)
2091
+ >>> sparse_matrix_mat_mul = ops.SparseMatrixMatMul()
2092
+ >>> out = sparse_matrix_mat_mul(x1_dense_shape, x1_batch_pointers, x1_row_pointers, x1_col_indices,
2093
+ ... x1_values, x2_dense)
2094
+ >>> print(out)
2095
+ [[ 2. 0.8 1. ]
2096
+ [ 0. 0. 0. ]
2097
+ [13.5 20.8 0.0999999]
2098
+ [-4. -1.6 -2. ]]
2099
+ """
2100
+
2101
+ @prim_attr_register
2102
+ def __init__(self, transpose_x1=False, transpose_x2=False, adjoint_x1=False, adjoint_x2=False,
2103
+ transpose_output=False, conjugate_output=False):
2104
+ """Initialize SparseMatrixMatMul"""
2105
+ validator.check_value_type(
2106
+ "transpose_x1", transpose_x1, [bool], self.name)
2107
+ validator.check_value_type(
2108
+ "transpose_x2", transpose_x2, [bool], self.name)
2109
+ validator.check_value_type("adjoint_x1", adjoint_x1, [bool], self.name)
2110
+ validator.check_value_type("adjoint_x2", adjoint_x2, [bool], self.name)
2111
+ validator.check_value_type(
2112
+ "transpose_output", transpose_output, [bool], self.name)
2113
+ validator.check_value_type(
2114
+ "conjugate_output", conjugate_output, [bool], self.name)
2115
+ self.init_prim_io_names(inputs=['x1_dense_shape', 'x1_batch_pointers', 'x1_row_pointers',
2116
+ 'x1_col_indices', 'x1_values', 'x2_dense'], outputs=['y_dense'])
2117
+
2118
+
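+ # Editorial sketch, not part of sparse_ops.py: the CSR-times-dense example above is an
+ # ordinary sparse-dense product, shown here with scipy/numpy (assumed available,
+ # illustration only).
+ import numpy as np
+ from scipy.sparse import csr_matrix
+
+ x1 = csr_matrix(([1.0, 5.0, -1.0, -2.0], [0, 3, 4, 0], [0, 1, 1, 3, 4]), shape=(4, 5))
+ x2 = np.array([[2.0, 0.8, 1.0], [2.9, 3.2, 0.0], [7.0, 4.6, 0.2],
+                [3.5, 4.9, 1.4], [4.0, 3.7, 6.9]], dtype=np.float32)
+ print(x1 @ x2)
+ # approximately (up to float32 rounding):
+ # [[ 2.   0.8  1. ]
+ #  [ 0.   0.   0. ]
+ #  [13.5 20.8  0.1]
+ #  [-4.  -1.6 -2. ]]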
2119
+ class SparseMatrixAdd(Primitive):
2120
+ """
2121
+ Addition of two CSR Tensors: C = alpha * A + beta * B.
2122
+
2123
+ Inputs:
2124
+ - **x1_dense_shape** (Tensor) - A 1-D Tensor represents the dense form shape of the input CSR sparse matrix.
2125
+ - **x1_batch_pointers** (Tensor) - A 1-D Tensor. Supposing the input CSR sparse matrix is of
2126
+ batch size `n`, it should have shape :math:`(n+1,)`, where the `i`-th element stores the
2127
+ accumulated count of non-zero values of the first `i - 1` batches.
2128
+ - **x1_row_pointers** (Tensor) - A 1-D Tensor. Supposing the input CSR sparse matrix is of
2129
+ batch size `n` and row number `m`, it can be divided into `n` parts, each part of length
2130
+ `m + 1`. The `i`-th element of each :math:`(m+1,)` vector stores the accumulated count of
2131
+ non-zero values of the first `i - 1` rows in the corresponding batch.
2132
+ - **x1_col_indices** (Tensor) - A 1-D Tensor. It represents column indices of the non-zero values
2133
+ in the input CSR sparse matrix.
2134
+ - **x1_values** (Tensor) - A 1-D Tensor. It represents all the non-zero values in the input CSR sparse matrix.
2135
+ - **x2_dense_shape** (Tensor) - A Tensor, same meaning as x1_dense_shape.
2136
+ - **x2_batch_pointers** (Tensor) - A Tensor, same meaning as x1_batch_pointers.
2137
+ - **x2_row_pointers** (Tensor) - A Tensor, same meaning as x1_row_pointers.
2138
+ - **x2_col_indices** (Tensor) - A Tensor, same meaning as x1_col_indices.
2139
+ - **x2_values** (Tensor) - A Tensor, same meaning as x1_values.
2140
+ - **alpha** (Tensor) - A Tensor, the coefficient applied to x1.
2141
+ - **beta** (Tensor) - A Tensor, the coefficient applied to x2.
2142
+
2143
+ Outputs:
2144
+ - **y1_dense_shape** (Tensor) - A Tensor.
2145
+ - **y1_batch_pointers** (Tensor) - A Tensor.
2146
+ - **y1_row_pointers** (Tensor) - A Tensor.
2147
+ - **y1_col_indices** (Tensor) - A Tensor.
2148
+ - **y1_values** (Tensor) - A Tensor.
2149
+
2150
+ Supported Platforms:
2151
+ ``GPU`` ``CPU``
2152
+
2153
+ Examples:
2154
+ >>> import mindspore.nn as nn
2155
+ >>> import mindspore.common.dtype as mstype
2156
+ >>> from mindspore import Tensor
2157
+ >>> from mindspore.ops.operations.sparse_ops import SparseMatrixAdd
2158
+ >>> class Net(nn.Cell):
2159
+ ... def __init__(self):
2160
+ ... super(Net, self).__init__()
2161
+ ... self.op = SparseMatrixAdd()
2162
+ ...
2163
+ ... def construct(self, a_shape, a_batch_pointer, a_indptr, a_indices, a_values,
2164
+ ... b_shape, b_batch_pointer, b_indptr, b_indices, b_values, alpha, beta):
2165
+ ... return self.op(a_shape, a_batch_pointer, a_indptr, a_indices, a_values,
2166
+ ... b_shape, b_batch_pointer, b_indptr, b_indices, b_values, alpha, beta)
2167
+ >>> a_indptr = Tensor([0, 1, 2], dtype=mstype.int32)
2168
+ >>> a_indices = Tensor([0, 1], dtype=mstype.int32)
2169
+ >>> a_values = Tensor([1, 2], dtype=mstype.float32)
2170
+ >>> a_pointers = Tensor([0, a_values.shape[0]], dtype=mstype.int32)
2171
+ >>> shape = Tensor([2, 6], dtype=mstype.int32)
2172
+ >>> b_indptr = Tensor([0, 1, 2], dtype=mstype.int32)
2173
+ >>> b_indices = Tensor([0, 1], dtype=mstype.int32)
2174
+ >>> b_values = Tensor([1, 2], dtype=mstype.float32)
2175
+ >>> b_pointers = Tensor([0, b_values.shape[0]], dtype=mstype.int32)
2176
+ >>> alpha = Tensor(1, mstype.float32)
2177
+ >>> beta = Tensor(1, mstype.float32)
2178
+ >>> out = Net()(shape, a_pointers, a_indptr, a_indices, a_values,
2179
+ ... shape, b_pointers, b_indptr, b_indices, b_values, alpha, beta)
2180
+ >>> print(out)
2181
+ (Tensor(shape=[2], dtype=Int32, value= [2, 6]),
2182
+ Tensor(shape=[2], dtype=Int32, value= [0, 2]),
2183
+ Tensor(shape=[3], dtype=Int32, value= [0, 1, 2]),
2184
+ Tensor(shape=[2], dtype=Int32, value= [0, 1]),
2185
+ Tensor(shape=[2], dtype=Float32, value= [2.0, 4.0]))
2186
+ """
2187
+
2188
+ @prim_attr_register
2189
+ def __init__(self):
2190
+ '''Initialize for SparseMatrixAdd'''
2191
+ self.init_prim_io_names(inputs=['x1_dense_shape', 'x1_batch_pointers', 'x1_row_pointers', 'x1_col_indices',
2192
+ 'x1_values', 'x2_dense_shape', 'x2_batch_pointers', 'x2_row_pointers',
2193
+ 'x2_col_indices', 'x2_values', 'alpha', 'beta'],
2194
+ outputs=['y_dense_shape', 'y_batch_pointers', 'y_row_pointers', 'y_col_indices',
2195
+ 'y_values'])
2196
+
2197
+
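+ # Editorial sketch, not part of sparse_ops.py: C = alpha * A + beta * B for the example
+ # above, cross-checked with scipy.sparse (assumed available, illustration only).
+ from scipy.sparse import csr_matrix
+
+ a = csr_matrix(([1.0, 2.0], [0, 1], [0, 1, 2]), shape=(2, 6))
+ b = csr_matrix(([1.0, 2.0], [0, 1], [0, 1, 2]), shape=(2, 6))
+ alpha, beta = 1.0, 1.0
+ c = (alpha * a + beta * b).tocsr()
+ print(c.indptr)   # [0 1 2] -> y_row_pointers
+ print(c.indices)  # [0 1]   -> y_col_indices
+ print(c.data)     # [2. 4.] -> y_values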
2198
+ class SparseSplit(Primitive):
2199
+ """
2200
+ Split a `SparseTensor` into `num_split` tensors along one dimension.
2201
+ If `shape[split_dim]` is not an integer multiple of `num_split`, slices
2202
+ `[0 : shape[split_dim] % num_split]` get one extra dimension.
2203
+
2204
+ Args:
2205
+ num_split (int): An `int` that is `>= 1`. The number of ways to split. Default: ``1`` .
2206
+
2207
+ Inputs:
2208
+ - **split_dim** (Tensor) - A 0-D Tensor of type `int64`.
2209
+ The dimension along which to split. Must be in the range `[0, rank(shape))`.
2210
+ - **indices** (Tensor) - A 2-D Tensor of type `int64`, represents the indices of the sparse tensor.
2211
+ - **values** (Tensor) - A 1-D Tensor, represents the values of the sparse tensor.
2212
+ Support float16, float32, float64, int32, int64, int8, int16, uint8, uint16, uint32,
2213
+ uint64, complex64, complex128, bool.
2214
+ - **shape** (Tensor) - A 1-D Tensor of type `int64`, represents the shape of the sparse tensor.
2215
+
2216
+ Outputs:
2217
+ A tuple of `Tensor` objects (y_indices, y_values, y_shape).
2218
+ - **y_indices** (Tensor) - A 2-D Tensor of type `int64`.
2219
+ - **y_values** (Tensor) - A 1-D Tensor. The type is the same as input Tensor "values".
2220
+ - **y_shape** (Tensor) - A 1-D Tensor of type `int64`.
2221
+
2222
+ Raises:
2223
+ TypeError: If the type of `split_dim` or `indices` or `shape` is not int64.
2224
+ If the type of `values` is not valid.
2225
+ If the type of `num_split` is not int.
2226
+ ValueError: If the num_element of `split_dim` is not 1.
2227
+ If the rank of `values` or `shape` is not 1.
2228
+ If the rank of `indices` is not 2.
2229
+
2230
+ Supported Platforms:
2231
+
2232
+ """
2233
+
2234
+ @prim_attr_register
2235
+ def __init__(self, num_split=1):
2236
+ """Initialize SparseSplit."""
2237
+ self.init_prim_io_names(inputs=['split_dim', 'indices', 'values', 'shape'],
2238
+ outputs=['y_indices', 'y_values', 'y_shape'])
2239
+ validator.check_value_type("num_split", num_split, [int], self.name)
2240
+
2241
+
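+ # Editorial sketch, not part of sparse_ops.py: how the slice sizes along `split_dim` are
+ # derived when `shape[split_dim]` is not a multiple of `num_split` (pure-Python arithmetic
+ # written for illustration only; no platform support is documented for SparseSplit above).
+ def _split_sizes(dim_size, num_split):
+     base, extra = divmod(dim_size, num_split)
+     # the first `dim_size % num_split` slices get one additional element along split_dim
+     return [base + 1 if i < extra else base for i in range(num_split)]
+
+ print(_split_sizes(7, 3))  # [3, 2, 2]
+ print(_split_sizes(6, 3))  # [2, 2, 2]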
2242
+ class SparseMatrixOrderingAMD(Primitive):
2243
+ r"""
2244
+ Computes the Approximate Minimum Degree (AMD) ordering of the input sparse matrix.
2246
+
2247
+ The returned permutation may be used to permute the rows and columns of the given sparse matrix.
2248
+ This typically results in the permuted sparse matrix's sparse Cholesky (or other) decomposition
2249
+ having less fill-in than the decomposition of the original matrix.
2250
+
2251
+ The input sparse matrix may have rank 2 or rank 3. The output Tensor, representing the permutation, would
2252
+ then have rank 1 or 2 respectively, with the same batch shape as the input.
2253
+
2254
+ Each component of the input sparse matrix must represent a square symmetric matrix; only the lower
2255
+ triangular part of the matrix is read. The values of the sparse matrix do not affect the returned
2256
+ permutation, only the sparsity pattern of the sparse matrix is used. Hence, a single AMD ordering may
2257
+ be reused for the Cholesky decompositions of sparse matrices with the same sparsity pattern but
2258
+ with possibly different values.
2259
+
2260
+ Each batch component of the output permutation represents a permutation of `N` elements, where
2261
+ the input sparse matrix components each have `N` rows. That is, the component contains each of the
2262
+ integers :math:`{0, ..., N-1}` exactly once. The `i`-th element represents the row index that the `i`-th
2263
+ row maps to.
2264
+
2265
+ Inputs:
2266
+ - **x_dense_shape** (Tensor) - A 1-D Tensor. It represents the dense form shape of
2267
+ the input CSR sparse matrix x, the shape of which should be :math:`(2,)` or :math:`(3,)`.
2268
+ - **x_batch_pointers** (Tensor) - A 1-D Tensor. Supposing the input CSR sparse matrix x is of
2269
+ batch size `n`, it should have shape :math:`(n+1,)`, where the `i`-th element stores the
2270
+ accumulated count of nonzero values of the first `i - 1` batches.
2271
+ - **x_row_pointers** (Tensor) - A 1-D Tensor. Supposing the input CSR sparse matrix x is of
2272
+ batch size `n` and row number `m`, it can be divided into `n` parts, each part of length
2273
+ `m + 1`. The `i`-th element of each :math:`(m+1,)` vector stores the accumulated count of
2274
+ nonzero values of the first `i - 1` rows in the corresponding batch.
2275
+ - **x_col_indices** (Tensor) - A 1-D Tensor. It represents column indices of the nonzero values
2276
+ in the input CSR sparse matrix x.
2277
+ - **x_values** (Tensor) - A 1-D Tensor. It represents all the nonzero values in the
2278
+ input CSR sparse matrix x.
2279
+
2280
+ Outputs:
2281
+ Tensor, the dtype is int32.
2282
+ If there are n batch within input sparse matrix, the shape is :math:`(n,)`.
2283
+
2284
+ Raises:
2285
+ TypeError: If the dtype of `x_dense_shape` is not int64.
2286
+ TypeError: If the dtype of `x_batch_pointers`, `x_row_pointers` or `x_col_indices` is not int32.
2287
+ TypeError: If the dtype of `x_values` is not supported.
2288
+ TypeError: If any of the inputs is not a tensor.
2289
+ ValueError: If any of the inputs is not 1-D.
2290
+ ValueError: If `x_values` and `x_col_indices` have different length.
2291
+ ValueError: If shape[0] of `x_dense_shape` is not 2 or 3.
2292
+
2293
+ Supported Platforms:
2294
+ ``CPU``
2295
+
2296
+ Examples:
2297
+ >>> from mindspore.ops.operations.sparse_ops import SparseMatrixOrderingAMD
2298
+ >>> dense_shape = Tensor([2, 2], dtype=ms.int64)
2299
+ >>> batch_pointers = Tensor([0, 1], dtype=ms.int32)
2300
+ >>> row_pointers = Tensor([0, 1, 1], dtype=ms.int32)
2301
+ >>> col_indices = Tensor([0], dtype=ms.int32)
2302
+ >>> values = Tensor([99], dtype=ms.float32)
2303
+ >>> sparse_matrix_ordering_amd = SparseMatrixOrderingAMD()
2304
+ >>> output = sparse_matrix_ordering_amd(dense_shape, batch_pointers, row_pointers, col_indices, values)
2305
+ >>> print(output)
2306
+ [0 1]
2307
+ """
2308
+
2309
+ @prim_attr_register
2310
+ def __init__(self):
2311
+ """Initialize SparseMatrixOrderingAMD."""
2312
+ self.init_prim_io_names(inputs=['x_dense_shape', 'x_batch_pointers', 'x_row_pointers',
2313
+ 'x_col_indices', 'x_values'], outputs=['y'])
2314
+
2315
+
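+ # Editorial sketch, not part of sparse_ops.py: how a permutation like the one returned
+ # above would be applied to a dense symmetric matrix with numpy (numpy assumed available;
+ # the matrix and permutation values here are hypothetical, for illustration only).
+ import numpy as np
+
+ a = np.array([[4.0, 1.0, 0.0],
+               [1.0, 3.0, 2.0],
+               [0.0, 2.0, 5.0]])
+ perm = np.array([2, 0, 1])         # hypothetical ordering, not an actual operator output
+ a_perm = a[np.ix_(perm, perm)]     # permute rows and columns symmetrically
+ print(a_perm)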
2316
+ class SparseReshape(Primitive):
2317
+ """
2318
+ Reshapes a SparseTensor to represent values in a new dense shape.
2319
+ This operation has the same semantics as reshape on the represented dense tensor.
2320
+ The `input_indices` are recomputed based on the requested `new_shape`.
2321
+ At most one component of `new_shape` can be -1.
2322
+ Reshaping does not affect the order of values in the SparseTensor.
2323
+
2324
+ Inputs:
2325
+ - **indices** (Tensor) - A 2D Tensor of type int64. The indices of the SparseTensor.
2326
+ The shape is :math:`(n, R)`, where `n` is the number of non-zero values and `R` is the length of `shape`.
2327
+ - **shape** (Tensor) - A 1D Tensor of type int64. The shape of the SparseTensor.
2328
+ - **new_shape** (Tensor) - A 1D Tensor of type int64. The requested new dense shape.
2329
+
2330
+ Outputs:
2331
+ - **y_indices** (Tensor) - A 2D Tensor of type int64. The recomputed indices under the new dense shape.
2332
+ The tensor has the same data type and the same number of rows as `indices`.
2333
+ - **y_shape** (Tensor) - A 1D Tensor of type int64. The resolved new dense shape.
2334
+
2335
+ Raises:
2336
+ TypeError: If the dtype of `indices`, `shape` or `new_shape` is not int64.
2337
+ ValueError: If the shape[1] of `indices` is not equal to the first dimension of `shape`.
2338
+ ValueError: If `indices` is not a 2D Tensor.
2339
+ ValueError: If `shape` is not a 1D Tensor.
2340
+ ValueError: If `new_shape` is not a 1D Tensor.
2341
+ RuntimeError: If the number of inferred-dims(-1) is larger than 1.
2342
+ RuntimeError: If there is any negative value(except -1) in `new_shape`.
2343
+ RuntimeError: If the numbers of elements that `shape` and `new_shape` represent are not equal.
2344
+ RuntimeError: If inferred-dim(-1) in `new_shape` cannot be correctly inferred.
2345
+
2346
+ Supported Platforms:
2347
+ ``Ascend`` ``GPU`` ``CPU``
2348
+
2349
+ Examples:
2350
+ >>> indices = Tensor([[0, 0, 0],
2351
+ ... [0, 0, 1],
2352
+ ... [0, 1, 0],
2353
+ ... [1, 0, 0],
2354
+ ... [1, 2, 3]],
2355
+ ... dtype=mstype.int64)
2356
+ >>> shape = Tensor([2, 3, 6], dtype=mstype.int64)
2357
+ >>> new_shape = Tensor([9, -1], dtype=mstype.int64)
2358
+ >>> sparse_reshape = sparse_ops.SparseReshape()
2359
+ >>> y_indices, y_shape = sparse_reshape(indices, shape, new_shape)
2360
+ >>> print(y_indices)
2361
+ [[0 0]
2362
+ [0 1]
2363
+ [1 2]
2364
+ [4 2]
2365
+ [8 1]]
2366
+ >>> print(y_shape)
2367
+ [9 4]
2368
+ """
2369
+
2370
+ @prim_attr_register
2371
+ def __init__(self):
2372
+ """Initialize SparseReshape."""
2373
+ self.init_prim_io_names(inputs=['indices', 'shape', 'new_shape'], outputs=[
2374
+ 'y_indices', 'y_shape'])
2375
+
2376
+
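+ # Editorial sketch, not part of sparse_ops.py: the index recomputation above is equivalent
+ # to flattening the old indices and unflattening them into the resolved new shape, shown
+ # with numpy (assumed available, illustration only).
+ import numpy as np
+
+ old_indices = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]])
+ old_shape, new_shape = (2, 3, 6), (9, 4)  # [9, -1] resolves to [9, 4]
+ flat = np.ravel_multi_index(tuple(old_indices.T), old_shape)
+ new_indices = np.stack(np.unravel_index(flat, new_shape), axis=1)
+ print(new_indices)
+ # [[0 0]
+ #  [0 1]
+ #  [1 2]
+ #  [4 2]
+ #  [8 1]]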
2377
+ class SparseCountSparseOutput(Primitive):
2378
+ """
2379
+ Performs sparse-output bin counting for a sparse tensor input.
2380
+ Counts the number of times each value occurs in the input.
2381
+
2382
+ Args:
2383
+ binary_output (bool): If ``False`` , output the number of occurrences of each value;
2384
+ if ``True`` , output 1 for the corresponding values. Default: ``False`` .
2385
+ minlength (int): Minimum value to count. Default: ``-1`` .
2386
+ maxlength (int): Maximum value to count. Default: ``-1`` .
2387
+
2388
+ Inputs:
2389
+ - **indices** (Tensor) - Tensor representing the position of the element in the sparse
2390
+ tensor. Support int64, each element value should be a non-negative int number.
2391
+ - **values** (Tensor) - 1-D Tensor, represents the value corresponding to the position
2392
+ in the `indices`. Support int32, int64
2393
+ - **dense_shape** (Tensor) - A Tensor of positive integers which specifies the shape of the sparse
2394
+ tensor, should have 2 elements. Support int64.
2395
+ - **weights** (Tensor) - A Tensor of the same shape as indices containing per-index
2396
+ weight values. Support int32, int64, float32, float64
2397
+
2398
+ Outputs:
2399
+ - **output_indices** (Tensor) - contains the indices of the output sparse tensor
2400
+ - **output_values** (Tensor) - contains the values of the output sparse tensor
2401
+ - **output_dense_shape** (Tensor) - contains the dense shape of the output sparse tensor
2402
+
2403
+ Raises:
2404
+ TypeError: If binary_output is not a bool
2405
+ TypeError: If minlength or maxlength are not integers
2406
+ TypeError: If dtype of indices and dense_shape is not int64
2407
+ TypeError: If dtype of values is neither int32 nor int64
2408
+ TypeError: If dtype of weights is not in int32, int64, float32, float64
2409
+ ValueError: If number of values does not match first dimension of indices
2410
+ ValueError: If number of dense_shape dimensions does not match second dimension of indices
2411
+ ValueError: If the number of dimensions of dense_shape is less than 1
2412
+ RuntimeError: If number of weights is not equal to number of values
2413
+ RuntimeError: If indices are out of bounds of the dense shape
2414
+
2415
+ Examples:
2416
+ >>> from mindspore.ops.operations.sparse_ops import SparseCountSparseOutput
2417
+ >>> indices = Tensor([[1, 2] ,[2, 3], [2, 1], [0, 2]], dtype=mstype.int64)
2418
+ >>> values = Tensor([0, 2, 8, 8], dtype=mstype.int64)
2419
+ >>> dense_shape = Tensor([4, 4], dtype=mstype.int64)
2420
+ >>> weights = Tensor([1, 2, 1, 0], dtype=mstype.int64)
2421
+ >>> sparse_count_sparse_output = SparseCountSparseOutput()
2422
+ >>> out = sparse_count_sparse_output(indices, values, dense_shape, weights)
2423
+ >>> print(out)
2424
+ (Tensor(shape=[4, 2], dtype=Int64, value=
2425
+ [[0, 8],
2426
+ [1, 0],
2427
+ [2, 2],
2428
+ [2, 8]]), Tensor(shape=[4], dtype=Int64, value= [0, 1, 2, 1]), Tensor(shape=[2], dtype=Int64, value= [4, 9]))
2429
+
2430
+ Supported Platforms:
2431
+ ``CPU``
2432
+
2433
+ """
2434
+
2435
+ @prim_attr_register
2436
+ def __init__(self, binary_output=False, minlength=-1, maxlength=-1):
2437
+ self.init_prim_io_names(
2438
+ inputs=["indices", "values", "dense_shape", "weights"],
2439
+ outputs=["output_indices", "output_values", "output_shape"])
2440
+ validator.check_value_type("binary_output", binary_output, [bool], self.name)
2441
+ validator.check_value_type("minlength", minlength, [int], self.name)
2442
+ validator.check_value_type("maxlength", maxlength, [int], self.name)
2443
+
2444
+
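+ # Editorial sketch, not part of sparse_ops.py: the weighted bin counting from the example
+ # above, replayed with a plain dictionary over (row, value) pairs (pure Python,
+ # illustration only).
+ coo_indices = [[1, 2], [2, 3], [2, 1], [0, 2]]
+ coo_values = [0, 2, 8, 8]
+ weights = [1, 2, 1, 0]
+
+ counts = {}
+ for (row, _), v, w in zip(coo_indices, coo_values, weights):
+     counts[(row, v)] = counts.get((row, v), 0) + w
+ out = sorted(counts.items())
+ print([list(k) for k, _ in out])  # [[0, 8], [1, 0], [2, 2], [2, 8]] -> output_indices
+ print([c for _, c in out])        # [0, 1, 2, 1]                     -> output_values
+ print([4, max(coo_values) + 1])   # [4, 9]                           -> output dense shape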
2445
+ class DenseToSparseSetOperation(Primitive):
2446
+ """
2447
+ Applies set operation along last dimension of `x1` and `x2`.
2448
+ Input `x2` is a SparseTensor represented by `x2_indices`, `x2_values`, and `x2_shape`.
2449
+ For `x2` ranked `n`, the first `n-1` dimensions must be the same as `x1`. Dimension `n` contains values in a set;
2450
+ duplicates are allowed but ignored.
2451
+
2452
+ Args:
2453
+ set_operation (str): The type of set operation, supports four kinds of inputs, case insensitive.
2454
+ Default: ``""`` .
2455
+ "a-b": Get the difference set of x1 to x2.
2456
+ "b-a": Get the difference set of x2 to x1.
2457
+ "intersection": Get the intersection set of x2 to x1.
2458
+ "union": Get the union set of x2 to x1.
2459
+ validate_indices (bool): Optional attributes for DenseToSparseSetOperation. Default: ``True`` .
2460
+
2461
+ Inputs:
2462
+ - **x1** (Tensor) - The input tensor `x1` with rank `n`. The first `n-1` dimensions must be the same as `x2`.
2463
+ Dimension `n` contains values in a set, duplicates are allowed but ignored. Must be one of the
2464
+ following types: int8, int16, int32, int64, uint8, uint16.
2465
+ - **x2_indices** (Tensor) - A 2-D Tensor, type int64, indices of a SparseTensor.
2466
+ - **x2_values** (Tensor) - A 1-D Tensor, must have the same type as x1, values of a SparseTensor. Size
2467
+ must be the same as `x2_indices`
2468
+ - **x2_shape** (Tensor) - A 1-D Tensor, type int64, shape of a SparseTensor, must have the same size as
2469
+ the second dimension of `x2_indices`.
2470
+
2471
+ Outputs:
2472
+ y_indices: A Tensor of type int64.
2473
+ y_values: A Tensor. Has the same type as x1.
2474
+ y_shape: A Tensor of type int64 .
2475
+
2476
+ Raises:
2477
+ TypeError: If any input is not Tensor.
2478
+ TypeError: If the dtype of `x2_values` is not the same as `x1`.
2479
+ TypeError: If the dtype of `x2_indices` or `x2_shape` is not int64.
2480
+ ValueError: If the group shapes of `x1` and `x2` do not match each other.
2481
+ ValueError: If the rank of `x1` is less than 2.
2482
+ ValueError: If the rank of `x2_indices` is not equal to 2.
2483
+
2484
+ Supported Platforms:
2485
+ ``Ascend`` ``CPU``
2486
+
2487
+ Examples:
2488
+ >>> from mindspore.ops.operations.sparse_ops import DenseToSparseSetOperation
2489
+ >>> x1 = Tensor([[1, 2], [3, 0], [1, 5]], dtype=ms.int64)
2490
+ >>> x2_indices = Tensor([[0, 1], [0, 2], [1, 2]], dtype=ms.int64)
2491
+ >>> x2_values = Tensor([5, 1, 7],dtype=ms.int64)
2492
+ >>> x2_shape = Tensor([3, 3], dtype=ms.int64)
2493
+ >>> dense_to_sparse_set_operation = DenseToSparseSetOperation(set_operation='intersection')
2494
+ >>> out = dense_to_sparse_set_operation(x1, x2_indices, x2_values, x2_shape)
2495
+ >>> print(out)
2496
+ (Tensor(shape=[1, 2], dtype=Int64, value=
2497
+ [[0, 0]]), Tensor(shape=[1], dtype=Int64, value= [1]), Tensor(shape=[2], dtype=Int64, value= [3, 1]))
2498
+ """
2499
+
2500
+ @prim_attr_register
2501
+ def __init__(self, set_operation="", validate_indices=True):
2502
+ """Initialize DenseToSparseSetOperation."""
2503
+ self.init_prim_io_names(inputs=['x1', 'x2_indices', 'x2_values', 'x2_shape'],
2504
+ outputs=['y_indices', 'y_values', 'y_shape'])
2505
+ self.set_operation = set_operation
2506
+ self.validate_indices = validate_indices
2507
+ self.add_prim_attr('set_operation', self.set_operation)
2508
+ self.add_prim_attr('validate_indices', self.validate_indices)
2509
+
2510
+ validator.check_value_type("set_operation", set_operation, [str], self.name)
2511
+ validator.check_value_type("validate_indices", validate_indices, [bool], self.name)
2512
+
2513
+
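+ # Editorial sketch, not part of sparse_ops.py: the "intersection" example above, replayed
+ # row by row with Python sets (pure Python, illustration only).
+ x1_rows = [[1, 2], [3, 0], [1, 5]]
+ x2_indices = [[0, 1], [0, 2], [1, 2]]
+ x2_values = [5, 1, 7]
+
+ # gather x2's values per row of the (3, 3) SparseTensor
+ x2_rows = {r: set() for r in range(3)}
+ for (r, _), v in zip(x2_indices, x2_values):
+     x2_rows[r].add(v)
+
+ for r, row in enumerate(x1_rows):
+     print(r, sorted(set(row) & x2_rows[r]))
+ # 0 [1]  -> the single result entry (y_indices [[0, 0]], y_values [1])
+ # 1 []
+ # 2 []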
2514
+ class RaggedTensorToTensor(Primitive):
2515
+ r"""
2516
+ Create a dense tensor from a ragged tensor, possibly altering its shape.
2517
+
2518
+ Args:
2519
+ row_partition_types(list(str)): A list of `strings`. The types of the row partition tensors.
2520
+ At present, these can be:
2521
+ "ROW_SPLITS": the row_splits tensor from the ragged tensor.
2522
+ "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
2523
+ "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it is preceded by "FIRST_DIM_SIZE".
2524
+
2525
+ Inputs:
2526
+ - **shape** (Tensor) - A 1-D `Tensor`. Must be one of the following types: `int64`, `int32`.
2527
+ The desired shape of the output tensor.
2528
+ - **values** (Tensor) - A 1-D or higher `Tensor` representing the values of the ragged tensor.
2529
+ - **default_value** (Tensor) - A `Tensor` representing the default value of the ragged tensor.
2530
+ Must have the same type as `values` and fewer dimensions than `values`.
2531
+ - **row_partition_tensors** (list(Tensor)) - A list of at least 1 `Tensor` objects with the same
2532
+ type in: `int64`, `int32`. The row partition tensor is 0-D, 1-D, 1-D, when the row partition type is
2533
+ "FIRST_DIM_SIZE", "VALUE_ROWIDS", "ROW_SPLITS" respectively.
2534
+
2535
+ Outputs:
2536
+ A `Tensor`. Has the same type as `values` and the shape is `shape`.
2537
+
2538
+ Raises:
2539
+ TypeError: If the type of `shape`, `values` or `default_value` is not Tensor.
2540
+ ValueError: If the dimension of `shape` or `values` is not 1.
2541
+ ValueError: If the dimension of `default_value` is more than `values`.
2542
+ ValueError: If the order or value of `row_partition_types` is not supported.
2543
+ RuntimeError: If the value of `row_partition_tensors` is not in ascending order
2544
+ when the `row_partition_types` is "ROW_SPLITS".
2545
+ RuntimeError: If a value rowid is not less than the first dim size
2546
+ when the `row_partition_types` is "FIRST_DIM_SIZE", "VALUE_ROWIDS".
2547
+ ValueError: If row partition size plus `values` rank is not equal to `shape` rank.
2548
+
2549
+ Supported Platforms:
2550
+ ``CPU``
2551
+
2552
+ Examples:
2553
+ >>> from mindspore.ops.operations.sparse_ops import RaggedTensorToTensor
2554
+ >>> shape = Tensor([4, 4], mstype.int32)
2555
+ >>> values = Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9], mstype.int64)
2556
+ >>> default_value = Tensor(0, dtype=mstype.int64)
2557
+ >>> row_partition_tensors_list = []
2558
+ >>> row_partition_tensors = Tensor([0, 3, 3, 7, 9], mstype.int32)
2559
+ >>> row_partition_tensors_list.append(row_partition_tensors)
2560
+ >>> row_partition_types = ["ROW_SPLITS"]
2561
+ >>> ragged_tensor_to_tensor = RaggedTensorToTensor(row_partition_types)
2562
+ >>> out = ragged_tensor_to_tensor(shape, values, default_value, row_partition_tensors_list)
2563
+ >>> print(out)
2564
+ [[1 2 3 0]
2565
+ [0 0 0 0]
2566
+ [4 5 6 7]
2567
+ [8 9 0 0]]
2568
+ """
2569
+
2570
+ @prim_attr_register
2571
+ def __init__(self, row_partition_types):
2572
+ """Initialize RaggedTensorToTensor"""
2573
+ self.init_prim_io_names(inputs=['shape', 'values', 'default_value', 'row_partition_tensors'],
2574
+ outputs=['result'])
2575
+ validator.check_value_type("row_partition_types", row_partition_types, [list], self.name)
2576
+
2577
+ if not row_partition_types:
2578
+ raise ValueError(f"For {self.name}, row_partition_types cannot be empty.")
2579
+
2580
+ for i, item in enumerate(row_partition_types):
2581
+ validator.check_value_type(f"row_partition_types[{i}]", item, [str], self.name)
2582
+
2583
+ valid_values = ("ROW_SPLITS", "FIRST_DIM_SIZE", "VALUE_ROWIDS")
2584
+ if not set(row_partition_types).issubset(valid_values):
2585
+ diff = tuple(set(row_partition_types).difference(valid_values))
2586
+ raise ValueError(
2587
+ f"For {self.name}, row_partition_types only support {valid_values}, "
2588
+ f"but got {diff if len(diff) > 1 else repr(diff[0])}.")
2589
+
2590
+ first_element = valid_values[:2]
2591
+ if row_partition_types[0] not in first_element:
2592
+ raise ValueError(
2593
+ f"For {self.name}, the first element of row_partition_types must be in {first_element}, "
2594
+ f"but got '{row_partition_types[0]}'.")
2595
+
2596
+ if row_partition_types[0] == "FIRST_DIM_SIZE":
2597
+ if set(row_partition_types[1:]) != {"VALUE_ROWIDS"}:
2598
+ raise ValueError(
2599
+ f"For {self.name}, 'VALUE_ROWIDS' must be preceded by 'FIRST_DIM_SIZE' in row_partition_types.")
2600
+ else:
2601
+ if set(row_partition_types) != {"ROW_SPLITS"}:
2602
+ raise ValueError(
2603
+ f"For {self.name}, the each element of row_partition_types must be 'ROW_SPLITS' "
2604
+ f"when row_splits tensor.")
2605
+ self.num_row_partition_tensors = len(row_partition_types)
2606
+ self.add_prim_attr("num_row_partition_tensors", self.num_row_partition_tensors)
2607
+
2608
+
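+ # Editorial sketch, not part of sparse_ops.py: how "ROW_SPLITS" partitions the flat values
+ # and how the default value pads each row to the requested shape, for the example above
+ # (pure Python, illustration only).
+ flat_values = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+ row_splits = [0, 3, 3, 7, 9]
+ out_shape, default = (4, 4), 0
+
+ for start, stop in zip(row_splits[:-1], row_splits[1:]):
+     row = flat_values[start:stop][:out_shape[1]]        # truncate to the target width
+     print(row + [default] * (out_shape[1] - len(row)))  # pad with the default value
+ # [1, 2, 3, 0]
+ # [0, 0, 0, 0]
+ # [4, 5, 6, 7]
+ # [8, 9, 0, 0]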
2609
+ class SparseCross(Primitive):
2610
+ """
2611
+ Generates sparse cross from a list of sparse and dense tensors.
2612
+
2613
+ Args:
2614
+ hashed_output (bool): If true, returns the hash of the cross instead of the string. This allows us
2615
+ to avoid string manipulations.
2616
+ num_buckets (int): An int that is >= 0. It is used if "hashed_output" is true. output = hashed_value % num_buckets
2617
+ if num_buckets > 0 else "hashed_value".
2618
+ hash_key (int): Specify the hash_key that will be used by the "FingerprintCat64" function to combine the
2619
+ fingerprints of the crosses.
2620
+ out_type (mindspore.dtype): The output data type. Defaults to "int64".
2621
+ internal_type (mindspore.dtype): The internal data type; must be int64.
2622
+
2623
+ Inputs:
2624
+ - **indices** (list(Tensor)) - A list of Tensor objects with type int64. 2-D.
2625
+ Indices of each input SparseTensor.
2626
+ - **values** (list(Tensor)) - A list of Tensor objects with types from: int64.
2627
+ 1-D. values of each SparseTensor.
2628
+ - **shapes** (list(Tensor)) - A list with the same length as indices of Tensor objects with type int64.
2629
+ 1-D. Shapes of each SparseTensor.
2630
+ - **dense_inputs** (list(Tensor)) - A list of Tensor objects with types from: int64.
2631
+ 2-D. Columns represented by dense Tensor.
2632
+
2633
+ Outputs:
2634
+ - **output_indices** (Tensor) - A Tensor of type int64. 2-D. Indices of the concatenated SparseTensor.
2635
+ - **output_values** (Tensor) - A Tensor of type "out_type". 1-D.
2636
+ Non-empty values of the concatenated or hashed SparseTensor.
2637
+ - **output_shape** (Tensor) - A Tensor of type int64. 1-D. Shape of the concatenated SparseTensor.
2638
+
2639
+ Raises:
2640
+ TypeError: The indices shape rank is not equal to the shape rank.
2641
+ TypeError: The indices element number is not equal to the value element number.
2642
+ TypeError: The indices shape rank should be 2.
2643
+ TypeError: The dense_inputs shape rank should be 2.
2644
+ TypeError: The shapes rank should be 2.
2645
+
2646
+ Supported Platforms:
2647
+ ``CPU``
2648
+
2649
+ Examples:
2650
+ >>> from mindspore.ops.operations.sparse_ops import SparseCross
2651
+ >>> indice1 = Tensor([[0,0],[1,0],[1,1]], dtype=mstype.int64)
2652
+ >>> value1 = Tensor([1, 2, 3], dtype=mstype.int64)
2653
+ >>> shape1 = Tensor([2, 2], dtype=mstype.int64)
2654
+ >>> dense1 = Tensor([[1],[2]], dtype=mstype.int64)
2655
+ >>> indice2 = Tensor([[0,0],[1,0],[1,1]], dtype=mstype.int64)
2656
+ >>> value2 = Tensor([1, 2, 3], dtype=mstype.int64)
2657
+ >>> shape2 = Tensor([2, 2], dtype=mstype.int64)
2658
+ >>> dense2 = Tensor([[1],[2]], dtype=mstype.int64)
2659
+ >>> indices = [indice1, indice2]
2660
+ >>> values = [value1, value2]
2661
+ >>> shapes = [shape1, shape2]
2662
+ >>> dense_inputs = [dense1, dense2]
2663
+ >>> hashed_output=True
2664
+ >>> hash_key= 2
2665
+ >>> out_type= mstype.int64
2666
+ >>> internal_type = mstype.int64
2667
+ >>> num_buckets=0
2668
+ >>> sparse_cross = SparseCross(hashed_output, hash_key, out_type, internal_type, num_buckets)
2669
+ >>> out = sparse_cross(indices, values, shapes, dense_inputs)
2670
+ >>> print(out)
2671
+ (Tensor(shape=[5, 2], dtype=Int64, value=
2672
+ [[0, 0],
2673
+ [1, 0],
2674
+ [1, 1],
2675
+ [1, 2],
2676
+ [1, 3]]), Tensor(shape=[5], dtype=Int64, value= [1350190460805457680, 6319552725219729347,
2677
+ 4652439303631496997, 7670687697825594049, 174086171018132662]), Tensor(shape=[2], dtype=Int64, value= [2, 4]))
2678
+ """
2679
+
2680
+ @prim_attr_register
2681
+ def __init__(self, hashed_output, hash_key, out_type, internal_type, num_buckets=0):
2682
+ """Initialize SparseCross."""
2683
+ self.init_prim_io_names(inputs=["indices", "values", "shapes", "dense_inputs"],
2684
+ outputs=["output_indices", "output_values", "output_shape"])
2685
+ validator.check_value_type("hashed_output", hashed_output, [bool], self.name)
2686
+ validator.check_value_type("hash_key", hash_key, [int], self.name)
2687
+ validator.check_value_type("out_type", out_type, [mstype.Type], self.name)
2688
+ validator.check_value_type("internal_type", internal_type, [mstype.Type], self.name)
2689
+ validator.check_value_type("num_buckets", num_buckets, [int], self.name)
2690
+
2691
+
2692
+ class RaggedTensorToSparse(Primitive):
2693
+ r"""
2694
+ Converts a RaggedTensor into a SparseTensor with the same values.
2695
+
2696
+ Args:
2697
+ Tsplits (mindspore.dtype): A required attribute, the type of `rt_nested_splits`. Default: `int64`.
2698
+
2699
+ Inputs:
2700
+ - **rt_nested_splits** (list(Tensor)) - A list of at least 1 `Tensor` objects with the same
2701
+ type in: `int64`, `int32`. The row_splits for the RaggedTensor.
2702
+ Ragged splits are in ascending order; the first value of splits must be 0 and the final value of splits
2703
+ must equal the length of `rt_dense_values`.
2704
+ - **rt_dense_values** (Tensor) - A `Tensor`. The flat_values for the RaggedTensor. The rank of values
2705
+ must be greater than 0.
2706
+
2707
+ Outputs:
2708
+ - **sparse_indices** (Tensor) - A `Tensor` of type int64. Contains the indices of the output
2709
+ sparse tensor.
2710
+ - **sparse_values** (Tensor) - A `Tensor`. Has the same type as rt_dense_values.
2711
+ Contains the values of the output sparse tensor.
2712
+ - **sparse_dense_shape** (Tensor) - A `Tensor` of type int64. Contains the dense shape of the
2713
+ output sparse tensor.
2714
+
2715
+ Raises:
2716
+ TypeError: If the type of `Tsplits`, `rt_nested_splits` or `rt_dense_values` is not supported.
2717
+ RuntimeError: If `rt_nested_splits` is not in ascending order.
2718
+ RuntimeError: If the first value of `rt_nested_splits` is not 0.
2719
+ RuntimeError: If the final value of `rt_nested_splits` is not equal to the length of
2720
+ `rt_dense_values`.
2721
+ ValueError: If the rank of `rt_dense_values` is not more than 0.
2722
+
2723
+ Supported Platforms:
2724
+
2725
+
2726
+ Examples:
2727
+ >>> from mindspore.ops.operations.sparse_ops import RaggedTensorToSparse
2728
+ >>> rt_nested_splits = Tensor([0, 3, 3, 5, 6], mstype.int64)
2729
+ >>> rt_dense_values = Tensor([1, 2, 3, 4, 5, 6], mstype.int32)
2730
+ >>> rt_nested_splits_list = []
2731
+ >>> rt_nested_splits_list.append(rt_nested_splits)
2732
+ >>> Tsplits = mstype.int64
2733
+ >>> ragged_tensor_to_sparse = RaggedTensorToSparse(Tsplits)
2734
+ >>> out = ragged_tensor_to_sparse(rt_nested_splits_list, rt_dense_values)
2735
+ >>> print(out)
2736
+ (Tensor(shape=[6, 2], dtype=Int64, value=
2737
+ [[0, 0],
2738
+ [0, 1],
2739
+ [0, 2],
2740
+ [2, 0],
2741
+ [2, 1],
2742
+ [3, 0]]),
2743
+ Tensor(shape=[6], dtype=Int32, value= [1, 2, 3, 4, 5, 6]),
2744
+ Tensor(shape=[2], dtype=Int64, value= [4, 3]))
2745
+ """
2746
+ @prim_attr_register
2747
+ def __init__(self, Tsplits):
2748
+ """Initialize RaggedTensorToSparse."""
2749
+ self.init_prim_io_names(inputs=['rt_nested_splits', 'rt_dense_values'],
2750
+ outputs=['sparse_indices', 'sparse_values', 'sparse_dense_shape'])
2751
+ validator.check_value_type("Tsplits", Tsplits, [mstype.Type], self.name)
2752
+ valid_values = {mstype.int64, mstype.int32}
2753
+ validator.check_type_name("Tsplits", Tsplits, valid_values, self.name)