mindspore-2.4.0-cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
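As an illustration only (not part of the diff tooling itself), a .whl file is a standard zip archive, so a file listing like the one below can be reproduced locally with Python's standard zipfile module. The wheel filename used here is simply the one this page describes; adjust the path as needed.

    # Minimal sketch: enumerate the contents of the wheel described on this page.
    import zipfile

    WHEEL = "mindspore-2.4.0-cp311-cp311-win_amd64.whl"  # path assumed to exist locally

    with zipfile.ZipFile(WHEEL) as whl:
        for info in whl.infolist():
            # Print each archived path with its uncompressed size in bytes.
            print(f"{info.filename}  ({info.file_size} bytes)")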

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (1406)
  1. mindspore/.commit_id +1 -0
  2. mindspore/ConcurrencyCheck.dll +0 -0
  3. mindspore/CppBuildInsights.dll +0 -0
  4. mindspore/CppCoreCheck.dll +0 -0
  5. mindspore/EnumIndex.dll +0 -0
  6. mindspore/EspXEngine.dll +0 -0
  7. mindspore/HResultCheck.dll +0 -0
  8. mindspore/KernelTraceControl.dll +0 -0
  9. mindspore/LocalESPC.dll +0 -0
  10. mindspore/Microsoft.Diagnostics.Tracing.EventSource.dll +0 -0
  11. mindspore/Microsoft.VisualStudio.RemoteControl.dll +0 -0
  12. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  13. mindspore/Microsoft.VisualStudio.Utilities.Internal.dll +0 -0
  14. mindspore/Newtonsoft.Json.dll +0 -0
  15. mindspore/System.Runtime.CompilerServices.Unsafe.dll +0 -0
  16. mindspore/VariantClear.dll +0 -0
  17. mindspore/__init__.py +53 -0
  18. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  19. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  20. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  21. mindspore/_check_jit_forbidden_api.py +106 -0
  22. mindspore/_checkparam.py +1419 -0
  23. mindspore/_extends/__init__.py +23 -0
  24. mindspore/_extends/builtin_operations.py +224 -0
  25. mindspore/_extends/graph_kernel/__init__.py +17 -0
  26. mindspore/_extends/graph_kernel/model/__init__.py +19 -0
  27. mindspore/_extends/graph_kernel/model/graph_parallel.py +311 -0
  28. mindspore/_extends/graph_kernel/model/graph_split.py +1348 -0
  29. mindspore/_extends/graph_kernel/model/model.py +553 -0
  30. mindspore/_extends/graph_kernel/model/model_builder.py +216 -0
  31. mindspore/_extends/graph_kernel/parallel_estimate.py +60 -0
  32. mindspore/_extends/graph_kernel/splitter.py +140 -0
  33. mindspore/_extends/graph_kernel/utils.py +28 -0
  34. mindspore/_extends/parallel_compile/__init__.py +19 -0
  35. mindspore/_extends/parallel_compile/akg_compiler/__init__.py +19 -0
  36. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +269 -0
  37. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +529 -0
  38. mindspore/_extends/parallel_compile/akg_compiler/compiler.py +56 -0
  39. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
  40. mindspore/_extends/parallel_compile/akg_compiler/get_file_path.py +36 -0
  41. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +556 -0
  42. mindspore/_extends/parallel_compile/akg_compiler/util.py +159 -0
  43. mindspore/_extends/parse/__init__.py +49 -0
  44. mindspore/_extends/parse/compile_config.py +299 -0
  45. mindspore/_extends/parse/namespace.py +136 -0
  46. mindspore/_extends/parse/parser.py +1448 -0
  47. mindspore/_extends/parse/resources.py +213 -0
  48. mindspore/_extends/parse/standard_method.py +4475 -0
  49. mindspore/_extends/parse/trope.py +97 -0
  50. mindspore/_extends/pijit/__init__.py +23 -0
  51. mindspore/_extends/pijit/pijit_func_white_list.py +669 -0
  52. mindspore/_extends/remote/__init__.py +19 -0
  53. mindspore/_extends/remote/kernel_build_server.py +199 -0
  54. mindspore/_extends/remote/kernel_build_server_akg.py +55 -0
  55. mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
  56. mindspore/_extends/remote/kernel_build_server_ascend.py +75 -0
  57. mindspore/_extends/utils.py +68 -0
  58. mindspore/_install_custom.py +43 -0
  59. mindspore/_profiler.py +30 -0
  60. mindspore/amp.py +433 -0
  61. mindspore/atlprov.dll +0 -0
  62. mindspore/avcodec-59.dll +0 -0
  63. mindspore/avdevice-59.dll +0 -0
  64. mindspore/avfilter-8.dll +0 -0
  65. mindspore/avformat-59.dll +0 -0
  66. mindspore/avutil-57.dll +0 -0
  67. mindspore/boost/__init__.py +42 -0
  68. mindspore/boost/adasum.py +319 -0
  69. mindspore/boost/base.py +535 -0
  70. mindspore/boost/boost.py +400 -0
  71. mindspore/boost/boost_cell_wrapper.py +790 -0
  72. mindspore/boost/dim_reduce.py +323 -0
  73. mindspore/boost/grad_accumulation.py +79 -0
  74. mindspore/boost/grad_freeze.py +382 -0
  75. mindspore/boost/group_loss_scale_manager.py +166 -0
  76. mindspore/boost/less_batch_normalization.py +174 -0
  77. mindspore/c1.dll +0 -0
  78. mindspore/c1xx.dll +0 -0
  79. mindspore/c2.dll +0 -0
  80. mindspore/cfgpersist.dll +0 -0
  81. mindspore/clang_rt.asan_dbg_dynamic-x86_64.dll +0 -0
  82. mindspore/clang_rt.asan_dynamic-x86_64.dll +0 -0
  83. mindspore/common/__init__.py +86 -0
  84. mindspore/common/_auto_dynamic.py +68 -0
  85. mindspore/common/_decorator.py +50 -0
  86. mindspore/common/_jit_fallback_utils.py +110 -0
  87. mindspore/common/_monad.py +25 -0
  88. mindspore/common/_pijit_context.py +190 -0
  89. mindspore/common/_register_for_adapter.py +74 -0
  90. mindspore/common/_register_for_recompute.py +48 -0
  91. mindspore/common/_register_for_tensor.py +46 -0
  92. mindspore/common/_stub_tensor.py +210 -0
  93. mindspore/common/_tensor_overload.py +139 -0
  94. mindspore/common/_utils.py +122 -0
  95. mindspore/common/api.py +2064 -0
  96. mindspore/common/auto_dynamic_shape.py +507 -0
  97. mindspore/common/dtype.py +422 -0
  98. mindspore/common/dump.py +130 -0
  99. mindspore/common/file_system.py +48 -0
  100. mindspore/common/generator.py +254 -0
  101. mindspore/common/hook_handle.py +143 -0
  102. mindspore/common/initializer.py +880 -0
  103. mindspore/common/jit_config.py +98 -0
  104. mindspore/common/lazy_inline.py +240 -0
  105. mindspore/common/mindir_util.py +111 -0
  106. mindspore/common/mutable.py +234 -0
  107. mindspore/common/no_inline.py +54 -0
  108. mindspore/common/np_dtype.py +25 -0
  109. mindspore/common/parameter.py +1081 -0
  110. mindspore/common/recompute.py +292 -0
  111. mindspore/common/seed.py +260 -0
  112. mindspore/common/sparse_tensor.py +1175 -0
  113. mindspore/common/symbol.py +122 -0
  114. mindspore/common/tensor.py +5039 -0
  115. mindspore/communication/__init__.py +37 -0
  116. mindspore/communication/_comm_helper.py +501 -0
  117. mindspore/communication/_hccl_management.py +297 -0
  118. mindspore/communication/comm_func.py +1395 -0
  119. mindspore/communication/management.py +673 -0
  120. mindspore/config/op_info.config +533 -0
  121. mindspore/context.py +2077 -0
  122. mindspore/d3dcompiler_47.dll +0 -0
  123. mindspore/dataset/__init__.py +90 -0
  124. mindspore/dataset/audio/__init__.py +61 -0
  125. mindspore/dataset/audio/transforms.py +3690 -0
  126. mindspore/dataset/audio/utils.py +386 -0
  127. mindspore/dataset/audio/validators.py +1172 -0
  128. mindspore/dataset/callback/__init__.py +20 -0
  129. mindspore/dataset/callback/ds_callback.py +368 -0
  130. mindspore/dataset/callback/validators.py +32 -0
  131. mindspore/dataset/core/__init__.py +13 -0
  132. mindspore/dataset/core/config.py +1095 -0
  133. mindspore/dataset/core/datatypes.py +101 -0
  134. mindspore/dataset/core/py_util_helpers.py +65 -0
  135. mindspore/dataset/core/validator_helpers.py +781 -0
  136. mindspore/dataset/debug/__init__.py +21 -0
  137. mindspore/dataset/debug/debug_hook.py +97 -0
  138. mindspore/dataset/debug/pre_defined_hook.py +67 -0
  139. mindspore/dataset/engine/__init__.py +124 -0
  140. mindspore/dataset/engine/cache_admin.py +47 -0
  141. mindspore/dataset/engine/cache_client.py +129 -0
  142. mindspore/dataset/engine/datasets.py +4582 -0
  143. mindspore/dataset/engine/datasets_audio.py +911 -0
  144. mindspore/dataset/engine/datasets_standard_format.py +543 -0
  145. mindspore/dataset/engine/datasets_text.py +2161 -0
  146. mindspore/dataset/engine/datasets_user_defined.py +1184 -0
  147. mindspore/dataset/engine/datasets_vision.py +4816 -0
  148. mindspore/dataset/engine/iterators.py +371 -0
  149. mindspore/dataset/engine/obs/__init__.py +23 -0
  150. mindspore/dataset/engine/obs/config_loader.py +68 -0
  151. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +508 -0
  152. mindspore/dataset/engine/obs/util.py +482 -0
  153. mindspore/dataset/engine/offload.py +596 -0
  154. mindspore/dataset/engine/queue.py +304 -0
  155. mindspore/dataset/engine/samplers.py +895 -0
  156. mindspore/dataset/engine/serializer_deserializer.py +159 -0
  157. mindspore/dataset/engine/validators.py +2895 -0
  158. mindspore/dataset/text/__init__.py +51 -0
  159. mindspore/dataset/text/transforms.py +1703 -0
  160. mindspore/dataset/text/utils.py +715 -0
  161. mindspore/dataset/text/validators.py +642 -0
  162. mindspore/dataset/transforms/__init__.py +45 -0
  163. mindspore/dataset/transforms/c_transforms.py +638 -0
  164. mindspore/dataset/transforms/py_transforms.py +393 -0
  165. mindspore/dataset/transforms/py_transforms_util.py +255 -0
  166. mindspore/dataset/transforms/transforms.py +1260 -0
  167. mindspore/dataset/transforms/validators.py +410 -0
  168. mindspore/dataset/utils/__init__.py +19 -0
  169. mindspore/dataset/utils/browse_dataset.py +190 -0
  170. mindspore/dataset/utils/line_reader.py +126 -0
  171. mindspore/dataset/vision/__init__.py +65 -0
  172. mindspore/dataset/vision/c_transforms.py +2641 -0
  173. mindspore/dataset/vision/py_transforms.py +2120 -0
  174. mindspore/dataset/vision/py_transforms_util.py +1660 -0
  175. mindspore/dataset/vision/transforms.py +7295 -0
  176. mindspore/dataset/vision/utils.py +863 -0
  177. mindspore/dataset/vision/validators.py +1483 -0
  178. mindspore/default_config.py +2 -0
  179. mindspore/dnnl.dll +0 -0
  180. mindspore/dpcmi.dll +0 -0
  181. mindspore/experimental/__init__.py +20 -0
  182. mindspore/experimental/es/__init__.py +22 -0
  183. mindspore/experimental/es/embedding_service.py +883 -0
  184. mindspore/experimental/es/embedding_service_layer.py +581 -0
  185. mindspore/experimental/llm_boost/__init__.py +21 -0
  186. mindspore/experimental/llm_boost/atb/__init__.py +23 -0
  187. mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
  188. mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
  189. mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
  190. mindspore/experimental/llm_boost/register.py +129 -0
  191. mindspore/experimental/llm_boost/utils.py +31 -0
  192. mindspore/experimental/map_parameter.py +309 -0
  193. mindspore/experimental/optim/__init__.py +40 -0
  194. mindspore/experimental/optim/adadelta.py +161 -0
  195. mindspore/experimental/optim/adagrad.py +168 -0
  196. mindspore/experimental/optim/adam.py +193 -0
  197. mindspore/experimental/optim/adamax.py +170 -0
  198. mindspore/experimental/optim/adamw.py +290 -0
  199. mindspore/experimental/optim/asgd.py +153 -0
  200. mindspore/experimental/optim/lr_scheduler.py +1371 -0
  201. mindspore/experimental/optim/nadam.py +157 -0
  202. mindspore/experimental/optim/optimizer.py +262 -0
  203. mindspore/experimental/optim/radam.py +194 -0
  204. mindspore/experimental/optim/rmsprop.py +154 -0
  205. mindspore/experimental/optim/rprop.py +164 -0
  206. mindspore/experimental/optim/sgd.py +156 -0
  207. mindspore/hal/__init__.py +40 -0
  208. mindspore/hal/_ascend.py +57 -0
  209. mindspore/hal/_base.py +57 -0
  210. mindspore/hal/_cpu.py +56 -0
  211. mindspore/hal/_gpu.py +57 -0
  212. mindspore/hal/contiguous_tensors_handle.py +175 -0
  213. mindspore/hal/device.py +356 -0
  214. mindspore/hal/event.py +179 -0
  215. mindspore/hal/memory.py +326 -0
  216. mindspore/hal/stream.py +357 -0
  217. mindspore/include/OWNERS +7 -0
  218. mindspore/include/api/allocator.h +97 -0
  219. mindspore/include/api/callback/callback.h +93 -0
  220. mindspore/include/api/callback/ckpt_saver.h +41 -0
  221. mindspore/include/api/callback/loss_monitor.h +33 -0
  222. mindspore/include/api/callback/lr_scheduler.h +51 -0
  223. mindspore/include/api/callback/time_monitor.h +34 -0
  224. mindspore/include/api/callback/train_accuracy.h +37 -0
  225. mindspore/include/api/cell.h +90 -0
  226. mindspore/include/api/cfg.h +82 -0
  227. mindspore/include/api/context.h +602 -0
  228. mindspore/include/api/data_type.h +47 -0
  229. mindspore/include/api/delegate.h +178 -0
  230. mindspore/include/api/delegate_api.h +75 -0
  231. mindspore/include/api/dual_abi_helper.h +208 -0
  232. mindspore/include/api/format.h +28 -0
  233. mindspore/include/api/graph.h +46 -0
  234. mindspore/include/api/kernel.h +58 -0
  235. mindspore/include/api/kernel_api.h +168 -0
  236. mindspore/include/api/metrics/accuracy.h +36 -0
  237. mindspore/include/api/metrics/metrics.h +41 -0
  238. mindspore/include/api/model.h +438 -0
  239. mindspore/include/api/model_group.h +91 -0
  240. mindspore/include/api/model_parallel_runner.h +168 -0
  241. mindspore/include/api/serialization.h +185 -0
  242. mindspore/include/api/status.h +192 -0
  243. mindspore/include/api/types.h +431 -0
  244. mindspore/include/api/visible.h +41 -0
  245. mindspore/include/c_api/context_c.h +179 -0
  246. mindspore/include/c_api/data_type_c.h +52 -0
  247. mindspore/include/c_api/format_c.h +46 -0
  248. mindspore/include/c_api/model_c.h +347 -0
  249. mindspore/include/c_api/status_c.h +79 -0
  250. mindspore/include/c_api/tensor_c.h +146 -0
  251. mindspore/include/c_api/types_c.h +67 -0
  252. mindspore/include/dataset/config.h +163 -0
  253. mindspore/include/dataset/constants.h +363 -0
  254. mindspore/include/dataset/execute.h +196 -0
  255. mindspore/include/dataset/text.h +1092 -0
  256. mindspore/include/dataset/transforms.h +638 -0
  257. mindspore/include/dataset/vision.h +2129 -0
  258. mindspore/include/dataset/vision_ascend.h +206 -0
  259. mindspore/include/dataset/vision_lite.h +625 -0
  260. mindspore/jpeg62.dll +0 -0
  261. mindspore/log.py +633 -0
  262. mindspore/mindrecord/__init__.py +43 -0
  263. mindspore/mindrecord/common/__init__.py +17 -0
  264. mindspore/mindrecord/common/constant.py +20 -0
  265. mindspore/mindrecord/common/enums.py +44 -0
  266. mindspore/mindrecord/common/exceptions.py +311 -0
  267. mindspore/mindrecord/config.py +809 -0
  268. mindspore/mindrecord/filereader.py +174 -0
  269. mindspore/mindrecord/filewriter.py +722 -0
  270. mindspore/mindrecord/mindpage.py +210 -0
  271. mindspore/mindrecord/shardheader.py +141 -0
  272. mindspore/mindrecord/shardindexgenerator.py +74 -0
  273. mindspore/mindrecord/shardreader.py +117 -0
  274. mindspore/mindrecord/shardsegment.py +128 -0
  275. mindspore/mindrecord/shardutils.py +185 -0
  276. mindspore/mindrecord/shardwriter.py +237 -0
  277. mindspore/mindrecord/tools/__init__.py +17 -0
  278. mindspore/mindrecord/tools/cifar10.py +140 -0
  279. mindspore/mindrecord/tools/cifar100.py +153 -0
  280. mindspore/mindrecord/tools/cifar100_to_mr.py +185 -0
  281. mindspore/mindrecord/tools/cifar10_to_mr.py +177 -0
  282. mindspore/mindrecord/tools/csv_to_mr.py +200 -0
  283. mindspore/mindrecord/tools/imagenet_to_mr.py +206 -0
  284. mindspore/mindrecord/tools/mnist_to_mr.py +259 -0
  285. mindspore/mindrecord/tools/tfrecord_to_mr.py +360 -0
  286. mindspore/mindspore_backend.dll +0 -0
  287. mindspore/mindspore_common.dll +0 -0
  288. mindspore/mindspore_core.dll +0 -0
  289. mindspore/mindspore_glog.dll +0 -0
  290. mindspore/mindspore_np_dtype.dll +0 -0
  291. mindspore/mindspore_ops.dll +0 -0
  292. mindspore/mint/__init__.py +1586 -0
  293. mindspore/mint/distributed/__init__.py +31 -0
  294. mindspore/mint/distributed/distributed.py +254 -0
  295. mindspore/mint/linalg/__init__.py +22 -0
  296. mindspore/mint/nn/__init__.py +757 -0
  297. mindspore/mint/nn/functional.py +679 -0
  298. mindspore/mint/nn/layer/__init__.py +39 -0
  299. mindspore/mint/nn/layer/activation.py +133 -0
  300. mindspore/mint/nn/layer/normalization.py +477 -0
  301. mindspore/mint/nn/layer/pooling.py +110 -0
  302. mindspore/mint/optim/__init__.py +24 -0
  303. mindspore/mint/optim/adamw.py +206 -0
  304. mindspore/mint/special/__init__.py +63 -0
  305. mindspore/msobj140.dll +0 -0
  306. mindspore/mspdb140.dll +0 -0
  307. mindspore/mspdbcore.dll +0 -0
  308. mindspore/mspdbst.dll +0 -0
  309. mindspore/mspft140.dll +0 -0
  310. mindspore/msvcdis140.dll +0 -0
  311. mindspore/msvcp140.dll +0 -0
  312. mindspore/msvcp140_1.dll +0 -0
  313. mindspore/msvcp140_2.dll +0 -0
  314. mindspore/msvcp140_atomic_wait.dll +0 -0
  315. mindspore/msvcp140_codecvt_ids.dll +0 -0
  316. mindspore/multiprocessing/__init__.py +73 -0
  317. mindspore/nn/__init__.py +47 -0
  318. mindspore/nn/cell.py +2787 -0
  319. mindspore/nn/dynamic_lr.py +482 -0
  320. mindspore/nn/grad/__init__.py +21 -0
  321. mindspore/nn/grad/cell_grad.py +196 -0
  322. mindspore/nn/layer/__init__.py +63 -0
  323. mindspore/nn/layer/activation.py +1822 -0
  324. mindspore/nn/layer/basic.py +1629 -0
  325. mindspore/nn/layer/channel_shuffle.py +90 -0
  326. mindspore/nn/layer/combined.py +248 -0
  327. mindspore/nn/layer/container.py +734 -0
  328. mindspore/nn/layer/conv.py +1505 -0
  329. mindspore/nn/layer/dense.py +204 -0
  330. mindspore/nn/layer/embedding.py +869 -0
  331. mindspore/nn/layer/image.py +661 -0
  332. mindspore/nn/layer/math.py +1069 -0
  333. mindspore/nn/layer/normalization.py +1273 -0
  334. mindspore/nn/layer/padding.py +880 -0
  335. mindspore/nn/layer/pooling.py +2302 -0
  336. mindspore/nn/layer/rnn_cells.py +388 -0
  337. mindspore/nn/layer/rnns.py +849 -0
  338. mindspore/nn/layer/thor_layer.py +963 -0
  339. mindspore/nn/layer/timedistributed.py +155 -0
  340. mindspore/nn/layer/transformer.py +823 -0
  341. mindspore/nn/learning_rate_schedule.py +512 -0
  342. mindspore/nn/loss/__init__.py +36 -0
  343. mindspore/nn/loss/loss.py +2924 -0
  344. mindspore/nn/metrics.py +53 -0
  345. mindspore/nn/optim/__init__.py +45 -0
  346. mindspore/nn/optim/_dist_optimizer_registry.py +111 -0
  347. mindspore/nn/optim/ada_grad.py +217 -0
  348. mindspore/nn/optim/adadelta.py +206 -0
  349. mindspore/nn/optim/adafactor.py +448 -0
  350. mindspore/nn/optim/adam.py +1297 -0
  351. mindspore/nn/optim/adamax.py +220 -0
  352. mindspore/nn/optim/adasum.py +548 -0
  353. mindspore/nn/optim/asgd.py +216 -0
  354. mindspore/nn/optim/ftrl.py +401 -0
  355. mindspore/nn/optim/lamb.py +296 -0
  356. mindspore/nn/optim/lars.py +202 -0
  357. mindspore/nn/optim/lazyadam.py +533 -0
  358. mindspore/nn/optim/momentum.py +239 -0
  359. mindspore/nn/optim/optimizer.py +1034 -0
  360. mindspore/nn/optim/proximal_ada_grad.py +242 -0
  361. mindspore/nn/optim/rmsprop.py +264 -0
  362. mindspore/nn/optim/rprop.py +251 -0
  363. mindspore/nn/optim/sgd.py +237 -0
  364. mindspore/nn/optim/tft_wrapper.py +127 -0
  365. mindspore/nn/optim/thor.py +1310 -0
  366. mindspore/nn/probability/__init__.py +22 -0
  367. mindspore/nn/probability/bijector/__init__.py +35 -0
  368. mindspore/nn/probability/bijector/bijector.py +337 -0
  369. mindspore/nn/probability/bijector/exp.py +65 -0
  370. mindspore/nn/probability/bijector/gumbel_cdf.py +144 -0
  371. mindspore/nn/probability/bijector/invert.py +126 -0
  372. mindspore/nn/probability/bijector/power_transform.py +196 -0
  373. mindspore/nn/probability/bijector/scalar_affine.py +167 -0
  374. mindspore/nn/probability/bijector/softplus.py +189 -0
  375. mindspore/nn/probability/bnn_layers/__init__.py +29 -0
  376. mindspore/nn/probability/bnn_layers/_util.py +46 -0
  377. mindspore/nn/probability/bnn_layers/bnn_cell_wrapper.py +112 -0
  378. mindspore/nn/probability/bnn_layers/conv_variational.py +267 -0
  379. mindspore/nn/probability/bnn_layers/dense_variational.py +302 -0
  380. mindspore/nn/probability/bnn_layers/layer_distribution.py +123 -0
  381. mindspore/nn/probability/distribution/__init__.py +56 -0
  382. mindspore/nn/probability/distribution/_utils/__init__.py +34 -0
  383. mindspore/nn/probability/distribution/_utils/custom_ops.py +96 -0
  384. mindspore/nn/probability/distribution/_utils/utils.py +362 -0
  385. mindspore/nn/probability/distribution/bernoulli.py +334 -0
  386. mindspore/nn/probability/distribution/beta.py +391 -0
  387. mindspore/nn/probability/distribution/categorical.py +435 -0
  388. mindspore/nn/probability/distribution/cauchy.py +383 -0
  389. mindspore/nn/probability/distribution/distribution.py +827 -0
  390. mindspore/nn/probability/distribution/exponential.py +350 -0
  391. mindspore/nn/probability/distribution/gamma.py +391 -0
  392. mindspore/nn/probability/distribution/geometric.py +335 -0
  393. mindspore/nn/probability/distribution/gumbel.py +257 -0
  394. mindspore/nn/probability/distribution/half_normal.py +133 -0
  395. mindspore/nn/probability/distribution/laplace.py +128 -0
  396. mindspore/nn/probability/distribution/log_normal.py +272 -0
  397. mindspore/nn/probability/distribution/logistic.py +379 -0
  398. mindspore/nn/probability/distribution/normal.py +336 -0
  399. mindspore/nn/probability/distribution/poisson.py +288 -0
  400. mindspore/nn/probability/distribution/student_t.py +149 -0
  401. mindspore/nn/probability/distribution/transformed_distribution.py +235 -0
  402. mindspore/nn/probability/distribution/uniform.py +375 -0
  403. mindspore/nn/reinforcement/__init__.py +24 -0
  404. mindspore/nn/reinforcement/_batch_read_write.py +142 -0
  405. mindspore/nn/reinforcement/_tensors_queue.py +152 -0
  406. mindspore/nn/reinforcement/tensor_array.py +145 -0
  407. mindspore/nn/sparse/__init__.py +23 -0
  408. mindspore/nn/sparse/sparse.py +147 -0
  409. mindspore/nn/wrap/__init__.py +49 -0
  410. mindspore/nn/wrap/cell_wrapper.py +968 -0
  411. mindspore/nn/wrap/grad_reducer.py +608 -0
  412. mindspore/nn/wrap/loss_scale.py +694 -0
  413. mindspore/numpy/__init__.py +121 -0
  414. mindspore/numpy/array_creations.py +2731 -0
  415. mindspore/numpy/array_ops.py +2629 -0
  416. mindspore/numpy/dtypes.py +185 -0
  417. mindspore/numpy/fft.py +966 -0
  418. mindspore/numpy/logic_ops.py +936 -0
  419. mindspore/numpy/math_ops.py +5911 -0
  420. mindspore/numpy/utils.py +214 -0
  421. mindspore/numpy/utils_const.py +565 -0
  422. mindspore/opencv_core452.dll +0 -0
  423. mindspore/opencv_imgcodecs452.dll +0 -0
  424. mindspore/opencv_imgproc452.dll +0 -0
  425. mindspore/ops/__init__.py +56 -0
  426. mindspore/ops/_constants.py +30 -0
  427. mindspore/ops/_grad_experimental/__init__.py +31 -0
  428. mindspore/ops/_grad_experimental/grad_array_ops.py +830 -0
  429. mindspore/ops/_grad_experimental/grad_base.py +143 -0
  430. mindspore/ops/_grad_experimental/grad_comm_ops.py +714 -0
  431. mindspore/ops/_grad_experimental/grad_debug_ops.py +31 -0
  432. mindspore/ops/_grad_experimental/grad_implementations.py +203 -0
  433. mindspore/ops/_grad_experimental/grad_inner_ops.py +79 -0
  434. mindspore/ops/_grad_experimental/grad_math_ops.py +802 -0
  435. mindspore/ops/_grad_experimental/grad_nn_ops.py +231 -0
  436. mindspore/ops/_grad_experimental/grad_quant_ops.py +238 -0
  437. mindspore/ops/_grad_experimental/grad_sparse.py +342 -0
  438. mindspore/ops/_grad_experimental/grad_sparse_ops.py +399 -0
  439. mindspore/ops/_grad_experimental/taylor_rule.py +220 -0
  440. mindspore/ops/_op_impl/__init__.py +23 -0
  441. mindspore/ops/_op_impl/_custom_op/__init__.py +39 -0
  442. mindspore/ops/_op_impl/_custom_op/_basic.py +158 -0
  443. mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +279 -0
  444. mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +156 -0
  445. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +109 -0
  446. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +125 -0
  447. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +105 -0
  448. mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +124 -0
  449. mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +116 -0
  450. mindspore/ops/_op_impl/_custom_op/correction_mul.py +89 -0
  451. mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +196 -0
  452. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +366 -0
  453. mindspore/ops/_op_impl/_custom_op/dsd_impl.py +162 -0
  454. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +136 -0
  455. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +206 -0
  456. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +88 -0
  457. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +128 -0
  458. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +199 -0
  459. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +88 -0
  460. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +156 -0
  461. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +184 -0
  462. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +143 -0
  463. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +169 -0
  464. mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +548 -0
  465. mindspore/ops/_op_impl/_custom_op/img2col_impl.py +881 -0
  466. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +278 -0
  467. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +200 -0
  468. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +334 -0
  469. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +255 -0
  470. mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +222 -0
  471. mindspore/ops/_op_impl/_custom_op/matmul_dds_grad_impl.py +644 -0
  472. mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +488 -0
  473. mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +87 -0
  474. mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +129 -0
  475. mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +121 -0
  476. mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +352 -0
  477. mindspore/ops/_op_impl/aicpu/__init__.py +441 -0
  478. mindspore/ops/_op_impl/aicpu/abs.py +36 -0
  479. mindspore/ops/_op_impl/aicpu/acos.py +32 -0
  480. mindspore/ops/_op_impl/aicpu/acos_grad.py +33 -0
  481. mindspore/ops/_op_impl/aicpu/acosh.py +34 -0
  482. mindspore/ops/_op_impl/aicpu/acosh_grad.py +35 -0
  483. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d.py +34 -0
  484. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
  485. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d.py +39 -0
  486. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d_grad.py +39 -0
  487. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d.py +37 -0
  488. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d_grad.py +37 -0
  489. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d.py +42 -0
  490. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d_grad.py +152 -0
  491. mindspore/ops/_op_impl/aicpu/add.py +43 -0
  492. mindspore/ops/_op_impl/aicpu/add_n.py +41 -0
  493. mindspore/ops/_op_impl/aicpu/add_v2.py +40 -0
  494. mindspore/ops/_op_impl/aicpu/addcdiv.py +41 -0
  495. mindspore/ops/_op_impl/aicpu/addcmul.py +47 -0
  496. mindspore/ops/_op_impl/aicpu/adjust_contrastv2.py +32 -0
  497. mindspore/ops/_op_impl/aicpu/adjust_hue.py +31 -0
  498. mindspore/ops/_op_impl/aicpu/adjust_saturation.py +32 -0
  499. mindspore/ops/_op_impl/aicpu/affine_grid.py +33 -0
  500. mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
  501. mindspore/ops/_op_impl/aicpu/angle.py +31 -0
  502. mindspore/ops/_op_impl/aicpu/arg_max.py +75 -0
  503. mindspore/ops/_op_impl/aicpu/arg_min.py +75 -0
  504. mindspore/ops/_op_impl/aicpu/argmax_with_value.py +43 -0
  505. mindspore/ops/_op_impl/aicpu/argmin_with_value.py +43 -0
  506. mindspore/ops/_op_impl/aicpu/asin.py +32 -0
  507. mindspore/ops/_op_impl/aicpu/asin_grad.py +33 -0
  508. mindspore/ops/_op_impl/aicpu/asinh.py +34 -0
  509. mindspore/ops/_op_impl/aicpu/asinh_grad.py +35 -0
  510. mindspore/ops/_op_impl/aicpu/atanh.py +34 -0
  511. mindspore/ops/_op_impl/aicpu/avgpool_grad_v1.py +37 -0
  512. mindspore/ops/_op_impl/aicpu/avgpool_v1.py +36 -0
  513. mindspore/ops/_op_impl/aicpu/bartlett_window.py +36 -0
  514. mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -0
  515. mindspore/ops/_op_impl/aicpu/batch_norm_grad_grad.py +49 -0
  516. mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
  517. mindspore/ops/_op_impl/aicpu/bessel_i0.py +31 -0
  518. mindspore/ops/_op_impl/aicpu/betainc.py +31 -0
  519. mindspore/ops/_op_impl/aicpu/bias_add.py +44 -0
  520. mindspore/ops/_op_impl/aicpu/bias_add_grad.py +42 -0
  521. mindspore/ops/_op_impl/aicpu/bincount.py +33 -0
  522. mindspore/ops/_op_impl/aicpu/blackman_window.py +36 -0
  523. mindspore/ops/_op_impl/aicpu/broadcast_to.py +58 -0
  524. mindspore/ops/_op_impl/aicpu/bucketize.py +34 -0
  525. mindspore/ops/_op_impl/aicpu/cache_swap_table.py +102 -0
  526. mindspore/ops/_op_impl/aicpu/cast.py +225 -0
  527. mindspore/ops/_op_impl/aicpu/cauchy.py +33 -0
  528. mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
  529. mindspore/ops/_op_impl/aicpu/check_numerics.py +33 -0
  530. mindspore/ops/_op_impl/aicpu/cholesky.py +32 -0
  531. mindspore/ops/_op_impl/aicpu/cholesky_inverse.py +31 -0
  532. mindspore/ops/_op_impl/aicpu/cholesky_solve.py +33 -0
  533. mindspore/ops/_op_impl/aicpu/choleskygrad.py +32 -0
  534. mindspore/ops/_op_impl/aicpu/coalesce.py +37 -0
  535. mindspore/ops/_op_impl/aicpu/col2im.py +38 -0
  536. mindspore/ops/_op_impl/aicpu/combined_non_max_suppression.py +42 -0
  537. mindspore/ops/_op_impl/aicpu/compare_and_bitpack.py +37 -0
  538. mindspore/ops/_op_impl/aicpu/complex.py +32 -0
  539. mindspore/ops/_op_impl/aicpu/complex_abs.py +31 -0
  540. mindspore/ops/_op_impl/aicpu/compute_accidental_hits.py +44 -0
  541. mindspore/ops/_op_impl/aicpu/concat.py +57 -0
  542. mindspore/ops/_op_impl/aicpu/concat_offset.py +42 -0
  543. mindspore/ops/_op_impl/aicpu/concat_offset_v1.py +31 -0
  544. mindspore/ops/_op_impl/aicpu/conj.py +42 -0
  545. mindspore/ops/_op_impl/aicpu/conjugate_transpose.py +58 -0
  546. mindspore/ops/_op_impl/aicpu/cos.py +34 -0
  547. mindspore/ops/_op_impl/aicpu/cosh.py +34 -0
  548. mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
  549. mindspore/ops/_op_impl/aicpu/crop_and_resize.py +69 -0
  550. mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_boxes.py +68 -0
  551. mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_image.py +38 -0
  552. mindspore/ops/_op_impl/aicpu/cross.py +42 -0
  553. mindspore/ops/_op_impl/aicpu/csr_sparse_matrix_to_dense.py +48 -0
  554. mindspore/ops/_op_impl/aicpu/csr_sparse_matrix_to_sparse_tensor.py +51 -0
  555. mindspore/ops/_op_impl/aicpu/ctc_greedy_decoder.py +35 -0
  556. mindspore/ops/_op_impl/aicpu/ctc_loss_v2.py +43 -0
  557. mindspore/ops/_op_impl/aicpu/ctc_loss_v2_grad.py +45 -0
  558. mindspore/ops/_op_impl/aicpu/ctcloss.py +38 -0
  559. mindspore/ops/_op_impl/aicpu/cummax.py +41 -0
  560. mindspore/ops/_op_impl/aicpu/cumprod.py +58 -0
  561. mindspore/ops/_op_impl/aicpu/cumsum.py +58 -0
  562. mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +36 -0
  563. mindspore/ops/_op_impl/aicpu/data_format_vec_permute.py +32 -0
  564. mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
  565. mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
  566. mindspore/ops/_op_impl/aicpu/dense_to_csr_sparse_matrix.py +49 -0
  567. mindspore/ops/_op_impl/aicpu/dense_to_dense_set_operation.py +45 -0
  568. mindspore/ops/_op_impl/aicpu/dense_to_sparse_set_operation.py +48 -0
  569. mindspore/ops/_op_impl/aicpu/depth_to_space.py +44 -0
  570. mindspore/ops/_op_impl/aicpu/diag.py +36 -0
  571. mindspore/ops/_op_impl/aicpu/diag_part.py +36 -0
  572. mindspore/ops/_op_impl/aicpu/diagonal.py +35 -0
  573. mindspore/ops/_op_impl/aicpu/digamma.py +31 -0
  574. mindspore/ops/_op_impl/aicpu/div.py +41 -0
  575. mindspore/ops/_op_impl/aicpu/div_no_nan.py +35 -0
  576. mindspore/ops/_op_impl/aicpu/dropout2d.py +42 -0
  577. mindspore/ops/_op_impl/aicpu/dropout3d.py +42 -0
  578. mindspore/ops/_op_impl/aicpu/dropout_genmask.py +41 -0
  579. mindspore/ops/_op_impl/aicpu/dropout_genmask_v3.py +32 -0
  580. mindspore/ops/_op_impl/aicpu/dynamic_stitch.py +42 -0
  581. mindspore/ops/_op_impl/aicpu/edit_distance.py +56 -0
  582. mindspore/ops/_op_impl/aicpu/eig.py +35 -0
  583. mindspore/ops/_op_impl/aicpu/embedding_lookup.py +102 -0
  584. mindspore/ops/_op_impl/aicpu/end_of_sequence.py +30 -0
  585. mindspore/ops/_op_impl/aicpu/environ_create.py +28 -0
  586. mindspore/ops/_op_impl/aicpu/environ_destroy_all.py +28 -0
  587. mindspore/ops/_op_impl/aicpu/environ_get.py +41 -0
  588. mindspore/ops/_op_impl/aicpu/environ_set.py +40 -0
  589. mindspore/ops/_op_impl/aicpu/eps.py +32 -0
  590. mindspore/ops/_op_impl/aicpu/equal.py +41 -0
  591. mindspore/ops/_op_impl/aicpu/exp.py +37 -0
  592. mindspore/ops/_op_impl/aicpu/expand.py +45 -0
  593. mindspore/ops/_op_impl/aicpu/expand_dims.py +42 -0
  594. mindspore/ops/_op_impl/aicpu/expm1.py +34 -0
  595. mindspore/ops/_op_impl/aicpu/extract_glimpse.py +35 -0
  596. mindspore/ops/_op_impl/aicpu/eye.py +44 -0
  597. mindspore/ops/_op_impl/aicpu/fft_with_size.py +47 -0
  598. mindspore/ops/_op_impl/aicpu/fill_diagonal.py +39 -0
  599. mindspore/ops/_op_impl/aicpu/fill_v2.py +58 -0
  600. mindspore/ops/_op_impl/aicpu/flatten.py +43 -0
  601. mindspore/ops/_op_impl/aicpu/floor_div.py +38 -0
  602. mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
  603. mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
  604. mindspore/ops/_op_impl/aicpu/fractional_avg_pool.py +41 -0
  605. mindspore/ops/_op_impl/aicpu/fractional_avg_pool_grad.py +41 -0
  606. mindspore/ops/_op_impl/aicpu/fractional_max_pool.py +41 -0
  607. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_grad_with_fixed_ksize.py +43 -0
  608. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +65 -0
  609. mindspore/ops/_op_impl/aicpu/fractional_max_pool_grad.py +42 -0
  610. mindspore/ops/_op_impl/aicpu/fractional_max_pool_grad_with_fixed_ksize.py +42 -0
  611. mindspore/ops/_op_impl/aicpu/fractional_max_pool_with_fixed_ksize.py +49 -0
  612. mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
  613. mindspore/ops/_op_impl/aicpu/fused_sparse_adam.py +46 -0
  614. mindspore/ops/_op_impl/aicpu/fused_sparse_ftrl.py +41 -0
  615. mindspore/ops/_op_impl/aicpu/fused_sparse_lazy_adam.py +46 -0
  616. mindspore/ops/_op_impl/aicpu/fused_sparse_proximal_adagrad.py +39 -0
  617. mindspore/ops/_op_impl/aicpu/gamma.py +38 -0
  618. mindspore/ops/_op_impl/aicpu/gather.py +46 -0
  619. mindspore/ops/_op_impl/aicpu/gather_d.py +79 -0
  620. mindspore/ops/_op_impl/aicpu/gather_d_grad_v2.py +79 -0
  621. mindspore/ops/_op_impl/aicpu/gather_grad.py +54 -0
  622. mindspore/ops/_op_impl/aicpu/gather_nd.py +56 -0
  623. mindspore/ops/_op_impl/aicpu/gcd.py +32 -0
  624. mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +38 -0
  625. mindspore/ops/_op_impl/aicpu/geqrf.py +32 -0
  626. mindspore/ops/_op_impl/aicpu/get_next.py +39 -0
  627. mindspore/ops/_op_impl/aicpu/glu.py +33 -0
  628. mindspore/ops/_op_impl/aicpu/glu_grad.py +34 -0
  629. mindspore/ops/_op_impl/aicpu/greater.py +41 -0
  630. mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
  631. mindspore/ops/_op_impl/aicpu/grid_sampler_2d.py +35 -0
  632. mindspore/ops/_op_impl/aicpu/grid_sampler_2d_grad.py +38 -0
  633. mindspore/ops/_op_impl/aicpu/grid_sampler_3d.py +34 -0
  634. mindspore/ops/_op_impl/aicpu/grid_sampler_3d_grad.py +38 -0
  635. mindspore/ops/_op_impl/aicpu/hamming_window.py +57 -0
  636. mindspore/ops/_op_impl/aicpu/hard_sigmoid.py +32 -0
  637. mindspore/ops/_op_impl/aicpu/hard_sigmoid_grad.py +33 -0
  638. mindspore/ops/_op_impl/aicpu/heaviside.py +40 -0
  639. mindspore/ops/_op_impl/aicpu/histogram.py +35 -0
  640. mindspore/ops/_op_impl/aicpu/hsv_to_rgb.py +32 -0
  641. mindspore/ops/_op_impl/aicpu/hypot.py +32 -0
  642. mindspore/ops/_op_impl/aicpu/identity.py +42 -0
  643. mindspore/ops/_op_impl/aicpu/identity_n.py +41 -0
  644. mindspore/ops/_op_impl/aicpu/igamma.py +30 -0
  645. mindspore/ops/_op_impl/aicpu/igammac.py +30 -0
  646. mindspore/ops/_op_impl/aicpu/igammagrada.py +30 -0
  647. mindspore/ops/_op_impl/aicpu/im2col.py +43 -0
  648. mindspore/ops/_op_impl/aicpu/imag.py +31 -0
  649. mindspore/ops/_op_impl/aicpu/index_fill.py +54 -0
  650. mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
  651. mindspore/ops/_op_impl/aicpu/init_data_set_queue.py +27 -0
  652. mindspore/ops/_op_impl/aicpu/inplace_index_add.py +39 -0
  653. mindspore/ops/_op_impl/aicpu/instance_norm_v2.py +41 -0
  654. mindspore/ops/_op_impl/aicpu/instance_norm_v2_grad.py +44 -0
  655. mindspore/ops/_op_impl/aicpu/is_finite.py +40 -0
  656. mindspore/ops/_op_impl/aicpu/is_inf.py +31 -0
  657. mindspore/ops/_op_impl/aicpu/is_nan.py +31 -0
  658. mindspore/ops/_op_impl/aicpu/kldivloss.py +34 -0
  659. mindspore/ops/_op_impl/aicpu/kldivlossgrad.py +35 -0
  660. mindspore/ops/_op_impl/aicpu/layer_norm_grad_grad.py +47 -0
  661. mindspore/ops/_op_impl/aicpu/lcm.py +32 -0
  662. mindspore/ops/_op_impl/aicpu/left_shift.py +38 -0
  663. mindspore/ops/_op_impl/aicpu/less.py +41 -0
  664. mindspore/ops/_op_impl/aicpu/less_equal.py +41 -0
  665. mindspore/ops/_op_impl/aicpu/lgamma.py +33 -0
  666. mindspore/ops/_op_impl/aicpu/linear_sum_assignment.py +57 -0
  667. mindspore/ops/_op_impl/aicpu/linspace.py +33 -0
  668. mindspore/ops/_op_impl/aicpu/list_diff.py +50 -0
  669. mindspore/ops/_op_impl/aicpu/log.py +37 -0
  670. mindspore/ops/_op_impl/aicpu/log1p.py +34 -0
  671. mindspore/ops/_op_impl/aicpu/log_matrix_determinant.py +31 -0
  672. mindspore/ops/_op_impl/aicpu/log_normal_reverse.py +33 -0
  673. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +37 -0
  674. mindspore/ops/_op_impl/aicpu/logical_xor.py +30 -0
  675. mindspore/ops/_op_impl/aicpu/logit.py +33 -0
  676. mindspore/ops/_op_impl/aicpu/logit_grad.py +34 -0
  677. mindspore/ops/_op_impl/aicpu/logspace.py +36 -0
  678. mindspore/ops/_op_impl/aicpu/lower_bound.py +47 -0
  679. mindspore/ops/_op_impl/aicpu/lstsq.py +34 -0
  680. mindspore/ops/_op_impl/aicpu/lu.py +39 -0
  681. mindspore/ops/_op_impl/aicpu/lu_solve.py +32 -0
  682. mindspore/ops/_op_impl/aicpu/lu_unpack.py +114 -0
  683. mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +49 -0
  684. mindspore/ops/_op_impl/aicpu/masked_fill.py +42 -0
  685. mindspore/ops/_op_impl/aicpu/masked_scatter.py +40 -0
  686. mindspore/ops/_op_impl/aicpu/masked_select.py +31 -0
  687. mindspore/ops/_op_impl/aicpu/masked_select_grad.py +35 -0
  688. mindspore/ops/_op_impl/aicpu/matmul.py +39 -0
  689. mindspore/ops/_op_impl/aicpu/matrix_band_part.py +59 -0
  690. mindspore/ops/_op_impl/aicpu/matrix_determinant.py +30 -0
  691. mindspore/ops/_op_impl/aicpu/matrix_diag_part_v3.py +54 -0
  692. mindspore/ops/_op_impl/aicpu/matrix_diag_v3.py +56 -0
  693. mindspore/ops/_op_impl/aicpu/matrix_exp.py +34 -0
  694. mindspore/ops/_op_impl/aicpu/matrix_inverse.py +31 -0
  695. mindspore/ops/_op_impl/aicpu/matrix_logarithm.py +31 -0
  696. mindspore/ops/_op_impl/aicpu/matrix_power.py +37 -0
  697. mindspore/ops/_op_impl/aicpu/matrix_set_diag_v3.py +54 -0
  698. mindspore/ops/_op_impl/aicpu/matrix_solve.py +35 -0
  699. mindspore/ops/_op_impl/aicpu/matrix_solve_ls.py +36 -0
  700. mindspore/ops/_op_impl/aicpu/matrix_triangular_solve.py +36 -0
  701. mindspore/ops/_op_impl/aicpu/max_pool3d_grad_with_argmax.py +60 -0
  702. mindspore/ops/_op_impl/aicpu/max_pool3d_with_argmax.py +59 -0
  703. mindspore/ops/_op_impl/aicpu/max_unpool2d.py +57 -0
  704. mindspore/ops/_op_impl/aicpu/max_unpool2d_grad.py +58 -0
  705. mindspore/ops/_op_impl/aicpu/max_unpool3d.py +57 -0
  706. mindspore/ops/_op_impl/aicpu/max_unpool3d_grad.py +58 -0
  707. mindspore/ops/_op_impl/aicpu/maximum_grad_grad.py +40 -0
  708. mindspore/ops/_op_impl/aicpu/maxpool_grad_v1.py +46 -0
  709. mindspore/ops/_op_impl/aicpu/maxpool_v1.py +42 -0
  710. mindspore/ops/_op_impl/aicpu/median.py +39 -0
  711. mindspore/ops/_op_impl/aicpu/median_grad.py +45 -0
  712. mindspore/ops/_op_impl/aicpu/meshgrid.py +41 -0
  713. mindspore/ops/_op_impl/aicpu/minimum_grad_grad.py +40 -0
  714. mindspore/ops/_op_impl/aicpu/mirror_pad.py +50 -0
  715. mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +48 -0
  716. mindspore/ops/_op_impl/aicpu/mul.py +43 -0
  717. mindspore/ops/_op_impl/aicpu/mul_no_nan.py +42 -0
  718. mindspore/ops/_op_impl/aicpu/multi_margin_loss.py +37 -0
  719. mindspore/ops/_op_impl/aicpu/multi_margin_loss_grad.py +41 -0
  720. mindspore/ops/_op_impl/aicpu/multilabel_margin_loss_grad.py +37 -0
  721. mindspore/ops/_op_impl/aicpu/multinomial.py +47 -0
  722. mindspore/ops/_op_impl/aicpu/multinomial_with_replacement.py +35 -0
  723. mindspore/ops/_op_impl/aicpu/mvlgamma.py +32 -0
  724. mindspore/ops/_op_impl/aicpu/mvlgamma_grad.py +33 -0
  725. mindspore/ops/_op_impl/aicpu/nan_to_num.py +34 -0
  726. mindspore/ops/_op_impl/aicpu/neg.py +36 -0
  727. mindspore/ops/_op_impl/aicpu/nextafter.py +32 -0
  728. mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
  729. mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
  730. mindspore/ops/_op_impl/aicpu/no_repeat_ngram.py +34 -0
  731. mindspore/ops/_op_impl/aicpu/non_deterministic_ints.py +33 -0
  732. mindspore/ops/_op_impl/aicpu/non_max_suppression.py +36 -0
  733. mindspore/ops/_op_impl/aicpu/non_max_suppression_with_overlaps.py +35 -0
  734. mindspore/ops/_op_impl/aicpu/non_zero.py +43 -0
  735. mindspore/ops/_op_impl/aicpu/not_equal.py +39 -0
  736. mindspore/ops/_op_impl/aicpu/nth_element.py +39 -0
  737. mindspore/ops/_op_impl/aicpu/nuclear_norm.py +33 -0
  738. mindspore/ops/_op_impl/aicpu/one_hot.py +116 -0
  739. mindspore/ops/_op_impl/aicpu/ones_like.py +39 -0
  740. mindspore/ops/_op_impl/aicpu/orgqr.py +34 -0
  741. mindspore/ops/_op_impl/aicpu/pad_and_shift.py +33 -0
  742. mindspore/ops/_op_impl/aicpu/pad_v3.py +61 -0
  743. mindspore/ops/_op_impl/aicpu/pad_v3_grad.py +59 -0
  744. mindspore/ops/_op_impl/aicpu/padding.py +41 -0
  745. mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +54 -0
  746. mindspore/ops/_op_impl/aicpu/pdist_grad.py +33 -0
  747. mindspore/ops/_op_impl/aicpu/poisson.py +37 -0
  748. mindspore/ops/_op_impl/aicpu/polar.py +32 -0
  749. mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
  750. mindspore/ops/_op_impl/aicpu/pow.py +39 -0
  751. mindspore/ops/_op_impl/aicpu/print_tensor.py +39 -0
  752. mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +113 -0
  753. mindspore/ops/_op_impl/aicpu/qr.py +36 -0
  754. mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
  755. mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
  756. mindspore/ops/_op_impl/aicpu/ragged_range.py +49 -0
  757. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
  758. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_tensor.py +74 -0
  759. mindspore/ops/_op_impl/aicpu/random_categorical.py +68 -0
  760. mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +36 -0
  761. mindspore/ops/_op_impl/aicpu/random_gamma.py +38 -0
  762. mindspore/ops/_op_impl/aicpu/random_poisson.py +134 -0
  763. mindspore/ops/_op_impl/aicpu/random_shuffle.py +47 -0
  764. mindspore/ops/_op_impl/aicpu/randperm.py +38 -0
  765. mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
  766. mindspore/ops/_op_impl/aicpu/range.py +36 -0
  767. mindspore/ops/_op_impl/aicpu/range_v2.py +35 -0
  768. mindspore/ops/_op_impl/aicpu/real.py +31 -0
  769. mindspore/ops/_op_impl/aicpu/real_div.py +40 -0
  770. mindspore/ops/_op_impl/aicpu/reciprocal.py +34 -0
  771. mindspore/ops/_op_impl/aicpu/reciprocal_grad.py +35 -0
  772. mindspore/ops/_op_impl/aicpu/reduce_mean.py +57 -0
  773. mindspore/ops/_op_impl/aicpu/reduce_prod.py +57 -0
  774. mindspore/ops/_op_impl/aicpu/reduce_sum.py +57 -0
  775. mindspore/ops/_op_impl/aicpu/relu_grad_v3.py +41 -0
  776. mindspore/ops/_op_impl/aicpu/relu_v3.py +38 -0
  777. mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +96 -0
  778. mindspore/ops/_op_impl/aicpu/reshape.py +42 -0
  779. mindspore/ops/_op_impl/aicpu/resize_area.py +40 -0
  780. mindspore/ops/_op_impl/aicpu/resize_bicubic.py +20 -0
  781. mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +19 -0
  782. mindspore/ops/_op_impl/aicpu/resize_bilinear.py +32 -0
  783. mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py +32 -0
  784. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2.py +36 -0
  785. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2_grad.py +35 -0
  786. mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
  787. mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
  788. mindspore/ops/_op_impl/aicpu/reverse_sequence.py +55 -0
  789. mindspore/ops/_op_impl/aicpu/reversev2.py +54 -0
  790. mindspore/ops/_op_impl/aicpu/rgb_to_hsv.py +32 -0
  791. mindspore/ops/_op_impl/aicpu/right_shift.py +38 -0
  792. mindspore/ops/_op_impl/aicpu/rnnt_loss.py +35 -0
  793. mindspore/ops/_op_impl/aicpu/round.py +34 -0
  794. mindspore/ops/_op_impl/aicpu/rsqrt.py +33 -0
  795. mindspore/ops/_op_impl/aicpu/rsqrt_grad.py +36 -0
  796. mindspore/ops/_op_impl/aicpu/sample_distorted_bounding_box_v2.py +49 -0
  797. mindspore/ops/_op_impl/aicpu/scale_and_translate.py +52 -0
  798. mindspore/ops/_op_impl/aicpu/scale_and_translate_grad.py +36 -0
  799. mindspore/ops/_op_impl/aicpu/scatter.py +79 -0
  800. mindspore/ops/_op_impl/aicpu/scatter_add_with_axis.py +53 -0
  801. mindspore/ops/_op_impl/aicpu/scatter_elements.py +39 -0
  802. mindspore/ops/_op_impl/aicpu/scatter_nd.py +59 -0
  803. mindspore/ops/_op_impl/aicpu/scatter_nd_max.py +54 -0
  804. mindspore/ops/_op_impl/aicpu/scatter_nd_min.py +54 -0
  805. mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +59 -0
  806. mindspore/ops/_op_impl/aicpu/search_sorted.py +44 -0
  807. mindspore/ops/_op_impl/aicpu/segment_max.py +52 -0
  808. mindspore/ops/_op_impl/aicpu/segment_mean.py +56 -0
  809. mindspore/ops/_op_impl/aicpu/segment_min.py +52 -0
  810. mindspore/ops/_op_impl/aicpu/segment_prod.py +56 -0
  811. mindspore/ops/_op_impl/aicpu/segment_sum.py +56 -0
  812. mindspore/ops/_op_impl/aicpu/select.py +45 -0
  813. mindspore/ops/_op_impl/aicpu/self_adjoint_eig.py +34 -0
  814. mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
  815. mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
  816. mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
  817. mindspore/ops/_op_impl/aicpu/sequence_concat.py +40 -0
  818. mindspore/ops/_op_impl/aicpu/sequence_stack.py +40 -0
  819. mindspore/ops/_op_impl/aicpu/set_size.py +38 -0
  820. mindspore/ops/_op_impl/aicpu/sign.py +36 -0
  821. mindspore/ops/_op_impl/aicpu/sin.py +34 -0
  822. mindspore/ops/_op_impl/aicpu/sinc.py +43 -0
  823. mindspore/ops/_op_impl/aicpu/sinh.py +34 -0
  824. mindspore/ops/_op_impl/aicpu/slice.py +59 -0
  825. mindspore/ops/_op_impl/aicpu/slice_grad.py +76 -0
  826. mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
  827. mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
  828. mindspore/ops/_op_impl/aicpu/sort.py +39 -0
  829. mindspore/ops/_op_impl/aicpu/space_to_depth.py +44 -0
  830. mindspore/ops/_op_impl/aicpu/sparse_addmm.py +87 -0
  831. mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +80 -0
  832. mindspore/ops/_op_impl/aicpu/sparse_apply_centered_rms_prop.py +105 -0
  833. mindspore/ops/_op_impl/aicpu/sparse_apply_momentum.py +80 -0
  834. mindspore/ops/_op_impl/aicpu/sparse_apply_proximal_gradient_descent.py +79 -0
  835. mindspore/ops/_op_impl/aicpu/sparse_concat.py +59 -0
  836. mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
  837. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_add.py +58 -0
  838. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_div.py +58 -0
  839. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_mul.py +58 -0
  840. mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows.py +63 -0
  841. mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows_grad.py +45 -0
  842. mindspore/ops/_op_impl/aicpu/sparse_matrix_mat_mul.py +56 -0
  843. mindspore/ops/_op_impl/aicpu/sparse_matrix_nnz.py +81 -0
  844. mindspore/ops/_op_impl/aicpu/sparse_matrix_transpose.py +116 -0
  845. mindspore/ops/_op_impl/aicpu/sparse_reorder.py +56 -0
  846. mindspore/ops/_op_impl/aicpu/sparse_reshape.py +34 -0
  847. mindspore/ops/_op_impl/aicpu/sparse_segment_mean_grad.py +36 -0
  848. mindspore/ops/_op_impl/aicpu/sparse_segment_mean_with_num_segments.py +44 -0
  849. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n.py +43 -0
  850. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n_grad.py +38 -0
  851. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n_with_num_segments.py +44 -0
  852. mindspore/ops/_op_impl/aicpu/sparse_segment_sum.py +49 -0
  853. mindspore/ops/_op_impl/aicpu/sparse_segment_sum_with_num_segments.py +68 -0
  854. mindspore/ops/_op_impl/aicpu/sparse_slice.py +63 -0
  855. mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +61 -0
  856. mindspore/ops/_op_impl/aicpu/sparse_softmax.py +33 -0
  857. mindspore/ops/_op_impl/aicpu/sparse_softmax_cross_entropy_with_logits_v2.py +35 -0
  858. mindspore/ops/_op_impl/aicpu/sparse_sparse_maximum.py +53 -0
  859. mindspore/ops/_op_impl/aicpu/sparse_sparse_minimum.py +53 -0
  860. mindspore/ops/_op_impl/aicpu/sparse_tensor_dense_add.py +84 -0
  861. mindspore/ops/_op_impl/aicpu/sparse_tensor_dense_mat_mul.py +190 -0
  862. mindspore/ops/_op_impl/aicpu/sparse_tensor_to_csr_sparse_matrix.py +51 -0
  863. mindspore/ops/_op_impl/aicpu/sparse_to_dense_v2.py +73 -0
  864. mindspore/ops/_op_impl/aicpu/split.py +45 -0
  865. mindspore/ops/_op_impl/aicpu/sqrt.py +34 -0
  866. mindspore/ops/_op_impl/aicpu/sqrt_grad.py +35 -0
  867. mindspore/ops/_op_impl/aicpu/square.py +35 -0
  868. mindspore/ops/_op_impl/aicpu/squared_difference.py +37 -0
  869. mindspore/ops/_op_impl/aicpu/squeeze.py +42 -0
  870. mindspore/ops/_op_impl/aicpu/sspaddmm.py +97 -0
  871. mindspore/ops/_op_impl/aicpu/stack.py +45 -0
  872. mindspore/ops/_op_impl/aicpu/stack_push_pop.py +87 -0
  873. mindspore/ops/_op_impl/aicpu/standard_laplace.py +34 -0
  874. mindspore/ops/_op_impl/aicpu/standard_normal.py +34 -0
  875. mindspore/ops/_op_impl/aicpu/stateless_dropout_genmask.py +37 -0
  876. mindspore/ops/_op_impl/aicpu/stft.py +70 -0
  877. mindspore/ops/_op_impl/aicpu/strided_slice.py +43 -0
  878. mindspore/ops/_op_impl/aicpu/strided_slice_grad.py +50 -0
  879. mindspore/ops/_op_impl/aicpu/sub.py +41 -0
  880. mindspore/ops/_op_impl/aicpu/sub_and_filter.py +36 -0
  881. mindspore/ops/_op_impl/aicpu/tan.py +34 -0
  882. mindspore/ops/_op_impl/aicpu/tanh.py +34 -0
  883. mindspore/ops/_op_impl/aicpu/tanh_grad.py +35 -0
  884. mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
  885. mindspore/ops/_op_impl/aicpu/tile.py +56 -0
  886. mindspore/ops/_op_impl/aicpu/topk.py +34 -0
  887. mindspore/ops/_op_impl/aicpu/trace.py +40 -0
  888. mindspore/ops/_op_impl/aicpu/tracegrad.py +41 -0
  889. mindspore/ops/_op_impl/aicpu/trans_data.py +35 -0
  890. mindspore/ops/_op_impl/aicpu/transpose.py +58 -0
  891. mindspore/ops/_op_impl/aicpu/tridiagonal_matmul.py +42 -0
  892. mindspore/ops/_op_impl/aicpu/tridiagonal_solve.py +35 -0
  893. mindspore/ops/_op_impl/aicpu/tril.py +42 -0
  894. mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
  895. mindspore/ops/_op_impl/aicpu/triplet_margin_loss.py +62 -0
  896. mindspore/ops/_op_impl/aicpu/triu.py +43 -0
  897. mindspore/ops/_op_impl/aicpu/triu_indices.py +34 -0
  898. mindspore/ops/_op_impl/aicpu/truncated_normal.py +39 -0
  899. mindspore/ops/_op_impl/aicpu/uniform.py +36 -0
  900. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +41 -0
  901. mindspore/ops/_op_impl/aicpu/uniform_int.py +36 -0
  902. mindspore/ops/_op_impl/aicpu/uniform_real.py +33 -0
  903. mindspore/ops/_op_impl/aicpu/unique.py +31 -0
  904. mindspore/ops/_op_impl/aicpu/unique_consecutive.py +47 -0
  905. mindspore/ops/_op_impl/aicpu/unique_with_pad.py +32 -0
  906. mindspore/ops/_op_impl/aicpu/unravel_index.py +32 -0
  907. mindspore/ops/_op_impl/aicpu/unsorted_segment_prod.py +53 -0
  908. mindspore/ops/_op_impl/aicpu/unsorted_segment_sum.py +57 -0
  909. mindspore/ops/_op_impl/aicpu/unstack.py +45 -0
  910. mindspore/ops/_op_impl/aicpu/update_cache.py +44 -0
  911. mindspore/ops/_op_impl/aicpu/upper_bound.py +47 -0
  912. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d.py +42 -0
  913. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d_grad.py +49 -0
  914. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d.py +40 -0
  915. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d_grad.py +50 -0
  916. mindspore/ops/_op_impl/aicpu/xdivy.py +35 -0
  917. mindspore/ops/_op_impl/aicpu/xlogy.py +33 -0
  918. mindspore/ops/_op_impl/aicpu/zeros_like.py +42 -0
  919. mindspore/ops/_op_impl/aicpu/zeta.py +31 -0
  920. mindspore/ops/_op_impl/akg/__init__.py +19 -0
  921. mindspore/ops/_op_impl/akg/ascend/__init__.py +48 -0
  922. mindspore/ops/_op_impl/akg/ascend/abs.py +35 -0
  923. mindspore/ops/_op_impl/akg/ascend/add.py +42 -0
  924. mindspore/ops/_op_impl/akg/ascend/add_n.py +37 -0
  925. mindspore/ops/_op_impl/akg/ascend/batchmatmul.py +33 -0
  926. mindspore/ops/_op_impl/akg/ascend/cast.py +46 -0
  927. mindspore/ops/_op_impl/akg/ascend/equal.py +35 -0
  928. mindspore/ops/_op_impl/akg/ascend/exp.py +35 -0
  929. mindspore/ops/_op_impl/akg/ascend/expand_dims.py +33 -0
  930. mindspore/ops/_op_impl/akg/ascend/greater.py +34 -0
  931. mindspore/ops/_op_impl/akg/ascend/greater_equal.py +35 -0
  932. mindspore/ops/_op_impl/akg/ascend/less.py +31 -0
  933. mindspore/ops/_op_impl/akg/ascend/less_equal.py +35 -0
  934. mindspore/ops/_op_impl/akg/ascend/load_im2col.py +33 -0
  935. mindspore/ops/_op_impl/akg/ascend/log.py +34 -0
  936. mindspore/ops/_op_impl/akg/ascend/maximum.py +36 -0
  937. mindspore/ops/_op_impl/akg/ascend/minimum.py +39 -0
  938. mindspore/ops/_op_impl/akg/ascend/mul.py +41 -0
  939. mindspore/ops/_op_impl/akg/ascend/neg.py +37 -0
  940. mindspore/ops/_op_impl/akg/ascend/pow.py +35 -0
  941. mindspore/ops/_op_impl/akg/ascend/prod_force_se_a.py +33 -0
  942. mindspore/ops/_op_impl/akg/ascend/real_div.py +36 -0
  943. mindspore/ops/_op_impl/akg/ascend/reciprocal.py +32 -0
  944. mindspore/ops/_op_impl/akg/ascend/reduce_max.py +32 -0
  945. mindspore/ops/_op_impl/akg/ascend/reduce_min.py +32 -0
  946. mindspore/ops/_op_impl/akg/ascend/reduce_sum.py +37 -0
  947. mindspore/ops/_op_impl/akg/ascend/rsqrt.py +35 -0
  948. mindspore/ops/_op_impl/akg/ascend/select.py +37 -0
  949. mindspore/ops/_op_impl/akg/ascend/sqrt.py +35 -0
  950. mindspore/ops/_op_impl/akg/ascend/square.py +35 -0
  951. mindspore/ops/_op_impl/akg/ascend/sub.py +42 -0
  952. mindspore/ops/_op_impl/akg/cpu/__init__.py +23 -0
  953. mindspore/ops/_op_impl/akg/cpu/coo2csr.py +29 -0
  954. mindspore/ops/_op_impl/akg/cpu/csr2coo.py +29 -0
  955. mindspore/ops/_op_impl/akg/cpu/csr_gather.py +33 -0
  956. mindspore/ops/_op_impl/akg/cpu/csr_mm.py +34 -0
  957. mindspore/ops/_op_impl/akg/cpu/csr_mul.py +33 -0
  958. mindspore/ops/_op_impl/akg/cpu/csr_mv.py +33 -0
  959. mindspore/ops/_op_impl/akg/cpu/csr_reduce_sum.py +31 -0
  960. mindspore/ops/_op_impl/akg/gpu/__init__.py +24 -0
  961. mindspore/ops/_op_impl/akg/gpu/coo2csr.py +29 -0
  962. mindspore/ops/_op_impl/akg/gpu/csr2coo.py +29 -0
  963. mindspore/ops/_op_impl/akg/gpu/csr_div.py +36 -0
  964. mindspore/ops/_op_impl/akg/gpu/csr_gather.py +33 -0
  965. mindspore/ops/_op_impl/akg/gpu/csr_mm.py +37 -0
  966. mindspore/ops/_op_impl/akg/gpu/csr_mul.py +36 -0
  967. mindspore/ops/_op_impl/akg/gpu/csr_mv.py +36 -0
  968. mindspore/ops/_op_impl/akg/gpu/csr_reduce_sum.py +33 -0
  969. mindspore/ops/_op_impl/cpu/__init__.py +78 -0
  970. mindspore/ops/_op_impl/cpu/adam.py +49 -0
  971. mindspore/ops/_op_impl/cpu/adam_weight_decay.py +47 -0
  972. mindspore/ops/_op_impl/cpu/arg_max.py +30 -0
  973. mindspore/ops/_op_impl/cpu/arg_max_with_value.py +31 -0
  974. mindspore/ops/_op_impl/cpu/arg_min_with_value.py +31 -0
  975. mindspore/ops/_op_impl/cpu/buffer_append.py +28 -0
  976. mindspore/ops/_op_impl/cpu/buffer_get.py +28 -0
  977. mindspore/ops/_op_impl/cpu/buffer_sample.py +28 -0
  978. mindspore/ops/_op_impl/cpu/cast.py +171 -0
  979. mindspore/ops/_op_impl/cpu/concat_offset.py +38 -0
  980. mindspore/ops/_op_impl/cpu/conv2d.py +30 -0
  981. mindspore/ops/_op_impl/cpu/conv3d.py +30 -0
  982. mindspore/ops/_op_impl/cpu/div.py +32 -0
  983. mindspore/ops/_op_impl/cpu/dropout.py +31 -0
  984. mindspore/ops/_op_impl/cpu/dropout_grad.py +30 -0
  985. mindspore/ops/_op_impl/cpu/dynamic_shape.py +42 -0
  986. mindspore/ops/_op_impl/cpu/dynamic_stitch.py +41 -0
  987. mindspore/ops/_op_impl/cpu/equal_count.py +30 -0
  988. mindspore/ops/_op_impl/cpu/gather_d.py +49 -0
  989. mindspore/ops/_op_impl/cpu/gather_d_grad.py +38 -0
  990. mindspore/ops/_op_impl/cpu/gather_d_grad_v2.py +40 -0
  991. mindspore/ops/_op_impl/cpu/gather_v2.py +40 -0
  992. mindspore/ops/_op_impl/cpu/hsigmoid.py +33 -0
  993. mindspore/ops/_op_impl/cpu/hsigmoid_grad.py +34 -0
  994. mindspore/ops/_op_impl/cpu/hswish.py +32 -0
  995. mindspore/ops/_op_impl/cpu/hswish_grad.py +33 -0
  996. mindspore/ops/_op_impl/cpu/identity_n.py +40 -0
  997. mindspore/ops/_op_impl/cpu/is_finite.py +39 -0
  998. mindspore/ops/_op_impl/cpu/l2loss.py +30 -0
  999. mindspore/ops/_op_impl/cpu/layer_norm.py +36 -0
  1000. mindspore/ops/_op_impl/cpu/layer_norm_grad.py +38 -0
  1001. mindspore/ops/_op_impl/cpu/maximum.py +35 -0
  1002. mindspore/ops/_op_impl/cpu/maximum_grad.py +47 -0
  1003. mindspore/ops/_op_impl/cpu/minimum.py +40 -0
  1004. mindspore/ops/_op_impl/cpu/minimum_grad.py +51 -0
  1005. mindspore/ops/_op_impl/cpu/mirror_pad.py +36 -0
  1006. mindspore/ops/_op_impl/cpu/mirror_pad_grad.py +36 -0
  1007. mindspore/ops/_op_impl/cpu/mul.py +32 -0
  1008. mindspore/ops/_op_impl/cpu/one_hot.py +31 -0
  1009. mindspore/ops/_op_impl/cpu/pad.py +32 -0
  1010. mindspore/ops/_op_impl/cpu/pow.py +32 -0
  1011. mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +42 -0
  1012. mindspore/ops/_op_impl/cpu/pyexecute.py +29 -0
  1013. mindspore/ops/_op_impl/cpu/pyfunc.py +29 -0
  1014. mindspore/ops/_op_impl/cpu/range.py +34 -0
  1015. mindspore/ops/_op_impl/cpu/real_div.py +33 -0
  1016. mindspore/ops/_op_impl/cpu/reduce_all.py +29 -0
  1017. mindspore/ops/_op_impl/cpu/reduce_any.py +29 -0
  1018. mindspore/ops/_op_impl/cpu/reduce_max.py +32 -0
  1019. mindspore/ops/_op_impl/cpu/reduce_mean.py +40 -0
  1020. mindspore/ops/_op_impl/cpu/reduce_min.py +32 -0
  1021. mindspore/ops/_op_impl/cpu/reduce_prod.py +40 -0
  1022. mindspore/ops/_op_impl/cpu/reduce_std.py +31 -0
  1023. mindspore/ops/_op_impl/cpu/reduce_sum.py +41 -0
  1024. mindspore/ops/_op_impl/cpu/space_to_batch_nd.py +38 -0
  1025. mindspore/ops/_op_impl/cpu/sparse_slice.py +62 -0
  1026. mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +60 -0
  1027. mindspore/ops/_op_impl/cpu/split.py +34 -0
  1028. mindspore/ops/_op_impl/cpu/sspaddmm.py +95 -0
  1029. mindspore/ops/_op_impl/cpu/stack.py +38 -0
  1030. mindspore/ops/_op_impl/cpu/sub.py +32 -0
  1031. mindspore/ops/_op_impl/cpu/tensor_copy_slices.py +41 -0
  1032. mindspore/ops/_op_impl/cpu/tile.py +37 -0
  1033. mindspore/ops/_op_impl/cpu/top_k.py +31 -0
  1034. mindspore/ops/_op_impl/cpu/transpose.py +39 -0
  1035. mindspore/ops/_primitive_cache.py +90 -0
  1036. mindspore/ops/_register_for_op.py +73 -0
  1037. mindspore/ops/_utils/__init__.py +20 -0
  1038. mindspore/ops/_utils/utils.py +147 -0
  1039. mindspore/ops/_vmap/__init__.py +25 -0
  1040. mindspore/ops/_vmap/vmap_array_ops.py +2149 -0
  1041. mindspore/ops/_vmap/vmap_base.py +533 -0
  1042. mindspore/ops/_vmap/vmap_convolution_ops.py +441 -0
  1043. mindspore/ops/_vmap/vmap_debug_ops.py +50 -0
  1044. mindspore/ops/_vmap/vmap_grad_math_ops.py +274 -0
  1045. mindspore/ops/_vmap/vmap_grad_nn_ops.py +806 -0
  1046. mindspore/ops/_vmap/vmap_image_ops.py +194 -0
  1047. mindspore/ops/_vmap/vmap_math_ops.py +993 -0
  1048. mindspore/ops/_vmap/vmap_nn_ops.py +2250 -0
  1049. mindspore/ops/_vmap/vmap_other_ops.py +105 -0
  1050. mindspore/ops/_vmap/vmap_random_ops.py +122 -0
  1051. mindspore/ops/_vmap/vmap_sparse_ops.py +89 -0
  1052. mindspore/ops/auto_generate/__init__.py +31 -0
  1053. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +309 -0
  1054. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +252 -0
  1055. mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
  1056. mindspore/ops/auto_generate/gen_extend_func.py +1701 -0
  1057. mindspore/ops/auto_generate/gen_ops_def.py +8482 -0
  1058. mindspore/ops/auto_generate/gen_ops_prim.py +16704 -0
  1059. mindspore/ops/auto_generate/pyboost_inner_prim.py +549 -0
  1060. mindspore/ops/composite/__init__.py +71 -0
  1061. mindspore/ops/composite/base.py +1318 -0
  1062. mindspore/ops/composite/env_ops.py +41 -0
  1063. mindspore/ops/composite/math_ops.py +125 -0
  1064. mindspore/ops/composite/multitype_ops/__init__.py +77 -0
  1065. mindspore/ops/composite/multitype_ops/_compile_utils.py +1459 -0
  1066. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +897 -0
  1067. mindspore/ops/composite/multitype_ops/add_impl.py +606 -0
  1068. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +56 -0
  1069. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +56 -0
  1070. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +56 -0
  1071. mindspore/ops/composite/multitype_ops/div_impl.py +189 -0
  1072. mindspore/ops/composite/multitype_ops/equal_impl.py +335 -0
  1073. mindspore/ops/composite/multitype_ops/floordiv_impl.py +88 -0
  1074. mindspore/ops/composite/multitype_ops/getitem_impl.py +400 -0
  1075. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +109 -0
  1076. mindspore/ops/composite/multitype_ops/greater_impl.py +110 -0
  1077. mindspore/ops/composite/multitype_ops/in_impl.py +196 -0
  1078. mindspore/ops/composite/multitype_ops/left_shift_impl.py +37 -0
  1079. mindspore/ops/composite/multitype_ops/less_equal_impl.py +111 -0
  1080. mindspore/ops/composite/multitype_ops/less_impl.py +112 -0
  1081. mindspore/ops/composite/multitype_ops/logic_not_impl.py +113 -0
  1082. mindspore/ops/composite/multitype_ops/logical_and_impl.py +60 -0
  1083. mindspore/ops/composite/multitype_ops/logical_or_impl.py +61 -0
  1084. mindspore/ops/composite/multitype_ops/mod_impl.py +86 -0
  1085. mindspore/ops/composite/multitype_ops/mul_impl.py +294 -0
  1086. mindspore/ops/composite/multitype_ops/negative_impl.py +79 -0
  1087. mindspore/ops/composite/multitype_ops/not_equal_impl.py +290 -0
  1088. mindspore/ops/composite/multitype_ops/not_in_impl.py +196 -0
  1089. mindspore/ops/composite/multitype_ops/ones_like_impl.py +96 -0
  1090. mindspore/ops/composite/multitype_ops/pow_impl.py +87 -0
  1091. mindspore/ops/composite/multitype_ops/right_shift_impl.py +37 -0
  1092. mindspore/ops/composite/multitype_ops/setitem_impl.py +884 -0
  1093. mindspore/ops/composite/multitype_ops/sub_impl.py +116 -0
  1094. mindspore/ops/composite/multitype_ops/uadd_impl.py +29 -0
  1095. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +228 -0
  1096. mindspore/ops/deprecated.py +315 -0
  1097. mindspore/ops/function/__init__.py +782 -0
  1098. mindspore/ops/function/array_func.py +7226 -0
  1099. mindspore/ops/function/clip_func.py +384 -0
  1100. mindspore/ops/function/debug_func.py +181 -0
  1101. mindspore/ops/function/fft_func.py +44 -0
  1102. mindspore/ops/function/grad/__init__.py +34 -0
  1103. mindspore/ops/function/grad/grad_func.py +1425 -0
  1104. mindspore/ops/function/image_func.py +292 -0
  1105. mindspore/ops/function/linalg_func.py +416 -0
  1106. mindspore/ops/function/math_func.py +12228 -0
  1107. mindspore/ops/function/nn_func.py +8609 -0
  1108. mindspore/ops/function/other_func.py +115 -0
  1109. mindspore/ops/function/parameter_func.py +134 -0
  1110. mindspore/ops/function/random_func.py +1715 -0
  1111. mindspore/ops/function/reshard_func.py +104 -0
  1112. mindspore/ops/function/sparse_func.py +884 -0
  1113. mindspore/ops/function/sparse_unary_func.py +2422 -0
  1114. mindspore/ops/function/spectral_func.py +150 -0
  1115. mindspore/ops/function/vmap_func.py +117 -0
  1116. mindspore/ops/functional.py +464 -0
  1117. mindspore/ops/op_info_register.py +1572 -0
  1118. mindspore/ops/operations/__init__.py +722 -0
  1119. mindspore/ops/operations/_csr_ops.py +403 -0
  1120. mindspore/ops/operations/_custom_grad.py +181 -0
  1121. mindspore/ops/operations/_embedding_cache_ops.py +307 -0
  1122. mindspore/ops/operations/_grad_ops.py +2978 -0
  1123. mindspore/ops/operations/_infer_ops.py +19 -0
  1124. mindspore/ops/operations/_inner_ops.py +2544 -0
  1125. mindspore/ops/operations/_map_tensor_ops.py +112 -0
  1126. mindspore/ops/operations/_ms_kernel.py +601 -0
  1127. mindspore/ops/operations/_ocr_ops.py +379 -0
  1128. mindspore/ops/operations/_opaque_predicate_registry.py +41 -0
  1129. mindspore/ops/operations/_pyfunc_registry.py +58 -0
  1130. mindspore/ops/operations/_quant_ops.py +1844 -0
  1131. mindspore/ops/operations/_rl_inner_ops.py +1231 -0
  1132. mindspore/ops/operations/_scalar_ops.py +106 -0
  1133. mindspore/ops/operations/_sequence_ops.py +1155 -0
  1134. mindspore/ops/operations/_sparse_grad_ops.py +56 -0
  1135. mindspore/ops/operations/_tensor_array.py +359 -0
  1136. mindspore/ops/operations/_thor_ops.py +807 -0
  1137. mindspore/ops/operations/array_ops.py +6124 -0
  1138. mindspore/ops/operations/comm_ops.py +1985 -0
  1139. mindspore/ops/operations/control_ops.py +127 -0
  1140. mindspore/ops/operations/custom_ops.py +1129 -0
  1141. mindspore/ops/operations/debug_ops.py +678 -0
  1142. mindspore/ops/operations/image_ops.py +1041 -0
  1143. mindspore/ops/operations/inner_ops.py +697 -0
  1144. mindspore/ops/operations/linalg_ops.py +95 -0
  1145. mindspore/ops/operations/manually_defined/__init__.py +24 -0
  1146. mindspore/ops/operations/manually_defined/_inner.py +73 -0
  1147. mindspore/ops/operations/manually_defined/ops_def.py +2271 -0
  1148. mindspore/ops/operations/math_ops.py +5095 -0
  1149. mindspore/ops/operations/nn_ops.py +9575 -0
  1150. mindspore/ops/operations/other_ops.py +874 -0
  1151. mindspore/ops/operations/random_ops.py +1288 -0
  1152. mindspore/ops/operations/reshard_ops.py +53 -0
  1153. mindspore/ops/operations/rl_ops.py +288 -0
  1154. mindspore/ops/operations/sparse_ops.py +2753 -0
  1155. mindspore/ops/operations/spectral_ops.py +111 -0
  1156. mindspore/ops/primitive.py +1046 -0
  1157. mindspore/ops/signature.py +54 -0
  1158. mindspore/ops/vm_impl_registry.py +91 -0
  1159. mindspore/ops_generate/__init__.py +27 -0
  1160. mindspore/ops_generate/arg_dtype_cast.py +252 -0
  1161. mindspore/ops_generate/arg_handler.py +197 -0
  1162. mindspore/ops_generate/gen_aclnn_implement.py +263 -0
  1163. mindspore/ops_generate/gen_constants.py +36 -0
  1164. mindspore/ops_generate/gen_ops.py +1099 -0
  1165. mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
  1166. mindspore/ops_generate/gen_pyboost_func.py +1052 -0
  1167. mindspore/ops_generate/gen_utils.py +209 -0
  1168. mindspore/ops_generate/op_proto.py +145 -0
  1169. mindspore/ops_generate/pyboost_utils.py +367 -0
  1170. mindspore/ops_generate/template.py +261 -0
  1171. mindspore/parallel/__init__.py +30 -0
  1172. mindspore/parallel/_auto_parallel_context.py +1486 -0
  1173. mindspore/parallel/_cell_wrapper.py +174 -0
  1174. mindspore/parallel/_cost_model_context.py +700 -0
  1175. mindspore/parallel/_dp_allreduce_fusion.py +159 -0
  1176. mindspore/parallel/_offload_context.py +275 -0
  1177. mindspore/parallel/_parallel_serialization.py +561 -0
  1178. mindspore/parallel/_ps_context.py +242 -0
  1179. mindspore/parallel/_recovery_context.py +110 -0
  1180. mindspore/parallel/_tensor.py +730 -0
  1181. mindspore/parallel/_transformer/__init__.py +35 -0
  1182. mindspore/parallel/_transformer/layers.py +765 -0
  1183. mindspore/parallel/_transformer/loss.py +251 -0
  1184. mindspore/parallel/_transformer/moe.py +693 -0
  1185. mindspore/parallel/_transformer/op_parallel_config.py +222 -0
  1186. mindspore/parallel/_transformer/transformer.py +3119 -0
  1187. mindspore/parallel/_utils.py +612 -0
  1188. mindspore/parallel/algo_parameter_config.py +400 -0
  1189. mindspore/parallel/checkpoint_transform.py +650 -0
  1190. mindspore/parallel/cluster/__init__.py +15 -0
  1191. mindspore/parallel/cluster/process_entity/__init__.py +18 -0
  1192. mindspore/parallel/cluster/process_entity/_api.py +352 -0
  1193. mindspore/parallel/cluster/process_entity/_utils.py +101 -0
  1194. mindspore/parallel/cluster/run.py +136 -0
  1195. mindspore/parallel/mpi/__init__.py +14 -0
  1196. mindspore/parallel/mpi/_mpi_config.py +116 -0
  1197. mindspore/parallel/parameter_broadcast.py +151 -0
  1198. mindspore/parallel/shard.py +481 -0
  1199. mindspore/parallel/transform_safetensors.py +993 -0
  1200. mindspore/perf_msvcbuildinsights.dll +0 -0
  1201. mindspore/pgodb140.dll +0 -0
  1202. mindspore/pgort140.dll +0 -0
  1203. mindspore/profiler/__init__.py +28 -0
  1204. mindspore/profiler/common/__init__.py +14 -0
  1205. mindspore/profiler/common/constant.py +29 -0
  1206. mindspore/profiler/common/exceptions/__init__.py +14 -0
  1207. mindspore/profiler/common/exceptions/error_code.py +83 -0
  1208. mindspore/profiler/common/exceptions/exceptions.py +286 -0
  1209. mindspore/profiler/common/process_pool.py +41 -0
  1210. mindspore/profiler/common/registry.py +47 -0
  1211. mindspore/profiler/common/singleton.py +28 -0
  1212. mindspore/profiler/common/struct_type.py +118 -0
  1213. mindspore/profiler/common/util.py +472 -0
  1214. mindspore/profiler/common/validator/__init__.py +14 -0
  1215. mindspore/profiler/common/validator/validate_path.py +84 -0
  1216. mindspore/profiler/dynamic_profiler.py +694 -0
  1217. mindspore/profiler/envprofiling.py +254 -0
  1218. mindspore/profiler/parser/__init__.py +14 -0
  1219. mindspore/profiler/parser/aicpu_data_parser.py +272 -0
  1220. mindspore/profiler/parser/ascend_analysis/__init__.py +14 -0
  1221. mindspore/profiler/parser/ascend_analysis/constant.py +71 -0
  1222. mindspore/profiler/parser/ascend_analysis/file_manager.py +180 -0
  1223. mindspore/profiler/parser/ascend_analysis/function_event.py +185 -0
  1224. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +136 -0
  1225. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +131 -0
  1226. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +104 -0
  1227. mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
  1228. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +123 -0
  1229. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
  1230. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +75 -0
  1231. mindspore/profiler/parser/ascend_cluster_generator.py +116 -0
  1232. mindspore/profiler/parser/ascend_communicate_generator.py +314 -0
  1233. mindspore/profiler/parser/ascend_flops_generator.py +116 -0
  1234. mindspore/profiler/parser/ascend_fpbp_generator.py +82 -0
  1235. mindspore/profiler/parser/ascend_hccl_generator.py +271 -0
  1236. mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
  1237. mindspore/profiler/parser/ascend_memory_generator.py +185 -0
  1238. mindspore/profiler/parser/ascend_msprof_exporter.py +282 -0
  1239. mindspore/profiler/parser/ascend_msprof_generator.py +187 -0
  1240. mindspore/profiler/parser/ascend_op_generator.py +334 -0
  1241. mindspore/profiler/parser/ascend_steptrace_generator.py +94 -0
  1242. mindspore/profiler/parser/ascend_timeline_generator.py +545 -0
  1243. mindspore/profiler/parser/base_timeline_generator.py +483 -0
  1244. mindspore/profiler/parser/container.py +229 -0
  1245. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +697 -0
  1246. mindspore/profiler/parser/flops_parser.py +531 -0
  1247. mindspore/profiler/parser/framework_enum.py +111 -0
  1248. mindspore/profiler/parser/framework_parser.py +464 -0
  1249. mindspore/profiler/parser/framework_struct.py +61 -0
  1250. mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
  1251. mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
  1252. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
  1253. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
  1254. mindspore/profiler/parser/hccl_parser.py +573 -0
  1255. mindspore/profiler/parser/hwts_log_parser.py +122 -0
  1256. mindspore/profiler/parser/integrator.py +526 -0
  1257. mindspore/profiler/parser/memory_usage_parser.py +277 -0
  1258. mindspore/profiler/parser/minddata_analyzer.py +800 -0
  1259. mindspore/profiler/parser/minddata_parser.py +186 -0
  1260. mindspore/profiler/parser/minddata_pipeline_parser.py +299 -0
  1261. mindspore/profiler/parser/op_intermediate_parser.py +149 -0
  1262. mindspore/profiler/parser/optime_parser.py +250 -0
  1263. mindspore/profiler/parser/profiler_info.py +213 -0
  1264. mindspore/profiler/parser/step_trace_parser.py +666 -0
  1265. mindspore/profiler/profiler.py +153 -0
  1266. mindspore/profiler/profiling.py +1922 -0
  1267. mindspore/rewrite/__init__.py +28 -0
  1268. mindspore/rewrite/api/__init__.py +17 -0
  1269. mindspore/rewrite/api/node.py +519 -0
  1270. mindspore/rewrite/api/node_type.py +53 -0
  1271. mindspore/rewrite/api/pattern_engine.py +490 -0
  1272. mindspore/rewrite/api/scoped_value.py +181 -0
  1273. mindspore/rewrite/api/symbol_tree.py +497 -0
  1274. mindspore/rewrite/ast_helpers/__init__.py +25 -0
  1275. mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
  1276. mindspore/rewrite/ast_helpers/ast_finder.py +404 -0
  1277. mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
  1278. mindspore/rewrite/ast_helpers/ast_modifier.py +605 -0
  1279. mindspore/rewrite/ast_helpers/ast_replacer.py +79 -0
  1280. mindspore/rewrite/common/__init__.py +19 -0
  1281. mindspore/rewrite/common/config.py +24 -0
  1282. mindspore/rewrite/common/error_log.py +39 -0
  1283. mindspore/rewrite/common/event.py +28 -0
  1284. mindspore/rewrite/common/namer.py +271 -0
  1285. mindspore/rewrite/common/namespace.py +118 -0
  1286. mindspore/rewrite/common/observable.py +44 -0
  1287. mindspore/rewrite/common/observer.py +54 -0
  1288. mindspore/rewrite/node/__init__.py +22 -0
  1289. mindspore/rewrite/node/call_function.py +95 -0
  1290. mindspore/rewrite/node/cell_container.py +139 -0
  1291. mindspore/rewrite/node/control_flow.py +113 -0
  1292. mindspore/rewrite/node/node.py +1428 -0
  1293. mindspore/rewrite/node/node_manager.py +283 -0
  1294. mindspore/rewrite/node/node_topological_manager.py +223 -0
  1295. mindspore/rewrite/parsers/__init__.py +29 -0
  1296. mindspore/rewrite/parsers/arguments_parser.py +63 -0
  1297. mindspore/rewrite/parsers/assign_parser.py +852 -0
  1298. mindspore/rewrite/parsers/attribute_parser.py +57 -0
  1299. mindspore/rewrite/parsers/class_def_parser.py +289 -0
  1300. mindspore/rewrite/parsers/constant_parser.py +104 -0
  1301. mindspore/rewrite/parsers/container_parser.py +88 -0
  1302. mindspore/rewrite/parsers/expr_parser.py +55 -0
  1303. mindspore/rewrite/parsers/for_parser.py +61 -0
  1304. mindspore/rewrite/parsers/function_def_parser.py +84 -0
  1305. mindspore/rewrite/parsers/if_parser.py +85 -0
  1306. mindspore/rewrite/parsers/module_parser.py +117 -0
  1307. mindspore/rewrite/parsers/parser.py +43 -0
  1308. mindspore/rewrite/parsers/parser_register.py +86 -0
  1309. mindspore/rewrite/parsers/return_parser.py +37 -0
  1310. mindspore/rewrite/parsers/while_parser.py +59 -0
  1311. mindspore/rewrite/sparsify/__init__.py +0 -0
  1312. mindspore/rewrite/sparsify/sparse_transformer.py +457 -0
  1313. mindspore/rewrite/sparsify/sparsify.py +112 -0
  1314. mindspore/rewrite/sparsify/utils.py +179 -0
  1315. mindspore/rewrite/symbol_tree/__init__.py +20 -0
  1316. mindspore/rewrite/symbol_tree/symbol_tree.py +1819 -0
  1317. mindspore/rewrite/symbol_tree/symbol_tree_builder.py +76 -0
  1318. mindspore/rewrite/symbol_tree/symbol_tree_dumper.py +142 -0
  1319. mindspore/run_check/__init__.py +20 -0
  1320. mindspore/run_check/_check_version.py +507 -0
  1321. mindspore/run_check/run_check.py +66 -0
  1322. mindspore/safeguard/__init__.py +18 -0
  1323. mindspore/safeguard/rewrite_obfuscation.py +875 -0
  1324. mindspore/swresample-4.dll +0 -0
  1325. mindspore/swscale-6.dll +0 -0
  1326. mindspore/tbbmalloc.dll +0 -0
  1327. mindspore/tinyxml2.dll +0 -0
  1328. mindspore/train/__init__.py +48 -0
  1329. mindspore/train/_utils.py +465 -0
  1330. mindspore/train/amp.py +935 -0
  1331. mindspore/train/anf_ir_pb2.py +1517 -0
  1332. mindspore/train/callback/__init__.py +44 -0
  1333. mindspore/train/callback/_backup_and_restore.py +117 -0
  1334. mindspore/train/callback/_callback.py +613 -0
  1335. mindspore/train/callback/_checkpoint.py +814 -0
  1336. mindspore/train/callback/_cluster_monitor.py +201 -0
  1337. mindspore/train/callback/_dataset_graph.py +150 -0
  1338. mindspore/train/callback/_early_stop.py +239 -0
  1339. mindspore/train/callback/_flops_collector.py +239 -0
  1340. mindspore/train/callback/_history.py +92 -0
  1341. mindspore/train/callback/_lambda_callback.py +80 -0
  1342. mindspore/train/callback/_landscape.py +1049 -0
  1343. mindspore/train/callback/_loss_monitor.py +107 -0
  1344. mindspore/train/callback/_lr_scheduler_callback.py +76 -0
  1345. mindspore/train/callback/_on_request_exit.py +298 -0
  1346. mindspore/train/callback/_reduce_lr_on_plateau.py +226 -0
  1347. mindspore/train/callback/_summary_collector.py +1184 -0
  1348. mindspore/train/callback/_tft_register.py +352 -0
  1349. mindspore/train/callback/_time_monitor.py +141 -0
  1350. mindspore/train/checkpoint_pb2.py +233 -0
  1351. mindspore/train/data_sink.py +219 -0
  1352. mindspore/train/dataset_helper.py +692 -0
  1353. mindspore/train/lineage_pb2.py +1260 -0
  1354. mindspore/train/loss_scale_manager.py +213 -0
  1355. mindspore/train/memory_profiling_pb2.py +298 -0
  1356. mindspore/train/metrics/__init__.py +175 -0
  1357. mindspore/train/metrics/accuracy.py +133 -0
  1358. mindspore/train/metrics/auc.py +129 -0
  1359. mindspore/train/metrics/bleu_score.py +170 -0
  1360. mindspore/train/metrics/confusion_matrix.py +700 -0
  1361. mindspore/train/metrics/cosine_similarity.py +109 -0
  1362. mindspore/train/metrics/dice.py +116 -0
  1363. mindspore/train/metrics/error.py +175 -0
  1364. mindspore/train/metrics/fbeta.py +167 -0
  1365. mindspore/train/metrics/hausdorff_distance.py +333 -0
  1366. mindspore/train/metrics/loss.py +97 -0
  1367. mindspore/train/metrics/mean_surface_distance.py +189 -0
  1368. mindspore/train/metrics/metric.py +373 -0
  1369. mindspore/train/metrics/occlusion_sensitivity.py +225 -0
  1370. mindspore/train/metrics/perplexity.py +133 -0
  1371. mindspore/train/metrics/precision.py +160 -0
  1372. mindspore/train/metrics/recall.py +159 -0
  1373. mindspore/train/metrics/roc.py +223 -0
  1374. mindspore/train/metrics/root_mean_square_surface_distance.py +191 -0
  1375. mindspore/train/metrics/topk.py +167 -0
  1376. mindspore/train/mind_ir_pb2.py +1908 -0
  1377. mindspore/train/model.py +2252 -0
  1378. mindspore/train/node_strategy_pb2.py +653 -0
  1379. mindspore/train/print_pb2.py +184 -0
  1380. mindspore/train/profiling_parallel_pb2.py +151 -0
  1381. mindspore/train/serialization.py +3325 -0
  1382. mindspore/train/summary/__init__.py +23 -0
  1383. mindspore/train/summary/_lineage_adapter.py +41 -0
  1384. mindspore/train/summary/_summary_adapter.py +496 -0
  1385. mindspore/train/summary/_writer_pool.py +207 -0
  1386. mindspore/train/summary/enums.py +56 -0
  1387. mindspore/train/summary/summary_record.py +581 -0
  1388. mindspore/train/summary/writer.py +167 -0
  1389. mindspore/train/summary_pb2.py +1165 -0
  1390. mindspore/train/train_thor/__init__.py +20 -0
  1391. mindspore/train/train_thor/convert_utils.py +268 -0
  1392. mindspore/train/train_thor/dataset_helper.py +192 -0
  1393. mindspore/train/train_thor/model_thor.py +257 -0
  1394. mindspore/turbojpeg.dll +0 -0
  1395. mindspore/utils/__init__.py +21 -0
  1396. mindspore/utils/utils.py +60 -0
  1397. mindspore/vcmeta.dll +0 -0
  1398. mindspore/vcomp140.dll +0 -0
  1399. mindspore/vcruntime140.dll +0 -0
  1400. mindspore/vcruntime140_1.dll +0 -0
  1401. mindspore/version.py +1 -0
  1402. mindspore-2.4.0.dist-info/METADATA +352 -0
  1403. mindspore-2.4.0.dist-info/RECORD +1406 -0
  1404. mindspore-2.4.0.dist-info/WHEEL +5 -0
  1405. mindspore-2.4.0.dist-info/entry_points.txt +3 -0
  1406. mindspore-2.4.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1715 @@
1
+ # Copyright 2023 Huawei Technologies Co., Ltd
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ============================================================================
15
+ """Defines parameter operators with functional form."""
16
+
17
+ from __future__ import absolute_import
18
+ import numpy as np
19
+
20
+ from mindspore import context
21
+ from mindspore.ops import operations as P
22
+ from mindspore.ops import functional as F
23
+ from mindspore.ops.primitive import constexpr, _primexpr
24
+ from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
25
+ from mindspore.common import dtype as mstype
26
+ from mindspore.common.seed import _get_graph_seed
27
+ from mindspore.common.tensor import Tensor
28
+ from mindspore.ops.operations.random_ops import RandomShuffle, RandomChoiceWithMask
29
+ from mindspore.common.api import _function_forbid_reuse
30
+ from mindspore.ops.auto_generate import randperm
31
+ from mindspore.common.generator import default_generator
32
+ from mindspore.ops.auto_generate import UniformExt, NormalTensorTensor, \
33
+ NormalTensorFloat, NormalFloatTensor, NormalFloatFloat, RandExt, RandLikeExt, MultinomialExt
34
+
35
+ normal_tensor_tensor_op = NormalTensorTensor()
36
+ normal_tensor_float_op = NormalTensorFloat()
37
+ normal_float_tensor_op = NormalFloatTensor()
38
+ normal_float_float_op = NormalFloatFloat()
39
+ cast_ = P.Cast()
40
+ log_ = P.Log()
41
+ real_div_ = P.RealDiv()
42
+ reshape_ = P.Reshape()
43
+ shape_ = P.Shape()
44
+ top_k_ = P.TopK()
45
+ uniform_ = UniformExt()
46
+ rand_ext_ = RandExt()
47
+ rand_like_ext_ = RandLikeExt()
48
+ multinomial_ext_ = MultinomialExt()
49
+ generator_step_ = Tensor(10, mstype.int64)
50
+
51
+
52
+ @constexpr
53
+ def _set_prim_op_user_data(prim, key, value):
54
+ prim.add_prim_attr(key, value)
55
+ return prim
56
+
57
+
58
+ @_function_forbid_reuse
59
+ def random_gamma(shape, alpha, seed=None):
60
+ r"""
61
+ Outputs random values from the Gamma distribution(s) described by alpha.
62
+
63
+
64
+ Args:
65
+ shape (Tensor): The shape of random tensor to be generated.
66
+ Must be one of the following types: int32, int64. 1-D integer tensor.
67
+ alpha (Tensor): The :math:`\alpha` distribution parameter.
68
+ A Tensor. Must be one of the following types: half, float32, float64.
69
+ seed (int, optional): Seed is used as entropy source for Random number engines generating pseudo-random numbers.
70
+ Default: ``None`` , which will be treated as 0.
71
+
72
+ Returns:
73
+ Tensor. The shape should be equal to the concat shape between the input `shape` and the broadcast
74
+ shape of `alpha`.
75
+ The dtype is the same type as alpha.
76
+
77
+ Raises:
78
+ TypeError: If `shape` is not a Tensor.
79
+ TypeError: If `alpha` is not a Tensor.
80
+ TypeError: If `seed` is not an int.
81
+ TypeError: If dtype of `alpha` is not half, float32 or float64.
82
+
83
+ Supported Platforms:
84
+ ``CPU``
85
+
86
+ Examples:
87
+ >>> import numpy as np
88
+ >>> import mindspore
89
+ >>> from mindspore import Tensor, ops
90
+ >>> shape = Tensor(np.array([7, 5]), mindspore.int32)
91
+ >>> alpha = Tensor(np.array([0.5, 1.5]), mindspore.float32)
92
+ >>> output = ops.random_gamma(shape, alpha, seed=5)
93
+ >>> result = output.shape
94
+ >>> print(result)
95
+ (7, 5, 2)
96
+ """
97
+ seed1, seed2 = _get_seed(seed, "random_gamma")
98
+ random_gamma_op = P.RandomGamma(seed1, seed2)
99
+ random_gamma_op = _set_prim_op_user_data(
100
+ random_gamma_op, "random_cache", False)
101
+ output = random_gamma_op(shape, alpha)
102
+ return output
103
+
104
+
105
+ @constexpr(reuse_result=False)
106
+ def _get_seed(op_seed, kernel_name):
107
+ """Get the graph-level seed."""
108
+ return _get_graph_seed(op_seed, kernel_name)
109
+
110
+
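A minimal sketch (not part of the packaged file) of the seeding pattern the public wrappers below follow: `_get_seed` splits the user-visible seed into the two graph-level seeds a primitive expects, and `_set_prim_op_user_data` tags the primitive so its random values are not cached. Shown with `P.StandardNormal`, mirroring the body of `standard_normal` further down:

>>> seed1, seed2 = _get_seed(5, "standard_normal")           # module-internal helper defined above
>>> op = P.StandardNormal(seed=seed1, seed2=seed2)
>>> op = _set_prim_op_user_data(op, "random_cache", False)   # disable random-value caching
>>> out = op((2, 3))                                         # illustrative call; Tensor of shape (2, 3)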
111
+ @_function_forbid_reuse
112
+ def standard_laplace(shape, seed=None):
113
+ r"""
114
+ Generates random numbers according to the Laplace random number distribution (mean=0, lambda=1).
115
+ It is defined as:
116
+
117
+ .. math::
118
+ \text{f}(x) = \frac{1}{2}\exp(-|x|)
119
+
120
+ .. warning::
121
+ The Ascend backend does not support the reproducibility of random numbers, so
122
+ the `seed` parameter has no effect.
123
+
124
+ Args:
125
+ shape (Union[tuple, Tensor]): The shape of random tensor to be generated. Only constant value is allowed
126
+ when the input type is tuple. And the operator supports dynamic shape only when the input type is Tensor.
127
+ seed (int, optional): Seed is used as entropy source for Random number engines generating pseudo-random numbers.
128
+ Default: ``None`` .
129
+
130
+ Returns:
131
+ Tensor. The shape that the input `shape` denotes. The dtype is float32.
132
+
133
+ Raises:
134
+ TypeError: If shape is neither a tuple nor a Tensor.
135
+ ValueError: If shape is a tuple containing non-positive items.
136
+ ValueError: If shape is a Tensor, and the rank of the Tensor is not equal to 1.
137
+
138
+ Supported Platforms:
139
+ ``Ascend`` ``GPU`` ``CPU``
140
+
141
+ Examples:
142
+ >>> from mindspore import ops
143
+ >>> shape = (4, 4)
144
+ >>> output = ops.standard_laplace(shape)
145
+ >>> result = output.shape
146
+ >>> print(result)
147
+ (4, 4)
148
+ """
149
+ seed1, seed2 = _get_seed(seed, "standard_laplace")
150
+ standard_laplace_op = P.StandardLaplace(seed=seed1, seed2=seed2)
151
+ standard_laplace_op = _set_prim_op_user_data(
152
+ standard_laplace_op, "random_cache", False)
153
+ return standard_laplace_op(shape)
154
+
155
+
156
+ @_function_forbid_reuse
157
+ def random_categorical(logits, num_sample, seed=0, dtype=mstype.int64):
158
+ r"""
159
+ Generates random samples from a given categorical distribution tensor.
160
+
161
+ .. warning::
162
+ The Ascend backend does not support the reproducibility of random numbers, so
163
+ the `seed` parameter has no effect.
164
+
165
+ Args:
166
+ logits (Tensor): The input tensor. 2-D Tensor with shape :math:`(batch\_size, num\_classes)`.
167
+ num_sample (int): Number of samples to be drawn. Only constant values are allowed.
168
+ seed (int): Random seed. Only constant values are allowed. Default: ``0`` .
169
+ dtype (mindspore.dtype): The type of output. Its value must be one of mindspore.int16,
170
+ mindspore.int32 and mindspore.int64. Default: ``mstype.int64`` .
171
+
172
+ Returns:
173
+ Tensor, The output Tensor with shape :math:`(batch\_size, num\_samples)`.
174
+
175
+ Raises:
176
+ TypeError: If `dtype` is not one of the following: mindspore.int16, mindspore.int32, mindspore.int64.
177
+ TypeError: If `logits` is not a Tensor.
178
+ TypeError: If `num_sample` or `seed` is not an int.
179
+
180
+ Supported Platforms:
181
+ ``Ascend`` ``GPU`` ``CPU``
182
+
183
+ Examples:
184
+ >>> from mindspore import ops
185
+ >>> from mindspore import Tensor
186
+ >>> import mindspore.common.dtype as mstype
187
+ >>> import numpy as np
188
+ >>> logits = Tensor(np.random.random((10, 5)).astype(np.float32), mstype.float32)
189
+ >>> net = ops.random_categorical(logits, 8)
190
+ >>> result = net.shape
191
+ >>> print(result)
192
+ (10, 8)
193
+ """
194
+ random_categorical_ = P.RandomCategorical(dtype)
195
+ random_categorical_ = _set_prim_op_user_data(
196
+ random_categorical_, "random_cache", False)
197
+ return random_categorical_(logits, num_sample, seed)
198
+
199
+
200
+ @_function_forbid_reuse
201
+ def multinomial_with_replacement(x, seed, offset, numsamples, replacement=False):
202
+ r"""
203
+ Returns a tensor where each row contains numsamples indices sampled from the
204
+ multinomial distribution with replacement. It is different from `multinomial` in that it allows
205
+ the same outcome to be chosen multiple times.
206
+
207
+ Note:
208
+ The rows of input do not need to sum to one (in which case we use the values as weights),
209
+ but must be non-negative, finite and have a non-zero sum.
210
+
211
+ Args:
212
+ x (Tensor): the input tensor containing the cumsum of probabilities, must be 1 or 2
213
+ dimensions. Must be one of the following types: float16, float32, float64.
214
+ seed (int): If seed is set to be -1, and offset is set to be 0, the random number
215
+ generator is seeded by a random seed. Otherwise, it is seeded by the given seed.
216
+ offset (int): Offset used to avoid seed collision.
217
+ numsamples (int): the number of samples to draw.
218
+ replacement (bool, optional): Whether to draw with replacement or not. Default: ``False`` .
219
+
220
+ Returns:
221
+ Tensor with the same rows as `x`, each row has `numsamples` sampled indices.
222
+
223
+ Raises:
224
+ TypeError: If `x` is not a 1D or 2D Tensor.
225
+ TypeError: If dtype of `x` is not float16, float32 or float64.
226
+ TypeError: If `numsamples` is not an int.
227
+ TypeError: If `replacement` is not a bool.
228
+ ValueError: If the value of `numsamples` is greater than x_shape[-1] when `replacement` is False.
229
+ ValueError: If the sum of one row of `x` is less than 0.
230
+ ValueError: If any element of a row of `x` is less than 0.
231
+ ValueError: If `numsamples` is less than or equal to 0.
232
+
233
+ Supported Platforms:
234
+ ``CPU``
235
+
236
+ Examples:
237
+ >>> from mindspore import Tensor, ops
238
+ >>> from mindspore import dtype as mstype
239
+ >>> x = Tensor([[0., 9., 4., 0.]], mstype.float32)
240
+ >>> output = ops.multinomial_with_replacement(x, 2, 5, 2, True)
241
+ >>> print(output)
242
+ [[1 1]]
243
+ """
244
+ if not isinstance(seed, Tensor):
245
+ if not isinstance(seed, int):
246
+ raise TypeError(f"For multinomial_with_replacement, "
248
+ f"the input[seed] must be int, but got {type(seed)}.")
248
+ seed = Tensor(seed, dtype=mstype.int64)
249
+ if not isinstance(offset, Tensor):
250
+ if not isinstance(offset, int):
251
+ raise TypeError(f"For multinomial_with_replacement, "
252
+ f"the input[offset] must be int, but got {type(offset)}.")
253
+ offset = Tensor(offset, dtype=mstype.int64)
254
+ multinomial_with_replacement_ = P.MultinomialWithReplacement(numsamples=numsamples,
255
+ replacement=replacement)
256
+ multinomial_with_replacement_ = _set_prim_op_user_data(
257
+ multinomial_with_replacement_, "random_cache", False)
258
+ return multinomial_with_replacement_(x, seed, offset)
259
+
260
+
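An illustrative call (a sketch, not part of the packaged file) showing that `seed` and `offset` may be given either as Python ints or as int64 Tensors; per the conversion code above, ints are wrapped into Tensors internally, so the two calls below are equivalent:

>>> from mindspore import Tensor, ops
>>> from mindspore import dtype as mstype
>>> x = Tensor([[0., 9., 4., 0.]], mstype.float32)
>>> a = ops.multinomial_with_replacement(x, 2, 5, 2, True)
>>> b = ops.multinomial_with_replacement(x, Tensor(2, mstype.int64), Tensor(5, mstype.int64), 2, True)
>>> print(a.shape, b.shape)
(1, 2) (1, 2)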
261
+ @_function_forbid_reuse
262
+ def uniform_ext(tensor, a, b, generator=None):
263
+ """
264
+ Generates random numbers in the half-open interval [a, b).
265
+
266
+ Args:
267
+ tensor (Tensor): The origin input tensor.
268
+ a (number): The lower bound of the interval.
269
+ b (number): The upper bound of the interval.
270
+ generator (Generator, optional): The pseudorandom number generator. Default: ``None``.
271
+
272
+ Raises:
273
+ TypeError: If `a` is larger than `b`.
274
+
275
+ Returns:
276
+ Tensor, with the same shape as tensor.
277
+
278
+ Examples:
279
+ >>> import mindspore
280
+ >>> from mindspore import ops
281
+ >>> x = ops.ones((4, 2))
282
+ >>> generator = mindspore.Generator()
283
+ >>> generator.manual_seed(100)
284
+ >>> result = ops.function.random_func.uniform_ext(x, 1., 2., generator)
285
+ >>> print(result.shape)
286
+ (4, 2)
287
+ """
288
+ if generator is None:
289
+ generator = default_generator
290
+ seed, offset = generator._step(generator_step_) # pylint: disable=protected-access
291
+ return uniform_(tensor, a, b, seed, offset)
292
+
293
+
294
+ @_function_forbid_reuse
295
+ def uniform(shape, minval, maxval, seed=None, dtype=mstype.float32):
296
+ """
297
+ Generates random numbers according to the Uniform random number distribution.
298
+
299
+ Note:
300
+ The number in tensor minval should be strictly less than maxval at any position after broadcasting.
301
+
302
+ Args:
303
+ shape (Union[tuple, Tensor]): The shape of random tensor to be generated.
304
+ minval (Tensor): The distribution parameter `a`.
305
+ It defines the minimum possible generated value, with int32 or float32 data type.
306
+ If dtype is int32, only one number is allowed.
307
+ maxval (Tensor): The distribution parameter `b`.
308
+ It defines the maximum possible generated value, with int32 or float32 data type.
309
+ If dtype is int32, only one number is allowed.
310
+ seed (int): Seed is used as entropy source for the random number engines to generate pseudo-random numbers,
311
+ must be non-negative. Default: ``None`` , which will be treated as 0.
312
+ dtype (mindspore.dtype): Type of the Uniform distribution. If it is int32, it generates numbers from discrete
313
+ uniform distribution; if it is float32, it generates numbers from continuous uniform distribution. It only
314
+ supports these two data types. Default: mstype.float32.
315
+
316
+ Returns:
317
+ Tensor. The shape should be equal to the broadcasted shape between the input `shape` and shapes
318
+ of `minval` and `maxval`.
319
+ The dtype is designated as the input `dtype`.
320
+
321
+ Raises:
322
+ TypeError: If `shape` is neither a tuple nor a Tensor.
323
+ TypeError: If 'minval' or 'maxval' is neither int32 nor float32
324
+ and dtype of 'minval' is not the same as 'maxval'.
325
+ TypeError: If `seed` is not an int.
326
+ TypeError: If 'dtype' is neither int32 nor float32.
327
+
328
+ Supported Platforms:
329
+ ``GPU`` ``CPU``
330
+
331
+ Examples:
332
+ >>> from mindspore import Tensor, ops
333
+ >>> import mindspore
334
+ >>> import numpy as np
335
+ >>> # For discrete uniform distribution, only one number is allowed for both minval and maxval:
336
+ >>> shape = (4, 2)
337
+ >>> minval = Tensor(1, mindspore.int32)
338
+ >>> maxval = Tensor(2, mindspore.int32)
339
+ >>> output = ops.uniform(shape, minval, maxval, seed=5, dtype=mindspore.int32)
340
+ >>>
341
+ >>> # For continuous uniform distribution, minval and maxval can be multi-dimensional:
342
+ >>> shape = (3, 1, 2)
343
+ >>> minval = Tensor(np.array([[3, 4], [5, 6]]), mindspore.float32)
344
+ >>> maxval = Tensor([8.0, 10.0], mindspore.float32)
345
+ >>> output = ops.uniform(shape, minval, maxval, seed=5)
346
+ >>> result = output.shape
347
+ >>> print(result)
348
+ (3, 2, 2)
349
+ """
350
+ if not isinstance(minval, Tensor) or not isinstance(maxval, Tensor):
351
+ raise TypeError(
352
+ f"For functional operator[uniform], the input[minval] and input[maxval] must be a Tensor.")
353
+
354
+ minval_dtype = F.dtype(minval)
355
+ maxval_dtype = F.dtype(maxval)
356
+ const_utils.check_type_valid(
357
+ dtype, [mstype.int32, mstype.float32], 'uniform')
358
+ const_utils.check_tensors_dtype_same(minval_dtype, dtype, "uniform")
359
+ const_utils.check_tensors_dtype_same(maxval_dtype, dtype, "uniform")
360
+ seed1, seed2 = _get_seed(seed, "uniform")
361
+ if const_utils.is_same_type(dtype, mstype.int32):
362
+ random_uniform = P.UniformInt(seed1, seed2)
363
+ random_uniform = _set_prim_op_user_data(
364
+ random_uniform, "random_cache", False)
365
+ value = random_uniform(shape, minval, maxval)
366
+ else:
367
+ uniform_real = P.UniformReal(seed1, seed2)
368
+ uniform_real = _set_prim_op_user_data(
369
+ uniform_real, "random_cache", False)
370
+ uniform_real = uniform_real(shape)
371
+ value = uniform_real * (maxval - minval) + minval
372
+ return value
373
+
374
+
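A worked sketch (not part of the packaged file) of the continuous branch above: for a float32 `dtype`, `uniform` draws U(0, 1) samples of the requested `shape` and rescales them as `u * (maxval - minval) + minval`, so the result broadcasts against `minval` and `maxval`:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> shape = (3, 1, 2)
>>> minval = Tensor(np.array([[3, 4], [5, 6]]), mindspore.float32)   # shape (2, 2)
>>> maxval = Tensor([8.0, 10.0], mindspore.float32)                  # shape (2,)
>>> out = ops.uniform(shape, minval, maxval, seed=5)
>>> print(out.shape)                                                 # (3, 1, 2) broadcast with (2, 2)
(3, 2, 2)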
375
+ @_function_forbid_reuse
376
+ def standard_normal(shape, seed=None):
377
+ r"""
378
+ Generates random numbers according to the standard Normal (or Gaussian) random number distribution.
379
+
380
+ Returns the tensor with the given shape, the random numbers in it drawn from normal distributions
381
+ whose mean is 0 and standard deviation is 1.
382
+
383
+ .. math::
384
+ f(x)=\frac{1}{\sqrt{2 \pi}} e^{\left(-\frac{x^{2}}{2}\right)}
385
+
386
+ .. warning::
387
+ The Ascend backend does not support the reproducibility of random numbers, so
388
+ the `seed` parameter has no effect.
389
+
390
+ Args:
391
+ shape (Union[tuple, Tensor]): The shape of random tensor to be generated. Only constant value is allowed
392
+ when the input type is tuple. And the operator supports dynamic shape only when the input type is Tensor.
393
+ seed (int, optional): Seed is used as entropy source for Random number engines generating pseudo-random numbers.
394
+ Default: ``None`` , which will be treated as 0.
395
+
396
+ Returns:
397
+ Tensor. The shape that the input 'shape' denotes. The dtype is float32.
398
+
399
+ Raises:
400
+ TypeError: If `shape` is neither a tuple nor a Tensor.
401
+ ValueError: If `shape` is a tuple containing non-positive items.
402
+
403
+ Supported Platforms:
404
+ ``Ascend`` ``GPU`` ``CPU``
405
+
406
+ Examples:
407
+ >>> from mindspore import ops
408
+ >>> shape = (4, 4)
409
+ >>> output = ops.standard_normal(shape)
410
+ >>> result = output.shape
411
+ >>> print(result)
412
+ (4, 4)
413
+ """
414
+ seed1, seed2 = _get_seed(seed, "standard_normal")
415
+ standard_normal_op = P.StandardNormal(seed=seed1, seed2=seed2)
416
+ standard_normal_op = _set_prim_op_user_data(
417
+ standard_normal_op, "random_cache", False)
418
+ return standard_normal_op(shape)
419
+
420
+
421
+ @_function_forbid_reuse
422
+ def uniform_candidate_sampler(true_classes,
423
+ num_true,
424
+ num_sampled,
425
+ unique,
426
+ range_max,
427
+ seed=0,
428
+ remove_accidental_hits=False):
429
+ r"""
430
+ Uniform candidate sampler.
431
+
432
+ This function samples a set of classes (sampled_candidates) from [0, range_max-1] based on a uniform distribution.
433
+ If unique=True, candidates are drawn without replacement; otherwise, they are drawn with replacement.
434
+
435
+ .. warning::
436
+ - The Ascend backend does not support the reproducibility of random numbers, so
437
+ the `seed` parameter has no effect.
438
+ - The Ascend backend does not support dynamic shape scenarios currently.
439
+
440
+ Args:
441
+ true_classes (Tensor): A Tensor. The target classes with a Tensor shape of :math:`(batch\_size, num\_true)` .
442
+ The value range of the elements must be :math:`[0, range\_max)`.
443
+ num_true (int): The number of target classes in each training example.
444
+ num_sampled (int): The number of classes to randomly sample. The sampled_candidates will have a shape
445
+ of num_sampled. If unique=True, num_sampled must be less than or equal to range_max.
446
+ unique (bool): Whether all sampled classes in a batch are unique.
447
+ range_max (int): The number of possible classes, must be positive.
448
+ seed (int): Used for random number generation, must be non-negative. If seed has a value of 0,
449
+ the seed will be replaced with a randomly generated value. Default: ``0`` .
450
+ remove_accidental_hits (bool): Whether accidental hit is removed.
451
+ Accidental hit is when one of the true classes matches one of the sample classes.
452
+ Set ``True`` to remove sampled classes that accidentally match a true class. Default: ``False`` .
453
+
454
+ Returns:
455
+ - **sampled_candidates** (Tensor) - The sampled_candidates is independent of the true classes.
456
+ shape: :math:`(num\_sampled, )` .
457
+ - **true_expected_count** (Tensor) - The expected counts under the sampling distribution of each
458
+ of true_classes. shape: :math:`(batch\_size, num\_true)` .
459
+ - **sampled_expected_count** (Tensor) - The expected counts under the sampling distribution of
460
+ each of sampled_candidates. shape: :math:`(num\_sampled, )` .
461
+
462
+ Raises:
463
+ TypeError: If `num_true` or `num_sampled` is not an int.
464
+ TypeError: If `unique` or `remove_accidental_hits` is not a bool.
465
+ TypeError: If `range_max` or `seed` is not an int.
466
+ TypeError: If `true_classes` is not a Tensor.
467
+
468
+ Supported Platforms:
469
+ ``Ascend`` ``GPU`` ``CPU``
470
+
471
+ Examples:
472
+ >>> import numpy as np
473
+ >>> from mindspore import Tensor, ops
474
+ >>> data = Tensor(np.array([[1], [3], [4], [6], [3]], dtype=np.int64))
475
+ >>> output1, output2, output3 = ops.uniform_candidate_sampler(data, 1, 3, False, 4, 1)
476
+ >>> print(output1.shape)
477
+ (3,)
478
+ >>> print(output2.shape)
479
+ (5, 1)
480
+ >>> print(output3.shape)
481
+ (3,)
482
+ """
483
+ sampler_op = P.UniformCandidateSampler(num_true,
484
+ num_sampled,
485
+ unique,
486
+ range_max,
487
+ seed=seed,
488
+ remove_accidental_hits=remove_accidental_hits)
489
+ sampler_op = _set_prim_op_user_data(sampler_op, "random_cache", False)
490
+ sampled_candidates, true_expected_count, sampled_expected_count = sampler_op(
491
+ true_classes)
492
+ return sampled_candidates, true_expected_count, sampled_expected_count
493
+
494
+
495
+ @_function_forbid_reuse
496
+ def random_poisson(shape, rate, seed=None, dtype=mstype.float32):
497
+ r"""
498
+ Generates random number Tensor with shape `shape` according to a Poisson distribution with mean `rate`.
499
+
500
+
501
+ .. math::
502
+
503
+ \text{P}(i|μ) = \frac{\exp(-μ)μ^{i}}{i!}
504
+
505
+ .. warning::
506
+ The Ascend backend does not support the reproducibility of random numbers, so
507
+ the `seed` parameter has no effect.
508
+
509
+ Args:
510
+ shape (Tensor): The shape of random tensor to be sampled from each poisson distribution, 1-D `Tensor` whose
511
+ dtype is mstype.int32 or mstype.int64.
512
+ rate (Tensor): The :math:`μ` parameter the distribution is constructed with.
513
+ It represents the mean of poisson distribution
514
+ and also the variance of the distribution. It should be a `Tensor` whose dtype is mstype.int64,
515
+ mstype.int32, mstype.float64, mstype.float32 or mstype.float16.
516
+ seed (int, optional): Seed is used as entropy source for the random number engines to generate pseudo-random
517
+ numbers and must be non-negative. Default: ``None`` , which will be treated as 0.
518
+ dtype (mindspore.dtype): The data type of output: ``mstype.int64``, ``mstype.int32``,
519
+ ``mstype.float64``, ``mstype.float32`` or ``mstype.float16``. Default: ``mstype.float32``.
520
+
521
+ Returns:
522
+ A Tensor whose shape is the concatenation of the input `shape` and the shape of `rate`, and whose data type is
523
+ equal to the argument `dtype`.
524
+
525
+ Raises:
526
+ TypeError: If `shape` is not a Tensor.
527
+ TypeError: If datatype of `shape` is neither mstype.int64 nor mstype.int32.
528
+ ValueError: If shape of `shape` is not 1-D.
529
+ TypeError: If `rate` is not a Tensor nor a scalar.
530
+ TypeError: If datatype of `rate` is not in [mstype.int64, mstype.int32,
531
+ mstype.float64, mstype.float32 or mstype.float16].
532
+ TypeError: If `seed` is not a non-negative int.
533
+ TypeError: If `dtype` is not in [mstype.int64, mstype.int32, mstype.float64,
534
+ mstype.float32 or mstype.float16].
535
+ ValueError: If any element of input `shape` tensor is not positive.
536
+
537
+ Supported Platforms:
538
+ ``GPU`` ``CPU``
539
+
540
+ Examples:
541
+ >>> import mindspore
542
+ >>> import numpy as np
543
+ >>> from mindspore import Tensor, ops
544
+ >>> # case 1: 1-D shape, 2-D rate, float64 output
545
+ >>> shape = Tensor(np.array([2, 2]), mindspore.int64)
546
+ >>> rate = Tensor(np.array([[5.0, 10.0], [5.0, 1.0]]), mindspore.float32)
547
+ >>> output = ops.random_poisson(shape, rate, seed=5, dtype=mindspore.float64)
548
+ >>> print(output.shape, output.dtype)
549
+ (2, 2, 2, 2) Float64
550
+ >>> # case 2: 1-D shape, scalar rate, int64 output
551
+ >>> shape = Tensor(np.array([2, 2]), mindspore.int64)
552
+ >>> rate = Tensor(5.0, mindspore.float64)
553
+ >>> output = ops.random_poisson(shape, rate, seed=5, dtype=mindspore.int64)
554
+ >>> print(output.shape, output.dtype)
555
+ (2, 2) Int64
556
+ """
557
+ seed1, seed2 = _get_seed(seed, "random_poisson")
558
+ prim_random_poisson = P.RandomPoisson(seed1, seed2, dtype)
559
+ prim_random_poisson = _set_prim_op_user_data(
560
+ prim_random_poisson, "random_cache", False)
561
+ value = prim_random_poisson(shape, rate)
562
+ return value
563
+
564
+
565
+ @_function_forbid_reuse
566
+ def shuffle(x, seed=None):
567
+ r"""
568
+ Randomly shuffles a Tensor along its first dimension.
569
+
570
+ Args:
571
+ x (Tensor): The Tensor to be shuffled.
572
+ seed (int, optional): Random seed used for random number generation, must be non-negative. If `seed` is 0,
573
+ it will be replaced with a randomly generated value. Default: ``None`` , which will be treated as 0.
574
+
575
+ Returns:
576
+ Tensor. The shape and type are the same as the input `x`.
577
+
578
+ Raises:
579
+ TypeError: If data type of `seed` is not None or non-negative int.
580
+
581
+ Supported Platforms:
582
+ ``Ascend`` ``GPU`` ``CPU``
583
+
584
+ Examples:
585
+ >>> import numpy as np
586
+ >>> from mindspore import Tensor, ops
587
+ >>> from mindspore import dtype as mstype
588
+ >>> x = Tensor(np.array([1, 2, 3, 4]), mstype.float32)
589
+ >>> output = ops.shuffle(x, seed=1)
590
+ >>> print(output)
591
+ [3. 4. 2. 1.]
592
+ """
593
+ seed, seed2 = _get_seed(seed, "shuffle")
594
+ random_shuffle_ = RandomShuffle(seed=seed, seed2=seed2)
595
+ random_shuffle_ = _set_prim_op_user_data(
596
+ random_shuffle_, "random_cache", False)
597
+ output = random_shuffle_(x)
598
+ return output
599
+
600
+
601
+ @_function_forbid_reuse
602
+ def log_uniform_candidate_sampler(true_classes, num_true=1, num_sampled=5, unique=True, range_max=5, seed=0):
603
+ r"""
604
+ Generates random labels with a log-uniform distribution for sampled_candidates.
605
+
606
+ Randomly samples a tensor of sampled classes from the range of integers [0, range_max).
607
+
608
+ .. warning::
609
+ The Ascend backend does not support the reproducibility of random numbers, so
610
+ the `seed` parameter has no effect.
611
+
612
+ Args:
613
+ true_classes (Tensor): The target classes. With data type of int64 and
614
+ shape :math:`(batch\_size, num\_true)` .
615
+ num_true (int): The number of target classes per training example. Default: ``1`` .
616
+ num_sampled (int): The number of classes to randomly sample. Default: ``5`` .
617
+ unique (bool): Determines whether to sample with rejection. If `unique` is ``True`` ,
618
+ all sampled classes in a batch are unique. Default: ``True`` .
619
+ range_max (int): The number of possible classes. When `unique` is ``True`` ,
620
+ `range_max` must be greater than or equal to `num_sampled`. Default: ``5`` .
621
+ seed (int): Random seed, must be non-negative. Default: ``0`` .
622
+
623
+ Returns:
624
+ Tuple of 3 Tensors.
625
+
626
+ - **sampled_candidates** (Tensor) - A Tensor with shape :math:`(num\_sampled,)`
627
+ and the same type as `true_classes`.
628
+ - **true_expected_count** (Tensor) - A Tensor with the same shape as `true_classes` and type float32.
629
+ - **sampled_expected_count** (Tensor) - A Tensor with the same shape as `sampled_candidates` and type float32.
630
+
631
+ Raises:
632
+ TypeError: If `num_true` or `num_sampled` is not an int.
633
+ TypeError: If `unique` is not a bool.
634
+ TypeError: If `range_max` or `seed` is not an int.
635
+ TypeError: If `true_classes` is not a Tensor.
636
+
637
+ Supported Platforms:
638
+ ``Ascend`` ``CPU``
639
+
640
+ Examples:
641
+ >>> import numpy as np
642
+ >>> from mindspore import Tensor, ops
643
+ >>> output1, output2, output3 = ops.log_uniform_candidate_sampler(
644
+ ... Tensor(np.array([[1, 7], [0, 4], [3, 3]])), 2, 5, True, 5)
645
+ >>> print(output1, output2, output3)
646
+ [3 2 0 4 1]
647
+ [[0.92312991 0.49336370]
648
+ [0.99248987 0.65806371]
649
+ [0.73553443 0.73553443]]
650
+ [0.73553443 0.82625800 0.99248987 0.65806371 0.92312991]
651
+
652
+ """
653
+
654
+ sampler = P.LogUniformCandidateSampler(
655
+ num_true, num_sampled, unique, range_max, seed)
656
+ sampler = _set_prim_op_user_data(sampler, "random_cache", False)
657
+ return sampler(true_classes)
658
+
659
+
660
+ @_function_forbid_reuse
661
+ def choice_with_mask(input_x, count=256, seed=None):
662
+ """
663
+ Generates a random sample as index tensor with a mask tensor from a given tensor.
664
+
665
+ The `input_x` must be a tensor whose dimension is not less than 1. If its dimension is greater than or equal to 2,
666
+ the first dimension specifies the number of samples.
667
+ The returned index tensor denotes the index of the nonzero
668
+ sample, and the mask tensor denotes which elements in the index tensor are valid.
669
+
670
+ .. warning::
671
+ The Ascend backend does not support the reproducibility of random numbers, so
672
+ the `seed` parameter has no effect.
673
+
674
+ Args:
675
+ input_x (Tensor[bool]): The input tensor.
676
+ The input tensor rank must be greater than or equal to 1 and less than or equal to 5.
677
+ count (int, optional): Number of items expected to be sampled; must be greater than 0. Default: ``256`` .
678
+ seed (int, optional): Seed is used as entropy source for Random number engines generating pseudo-random numbers.
679
+ Default: ``None`` .
680
+
681
+ Returns:
682
+ Two tensors, the first one is the index tensor and the other one is the mask tensor.
683
+
684
+ - **index** (Tensor) - The output shape is 2-D.
685
+ - **mask** (Tensor) - The output shape is 1-D.
686
+
687
+ Raises:
688
+ TypeError: If `count` is not an int.
689
+ TypeError: If `seed` is not an int.
690
+ TypeError: If `input_x` is not a Tensor.
691
+
692
+ Supported Platforms:
693
+ ``Ascend`` ``GPU`` ``CPU``
694
+
695
+ Examples:
696
+ >>> import numpy as np
697
+ >>> from mindspore import Tensor, ops
698
+ >>> input_x = Tensor(np.ones(shape=[240000, 4]).astype(np.bool_))
699
+ >>> output_y, output_mask = ops.choice_with_mask(input_x)
700
+ >>> result = output_y.shape
701
+ >>> print(result)
702
+ (256, 2)
703
+ >>> result = output_mask.shape
704
+ >>> print(result)
705
+ (256,)
706
+ """
707
+ seed1, seed2 = _get_seed(seed, "choice_with_mask")
708
+ choice_with_mask_ = RandomChoiceWithMask(
709
+ count=count, seed=seed1, seed2=seed2)
710
+ choice_with_mask_ = _set_prim_op_user_data(
711
+ choice_with_mask_, "random_cache", False)
712
+ output = choice_with_mask_(input_x)
713
+ return output
714
+
715
+
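A small usage sketch (not part of the packaged file) of the returned mask: `index` always has `count` rows, and `mask` marks which of those rows are valid samples, so summing the mask gives the number of usable rows:

>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> from mindspore import dtype as mstype
>>> input_x = Tensor(np.ones(shape=[240000, 4]).astype(np.bool_))
>>> index, mask = ops.choice_with_mask(input_x, count=256)
>>> valid = mask.astype(mstype.int32).sum()   # number of valid rows in `index`
>>> print(index.shape, mask.shape)
(256, 2) (256,)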
716
+ @constexpr
717
+ def is_cpu_backend():
718
+ """Check if the CPU is used"""
719
+ return context.get_context('device_target') == 'CPU'
720
+
721
+
722
+ def normal_ext(mean=0.0, std=1.0, size=None, generator=None):
723
+ r"""
724
+ Generates random numbers according to the Normal (or Gaussian) random number distribution.
725
+
726
+ Args:
727
+ mean (Union[float, Tensor], optional): Mean value of each element, the shape of the 'mean' tensor
728
+ should be the same as that of the 'std' tensor. Default: ``0.0``.
729
+ std (Union[float, Tensor], optional): Standard deviation for each element, the shape of the 'std' tensor
730
+ should be the same as that of the 'mean' tensor. The value of std should be greater than or equal to 0.
731
+ Default: ``1.0``.
732
+ size (tuple, optional): output size, where 'mean' and 'std' are constants. Default: ``None``.
733
+ generator (generator, optional): MindSpore generator. Default: ``None``.
734
+
735
+ Returns:
736
+ Outputs a tensor with the same shape as 'mean', or,
737
+ when 'mean' and 'std' are both constants, with the shape specified by 'size'.
738
+
739
+ Raises:
740
+ TypeError: If `mean` or `std` is not Union[float, Tensor].
741
+
742
+ Supported Platforms:
743
+ ``Ascend``
744
+
745
+ Examples:
746
+ >>> import mindspore
747
+ >>> import numpy as np
748
+ >>> from mindspore import ops
749
+ >>> from mindspore import Tensor
750
+ >>> mean = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
751
+ >>> std = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
752
+ >>> output = ops.function.random_func.normal_ext(mean, std)
753
+ >>> print(output.shape)
754
+ (3,)
755
+ """
756
+ if generator is None:
757
+ generator = default_generator
758
+ seed, offset = generator._step(generator_step_) # pylint: disable=protected-access
759
+
760
+ is_mean_tensor = isinstance(mean, Tensor)
761
+ is_std_tensor = isinstance(std, Tensor)
762
+
763
+ if is_mean_tensor and is_std_tensor:
764
+ return normal_tensor_tensor_op(mean, std, seed, offset)
765
+ if is_mean_tensor and not is_std_tensor:
766
+ return normal_tensor_float_op(mean, std, seed, offset)
767
+ if not is_mean_tensor and is_std_tensor:
768
+ return normal_float_tensor_op(mean, std, seed, offset)
769
+ return normal_float_float_op(mean, std, size, seed, offset)
770
+
771
+
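A sketch (not part of the packaged file, assuming an Ascend backend as listed in the docstring) of the four dispatch branches above; which Normal* primitive is invoked depends only on whether `mean` and `std` are Tensors:

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> t = Tensor(np.zeros((2, 3)), mindspore.float32)
>>> a = ops.function.random_func.normal_ext(t, t)       # Tensor mean, Tensor std  -> NormalTensorTensor
>>> b = ops.function.random_func.normal_ext(t, 1.0)     # Tensor mean, float std   -> NormalTensorFloat
>>> c = ops.function.random_func.normal_ext(0.0, t)     # float mean, Tensor std   -> NormalFloatTensor
>>> d = ops.function.random_func.normal_ext(0.0, 1.0, size=(2, 3))  # both floats  -> NormalFloatFloat
>>> print(d.shape)
(2, 3)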
772
+ @_function_forbid_reuse
773
+ def normal(shape, mean, stddev, seed=None):
774
+ """
775
+ Generates random numbers according to the Normal (or Gaussian) random number distribution.
776
+
777
+ .. warning::
778
+ The Ascend backend does not support the reproducibility of random numbers, so
779
+ the `seed` parameter has no effect.
780
+
781
+ Args:
782
+ shape (tuple): The shape of random tensor to be generated.
783
+ The format is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
784
+ mean (Union[Tensor, int, float]): The mean μ distribution parameter, which specifies the location of the peak.
785
+ stddev (Union[Tensor, int, float]): The standard deviation σ distribution parameter. It should be greater than 0.
786
+ seed (int): Seed is used as entropy source for the Random number engines to generate pseudo-random numbers.
787
+ The value must be non-negative. Default: ``None`` , which will be treated as 0.
788
+
789
+ Returns:
790
+ Tensor. The shape should be equal to the broadcasted shape between the input `shape` and shapes
791
+ of `mean` and `stddev`.
792
+ The dtype is [float32, float64].
793
+
794
+ Supported Platforms:
795
+ ``Ascend`` ``GPU`` ``CPU``
796
+
797
+ Examples:
798
+ >>> import mindspore
799
+ >>> import numpy as np
800
+ >>> from mindspore import Tensor, ops
801
+ >>> shape = (3, 1, 2)
802
+ >>> mean = Tensor(np.array([[3, 4], [5, 6]]), mindspore.float32)
803
+ >>> stddev = Tensor(1.0, mindspore.float32)
804
+ >>> output = ops.normal(shape, mean, stddev, seed=5)
805
+ >>> result = output.shape
806
+ >>> print(result)
807
+ (3, 2, 2)
808
+ >>> shape = (3, 1, 3)
809
+ >>> mean = Tensor(np.array([[3, 4, 3], [3, 5, 6]]), mindspore.float32)
810
+ >>> stddev = Tensor(1.0, mindspore.float32)
811
+ >>> output = ops.normal(shape, mean, stddev, seed=5)
812
+ >>> result = output.shape
813
+ >>> print(result)
814
+ (3, 2, 3)
815
+ >>> shape = (3, 1, 3)
816
+ >>> mean = Tensor(np.array([[1, 2, 3], [3, 4, 3], [3, 5, 6]]), mindspore.float32)
817
+ >>> stddev = Tensor(1.0, mindspore.float32)
818
+ >>> output = ops.normal(shape, mean, stddev, seed=5)
819
+ >>> result = output.shape
820
+ >>> print(result)
821
+ (3, 3, 3)
822
+ """
823
+ _check_param("normal", "mean", mean)
824
+ _check_param("normal", "stddev", stddev)
825
+ if not isinstance(mean, Tensor):
826
+ mean = Tensor(mean)
827
+ if not isinstance(stddev, Tensor):
828
+ stddev = Tensor(stddev)
829
+ seed1, seed2 = _get_seed(seed, "normal")
830
+ stdnormal = P.StandardNormal(seed1, seed2)
831
+ stdnormal = _set_prim_op_user_data(stdnormal, "random_cache", False)
832
+ _check_shape(shape)
833
+ random_normal = stdnormal(shape)
834
+ value = random_normal * stddev + mean
835
+ return value
836
+
837
+
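The body of `normal` above is the usual reparameterization: a standard-normal sample of the requested `shape` is scaled by `stddev` and shifted by `mean`, so the output shape is just the NumPy-style broadcast of `shape` with the parameter shapes, matching the docstring examples:

    import numpy as np

    # Broadcast of the requested shape with mean's shape (stddev is a scalar in the examples).
    print(np.broadcast_shapes((3, 1, 2), (2, 2)))  # (3, 2, 2)
    print(np.broadcast_shapes((3, 1, 3), (2, 3)))  # (3, 2, 3)
    print(np.broadcast_shapes((3, 1, 3), (3, 3)))  # (3, 3, 3)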
838
+ @_function_forbid_reuse
839
+ def laplace(shape, mean, lambda_param, seed=None):
840
+ r"""
841
+ Generates random numbers according to the Laplace random number distribution.
842
+ It is defined as:
843
+
844
+ .. math::
845
+ \text{f}(x;μ,λ) = \frac{1}{2λ}\exp(-\frac{|x-μ|}{λ}),
846
+
847
+ .. warning::
848
+ The Ascend backend does not support the reproducibility of random numbers, so
849
+ the `seed` parameter has no effect.
850
+
851
+ Args:
852
+ shape (tuple): The shape of random tensor to be generated.
853
+ The format is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
854
+ mean (Tensor): The mean μ distribution parameter, which specifies the location of the peak.
855
+ With float32 data type.
856
+ lambda_param (Tensor): The parameter used for controlling the variance of this random distribution. The
857
+ variance of Laplace distribution is equal to twice the square of lambda_param. With float32 data type.
858
+ seed (int, optional): Seed is used as entropy source for the random number engines to generate pseudo-random numbers.
859
+ Default: ``None`` , which will be treated as 0.
860
+
861
+ Returns:
862
+ Tensor. The shape should be the broadcasted shape of input `shape` and shapes of `mean` and `lambda_param`.
863
+ The dtype is float32.
864
+
865
+ Supported Platforms:
866
+ ``Ascend`` ``GPU`` ``CPU``
867
+
868
+ Examples:
869
+ >>> import mindspore
870
+ >>> from mindspore import Tensor
871
+ >>> from mindspore import ops as ops
872
+ >>> shape = (2, 3)
873
+ >>> mean = Tensor(1.0, mindspore.float32)
874
+ >>> lambda_param = Tensor(1.0, mindspore.float32)
875
+ >>> output = ops.laplace(shape, mean, lambda_param, seed=5)
876
+ >>> print(output.shape)
877
+ (2, 3)
878
+ """
879
+ mean_dtype = F.dtype(mean)
880
+ lambda_param_dtype = F.dtype(lambda_param)
881
+ const_utils.check_tensors_dtype_same(mean_dtype, mstype.float32, "laplace")
882
+ const_utils.check_tensors_dtype_same(
883
+ lambda_param_dtype, mstype.float32, "laplace")
884
+ seed1, seed2 = _get_seed(seed, "laplace")
885
+ stdlaplace = P.StandardLaplace(seed1, seed2)
886
+ stdlaplace = _set_prim_op_user_data(stdlaplace, "random_cache", False)
887
+ _check_shape(shape)
888
+ rnd = stdlaplace(shape)
889
+ value = rnd * lambda_param + mean
890
+ return value
891
+
892
+
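The body of `laplace` above is a location-scale transform of `StandardLaplace` (`rnd * lambda_param + mean`), consistent with the docstring's claim that the variance is twice the square of `lambda_param`. A quick NumPy sanity check of that claim, using NumPy's equivalent (loc, scale) parameterization:

    import numpy as np

    rng = np.random.default_rng(0)
    mu, lam = 1.0, 2.0
    samples = rng.laplace(loc=mu, scale=lam, size=1_000_000)
    print(round(samples.mean(), 2))  # close to mu = 1.0
    print(round(samples.var(), 2))   # close to 2 * lam**2 = 8.0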
893
+ @_function_forbid_reuse
894
+ def gamma(shape, alpha, beta, seed=None):
895
+ r"""
896
+ Generates random numbers according to the Gamma random number distribution.
897
+
898
+ .. warning::
899
+ The Ascend backend does not support the reproducibility of random numbers, so
900
+ the `seed` parameter has no effect.
901
+
902
+ Args:
903
+ shape (tuple): The shape of random tensor to be generated.
904
+ alpha (Tensor): The :math:`\alpha` distribution parameter. It should be greater than 0 with float32 data type.
905
+ beta (Tensor): The :math:`\beta` distribution parameter. It should be greater than 0 with float32 data type.
906
+ seed (int, optional): Seed is used as entropy source for the random number engines to generate
907
+ pseudo-random numbers, must be non-negative. Default: ``None`` .
908
+
909
+ Returns:
910
+ Tensor. The shape should be equal to the broadcasted shape between the input `shape` and shapes
911
+ of `alpha` and `beta`.
912
+ The dtype is float32.
913
+
914
+ Raises:
915
+ TypeError: If `shape` is not a tuple.
916
+ TypeError: If `alpha` or `beta` is not a Tensor.
917
+ TypeError: If `seed` is not an int.
918
+ TypeError: If dtype of `alpha` or `beta` is not float32.
919
+
920
+ Supported Platforms:
921
+ ``Ascend``
922
+
923
+ Examples:
924
+ >>> import mindspore
925
+ >>> import numpy as np
926
+ >>> from mindspore import Tensor, ops
927
+ >>> # case 1: alpha_shape is (2, 2)
928
+ >>> shape = (3, 1, 2)
929
+ >>> alpha = Tensor(np.array([[3, 4], [5, 6]]), mindspore.float32)
930
+ >>> beta = Tensor(np.array([1.0]), mindspore.float32)
931
+ >>> output = ops.gamma(shape, alpha, beta, seed=5)
932
+ >>> result = output.shape
933
+ >>> print(result)
934
+ (3, 2, 2)
935
+ >>> # case 2: alpha_shape is (2, 3), so shape is (3, 1, 3)
936
+ >>> shape = (3, 1, 3)
937
+ >>> alpha = Tensor(np.array([[1, 3, 4], [2, 5, 6]]), mindspore.float32)
938
+ >>> beta = Tensor(np.array([1.0]), mindspore.float32)
939
+ >>> output = ops.gamma(shape, alpha, beta, seed=5)
940
+ >>> result = output.shape
941
+ >>> print(result)
942
+ (3, 2, 3)
943
+ >>> # case 3: beta_shape is (1, 2), the output is different.
944
+ >>> shape = (3, 1, 2)
945
+ >>> alpha = Tensor(np.array([[3, 4], [5, 6]]), mindspore.float32)
946
+ >>> beta = Tensor(np.array([1.0, 2]), mindspore.float32)
947
+ >>> output = ops.gamma(shape, alpha, beta, seed=5)
948
+ >>> print(output)
949
+ [[[ 2.2132034 5.8855834]
950
+ [ 3.8825176 8.6066265]]
951
+ [[ 3.3981476 7.5805717]
952
+ [ 3.7190282 19.941492 ]]
953
+ [[ 2.9512358 2.5969937]
954
+ [ 3.786061 5.160872 ]]]
955
+ >>> # case 4: beta_shape is (2, 1), the output is different.
956
+ >>> shape = (3, 1, 2)
957
+ >>> alpha = Tensor(np.array([[3, 4], [5, 6]]), mindspore.float32)
958
+ >>> beta = Tensor(np.array([[1.0], [2.0]]), mindspore.float32)
959
+ >>> output = ops.gamma(shape, alpha, beta, seed=5)
960
+ >>> print(output)
961
+ [[[ 5.6085486 7.8280783]
962
+ [ 15.97684 16.116285]]
963
+ [[ 1.8347423 1.713663]
964
+ [ 3.2434065 15.667398]]
965
+ [[ 4.2922077 7.3365674]
966
+ [ 5.3876944 13.159832 ]]]
967
+ """
968
+ seed1, seed2 = _get_seed(seed, "gamma")
969
+ gamma_v = P.Gamma(seed1, seed2)
970
+ gamma_v = _set_prim_op_user_data(gamma_v, "random_cache", False)
971
+ value = gamma_v(shape, alpha, beta)
972
+ return value
973
+
974
+
975
+ @_primexpr
976
+ def _generate_shapes(shape):
977
+ """Generate shapes for randn and rand."""
978
+ if not shape:
979
+ size = (1,)
980
+ elif len(shape) == 1:
981
+ if isinstance(shape[0], int):
982
+ size = shape
983
+ elif isinstance(shape[0], list):
984
+ size = tuple(shape[0])
985
+ elif isinstance(shape[0], tuple):
986
+ size = shape[0]
987
+ else:
988
+ raise TypeError(f"If the length of the argument 'shape' is 1, the type of the argument 'shape' must be "
989
+ f"one of ['int', 'list', 'tuple'], but got {shape[0]}.")
990
+ else:
991
+ for value in shape:
992
+ if not isinstance(value, int):
993
+ raise TypeError(f"If the length of the argument 'shape' is > 1, the type of the argument 'shape' must "
994
+ f"all be int, but got {value}.")
995
+ size = shape
996
+ return size
997
+
998
+
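`_generate_shapes` normalizes the variadic `size` argument of `rand`/`randn`, so the following spellings are equivalent (a sketch; only the shapes are deterministic):

    from mindspore import ops

    print(ops.rand(2, 3).shape)    # ints as varargs     -> (2, 3)
    print(ops.rand((2, 3)).shape)  # a single tuple      -> (2, 3)
    print(ops.rand([2, 3]).shape)  # a single list       -> (2, 3)
    print(ops.rand().shape)        # empty size defaults -> (1,)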
999
+ @_function_forbid_reuse
1000
+ def rand(*size, dtype=None, seed=None):
1001
+ r"""
1002
+ Returns a new tensor that fills numbers from the uniform distribution over an interval :math:`[0, 1)`
1003
+ based on the given shape and dtype.
1004
+
1005
+ .. warning::
1006
+ The Ascend backend does not support the reproducibility of random numbers, so
1007
+ the `seed` parameter has no effect.
1008
+
1009
+ Args:
1010
+ size (Union[int, tuple(int), list(int)]): Shape of the new tensor, e.g. :math:`(2, 3)` or :math:`2`.
1011
+
1012
+ Keyword Args:
1013
+ dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype, it must be float type. If None,
1014
+ `mindspore.float32` will be applied. Default: ``None`` .
1015
+ seed (int, optional): Random seed, must be greater than or equal to 0. Default: ``None`` , and ``0`` will be used.
1016
+
1017
+ Returns:
1018
+ Tensor, with the designated shape and dtype, filled with random numbers from the uniform distribution on
1019
+ the interval :math:`[0, 1)`.
1020
+
1021
+ Raises:
1022
+ TypeError: `seed` is not a non-negative integer.
1023
+ ValueError: If `dtype` is not a `mstype.float_type` type.
1024
+
1025
+ Supported Platforms:
1026
+ ``Ascend`` ``GPU`` ``CPU``
1027
+
1028
+ Examples:
1029
+ >>> from mindspore import ops
1030
+ >>> print(ops.rand((2,3)))
1031
+ [[4.1702199e-01 9.9718481e-01 7.2032452e-01]
1032
+ [9.3255734e-01 1.1438108e-04 1.2812445e-01]]
1033
+ """
1034
+ if dtype is None:
1035
+ dtype = mstype.float32
1036
+ elif dtype not in mstype.float_type:
1037
+ raise ValueError(
1038
+ f"For 'rand', the 'dtype' must be a float type, but got {dtype}.")
1039
+ shape = _generate_shapes(size)
1040
+ seed1, seed2 = _get_seed(seed, 'rand')
1041
+ rand_op = P.UniformReal(seed1, seed2)
1042
+ rand_op = _set_prim_op_user_data(rand_op, "random_cache", False)
1043
+ output = rand_op(shape)
1044
+ return cast_(output, dtype)
1045
+
1046
+
1047
+ @_function_forbid_reuse
1048
+ def rand_like(input, seed=None, *, dtype=None):
1049
+ r"""
1050
+ Returns a new tensor that fills numbers from the uniform distribution over an interval :math:`[0, 1)`
1051
+ based on the given shape and dtype.
1052
+
1053
+ .. warning::
1054
+ The Ascend backend does not support the reproducibility of random numbers, so
1055
+ the `seed` parameter has no effect.
1056
+
1057
+ Args:
1058
+ input (Tensor): Input Tensor to specify the output shape and its default dtype.
1059
+ seed (int, optional): Random seed, must be greater than or equal to 0. Default: ``None`` , and ``0`` will be used.
1060
+
1061
+ Keyword Args:
1062
+ dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype, it must be float type. If None,
1063
+ the same dtype of `input` will be applied. Default: ``None`` .
1064
+
1065
+ Returns:
1066
+ Tensor, with the designated shape and dtype, filled with random numbers from the uniform distribution on
1067
+ the interval :math:`[0, 1)`.
1068
+
1069
+ Raises:
1070
+ TypeError: If `seed` is not a non-negative integer.
1071
+ ValueError: If `dtype` is not a `mstype.float_type` type.
1072
+
1073
+ Supported Platforms:
1074
+ ``Ascend`` ``GPU`` ``CPU``
1075
+
1076
+ Examples:
1077
+ >>> import mindspore as ms
1078
+ >>> from mindspore import Tensor, ops
1079
+ >>> a = Tensor([[2, 3, 4], [1, 2, 3]])
1080
+ >>> print(ops.rand_like(a, dtype=ms.float32))
1081
+ [[4.1702199e-01 9.9718481e-01 7.2032452e-01]
1082
+ [9.3255734e-01 1.1438108e-04 1.2812445e-01]]
1083
+ """
1084
+ if not isinstance(input, Tensor):
1085
+ raise TypeError(
1086
+ f"For 'rand_like', the 'input' must be a Tensor, but got {type(input)}")
1087
+ if dtype is None:
1088
+ dtype = input.dtype
1089
+ if dtype not in mstype.float_type:
1090
+ raise ValueError(
1091
+ f"For 'rand_like', the 'dtype' must be a float type, but got {dtype}.")
1092
+ shape = input.shape
1093
+ seed1, seed2 = _get_seed(seed, 'rand_like')
1094
+ rand_op = P.UniformReal(seed1, seed2)
1095
+ rand_op = _set_prim_op_user_data(rand_op, "random_cache", False)
1096
+ output = rand_op(shape)
1097
+ return cast_(output, dtype)
1098
+
1099
+
1100
+ @_function_forbid_reuse
1101
+ def rand_ext(*size, generator=None, dtype=None):
1102
+ r"""
1103
+ Returns a new tensor that fills numbers from the uniform distribution over an interval :math:`[0, 1)`
1104
+ based on the given shape and dtype.
1105
+
1106
+ Args:
1107
+ size (Union[int, tuple(int), list(int)]): Shape of the new tensor, e.g. :math:`(2, 3)` or :math:`2`.
1108
+
1109
+ Keyword Args:
1110
+ generator (:class:`mindspore.Generator`, optional): a pseudorandom number generator.
1111
+ Default: ``None``, uses the default pseudorandom number generator.
1112
+ dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype, it must be float type. If None,
1113
+ `mindspore.float32` will be applied. Default: ``None`` .
1114
+
1115
+ Returns:
1116
+ Tensor, with the designated shape and dtype, filled with random numbers from the uniform distribution on
1117
+ the interval :math:`[0, 1)`.
1118
+
1119
+ Raises:
1120
+ ValueError: If `dtype` is not a `mstype.float_type` type.
1121
+
1122
+ Supported Platforms:
1123
+ ``Ascend``
1124
+
1125
+ Examples:
1126
+ >>> from mindspore import ops
1127
+ >>> print(ops.function.random_func.rand_ext(2, 3).shape)
1128
+ (2, 3)
1129
+ """
1130
+ if not generator:
1131
+ generator = default_generator
1132
+ seed, offset = generator._step(generator_step_) # pylint: disable=protected-access
1133
+ return rand_ext_(size, seed, offset, dtype)
1134
+
1135
+
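`rand_ext` draws its seed/offset pair from the supplied generator (or from `default_generator`), so reseeding the same generator should replay the same values. A sketch, assuming the public `mindspore.Generator` class with a `manual_seed` method that backs `default_generator`:

    import mindspore as ms
    from mindspore import ops

    g = ms.Generator()      # assumed public Generator API
    g.manual_seed(42)
    a = ops.function.random_func.rand_ext(2, 3, generator=g)
    g.manual_seed(42)       # reset, so the next call replays the same seed/offset
    b = ops.function.random_func.rand_ext(2, 3, generator=g)
    # a and b are expected to be elementwise equal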
1136
+ @_function_forbid_reuse
1137
+ def rand_like_ext(input, *, dtype=None):
1138
+ r"""
1139
+ Returns a new tensor that fills numbers from the uniform distribution over an interval :math:`[0, 1)`
1140
+ based on the given dtype and shape of the input tensor.
1141
+
1142
+ Args:
1143
+ input (Tensor): Input Tensor to specify the output shape and its default dtype.
1144
+
1145
+ Keyword Args:
1146
+ dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype, it must be float type. If None,
1147
+ the same dtype of `input` will be applied. Default: ``None`` .
1148
+
1149
+ Returns:
1150
+ Tensor, with the designated shape and dtype, filled with random numbers from the uniform distribution on
1151
+ the interval :math:`[0, 1)`.
1152
+
1153
+ Raises:
1154
+ ValueError: If `dtype` is not a `mstype.float_type` type.
1155
+
1156
+ Supported Platforms:
1157
+ ``Ascend``
1158
+
1159
+ Examples:
1160
+ >>> import mindspore as ms
1161
+ >>> from mindspore import Tensor, ops
1162
+ >>> a = Tensor([[2, 3, 4], [1, 2, 3]])
1163
+ >>> print(ops.function.random_func.rand_like_ext(a, dtype=ms.float32).shape)
1164
+ (2, 3)
1165
+ """
1166
+ seed, offset = default_generator._step(generator_step_) # pylint: disable=protected-access
1167
+ return rand_like_ext_(input, seed, offset, dtype)
1168
+
1169
+
1170
+ @_function_forbid_reuse
1171
+ def randn(*size, dtype=None, seed=None):
1172
+ r"""
1173
+ Returns a new Tensor with given shape and dtype, filled with a sample (or samples)
1174
+ from the standard normal distribution.
1175
+
1176
+ .. warning::
1177
+ The Ascend backend does not support the reproducibility of random numbers, so
1178
+ the `seed` parameter has no effect.
1179
+
1180
+ Args:
1181
+ size (Union[int, tuple(int), list(int)]): Shape of the new tensor, e.g., :math:`(2, 3)` or :math:`2`.
1182
+
1183
+ Keyword Args:
1184
+ dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype, it must be float type. If None,
1185
+ `mindspore.float32` will be used. Default: ``None`` .
1186
+ seed (int, optional): Random seed, must be greater than or equal to 0. Default: ``None`` , and 0 will be used.
1187
+
1188
+ Returns:
1189
+ Tensor, with the designated shape and dtype, filled with a sample (or samples) from the
1190
+ "standard normal" distribution.
1191
+
1192
+ Raises:
1193
+ TypeError: `seed` is not a non-negative integer.
1194
+ ValueError: If `dtype` is not a `mstype.float_type`.
1195
+ ValueError: If `size` contains invalid number.
1196
+
1197
+ Supported Platforms:
1198
+ ``Ascend`` ``GPU`` ``CPU``
1199
+
1200
+ Examples:
1201
+ >>> from mindspore import ops
1202
+ >>> print(ops.randn((2, 2)))
1203
+ [[ 0.30639967 -0.42438635]
1204
+ [-0.4287376 1.3054721 ]]
1205
+ """
1206
+ if dtype is None:
1207
+ dtype = mstype.float32
1208
+ elif dtype not in mstype.float_type:
1209
+ raise ValueError(
1210
+ f"For 'randn', the 'dtype' must be a float type, but got {dtype}.")
1211
+ shape = _generate_shapes(size)
1212
+ seed1, seed2 = _get_seed(seed, 'randn')
1213
+ rand_op = P.StandardNormal(seed1, seed2)
1214
+ rand_op = _set_prim_op_user_data(rand_op, "random_cache", False)
1215
+ output = rand_op(shape)
1216
+ return cast_(output, dtype)
1217
+
1218
+
1219
+ @_function_forbid_reuse
1220
+ def randn_like(input, seed=None, *, dtype=None):
1221
+ r"""
1222
+ Returns a new Tensor with given shape and dtype, filled with a sample (or samples) from the standard normal
1223
+ distribution.
1224
+
1225
+ .. warning::
1226
+ The Ascend backend does not support the reproducibility of random numbers, so
1227
+ the `seed` parameter has no effect.
1228
+
1229
+ Args:
1230
+ input (Tensor): Input Tensor to specify the output shape and its default dtype.
1231
+ seed (int, optional): Random seed, must be greater than or equal to 0. Default: ``None`` , and 0 will be used.
1232
+
1233
+ Keyword Args:
1234
+ dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype, it must be float type. If None,
1235
+ `mindspore.float32` will be used. Default: ``None`` .
1236
+
1237
+ Returns:
1238
+ Tensor, with the designated shape and dtype, filled with a sample (or samples) from the
1239
+ "standard normal" distribution.
1240
+
1241
+ Raises:
1242
+ TypeError: `seed` is not a non-negative integer.
1243
+ ValueError: If `dtype` is not a `mstype.float_type`.
1244
+
1245
+ Supported Platforms:
1246
+ ``Ascend`` ``GPU`` ``CPU``
1247
+
1248
+ Examples:
1249
+ >>> import mindspore as ms
1250
+ >>> from mindspore import Tensor, ops
1251
+ >>> a = Tensor([[1, 2, 3], [4, 5, 6]])
1252
+ >>> print(ops.randn_like(a, dtype=ms.float32))
1253
+ [[ 0.30639967 -0.42438635 -0.20454668]
1254
+ [-0.4287376 1.3054721 0.64747655]]
1255
+ """
1256
+ if not isinstance(input, Tensor):
1257
+ raise TypeError(
1258
+ f"For 'randn_like', the 'input' must be a Tensor, but got {type(input)}")
1259
+ if dtype is None:
1260
+ dtype = mstype.float32
1261
+ if dtype not in mstype.float_type:
1262
+ raise ValueError(
1263
+ f"For 'randn_like', the 'dtype' must be a float type, but got {dtype}.")
1264
+ shape = input.shape
1265
+ seed1, seed2 = _get_seed(seed, 'randn_like')
1266
+ rand_op = P.StandardNormal(seed1, seed2)
1267
+ rand_op = _set_prim_op_user_data(rand_op, "random_cache", False)
1268
+ output = rand_op(shape)
1269
+ return cast_(output, dtype)
1270
+
1271
+
1272
+ @_function_forbid_reuse
1273
+ def randint(low, high, size, seed=None, *, dtype=None):
1274
+ r"""
1275
+ Returns a Tensor whose elements are random integers in the range of [ `low` , `high` ) .
1276
+
1277
+ .. warning::
1278
+ The Ascend backend does not support the reproducibility of random numbers, so
1279
+ the `seed` parameter has no effect.
1280
+
1281
+ Args:
1282
+ low (int): Start value of interval.
1283
+ high (int): End value of interval.
1284
+ size (tuple): Shape of the new tensor.
1285
+ seed (int, optional): Random seed, must be greater than or equal to 0. Default: ``None`` , and ``0`` will be used.
1286
+
1287
+ Keyword Args:
1288
+ dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype, it must be int type. If ``None`` ,
1289
+ `mindspore.int64` will be used. Default: ``None`` .
1290
+
1291
+ Returns:
1292
+ Tensor, with the designated shape and dtype, filled with random integers from low (inclusive)
1293
+ to high (exclusive).
1294
+
1295
+ Raises:
1296
+ TypeError: `seed` is not a non-negative integer.
1297
+ TypeError: `size` is not a tuple.
1298
+ TypeError: `low` or `high` is not an integer.
1299
+ ValueError: If `dtype` is not a `mstype.int_type`.
1300
+
1301
+
1302
+ Supported Platforms:
1303
+ ``Ascend`` ``GPU`` ``CPU``
1304
+
1305
+ Examples:
1306
+ >>> from mindspore import ops
1307
+ >>> print(ops.randint(1, 10, (2,3)))
1308
+ [[4 9 7]
1309
+ [9 1 2]]
1310
+ """
1311
+ if dtype is None:
1312
+ dtype = mstype.int64
1313
+ elif dtype not in mstype.int_type:
1314
+ raise ValueError(
1315
+ f"For 'randint', the 'dtype' must be an int type, but got {dtype}.")
1316
+ if not isinstance(size, tuple):
1317
+ raise ValueError(
1318
+ f"For 'randint', the input 'size' must be a tuple, but got {size}.")
1319
+ if not isinstance(low, int) or isinstance(low, bool):
1320
+ raise TypeError(
1321
+ f"For 'randint', 'low' must be an int, but got {type(low)}.")
1322
+ if not isinstance(high, int) or isinstance(high, bool):
1323
+ raise TypeError(
1324
+ f"For 'randint', 'high' must be an int, but got {type(high)}.")
1325
+ seed1, seed2 = _get_seed(seed, 'randint')
1326
+ rand_op = P.UniformInt(seed1, seed2)
1327
+ rand_op = _set_prim_op_user_data(rand_op, "random_cache", False)
1328
+ low_ = Tensor(low, mstype.int32)
1329
+ high_ = Tensor(high, mstype.int32)
1330
+ output = rand_op(size, low_, high_)
1331
+ return cast_(output, dtype)
1332
+
1333
+
1334
+ @_function_forbid_reuse
1335
+ def randint_like(input, low, high, seed=None, *, dtype=None):
1336
+ r"""
1337
+ Returns a tensor with the same shape as Tensor `input` whose elements are random integers in the range
1338
+ of [ `low` , `high` ) .
1339
+
1340
+ .. warning::
1341
+ The Ascend backend does not support the reproducibility of random numbers, so
1342
+ the `seed` parameter has no effect.
1343
+
1344
+ Args:
1345
+ input (Tensor): Input Tensor to specify the output shape and its default dtype.
1346
+ low (int): Start value of interval.
1347
+ high (int): End value of interval.
1348
+ seed (int, optional): Random seed, must be greater than or equal to 0. Default: ``None`` , and 0 will be used.
1349
+
1350
+ Keyword Args:
1351
+ dtype (:class:`mindspore.dtype`, optional): Designated tensor dtype, it must be int type. If ``None`` ,
1352
+ the same dtype of `input` will be applied. Default: ``None`` .
1353
+
1354
+ Returns:
1355
+ Tensor, with the designated shape and dtype, filled with random integers from low (inclusive)
1356
+ to high (exclusive).
1357
+
1358
+ Raises:
1359
+ TypeError: `seed` is not a non-negative integer.
1360
+ TypeError: `low` or `high` is not an integer.
1361
+ ValueError: If `dtype` is not a `mstype.int_type`.
1362
+
1363
+ Supported Platforms:
1364
+ ``Ascend`` ``GPU`` ``CPU``
1365
+
1366
+ Examples:
1367
+ >>> from mindspore import Tensor, ops
1368
+ >>> a = Tensor([[1, 2, 3], [3, 2, 1]])
1369
+ >>> print(ops.randint_like(a, 1, 10))
1370
+ [[4 9 7]
1371
+ [9 1 2]]
1372
+ """
1373
+ if not isinstance(input, Tensor):
1374
+ raise TypeError(
1375
+ f"For 'randint_like', the 'input' must be a Tensor, but got {type(input)}")
1376
+ if dtype is None:
1377
+ dtype = input.dtype
1378
+ if dtype not in mstype.int_type:
1379
+ raise ValueError(
1380
+ f"For 'randint_like', the 'dtype' must be an int type, but got {dtype}.")
1381
+ if not isinstance(low, int) or isinstance(low, bool):
1382
+ raise TypeError(
1383
+ f"For 'randint_like', 'low' must be an int, but got {type(low)}.")
1384
+ if not isinstance(high, int) or isinstance(high, bool):
1385
+ raise TypeError(
1386
+ f"For 'randint_like', 'high' must be an int, but got {type(high)}.")
1387
+ size = input.shape
1388
+ seed1, seed2 = _get_seed(seed, 'randint_like')
1389
+ rand_op = P.UniformInt(seed1, seed2)
1390
+ rand_op = _set_prim_op_user_data(rand_op, "random_cache", False)
1391
+ low_ = Tensor(low, mstype.int32)
1392
+ high_ = Tensor(high, mstype.int32)
1393
+ size_ = Tensor(size, mstype.int32)
1394
+ output = rand_op(size_, low_, high_)
1395
+ return cast_(output, dtype)
1396
+
1397
+
1398
+ @_function_forbid_reuse
1399
+ def poisson(shape, mean, seed=None):
1400
+ r"""
1401
+ The ops.poisson interface is deprecated. Please use :class:`mindspore.ops.random_poisson` instead.
1402
+ Generates random numbers according to the Poisson random number distribution.
1403
+
1404
+ .. math::
1405
+
1406
+ \text{P}(i|μ) = \frac{\exp(-μ)μ^{i}}{i!}
1407
+
1408
+ Args:
1409
+ shape (tuple): The shape of random tensor to be generated.
1410
+ The format is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
1411
+ mean (Tensor): The mean μ distribution parameter. It should be greater than 0 with float32 data type.
1412
+ seed (int): Seed is used as entropy source for the random number engines to generate pseudo-random numbers
1413
+ and must be non-negative. Default: ``None`` , which will be treated as 0.
1414
+
1415
+ Returns:
1416
+ Tensor. The shape should be equal to the broadcasted shape between the input `shape` and the shape of `mean`.
1417
+ The dtype is float32.
1418
+
1419
+ Raises:
1420
+ TypeError: If `shape` is not a tuple.
1421
+ TypeError: If `mean` is not a Tensor or its dtype is not float32.
1422
+ TypeError: If `seed` is not an int.
1423
+
1424
+ Supported Platforms:
1425
+ deprecated
1426
+
1427
+ Examples:
1428
+ >>> from mindspore import Tensor, ops
1429
+ >>> import mindspore
+ >>> import numpy as np
1430
+ >>> # case 1: It can be broadcast.
1431
+ >>> shape = (4, 1)
1432
+ >>> mean = Tensor(np.array([5.0, 10.0]), mindspore.float32)
1433
+ >>> output = ops.poisson(shape, mean, seed=5)
1434
+ >>> result = output.shape
1435
+ >>> print(result)
1436
+ (4, 2)
1437
+ >>> # case 2: It can not be broadcast. It is recommended to use the same shape.
1438
+ >>> shape = (2, 2)
1439
+ >>> mean = Tensor(np.array([[5.0, 10.0], [5.0, 1.0]]), mindspore.float32)
1440
+ >>> output = ops.poisson(shape, mean, seed=5)
1441
+ >>> result = output.shape
1442
+ >>> print(result)
1443
+ (2, 2)
1444
+ """
1445
+ seed1, seed2 = _get_seed(seed, "poisson")
1446
+ random_poisson_op = P.Poisson(seed1, seed2)
1447
+ random_poisson_op = _set_prim_op_user_data(
1448
+ random_poisson_op, "random_cache", False)
1449
+ value = random_poisson_op(shape, mean)
1450
+ return value
1451
+
1452
+
1453
+ @_function_forbid_reuse
1454
+ def multinomial(input, num_samples, replacement=True, seed=None):
1455
+ r"""
1456
+ Returns a tensor sampled from the multinomial probability distribution located in the corresponding
1457
+ row of the input tensor.
1458
+
1459
+ The polynomial distribution is a probability distribution that generalizes the binomial distribution formula to
1460
+ multiple states. In the polynomial distribution, each event has a fixed probability, and the sum of these
1461
+ probabilities is 1. The purpose of the `mindspore.ops.multinomial` interface is to perform `num_samples` sampling
1462
+ on the input `input`, and the output tensor is the index of the input tensor for each sampling.
1463
+ The values in `input` represent the probability of selecting the corresponding index for each sampling.
1464
+
1465
+ Here is an extreme example for better understanding. Suppose we have an input probability tensor with
1466
+ values `Tensor([90 / 100, 10 / 100, 0], mindspore.float32)`, which means we can sample three indices,
1467
+ namely index 0, index 1, and index 2, with probabilities of 90%, 10%, and 0%, respectively. We perform n samplings,
1468
+ and the resulting sequence is the calculation result of the polynomial distribution, with a length equal to the
1469
+ number of samplings.
1470
+
1471
+ In case 1 of the sample code, we perform two non-replacement samplings (`replacement` is `False`).
1472
+ The calculation result is most likely `[0, 1]`, and less likely `[1, 0]`. Since the probability of selecting
1473
+ index 0 is 90% for each sampling, the first result is most likely to be index 0. Since the probability of selecting
1474
+ index 2 is 0, index 2 cannot appear in the sampling result. Therefore, the second result must be index 1,
1475
+ and the resulting sequence is `[0, 1]`.
1476
+
1477
+ In case 2 of the sample code, we perform 10 replacement samplings (`replacement` is `True`).
1478
+ As expected, about 90% of the sampling results are index 0.
1479
+
1480
+ In case 3 of the sample code, we extend the input to 2 dimensions, and the sampling results
1481
+ in each dimension also match our sampling expectations.
1482
+
1483
+ Note:
1484
+ The rows of input do not need to sum to one (in which case we use the values as weights),
1485
+ but must be non-negative, finite and have a non-zero sum. When using values as weights, it can be understood as
1486
+ normalizing the input along the last dimension.
1487
+
1488
+ .. warning::
1489
+ The Ascend backend does not support the reproducibility of random numbers, so
1490
+ the `seed` parameter has no effect.
1491
+
1492
+ Args:
1493
+ input (Tensor): The input tensor containing probabilities, must be 1 or 2 dimensions, with
1494
+ float32 data type.
1495
+ num_samples (int): Number of samples to draw.
1496
+ replacement (bool, optional): Whether to draw with replacement or not. Default: ``True`` .
1497
+ seed (int, optional): Seed is used as entropy source for the random number engines to generate
1498
+ pseudo-random numbers, must be non-negative. Default: ``None`` .
1499
+
1500
+ Returns:
1501
+ Tensor with the same number of rows as `input`. The number of sampled indices in each row is `num_samples`.
1502
+ The dtype is int32.
1503
+
1504
+ Raises:
1505
+ TypeError: If `input` is not a Tensor or its dtype is not float32.
1506
+ TypeError: If `num_samples` is not an int.
1507
+ TypeError: If `seed` is neither an int nor None.
1508
+
1509
+ Supported Platforms:
1510
+ ``Ascend`` ``GPU`` ``CPU``
1511
+
1512
+ Examples:
1513
+ >>> import mindspore
1514
+ >>> from mindspore import Tensor, ops
1515
+ >>> from mindspore import dtype as mstype
1516
+ >>> # case 1: The output is random, and the length of the output is the same as num_sample.
1517
+ >>> # replacement is False.
1518
+ >>> input1 = Tensor([90 / 100, 10 / 100, 0], mindspore.float32)
1519
+ >>> input2 = Tensor([90, 10, 0], mindspore.float32)
1520
+ >>> # input1 and input2 have the same meaning.
1521
+ >>> output1 = ops.multinomial(input1, 2, replacement=False)
1522
+ >>> output2 = ops.multinomial(input2, 2, replacement=False)
1523
+ >>> # print(output1)
1524
+ >>> # [0 1]
1525
+ >>> # print(output2)
1526
+ >>> # [0 1]
1527
+ >>> print(len(output1))
1528
+ 2
1529
+ >>> print(len(output2))
1530
+ 2
1531
+ >>> # case 2: The output is random, and the length of the output is the same as num_sample.
1532
+ >>> # replacement is True.
1533
+ >>> output3 = ops.multinomial(input1, 10)
1534
+ >>> # print(output3)
1535
+ >>> # [0 0 1 0 0 0 0 0 0 0]
1536
+ >>> print(len(output3))
1537
+ 10
1538
+ >>> # case 3: The output is random, and the length of the output is the same as num_sample.
1539
+ >>> # replacement is True.
1540
+ >>> # rank is 2
1541
+ >>> input4 = Tensor([[90, 10, 0], [10, 90, 0]], mstype.float32)
1542
+ >>> output4 = ops.multinomial(input4, 10)
1543
+ >>> # print(output4)
1544
+ >>> # [[0 0 0 0 0 0 0 0 1 0]
1545
+ >>> # [1 1 1 1 1 0 1 1 1 1]]
1546
+ """
1547
+ def _check_valid_dim(dim, name):
1548
+ if dim not in (1, 2):
1549
+ raise ValueError(
1550
+ f"For '{name}', the dimension of inputs must be 1d or 2d, but got {dim}.")
1551
+
1552
+ _check_valid_dim(len(shape_(input)), "multinomial")
1553
+ seed1, seed2 = _get_seed(seed, "multinomial")
1554
+ if not replacement:
1555
+ if shape_(input)[-1] < num_samples:
1556
+ const_utils.raise_value_error(f"For 'multinomial', 'num_samples' must not be greater than "
1557
+ f"the last dimension of 'input' when 'replacement' is False, "
1558
+ f"but got 'num_samples': {num_samples} and "
1559
+ f"'replacement': {replacement}")
1560
+ n_dist = 1
1561
+ if len(shape_(input)) > 1:
1562
+ n_dist = shape_(input)[-2]
1563
+ random_uniform_real = P.UniformReal(seed1, seed2)
1564
+ random_cache_op = _set_prim_op_user_data(
1565
+ random_uniform_real, "random_cache", False)
1566
+ random_uniform = random_cache_op((n_dist * shape_(input)[-1],))
1567
+ if n_dist != 1:
1568
+ random_uniform = reshape_(
1569
+ random_uniform, (n_dist, shape_(input)[-1]))
1570
+
1571
+ vals = real_div_(log_(random_uniform), input + 1e-6)
1572
+ _, indices = top_k_(vals, num_samples)
1573
+ return indices
1574
+ random_nomial = P.Multinomial(seed1, seed2)
1575
+ random_nomial = _set_prim_op_user_data(
1576
+ random_nomial, "random_cache", False)
1577
+ return random_nomial(input, num_samples)
1578
+
1579
+
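The `replacement=False` branch of `multinomial` above samples without replacement by computing `log(u) / (input + 1e-6)` for uniform `u` and keeping the top `num_samples` entries, which is the Efraimidis-Spirakis weighted-key trick. A small NumPy sketch of the same idea (the helper name is illustrative, not part of this module):

    import numpy as np

    def weighted_sample_no_replacement(weights, num_samples, rng=None):
        # Larger weights push log(u) / w closer to zero, so the top-k keys
        # select indices with probability proportional to the weights.
        rng = np.random.default_rng() if rng is None else rng
        w = np.asarray(weights, dtype=float)
        keys = np.log(rng.uniform(size=w.shape)) / (w + 1e-6)
        return np.argsort(keys)[..., ::-1][..., :num_samples]

    print(weighted_sample_no_replacement([0.9, 0.1, 0.0], 2))  # usually [0 1]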
1580
+ @_function_forbid_reuse
1581
+ def multinomial_ext(input, num_samples, replacement=False, *, generator=None):
1582
+ r"""
1583
+ Returns a tensor sampled from the multinomial probability distribution located in the corresponding
1584
+ row of the input tensor.
1585
+
1586
+ The polynomial distribution is a probability distribution that generalizes the binomial distribution formula to
1587
+ multiple states. In the polynomial distribution, each event has a fixed probability, and the sum of these
1588
+ probabilities is 1. The purpose of the `mindspore.mint.multinomial` interface is to perform `num_samples` sampling
1589
+ on the input `input`, and the output tensor is the index of the input tensor for each sampling.
1590
+ The values in `input` represent the probability of selecting the corresponding index for each sampling.
1591
+
1592
+ Here is an extreme example for better understanding. Suppose we have an input probability tensor with
1593
+ values `Tensor([90 / 100, 10 / 100, 0], mindspore.float32)`, which means we can sample three indices,
1594
+ namely index 0, index 1, and index 2, with probabilities of 90%, 10%, and 0%, respectively. We perform n samplings,
1595
+ and the resulting sequence is the calculation result of the polynomial distribution, with a length equal to the
1596
+ number of samplings.
1597
+
1598
+ In case 1 of the sample code, we perform two non-replacement samplings (`replacement` is `False`).
1599
+ The calculation result is most likely `[0, 1]`, and less likely `[1, 0]`. Since the probability of selecting
1600
+ index 0 is 90% for each sampling, the first result is most likely to be index 0. Since the probability of selecting
1601
+ index 2 is 0, index 2 cannot appear in the sampling result. Therefore, the second result must be index 1,
1602
+ and the resulting sequence is `[0, 1]`.
1603
+
1604
+ In case 2 of the sample code, we perform 10 replacement samplings (`replacement` is `True`).
1605
+ As expected, about 90% of the sampling results are index 0.
1606
+
1607
+ In case 3 of the sample code, we extend the input to 2 dimensions, and the sampling results
1608
+ in each dimension also match our sampling expectations.
1609
+
1610
+ Note:
1611
+ The rows of input do not need to sum to one (in which case we use the values as weights),
1612
+ but must be non-negative, finite and have a non-zero sum.
1613
+ When using values as weights, it can be understood as normalizing the input along the last dimension.
1614
+
1615
+ .. warning::
1616
+ This is an experimental API that is subject to change or deletion.
1617
+
1618
+ Args:
1619
+ input (Tensor): The input tensor containing probabilities, must be 1 or 2 dimensions, with float32 data type.
1620
+ num_samples (int): Number of samples to draw.
1621
+ replacement (bool, optional): Whether to draw with replacement or not. Default: ``False`` .
1622
+
1623
+ Keyword Args:
1624
+ generator (generator, optional): MindSpore generator. Default: ``None``.
1625
+
1626
+ Returns:
1627
+ Tensor, dtype is Int64.
1628
+ If `input` is a vector, out is a vector of size `num_samples`.
1629
+ If `input` is a matrix with m rows, out is a matrix of shape `(m, num_samples)`.
1630
+
1631
+ Raises:
1632
+ TypeError: If `input` is not a Tensor, or its dtype is not one of float16, float32, float64 or bfloat16.
1633
+ TypeError: If `num_samples` is not an int, a Scalar of int,
1634
+ or a Tensor with shape ``(1,)`` and only one int element.
1636
+ RuntimeError: If :math:`\text{num_samples} <= 0`.
1637
+ RuntimeError: If `replacement` is ``False`` and `num_samples` is greater than the size of the last dimension of `input`.
1638
+ RuntimeError: If shape of the last dimension of `input` exceeds ``2^24``.
1639
+
1640
+ Supported Platforms:
1641
+ ``Ascend``
1642
+
1643
+ Examples:
1644
+ >>> import mindspore
1645
+ >>> from mindspore import Tensor, ops
1646
+ >>> from mindspore import dtype as mstype
1647
+ >>> # case 1: The output is random, and the length of the output is the same as num_sample.
1648
+ >>> # replacement is False.
1649
+ >>> input1 = Tensor([90 / 100, 10 / 100, 0], mindspore.float32)
1650
+ >>> input2 = Tensor([90, 10, 0], mindspore.float32)
1651
+ >>> # input1 and input2 have the same meaning.
1652
+ >>> output1 = ops.multinomial_ext(input1, 2)
1653
+ >>> output2 = ops.multinomial_ext(input2, 2)
1654
+ >>> # print(output1)
1655
+ >>> # [0 1]
1656
+ >>> # print(output2)
1657
+ >>> # [0 1]
1658
+ >>> print(len(output1))
1659
+ 2
1660
+ >>> print(len(output2))
1661
+ 2
1662
+ >>> # case 2: The output is random, and the length of the output is the same as num_sample.
1663
+ >>> # replacement is True.
1664
+ >>> output3 = ops.multinomial_ext(input1, 10, replacement=True)
1665
+ >>> # print(output3)
1666
+ >>> # [0 0 1 0 0 0 0 0 0 0]
1667
+ >>> print(len(output3))
1668
+ 10
1669
+ >>> # case 3: The output is random, and the length of the output is the same as num_sample.
1670
+ >>> # replacement is True.
1671
+ >>> # rank is 2
1672
+ >>> input4 = Tensor([[90, 10, 0], [10, 90, 0]], mstype.float32)
1673
+ >>> output4 = ops.multinomial_ext(input4, 10, replacement=True)
1674
+ >>> # print(output4)
1675
+ >>> # [[0 0 0 0 0 0 0 0 1 0]
1676
+ >>> # [1 1 1 1 1 0 1 1 1 1]]
1677
+ """
1678
+
1679
+ if generator is None:
1680
+ generator = default_generator
1681
+ seed, offset = generator._step(generator_step_) # pylint: disable=protected-access
1682
+ return multinomial_ext_(input, num_samples, replacement, seed, offset)
1683
+
1684
+
1685
+ def _check_shape(input_shape):
1686
+ """Check 'shape' value."""
1687
+ if not isinstance(input_shape, tuple):
1688
+ const_utils.raise_type_error(
1689
+ f"Type of 'shape' must be tuple, but got: {type(input_shape)}")
1690
+ for item in input_shape:
1691
+ if not isinstance(item, int):
1692
+ const_utils.raise_type_error(
1693
+ f"Elements of 'shape' must be int, but got: {type(item)}")
1694
+ if item < 1:
1695
+ const_utils.raise_value_error(
1696
+ f"Elements of 'shape' must be positive int, but got: {item}")
1697
+ return True
1698
+
1699
+
1700
+ def _check_param(op_name, param_name, param_value):
1701
+ """Check type of param_value is Tensor, int, or float."""
1702
+ if not isinstance(param_value, (Tensor, int, float, np.ndarray)):
1703
+ const_utils.raise_type_error("For '{}', the type of '{}' must be Tensor, int, or float, "
1704
+ "but got: {}".format(op_name, param_name, type(param_value)))
1705
+ return True
1706
+
1707
+
1708
+ __all__ = [
1709
+ 'standard_laplace', 'random_categorical', 'uniform', 'standard_normal', 'random_gamma',
1710
+ 'uniform_candidate_sampler', 'random_poisson', 'log_uniform_candidate_sampler', 'shuffle', 'choice_with_mask',
1711
+ 'normal', 'laplace', 'gamma', 'poisson', 'multinomial', 'rand', 'rand_like',
1712
+ 'randn', 'randn_like',
1713
+ 'randint', 'randint_like', 'multinomial_with_replacement', 'randperm'
1714
+ ]
1715
+ __all__.sort()