mindspore-2.3.0-cp310-cp310-win_amd64.whl

This diff shows the contents of a publicly available package version released to one of the supported registries. The information is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (1400)
  1. mindspore/.commit_id +1 -0
  2. mindspore/ConcurrencyCheck.dll +0 -0
  3. mindspore/CppBuildInsights.dll +0 -0
  4. mindspore/CppCoreCheck.dll +0 -0
  5. mindspore/EnumIndex.dll +0 -0
  6. mindspore/EspXEngine.dll +0 -0
  7. mindspore/HResultCheck.dll +0 -0
  8. mindspore/KernelTraceControl.dll +0 -0
  9. mindspore/LocalESPC.dll +0 -0
  10. mindspore/Microsoft.Diagnostics.Tracing.EventSource.dll +0 -0
  11. mindspore/Microsoft.VisualStudio.RemoteControl.dll +0 -0
  12. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  13. mindspore/Microsoft.VisualStudio.Utilities.Internal.dll +0 -0
  14. mindspore/Newtonsoft.Json.dll +0 -0
  15. mindspore/System.Runtime.CompilerServices.Unsafe.dll +0 -0
  16. mindspore/VariantClear.dll +0 -0
  17. mindspore/__init__.py +51 -0
  18. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  19. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  20. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  21. mindspore/_check_jit_forbidden_api.py +106 -0
  22. mindspore/_checkparam.py +1378 -0
  23. mindspore/_extends/__init__.py +23 -0
  24. mindspore/_extends/builtin_operations.py +224 -0
  25. mindspore/_extends/graph_kernel/__init__.py +17 -0
  26. mindspore/_extends/graph_kernel/model/__init__.py +19 -0
  27. mindspore/_extends/graph_kernel/model/graph_parallel.py +311 -0
  28. mindspore/_extends/graph_kernel/model/graph_split.py +1348 -0
  29. mindspore/_extends/graph_kernel/model/model.py +553 -0
  30. mindspore/_extends/graph_kernel/model/model_builder.py +216 -0
  31. mindspore/_extends/graph_kernel/parallel_estimate.py +60 -0
  32. mindspore/_extends/graph_kernel/splitter.py +140 -0
  33. mindspore/_extends/graph_kernel/utils.py +28 -0
  34. mindspore/_extends/parallel_compile/__init__.py +19 -0
  35. mindspore/_extends/parallel_compile/akg_compiler/__init__.py +19 -0
  36. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +269 -0
  37. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +529 -0
  38. mindspore/_extends/parallel_compile/akg_compiler/compiler.py +56 -0
  39. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
  40. mindspore/_extends/parallel_compile/akg_compiler/get_file_path.py +36 -0
  41. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +556 -0
  42. mindspore/_extends/parallel_compile/akg_compiler/util.py +159 -0
  43. mindspore/_extends/parse/__init__.py +49 -0
  44. mindspore/_extends/parse/compile_config.py +258 -0
  45. mindspore/_extends/parse/namespace.py +136 -0
  46. mindspore/_extends/parse/parser.py +1446 -0
  47. mindspore/_extends/parse/resources.py +213 -0
  48. mindspore/_extends/parse/standard_method.py +4437 -0
  49. mindspore/_extends/parse/trope.py +97 -0
  50. mindspore/_extends/pijit/__init__.py +23 -0
  51. mindspore/_extends/pijit/pijit_func_white_list.py +343 -0
  52. mindspore/_extends/remote/__init__.py +19 -0
  53. mindspore/_extends/remote/kernel_build_server.py +199 -0
  54. mindspore/_extends/remote/kernel_build_server_akg.py +55 -0
  55. mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
  56. mindspore/_extends/remote/kernel_build_server_ascend.py +75 -0
  57. mindspore/_extends/utils.py +68 -0
  58. mindspore/_install_custom.py +43 -0
  59. mindspore/_profiler.py +30 -0
  60. mindspore/amp.py +419 -0
  61. mindspore/atlprov.dll +0 -0
  62. mindspore/avcodec-59.dll +0 -0
  63. mindspore/avdevice-59.dll +0 -0
  64. mindspore/avfilter-8.dll +0 -0
  65. mindspore/avformat-59.dll +0 -0
  66. mindspore/avutil-57.dll +0 -0
  67. mindspore/boost/__init__.py +42 -0
  68. mindspore/boost/adasum.py +319 -0
  69. mindspore/boost/base.py +535 -0
  70. mindspore/boost/boost.py +400 -0
  71. mindspore/boost/boost_cell_wrapper.py +790 -0
  72. mindspore/boost/dim_reduce.py +323 -0
  73. mindspore/boost/grad_accumulation.py +79 -0
  74. mindspore/boost/grad_freeze.py +382 -0
  75. mindspore/boost/group_loss_scale_manager.py +166 -0
  76. mindspore/boost/less_batch_normalization.py +174 -0
  77. mindspore/c1.dll +0 -0
  78. mindspore/c1xx.dll +0 -0
  79. mindspore/c2.dll +0 -0
  80. mindspore/cfgpersist.dll +0 -0
  81. mindspore/clang_rt.asan_dbg_dynamic-x86_64.dll +0 -0
  82. mindspore/clang_rt.asan_dynamic-x86_64.dll +0 -0
  83. mindspore/common/__init__.py +84 -0
  84. mindspore/common/_auto_dynamic.py +68 -0
  85. mindspore/common/_decorator.py +50 -0
  86. mindspore/common/_jit_fallback_utils.py +110 -0
  87. mindspore/common/_monad.py +25 -0
  88. mindspore/common/_register_for_adapter.py +74 -0
  89. mindspore/common/_register_for_recompute.py +48 -0
  90. mindspore/common/_register_for_tensor.py +45 -0
  91. mindspore/common/_stub_tensor.py +210 -0
  92. mindspore/common/_utils.py +122 -0
  93. mindspore/common/api.py +2049 -0
  94. mindspore/common/auto_dynamic_shape.py +507 -0
  95. mindspore/common/dtype.py +422 -0
  96. mindspore/common/dump.py +131 -0
  97. mindspore/common/file_system.py +48 -0
  98. mindspore/common/generator.py +260 -0
  99. mindspore/common/hook_handle.py +155 -0
  100. mindspore/common/initializer.py +880 -0
  101. mindspore/common/jit_config.py +98 -0
  102. mindspore/common/lazy_inline.py +240 -0
  103. mindspore/common/mindir_util.py +111 -0
  104. mindspore/common/mutable.py +234 -0
  105. mindspore/common/no_inline.py +54 -0
  106. mindspore/common/np_dtype.py +25 -0
  107. mindspore/common/parameter.py +1048 -0
  108. mindspore/common/recompute.py +262 -0
  109. mindspore/common/seed.py +260 -0
  110. mindspore/common/sparse_tensor.py +1171 -0
  111. mindspore/common/symbol.py +122 -0
  112. mindspore/common/tensor.py +4859 -0
  113. mindspore/communication/__init__.py +37 -0
  114. mindspore/communication/_comm_helper.py +466 -0
  115. mindspore/communication/_hccl_management.py +297 -0
  116. mindspore/communication/comm_func.py +1140 -0
  117. mindspore/communication/management.py +673 -0
  118. mindspore/config/op_info.config +533 -0
  119. mindspore/context.py +1976 -0
  120. mindspore/d3dcompiler_47.dll +0 -0
  121. mindspore/dataset/__init__.py +90 -0
  122. mindspore/dataset/audio/__init__.py +61 -0
  123. mindspore/dataset/audio/transforms.py +3690 -0
  124. mindspore/dataset/audio/utils.py +386 -0
  125. mindspore/dataset/audio/validators.py +1172 -0
  126. mindspore/dataset/callback/__init__.py +20 -0
  127. mindspore/dataset/callback/ds_callback.py +368 -0
  128. mindspore/dataset/callback/validators.py +32 -0
  129. mindspore/dataset/core/__init__.py +13 -0
  130. mindspore/dataset/core/config.py +1088 -0
  131. mindspore/dataset/core/datatypes.py +101 -0
  132. mindspore/dataset/core/py_util_helpers.py +65 -0
  133. mindspore/dataset/core/validator_helpers.py +774 -0
  134. mindspore/dataset/debug/__init__.py +21 -0
  135. mindspore/dataset/debug/debug_hook.py +97 -0
  136. mindspore/dataset/debug/pre_defined_hook.py +67 -0
  137. mindspore/dataset/engine/__init__.py +124 -0
  138. mindspore/dataset/engine/cache_admin.py +47 -0
  139. mindspore/dataset/engine/cache_client.py +129 -0
  140. mindspore/dataset/engine/datasets.py +4554 -0
  141. mindspore/dataset/engine/datasets_audio.py +911 -0
  142. mindspore/dataset/engine/datasets_standard_format.py +493 -0
  143. mindspore/dataset/engine/datasets_text.py +2161 -0
  144. mindspore/dataset/engine/datasets_user_defined.py +1114 -0
  145. mindspore/dataset/engine/datasets_vision.py +4816 -0
  146. mindspore/dataset/engine/iterators.py +342 -0
  147. mindspore/dataset/engine/obs/__init__.py +23 -0
  148. mindspore/dataset/engine/obs/config_loader.py +68 -0
  149. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +508 -0
  150. mindspore/dataset/engine/obs/util.py +475 -0
  151. mindspore/dataset/engine/offload.py +596 -0
  152. mindspore/dataset/engine/queue.py +250 -0
  153. mindspore/dataset/engine/samplers.py +895 -0
  154. mindspore/dataset/engine/serializer_deserializer.py +159 -0
  155. mindspore/dataset/engine/validators.py +2875 -0
  156. mindspore/dataset/text/__init__.py +54 -0
  157. mindspore/dataset/text/transforms.py +1703 -0
  158. mindspore/dataset/text/utils.py +715 -0
  159. mindspore/dataset/text/validators.py +642 -0
  160. mindspore/dataset/transforms/__init__.py +48 -0
  161. mindspore/dataset/transforms/c_transforms.py +638 -0
  162. mindspore/dataset/transforms/py_transforms.py +393 -0
  163. mindspore/dataset/transforms/py_transforms_util.py +255 -0
  164. mindspore/dataset/transforms/transforms.py +1260 -0
  165. mindspore/dataset/transforms/validators.py +410 -0
  166. mindspore/dataset/utils/__init__.py +19 -0
  167. mindspore/dataset/utils/browse_dataset.py +190 -0
  168. mindspore/dataset/utils/line_reader.py +124 -0
  169. mindspore/dataset/vision/__init__.py +68 -0
  170. mindspore/dataset/vision/c_transforms.py +2641 -0
  171. mindspore/dataset/vision/py_transforms.py +2120 -0
  172. mindspore/dataset/vision/py_transforms_util.py +1660 -0
  173. mindspore/dataset/vision/transforms.py +7295 -0
  174. mindspore/dataset/vision/utils.py +863 -0
  175. mindspore/dataset/vision/validators.py +1482 -0
  176. mindspore/default_config.py +2 -0
  177. mindspore/dnnl.dll +0 -0
  178. mindspore/dpcmi.dll +0 -0
  179. mindspore/experimental/__init__.py +20 -0
  180. mindspore/experimental/map_parameter.py +309 -0
  181. mindspore/experimental/optim/__init__.py +40 -0
  182. mindspore/experimental/optim/adadelta.py +161 -0
  183. mindspore/experimental/optim/adagrad.py +168 -0
  184. mindspore/experimental/optim/adam.py +193 -0
  185. mindspore/experimental/optim/adamax.py +170 -0
  186. mindspore/experimental/optim/adamw.py +205 -0
  187. mindspore/experimental/optim/asgd.py +153 -0
  188. mindspore/experimental/optim/lr_scheduler.py +1371 -0
  189. mindspore/experimental/optim/nadam.py +157 -0
  190. mindspore/experimental/optim/optimizer.py +259 -0
  191. mindspore/experimental/optim/radam.py +194 -0
  192. mindspore/experimental/optim/rmsprop.py +154 -0
  193. mindspore/experimental/optim/rprop.py +164 -0
  194. mindspore/experimental/optim/sgd.py +156 -0
  195. mindspore/hal/__init__.py +40 -0
  196. mindspore/hal/_ascend.py +57 -0
  197. mindspore/hal/_base.py +57 -0
  198. mindspore/hal/_cpu.py +56 -0
  199. mindspore/hal/_gpu.py +57 -0
  200. mindspore/hal/device.py +356 -0
  201. mindspore/hal/event.py +179 -0
  202. mindspore/hal/memory.py +326 -0
  203. mindspore/hal/stream.py +339 -0
  204. mindspore/include/OWNERS +7 -0
  205. mindspore/include/api/allocator.h +97 -0
  206. mindspore/include/api/callback/callback.h +93 -0
  207. mindspore/include/api/callback/ckpt_saver.h +41 -0
  208. mindspore/include/api/callback/loss_monitor.h +33 -0
  209. mindspore/include/api/callback/lr_scheduler.h +51 -0
  210. mindspore/include/api/callback/time_monitor.h +34 -0
  211. mindspore/include/api/callback/train_accuracy.h +37 -0
  212. mindspore/include/api/cell.h +90 -0
  213. mindspore/include/api/cfg.h +82 -0
  214. mindspore/include/api/context.h +602 -0
  215. mindspore/include/api/data_type.h +47 -0
  216. mindspore/include/api/delegate.h +178 -0
  217. mindspore/include/api/delegate_api.h +75 -0
  218. mindspore/include/api/dual_abi_helper.h +208 -0
  219. mindspore/include/api/format.h +28 -0
  220. mindspore/include/api/graph.h +46 -0
  221. mindspore/include/api/kernel.h +58 -0
  222. mindspore/include/api/kernel_api.h +168 -0
  223. mindspore/include/api/metrics/accuracy.h +36 -0
  224. mindspore/include/api/metrics/metrics.h +41 -0
  225. mindspore/include/api/model.h +438 -0
  226. mindspore/include/api/model_group.h +79 -0
  227. mindspore/include/api/model_parallel_runner.h +168 -0
  228. mindspore/include/api/serialization.h +185 -0
  229. mindspore/include/api/status.h +192 -0
  230. mindspore/include/api/types.h +431 -0
  231. mindspore/include/api/visible.h +41 -0
  232. mindspore/include/c_api/context_c.h +179 -0
  233. mindspore/include/c_api/data_type_c.h +52 -0
  234. mindspore/include/c_api/format_c.h +46 -0
  235. mindspore/include/c_api/model_c.h +347 -0
  236. mindspore/include/c_api/ms/abstract.h +67 -0
  237. mindspore/include/c_api/ms/attribute.h +197 -0
  238. mindspore/include/c_api/ms/base/handle_types.h +43 -0
  239. mindspore/include/c_api/ms/base/macros.h +32 -0
  240. mindspore/include/c_api/ms/base/status.h +33 -0
  241. mindspore/include/c_api/ms/base/types.h +283 -0
  242. mindspore/include/c_api/ms/context.h +102 -0
  243. mindspore/include/c_api/ms/graph.h +160 -0
  244. mindspore/include/c_api/ms/node.h +606 -0
  245. mindspore/include/c_api/ms/tensor.h +161 -0
  246. mindspore/include/c_api/ms/value.h +84 -0
  247. mindspore/include/c_api/status_c.h +79 -0
  248. mindspore/include/c_api/tensor_c.h +146 -0
  249. mindspore/include/c_api/types_c.h +67 -0
  250. mindspore/include/dataset/config.h +163 -0
  251. mindspore/include/dataset/constants.h +363 -0
  252. mindspore/include/dataset/execute.h +196 -0
  253. mindspore/include/dataset/text.h +1092 -0
  254. mindspore/include/dataset/transforms.h +638 -0
  255. mindspore/include/dataset/vision.h +2125 -0
  256. mindspore/include/dataset/vision_ascend.h +206 -0
  257. mindspore/include/dataset/vision_lite.h +625 -0
  258. mindspore/jpeg62.dll +0 -0
  259. mindspore/log.py +633 -0
  260. mindspore/mindrecord/__init__.py +43 -0
  261. mindspore/mindrecord/common/__init__.py +17 -0
  262. mindspore/mindrecord/common/constant.py +20 -0
  263. mindspore/mindrecord/common/enums.py +44 -0
  264. mindspore/mindrecord/common/exceptions.py +311 -0
  265. mindspore/mindrecord/config.py +809 -0
  266. mindspore/mindrecord/filereader.py +174 -0
  267. mindspore/mindrecord/filewriter.py +705 -0
  268. mindspore/mindrecord/mindpage.py +210 -0
  269. mindspore/mindrecord/shardheader.py +141 -0
  270. mindspore/mindrecord/shardindexgenerator.py +74 -0
  271. mindspore/mindrecord/shardreader.py +117 -0
  272. mindspore/mindrecord/shardsegment.py +128 -0
  273. mindspore/mindrecord/shardutils.py +185 -0
  274. mindspore/mindrecord/shardwriter.py +237 -0
  275. mindspore/mindrecord/tools/__init__.py +17 -0
  276. mindspore/mindrecord/tools/cifar10.py +140 -0
  277. mindspore/mindrecord/tools/cifar100.py +153 -0
  278. mindspore/mindrecord/tools/cifar100_to_mr.py +185 -0
  279. mindspore/mindrecord/tools/cifar10_to_mr.py +177 -0
  280. mindspore/mindrecord/tools/csv_to_mr.py +200 -0
  281. mindspore/mindrecord/tools/imagenet_to_mr.py +206 -0
  282. mindspore/mindrecord/tools/mnist_to_mr.py +259 -0
  283. mindspore/mindrecord/tools/tfrecord_to_mr.py +360 -0
  284. mindspore/mindspore_backend.dll +0 -0
  285. mindspore/mindspore_common.dll +0 -0
  286. mindspore/mindspore_core.dll +0 -0
  287. mindspore/mindspore_glog.dll +0 -0
  288. mindspore/mindspore_np_dtype.dll +0 -0
  289. mindspore/mindspore_shared_lib.dll +0 -0
  290. mindspore/mint/__init__.py +1137 -0
  291. mindspore/mint/linalg/__init__.py +22 -0
  292. mindspore/mint/nn/__init__.py +512 -0
  293. mindspore/mint/nn/functional.py +573 -0
  294. mindspore/mint/optim/__init__.py +24 -0
  295. mindspore/mint/optim/adamw.py +185 -0
  296. mindspore/msobj140.dll +0 -0
  297. mindspore/mspdb140.dll +0 -0
  298. mindspore/mspdbcore.dll +0 -0
  299. mindspore/mspdbst.dll +0 -0
  300. mindspore/mspft140.dll +0 -0
  301. mindspore/msvcdis140.dll +0 -0
  302. mindspore/msvcp140.dll +0 -0
  303. mindspore/msvcp140_1.dll +0 -0
  304. mindspore/msvcp140_2.dll +0 -0
  305. mindspore/msvcp140_atomic_wait.dll +0 -0
  306. mindspore/msvcp140_codecvt_ids.dll +0 -0
  307. mindspore/multiprocessing/__init__.py +72 -0
  308. mindspore/nn/__init__.py +48 -0
  309. mindspore/nn/cell.py +2605 -0
  310. mindspore/nn/dynamic_lr.py +482 -0
  311. mindspore/nn/extend/__init__.py +29 -0
  312. mindspore/nn/extend/basic.py +140 -0
  313. mindspore/nn/extend/embedding.py +143 -0
  314. mindspore/nn/extend/layer/__init__.py +27 -0
  315. mindspore/nn/extend/layer/normalization.py +109 -0
  316. mindspore/nn/extend/pooling.py +117 -0
  317. mindspore/nn/grad/__init__.py +21 -0
  318. mindspore/nn/grad/cell_grad.py +196 -0
  319. mindspore/nn/layer/__init__.py +63 -0
  320. mindspore/nn/layer/activation.py +1655 -0
  321. mindspore/nn/layer/basic.py +1519 -0
  322. mindspore/nn/layer/channel_shuffle.py +90 -0
  323. mindspore/nn/layer/combined.py +248 -0
  324. mindspore/nn/layer/container.py +734 -0
  325. mindspore/nn/layer/conv.py +1505 -0
  326. mindspore/nn/layer/dense.py +204 -0
  327. mindspore/nn/layer/embedding.py +751 -0
  328. mindspore/nn/layer/embedding_service.py +531 -0
  329. mindspore/nn/layer/embedding_service_layer.py +393 -0
  330. mindspore/nn/layer/image.py +661 -0
  331. mindspore/nn/layer/math.py +1069 -0
  332. mindspore/nn/layer/normalization.py +1177 -0
  333. mindspore/nn/layer/padding.py +894 -0
  334. mindspore/nn/layer/pooling.py +2148 -0
  335. mindspore/nn/layer/rnn_cells.py +388 -0
  336. mindspore/nn/layer/rnns.py +849 -0
  337. mindspore/nn/layer/thor_layer.py +963 -0
  338. mindspore/nn/layer/timedistributed.py +155 -0
  339. mindspore/nn/layer/transformer.py +823 -0
  340. mindspore/nn/learning_rate_schedule.py +512 -0
  341. mindspore/nn/loss/__init__.py +36 -0
  342. mindspore/nn/loss/loss.py +2846 -0
  343. mindspore/nn/metrics.py +53 -0
  344. mindspore/nn/optim/__init__.py +44 -0
  345. mindspore/nn/optim/_dist_optimizer_registry.py +111 -0
  346. mindspore/nn/optim/ada_grad.py +217 -0
  347. mindspore/nn/optim/adadelta.py +206 -0
  348. mindspore/nn/optim/adafactor.py +448 -0
  349. mindspore/nn/optim/adam.py +1297 -0
  350. mindspore/nn/optim/adamax.py +220 -0
  351. mindspore/nn/optim/adasum.py +548 -0
  352. mindspore/nn/optim/asgd.py +216 -0
  353. mindspore/nn/optim/ftrl.py +401 -0
  354. mindspore/nn/optim/lamb.py +296 -0
  355. mindspore/nn/optim/lars.py +202 -0
  356. mindspore/nn/optim/lazyadam.py +533 -0
  357. mindspore/nn/optim/momentum.py +239 -0
  358. mindspore/nn/optim/optimizer.py +1034 -0
  359. mindspore/nn/optim/proximal_ada_grad.py +242 -0
  360. mindspore/nn/optim/rmsprop.py +264 -0
  361. mindspore/nn/optim/rprop.py +251 -0
  362. mindspore/nn/optim/sgd.py +237 -0
  363. mindspore/nn/optim/thor.py +1310 -0
  364. mindspore/nn/probability/__init__.py +22 -0
  365. mindspore/nn/probability/bijector/__init__.py +35 -0
  366. mindspore/nn/probability/bijector/bijector.py +337 -0
  367. mindspore/nn/probability/bijector/exp.py +65 -0
  368. mindspore/nn/probability/bijector/gumbel_cdf.py +144 -0
  369. mindspore/nn/probability/bijector/invert.py +126 -0
  370. mindspore/nn/probability/bijector/power_transform.py +196 -0
  371. mindspore/nn/probability/bijector/scalar_affine.py +167 -0
  372. mindspore/nn/probability/bijector/softplus.py +189 -0
  373. mindspore/nn/probability/bnn_layers/__init__.py +29 -0
  374. mindspore/nn/probability/bnn_layers/_util.py +46 -0
  375. mindspore/nn/probability/bnn_layers/bnn_cell_wrapper.py +112 -0
  376. mindspore/nn/probability/bnn_layers/conv_variational.py +267 -0
  377. mindspore/nn/probability/bnn_layers/dense_variational.py +302 -0
  378. mindspore/nn/probability/bnn_layers/layer_distribution.py +123 -0
  379. mindspore/nn/probability/distribution/__init__.py +56 -0
  380. mindspore/nn/probability/distribution/_utils/__init__.py +34 -0
  381. mindspore/nn/probability/distribution/_utils/custom_ops.py +96 -0
  382. mindspore/nn/probability/distribution/_utils/utils.py +362 -0
  383. mindspore/nn/probability/distribution/bernoulli.py +334 -0
  384. mindspore/nn/probability/distribution/beta.py +391 -0
  385. mindspore/nn/probability/distribution/categorical.py +435 -0
  386. mindspore/nn/probability/distribution/cauchy.py +383 -0
  387. mindspore/nn/probability/distribution/distribution.py +827 -0
  388. mindspore/nn/probability/distribution/exponential.py +350 -0
  389. mindspore/nn/probability/distribution/gamma.py +391 -0
  390. mindspore/nn/probability/distribution/geometric.py +335 -0
  391. mindspore/nn/probability/distribution/gumbel.py +257 -0
  392. mindspore/nn/probability/distribution/half_normal.py +133 -0
  393. mindspore/nn/probability/distribution/laplace.py +128 -0
  394. mindspore/nn/probability/distribution/log_normal.py +272 -0
  395. mindspore/nn/probability/distribution/logistic.py +379 -0
  396. mindspore/nn/probability/distribution/normal.py +336 -0
  397. mindspore/nn/probability/distribution/poisson.py +288 -0
  398. mindspore/nn/probability/distribution/student_t.py +149 -0
  399. mindspore/nn/probability/distribution/transformed_distribution.py +235 -0
  400. mindspore/nn/probability/distribution/uniform.py +375 -0
  401. mindspore/nn/reinforcement/__init__.py +24 -0
  402. mindspore/nn/reinforcement/_batch_read_write.py +142 -0
  403. mindspore/nn/reinforcement/_tensors_queue.py +152 -0
  404. mindspore/nn/reinforcement/tensor_array.py +145 -0
  405. mindspore/nn/sparse/__init__.py +23 -0
  406. mindspore/nn/sparse/sparse.py +147 -0
  407. mindspore/nn/wrap/__init__.py +49 -0
  408. mindspore/nn/wrap/cell_wrapper.py +979 -0
  409. mindspore/nn/wrap/grad_reducer.py +608 -0
  410. mindspore/nn/wrap/loss_scale.py +680 -0
  411. mindspore/numpy/__init__.py +121 -0
  412. mindspore/numpy/array_creations.py +2734 -0
  413. mindspore/numpy/array_ops.py +2625 -0
  414. mindspore/numpy/dtypes.py +185 -0
  415. mindspore/numpy/fft.py +431 -0
  416. mindspore/numpy/logic_ops.py +935 -0
  417. mindspore/numpy/math_ops.py +5910 -0
  418. mindspore/numpy/utils.py +214 -0
  419. mindspore/numpy/utils_const.py +565 -0
  420. mindspore/opencv_core452.dll +0 -0
  421. mindspore/opencv_imgcodecs452.dll +0 -0
  422. mindspore/opencv_imgproc452.dll +0 -0
  423. mindspore/ops/__init__.py +54 -0
  424. mindspore/ops/_constants.py +30 -0
  425. mindspore/ops/_grad_experimental/__init__.py +31 -0
  426. mindspore/ops/_grad_experimental/grad_array_ops.py +830 -0
  427. mindspore/ops/_grad_experimental/grad_base.py +143 -0
  428. mindspore/ops/_grad_experimental/grad_comm_ops.py +670 -0
  429. mindspore/ops/_grad_experimental/grad_debug_ops.py +31 -0
  430. mindspore/ops/_grad_experimental/grad_implementations.py +203 -0
  431. mindspore/ops/_grad_experimental/grad_inner_ops.py +79 -0
  432. mindspore/ops/_grad_experimental/grad_math_ops.py +824 -0
  433. mindspore/ops/_grad_experimental/grad_nn_ops.py +231 -0
  434. mindspore/ops/_grad_experimental/grad_quant_ops.py +238 -0
  435. mindspore/ops/_grad_experimental/grad_sparse.py +342 -0
  436. mindspore/ops/_grad_experimental/grad_sparse_ops.py +399 -0
  437. mindspore/ops/_grad_experimental/taylor_rule.py +220 -0
  438. mindspore/ops/_op_impl/__init__.py +23 -0
  439. mindspore/ops/_op_impl/_custom_op/__init__.py +39 -0
  440. mindspore/ops/_op_impl/_custom_op/_basic.py +158 -0
  441. mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +279 -0
  442. mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +156 -0
  443. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +109 -0
  444. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +125 -0
  445. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +105 -0
  446. mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +124 -0
  447. mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +116 -0
  448. mindspore/ops/_op_impl/_custom_op/correction_mul.py +89 -0
  449. mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +196 -0
  450. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +366 -0
  451. mindspore/ops/_op_impl/_custom_op/dsd_impl.py +162 -0
  452. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +136 -0
  453. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +206 -0
  454. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +88 -0
  455. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +128 -0
  456. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +199 -0
  457. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +88 -0
  458. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +156 -0
  459. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +184 -0
  460. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +143 -0
  461. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +169 -0
  462. mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +548 -0
  463. mindspore/ops/_op_impl/_custom_op/img2col_impl.py +881 -0
  464. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +278 -0
  465. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +200 -0
  466. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +334 -0
  467. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +255 -0
  468. mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +222 -0
  469. mindspore/ops/_op_impl/_custom_op/matmul_dds_grad_impl.py +644 -0
  470. mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +488 -0
  471. mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +87 -0
  472. mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +129 -0
  473. mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +121 -0
  474. mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +352 -0
  475. mindspore/ops/_op_impl/aicpu/__init__.py +441 -0
  476. mindspore/ops/_op_impl/aicpu/abs.py +36 -0
  477. mindspore/ops/_op_impl/aicpu/acos.py +32 -0
  478. mindspore/ops/_op_impl/aicpu/acos_grad.py +33 -0
  479. mindspore/ops/_op_impl/aicpu/acosh.py +34 -0
  480. mindspore/ops/_op_impl/aicpu/acosh_grad.py +35 -0
  481. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d.py +34 -0
  482. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
  483. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d.py +39 -0
  484. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d_grad.py +39 -0
  485. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d.py +37 -0
  486. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d_grad.py +37 -0
  487. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d.py +42 -0
  488. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d_grad.py +152 -0
  489. mindspore/ops/_op_impl/aicpu/add.py +43 -0
  490. mindspore/ops/_op_impl/aicpu/add_n.py +41 -0
  491. mindspore/ops/_op_impl/aicpu/add_v2.py +40 -0
  492. mindspore/ops/_op_impl/aicpu/addcdiv.py +41 -0
  493. mindspore/ops/_op_impl/aicpu/addcmul.py +47 -0
  494. mindspore/ops/_op_impl/aicpu/adjust_contrastv2.py +32 -0
  495. mindspore/ops/_op_impl/aicpu/adjust_hue.py +31 -0
  496. mindspore/ops/_op_impl/aicpu/adjust_saturation.py +32 -0
  497. mindspore/ops/_op_impl/aicpu/affine_grid.py +33 -0
  498. mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
  499. mindspore/ops/_op_impl/aicpu/angle.py +31 -0
  500. mindspore/ops/_op_impl/aicpu/arg_max.py +75 -0
  501. mindspore/ops/_op_impl/aicpu/arg_min.py +75 -0
  502. mindspore/ops/_op_impl/aicpu/argmax_with_value.py +43 -0
  503. mindspore/ops/_op_impl/aicpu/argmin_with_value.py +43 -0
  504. mindspore/ops/_op_impl/aicpu/asin.py +32 -0
  505. mindspore/ops/_op_impl/aicpu/asin_grad.py +33 -0
  506. mindspore/ops/_op_impl/aicpu/asinh.py +34 -0
  507. mindspore/ops/_op_impl/aicpu/asinh_grad.py +35 -0
  508. mindspore/ops/_op_impl/aicpu/atanh.py +34 -0
  509. mindspore/ops/_op_impl/aicpu/avgpool_grad_v1.py +37 -0
  510. mindspore/ops/_op_impl/aicpu/avgpool_v1.py +36 -0
  511. mindspore/ops/_op_impl/aicpu/bartlett_window.py +36 -0
  512. mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -0
  513. mindspore/ops/_op_impl/aicpu/batch_norm_grad_grad.py +49 -0
  514. mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
  515. mindspore/ops/_op_impl/aicpu/bessel_i0.py +31 -0
  516. mindspore/ops/_op_impl/aicpu/betainc.py +31 -0
  517. mindspore/ops/_op_impl/aicpu/bias_add.py +44 -0
  518. mindspore/ops/_op_impl/aicpu/bias_add_grad.py +42 -0
  519. mindspore/ops/_op_impl/aicpu/bincount.py +33 -0
  520. mindspore/ops/_op_impl/aicpu/blackman_window.py +36 -0
  521. mindspore/ops/_op_impl/aicpu/broadcast_to.py +58 -0
  522. mindspore/ops/_op_impl/aicpu/bucketize.py +34 -0
  523. mindspore/ops/_op_impl/aicpu/cache_swap_table.py +102 -0
  524. mindspore/ops/_op_impl/aicpu/cast.py +225 -0
  525. mindspore/ops/_op_impl/aicpu/cauchy.py +33 -0
  526. mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
  527. mindspore/ops/_op_impl/aicpu/check_numerics.py +33 -0
  528. mindspore/ops/_op_impl/aicpu/cholesky.py +32 -0
  529. mindspore/ops/_op_impl/aicpu/cholesky_inverse.py +31 -0
  530. mindspore/ops/_op_impl/aicpu/cholesky_solve.py +33 -0
  531. mindspore/ops/_op_impl/aicpu/choleskygrad.py +32 -0
  532. mindspore/ops/_op_impl/aicpu/coalesce.py +37 -0
  533. mindspore/ops/_op_impl/aicpu/col2im.py +38 -0
  534. mindspore/ops/_op_impl/aicpu/combined_non_max_suppression.py +42 -0
  535. mindspore/ops/_op_impl/aicpu/compare_and_bitpack.py +37 -0
  536. mindspore/ops/_op_impl/aicpu/complex.py +32 -0
  537. mindspore/ops/_op_impl/aicpu/complex_abs.py +31 -0
  538. mindspore/ops/_op_impl/aicpu/compute_accidental_hits.py +44 -0
  539. mindspore/ops/_op_impl/aicpu/concat.py +57 -0
  540. mindspore/ops/_op_impl/aicpu/concat_offset.py +42 -0
  541. mindspore/ops/_op_impl/aicpu/concat_offset_v1.py +31 -0
  542. mindspore/ops/_op_impl/aicpu/conj.py +42 -0
  543. mindspore/ops/_op_impl/aicpu/conjugate_transpose.py +58 -0
  544. mindspore/ops/_op_impl/aicpu/cos.py +34 -0
  545. mindspore/ops/_op_impl/aicpu/cosh.py +34 -0
  546. mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
  547. mindspore/ops/_op_impl/aicpu/crop_and_resize.py +69 -0
  548. mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_boxes.py +68 -0
  549. mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_image.py +38 -0
  550. mindspore/ops/_op_impl/aicpu/cross.py +42 -0
  551. mindspore/ops/_op_impl/aicpu/csr_sparse_matrix_to_dense.py +48 -0
  552. mindspore/ops/_op_impl/aicpu/csr_sparse_matrix_to_sparse_tensor.py +51 -0
  553. mindspore/ops/_op_impl/aicpu/ctc_greedy_decoder.py +35 -0
  554. mindspore/ops/_op_impl/aicpu/ctc_loss_v2.py +43 -0
  555. mindspore/ops/_op_impl/aicpu/ctc_loss_v2_grad.py +45 -0
  556. mindspore/ops/_op_impl/aicpu/ctcloss.py +38 -0
  557. mindspore/ops/_op_impl/aicpu/cummax.py +41 -0
  558. mindspore/ops/_op_impl/aicpu/cumprod.py +58 -0
  559. mindspore/ops/_op_impl/aicpu/cumsum.py +58 -0
  560. mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +36 -0
  561. mindspore/ops/_op_impl/aicpu/data_format_vec_permute.py +32 -0
  562. mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
  563. mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
  564. mindspore/ops/_op_impl/aicpu/dense_to_csr_sparse_matrix.py +49 -0
  565. mindspore/ops/_op_impl/aicpu/dense_to_dense_set_operation.py +45 -0
  566. mindspore/ops/_op_impl/aicpu/dense_to_sparse_set_operation.py +48 -0
  567. mindspore/ops/_op_impl/aicpu/depth_to_space.py +44 -0
  568. mindspore/ops/_op_impl/aicpu/diag.py +36 -0
  569. mindspore/ops/_op_impl/aicpu/diag_part.py +36 -0
  570. mindspore/ops/_op_impl/aicpu/diagonal.py +35 -0
  571. mindspore/ops/_op_impl/aicpu/digamma.py +31 -0
  572. mindspore/ops/_op_impl/aicpu/div.py +41 -0
  573. mindspore/ops/_op_impl/aicpu/div_no_nan.py +35 -0
  574. mindspore/ops/_op_impl/aicpu/dropout2d.py +42 -0
  575. mindspore/ops/_op_impl/aicpu/dropout3d.py +42 -0
  576. mindspore/ops/_op_impl/aicpu/dropout_genmask.py +41 -0
  577. mindspore/ops/_op_impl/aicpu/dropout_genmask_v3.py +32 -0
  578. mindspore/ops/_op_impl/aicpu/dynamic_stitch.py +42 -0
  579. mindspore/ops/_op_impl/aicpu/edit_distance.py +56 -0
  580. mindspore/ops/_op_impl/aicpu/eig.py +35 -0
  581. mindspore/ops/_op_impl/aicpu/embedding_lookup.py +102 -0
  582. mindspore/ops/_op_impl/aicpu/end_of_sequence.py +30 -0
  583. mindspore/ops/_op_impl/aicpu/environ_create.py +28 -0
  584. mindspore/ops/_op_impl/aicpu/environ_destroy_all.py +28 -0
  585. mindspore/ops/_op_impl/aicpu/environ_get.py +41 -0
  586. mindspore/ops/_op_impl/aicpu/environ_set.py +40 -0
  587. mindspore/ops/_op_impl/aicpu/eps.py +32 -0
  588. mindspore/ops/_op_impl/aicpu/equal.py +41 -0
  589. mindspore/ops/_op_impl/aicpu/exp.py +37 -0
  590. mindspore/ops/_op_impl/aicpu/expand.py +45 -0
  591. mindspore/ops/_op_impl/aicpu/expand_dims.py +42 -0
  592. mindspore/ops/_op_impl/aicpu/expm1.py +34 -0
  593. mindspore/ops/_op_impl/aicpu/extract_glimpse.py +35 -0
  594. mindspore/ops/_op_impl/aicpu/eye.py +44 -0
  595. mindspore/ops/_op_impl/aicpu/fft_with_size.py +47 -0
  596. mindspore/ops/_op_impl/aicpu/fill_diagonal.py +39 -0
  597. mindspore/ops/_op_impl/aicpu/fill_v2.py +58 -0
  598. mindspore/ops/_op_impl/aicpu/flatten.py +43 -0
  599. mindspore/ops/_op_impl/aicpu/floor_div.py +38 -0
  600. mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
  601. mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
  602. mindspore/ops/_op_impl/aicpu/fractional_avg_pool.py +41 -0
  603. mindspore/ops/_op_impl/aicpu/fractional_avg_pool_grad.py +41 -0
  604. mindspore/ops/_op_impl/aicpu/fractional_max_pool.py +41 -0
  605. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_grad_with_fixed_ksize.py +43 -0
  606. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +65 -0
  607. mindspore/ops/_op_impl/aicpu/fractional_max_pool_grad.py +42 -0
  608. mindspore/ops/_op_impl/aicpu/fractional_max_pool_grad_with_fixed_ksize.py +42 -0
  609. mindspore/ops/_op_impl/aicpu/fractional_max_pool_with_fixed_ksize.py +49 -0
  610. mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
  611. mindspore/ops/_op_impl/aicpu/fused_sparse_adam.py +46 -0
  612. mindspore/ops/_op_impl/aicpu/fused_sparse_ftrl.py +41 -0
  613. mindspore/ops/_op_impl/aicpu/fused_sparse_lazy_adam.py +46 -0
  614. mindspore/ops/_op_impl/aicpu/fused_sparse_proximal_adagrad.py +39 -0
  615. mindspore/ops/_op_impl/aicpu/gamma.py +38 -0
  616. mindspore/ops/_op_impl/aicpu/gather.py +46 -0
  617. mindspore/ops/_op_impl/aicpu/gather_d.py +79 -0
  618. mindspore/ops/_op_impl/aicpu/gather_d_grad_v2.py +79 -0
  619. mindspore/ops/_op_impl/aicpu/gather_grad.py +54 -0
  620. mindspore/ops/_op_impl/aicpu/gather_nd.py +56 -0
  621. mindspore/ops/_op_impl/aicpu/gcd.py +32 -0
  622. mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +38 -0
  623. mindspore/ops/_op_impl/aicpu/geqrf.py +32 -0
  624. mindspore/ops/_op_impl/aicpu/get_next.py +39 -0
  625. mindspore/ops/_op_impl/aicpu/glu.py +33 -0
  626. mindspore/ops/_op_impl/aicpu/glu_grad.py +34 -0
  627. mindspore/ops/_op_impl/aicpu/greater.py +41 -0
  628. mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
  629. mindspore/ops/_op_impl/aicpu/grid_sampler_2d.py +35 -0
  630. mindspore/ops/_op_impl/aicpu/grid_sampler_2d_grad.py +38 -0
  631. mindspore/ops/_op_impl/aicpu/grid_sampler_3d.py +34 -0
  632. mindspore/ops/_op_impl/aicpu/grid_sampler_3d_grad.py +38 -0
  633. mindspore/ops/_op_impl/aicpu/hamming_window.py +57 -0
  634. mindspore/ops/_op_impl/aicpu/hard_sigmoid.py +32 -0
  635. mindspore/ops/_op_impl/aicpu/hard_sigmoid_grad.py +33 -0
  636. mindspore/ops/_op_impl/aicpu/heaviside.py +40 -0
  637. mindspore/ops/_op_impl/aicpu/histogram.py +35 -0
  638. mindspore/ops/_op_impl/aicpu/hsv_to_rgb.py +32 -0
  639. mindspore/ops/_op_impl/aicpu/hypot.py +32 -0
  640. mindspore/ops/_op_impl/aicpu/identity.py +42 -0
  641. mindspore/ops/_op_impl/aicpu/identity_n.py +41 -0
  642. mindspore/ops/_op_impl/aicpu/igamma.py +30 -0
  643. mindspore/ops/_op_impl/aicpu/igammac.py +30 -0
  644. mindspore/ops/_op_impl/aicpu/igammagrada.py +30 -0
  645. mindspore/ops/_op_impl/aicpu/im2col.py +43 -0
  646. mindspore/ops/_op_impl/aicpu/imag.py +31 -0
  647. mindspore/ops/_op_impl/aicpu/index_fill.py +54 -0
  648. mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
  649. mindspore/ops/_op_impl/aicpu/init_data_set_queue.py +27 -0
  650. mindspore/ops/_op_impl/aicpu/inplace_index_add.py +39 -0
  651. mindspore/ops/_op_impl/aicpu/instance_norm_v2.py +41 -0
  652. mindspore/ops/_op_impl/aicpu/instance_norm_v2_grad.py +44 -0
  653. mindspore/ops/_op_impl/aicpu/is_finite.py +40 -0
  654. mindspore/ops/_op_impl/aicpu/is_inf.py +31 -0
  655. mindspore/ops/_op_impl/aicpu/is_nan.py +31 -0
  656. mindspore/ops/_op_impl/aicpu/kldivloss.py +34 -0
  657. mindspore/ops/_op_impl/aicpu/kldivlossgrad.py +35 -0
  658. mindspore/ops/_op_impl/aicpu/layer_norm_grad_grad.py +47 -0
  659. mindspore/ops/_op_impl/aicpu/lcm.py +32 -0
  660. mindspore/ops/_op_impl/aicpu/left_shift.py +38 -0
  661. mindspore/ops/_op_impl/aicpu/less.py +41 -0
  662. mindspore/ops/_op_impl/aicpu/less_equal.py +41 -0
  663. mindspore/ops/_op_impl/aicpu/lgamma.py +33 -0
  664. mindspore/ops/_op_impl/aicpu/linear_sum_assignment.py +57 -0
  665. mindspore/ops/_op_impl/aicpu/linspace.py +33 -0
  666. mindspore/ops/_op_impl/aicpu/list_diff.py +50 -0
  667. mindspore/ops/_op_impl/aicpu/log.py +37 -0
  668. mindspore/ops/_op_impl/aicpu/log1p.py +34 -0
  669. mindspore/ops/_op_impl/aicpu/log_matrix_determinant.py +31 -0
  670. mindspore/ops/_op_impl/aicpu/log_normal_reverse.py +33 -0
  671. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +37 -0
  672. mindspore/ops/_op_impl/aicpu/logical_xor.py +30 -0
  673. mindspore/ops/_op_impl/aicpu/logit.py +33 -0
  674. mindspore/ops/_op_impl/aicpu/logit_grad.py +34 -0
  675. mindspore/ops/_op_impl/aicpu/logspace.py +36 -0
  676. mindspore/ops/_op_impl/aicpu/lower_bound.py +47 -0
  677. mindspore/ops/_op_impl/aicpu/lstsq.py +34 -0
  678. mindspore/ops/_op_impl/aicpu/lu.py +39 -0
  679. mindspore/ops/_op_impl/aicpu/lu_solve.py +32 -0
  680. mindspore/ops/_op_impl/aicpu/lu_unpack.py +114 -0
  681. mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +49 -0
  682. mindspore/ops/_op_impl/aicpu/masked_fill.py +42 -0
  683. mindspore/ops/_op_impl/aicpu/masked_scatter.py +40 -0
  684. mindspore/ops/_op_impl/aicpu/masked_select.py +31 -0
  685. mindspore/ops/_op_impl/aicpu/masked_select_grad.py +35 -0
  686. mindspore/ops/_op_impl/aicpu/matmul.py +39 -0
  687. mindspore/ops/_op_impl/aicpu/matrix_band_part.py +59 -0
  688. mindspore/ops/_op_impl/aicpu/matrix_determinant.py +30 -0
  689. mindspore/ops/_op_impl/aicpu/matrix_diag_part_v3.py +54 -0
  690. mindspore/ops/_op_impl/aicpu/matrix_diag_v3.py +56 -0
  691. mindspore/ops/_op_impl/aicpu/matrix_exp.py +34 -0
  692. mindspore/ops/_op_impl/aicpu/matrix_inverse.py +31 -0
  693. mindspore/ops/_op_impl/aicpu/matrix_logarithm.py +31 -0
  694. mindspore/ops/_op_impl/aicpu/matrix_power.py +37 -0
  695. mindspore/ops/_op_impl/aicpu/matrix_set_diag_v3.py +54 -0
  696. mindspore/ops/_op_impl/aicpu/matrix_solve.py +35 -0
  697. mindspore/ops/_op_impl/aicpu/matrix_solve_ls.py +36 -0
  698. mindspore/ops/_op_impl/aicpu/matrix_triangular_solve.py +36 -0
  699. mindspore/ops/_op_impl/aicpu/max_pool3d_grad_with_argmax.py +60 -0
  700. mindspore/ops/_op_impl/aicpu/max_pool3d_with_argmax.py +59 -0
  701. mindspore/ops/_op_impl/aicpu/max_unpool2d.py +57 -0
  702. mindspore/ops/_op_impl/aicpu/max_unpool2d_grad.py +58 -0
  703. mindspore/ops/_op_impl/aicpu/max_unpool3d.py +57 -0
  704. mindspore/ops/_op_impl/aicpu/max_unpool3d_grad.py +58 -0
  705. mindspore/ops/_op_impl/aicpu/maximum_grad_grad.py +40 -0
  706. mindspore/ops/_op_impl/aicpu/maxpool_grad_v1.py +46 -0
  707. mindspore/ops/_op_impl/aicpu/maxpool_v1.py +42 -0
  708. mindspore/ops/_op_impl/aicpu/median.py +39 -0
  709. mindspore/ops/_op_impl/aicpu/median_grad.py +45 -0
  710. mindspore/ops/_op_impl/aicpu/meshgrid.py +41 -0
  711. mindspore/ops/_op_impl/aicpu/minimum_grad_grad.py +40 -0
  712. mindspore/ops/_op_impl/aicpu/mirror_pad.py +50 -0
  713. mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +48 -0
  714. mindspore/ops/_op_impl/aicpu/mul.py +43 -0
  715. mindspore/ops/_op_impl/aicpu/mul_no_nan.py +42 -0
  716. mindspore/ops/_op_impl/aicpu/multi_margin_loss.py +37 -0
  717. mindspore/ops/_op_impl/aicpu/multi_margin_loss_grad.py +41 -0
  718. mindspore/ops/_op_impl/aicpu/multilabel_margin_loss_grad.py +37 -0
  719. mindspore/ops/_op_impl/aicpu/multinomial.py +47 -0
  720. mindspore/ops/_op_impl/aicpu/multinomial_with_replacement.py +35 -0
  721. mindspore/ops/_op_impl/aicpu/mvlgamma.py +32 -0
  722. mindspore/ops/_op_impl/aicpu/mvlgamma_grad.py +33 -0
  723. mindspore/ops/_op_impl/aicpu/nan_to_num.py +34 -0
  724. mindspore/ops/_op_impl/aicpu/neg.py +36 -0
  725. mindspore/ops/_op_impl/aicpu/nextafter.py +32 -0
  726. mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
  727. mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
  728. mindspore/ops/_op_impl/aicpu/no_repeat_ngram.py +34 -0
  729. mindspore/ops/_op_impl/aicpu/non_deterministic_ints.py +33 -0
  730. mindspore/ops/_op_impl/aicpu/non_max_suppression.py +36 -0
  731. mindspore/ops/_op_impl/aicpu/non_max_suppression_with_overlaps.py +35 -0
  732. mindspore/ops/_op_impl/aicpu/non_zero.py +43 -0
  733. mindspore/ops/_op_impl/aicpu/not_equal.py +39 -0
  734. mindspore/ops/_op_impl/aicpu/nth_element.py +39 -0
  735. mindspore/ops/_op_impl/aicpu/nuclear_norm.py +33 -0
  736. mindspore/ops/_op_impl/aicpu/one_hot.py +116 -0
  737. mindspore/ops/_op_impl/aicpu/ones_like.py +39 -0
  738. mindspore/ops/_op_impl/aicpu/orgqr.py +34 -0
  739. mindspore/ops/_op_impl/aicpu/pad_and_shift.py +33 -0
  740. mindspore/ops/_op_impl/aicpu/pad_v3.py +61 -0
  741. mindspore/ops/_op_impl/aicpu/pad_v3_grad.py +59 -0
  742. mindspore/ops/_op_impl/aicpu/padding.py +41 -0
  743. mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +54 -0
  744. mindspore/ops/_op_impl/aicpu/pdist_grad.py +33 -0
  745. mindspore/ops/_op_impl/aicpu/poisson.py +37 -0
  746. mindspore/ops/_op_impl/aicpu/polar.py +32 -0
  747. mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
  748. mindspore/ops/_op_impl/aicpu/pow.py +39 -0
  749. mindspore/ops/_op_impl/aicpu/print_tensor.py +39 -0
  750. mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +113 -0
  751. mindspore/ops/_op_impl/aicpu/qr.py +36 -0
  752. mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
  753. mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
  754. mindspore/ops/_op_impl/aicpu/ragged_range.py +49 -0
  755. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
  756. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_tensor.py +74 -0
  757. mindspore/ops/_op_impl/aicpu/random_categorical.py +68 -0
  758. mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +36 -0
  759. mindspore/ops/_op_impl/aicpu/random_gamma.py +38 -0
  760. mindspore/ops/_op_impl/aicpu/random_poisson.py +134 -0
  761. mindspore/ops/_op_impl/aicpu/random_shuffle.py +47 -0
  762. mindspore/ops/_op_impl/aicpu/randperm.py +38 -0
  763. mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
  764. mindspore/ops/_op_impl/aicpu/range.py +36 -0
  765. mindspore/ops/_op_impl/aicpu/range_v2.py +35 -0
  766. mindspore/ops/_op_impl/aicpu/real.py +31 -0
  767. mindspore/ops/_op_impl/aicpu/real_div.py +40 -0
  768. mindspore/ops/_op_impl/aicpu/reciprocal.py +34 -0
  769. mindspore/ops/_op_impl/aicpu/reciprocal_grad.py +35 -0
  770. mindspore/ops/_op_impl/aicpu/reduce_mean.py +57 -0
  771. mindspore/ops/_op_impl/aicpu/reduce_prod.py +57 -0
  772. mindspore/ops/_op_impl/aicpu/reduce_sum.py +57 -0
  773. mindspore/ops/_op_impl/aicpu/relu_grad_v3.py +41 -0
  774. mindspore/ops/_op_impl/aicpu/relu_v3.py +38 -0
  775. mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +96 -0
  776. mindspore/ops/_op_impl/aicpu/reshape.py +42 -0
  777. mindspore/ops/_op_impl/aicpu/resize_area.py +40 -0
  778. mindspore/ops/_op_impl/aicpu/resize_bicubic.py +20 -0
  779. mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +19 -0
  780. mindspore/ops/_op_impl/aicpu/resize_bilinear.py +32 -0
  781. mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py +32 -0
  782. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2.py +36 -0
  783. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2_grad.py +35 -0
  784. mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
  785. mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
  786. mindspore/ops/_op_impl/aicpu/reverse_sequence.py +55 -0
  787. mindspore/ops/_op_impl/aicpu/reversev2.py +54 -0
  788. mindspore/ops/_op_impl/aicpu/rgb_to_hsv.py +32 -0
  789. mindspore/ops/_op_impl/aicpu/right_shift.py +38 -0
  790. mindspore/ops/_op_impl/aicpu/rnnt_loss.py +35 -0
  791. mindspore/ops/_op_impl/aicpu/round.py +34 -0
  792. mindspore/ops/_op_impl/aicpu/rsqrt.py +33 -0
  793. mindspore/ops/_op_impl/aicpu/rsqrt_grad.py +36 -0
  794. mindspore/ops/_op_impl/aicpu/sample_distorted_bounding_box_v2.py +49 -0
  795. mindspore/ops/_op_impl/aicpu/scale_and_translate.py +52 -0
  796. mindspore/ops/_op_impl/aicpu/scale_and_translate_grad.py +36 -0
  797. mindspore/ops/_op_impl/aicpu/scatter.py +79 -0
  798. mindspore/ops/_op_impl/aicpu/scatter_add_with_axis.py +53 -0
  799. mindspore/ops/_op_impl/aicpu/scatter_elements.py +39 -0
  800. mindspore/ops/_op_impl/aicpu/scatter_nd.py +59 -0
  801. mindspore/ops/_op_impl/aicpu/scatter_nd_max.py +54 -0
  802. mindspore/ops/_op_impl/aicpu/scatter_nd_min.py +54 -0
  803. mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +59 -0
  804. mindspore/ops/_op_impl/aicpu/search_sorted.py +44 -0
  805. mindspore/ops/_op_impl/aicpu/segment_max.py +52 -0
  806. mindspore/ops/_op_impl/aicpu/segment_mean.py +56 -0
  807. mindspore/ops/_op_impl/aicpu/segment_min.py +52 -0
  808. mindspore/ops/_op_impl/aicpu/segment_prod.py +56 -0
  809. mindspore/ops/_op_impl/aicpu/segment_sum.py +56 -0
  810. mindspore/ops/_op_impl/aicpu/select.py +45 -0
  811. mindspore/ops/_op_impl/aicpu/self_adjoint_eig.py +34 -0
  812. mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
  813. mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
  814. mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
  815. mindspore/ops/_op_impl/aicpu/sequence_concat.py +40 -0
  816. mindspore/ops/_op_impl/aicpu/sequence_stack.py +40 -0
  817. mindspore/ops/_op_impl/aicpu/set_size.py +38 -0
  818. mindspore/ops/_op_impl/aicpu/sign.py +36 -0
  819. mindspore/ops/_op_impl/aicpu/sin.py +34 -0
  820. mindspore/ops/_op_impl/aicpu/sinc.py +43 -0
  821. mindspore/ops/_op_impl/aicpu/sinh.py +34 -0
  822. mindspore/ops/_op_impl/aicpu/slice.py +59 -0
  823. mindspore/ops/_op_impl/aicpu/slice_grad.py +76 -0
  824. mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
  825. mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
  826. mindspore/ops/_op_impl/aicpu/sort.py +39 -0
  827. mindspore/ops/_op_impl/aicpu/space_to_depth.py +44 -0
  828. mindspore/ops/_op_impl/aicpu/sparse_addmm.py +87 -0
  829. mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +80 -0
  830. mindspore/ops/_op_impl/aicpu/sparse_apply_centered_rms_prop.py +105 -0
  831. mindspore/ops/_op_impl/aicpu/sparse_apply_momentum.py +80 -0
  832. mindspore/ops/_op_impl/aicpu/sparse_apply_proximal_gradient_descent.py +79 -0
  833. mindspore/ops/_op_impl/aicpu/sparse_concat.py +59 -0
  834. mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
  835. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_add.py +58 -0
  836. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_div.py +58 -0
  837. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_mul.py +58 -0
  838. mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows.py +63 -0
  839. mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows_grad.py +45 -0
  840. mindspore/ops/_op_impl/aicpu/sparse_matrix_mat_mul.py +56 -0
  841. mindspore/ops/_op_impl/aicpu/sparse_matrix_nnz.py +81 -0
  842. mindspore/ops/_op_impl/aicpu/sparse_matrix_transpose.py +116 -0
  843. mindspore/ops/_op_impl/aicpu/sparse_reorder.py +56 -0
  844. mindspore/ops/_op_impl/aicpu/sparse_reshape.py +34 -0
  845. mindspore/ops/_op_impl/aicpu/sparse_segment_mean_grad.py +36 -0
  846. mindspore/ops/_op_impl/aicpu/sparse_segment_mean_with_num_segments.py +44 -0
  847. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n.py +43 -0
  848. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n_grad.py +38 -0
  849. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n_with_num_segments.py +44 -0
  850. mindspore/ops/_op_impl/aicpu/sparse_segment_sum.py +49 -0
  851. mindspore/ops/_op_impl/aicpu/sparse_segment_sum_with_num_segments.py +68 -0
  852. mindspore/ops/_op_impl/aicpu/sparse_slice.py +63 -0
  853. mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +61 -0
  854. mindspore/ops/_op_impl/aicpu/sparse_softmax.py +33 -0
  855. mindspore/ops/_op_impl/aicpu/sparse_softmax_cross_entropy_with_logits_v2.py +35 -0
  856. mindspore/ops/_op_impl/aicpu/sparse_sparse_maximum.py +53 -0
  857. mindspore/ops/_op_impl/aicpu/sparse_sparse_minimum.py +53 -0
  858. mindspore/ops/_op_impl/aicpu/sparse_tensor_dense_add.py +84 -0
  859. mindspore/ops/_op_impl/aicpu/sparse_tensor_dense_mat_mul.py +190 -0
  860. mindspore/ops/_op_impl/aicpu/sparse_tensor_to_csr_sparse_matrix.py +51 -0
  861. mindspore/ops/_op_impl/aicpu/sparse_to_dense_v2.py +73 -0
  862. mindspore/ops/_op_impl/aicpu/split.py +45 -0
  863. mindspore/ops/_op_impl/aicpu/sqrt.py +34 -0
  864. mindspore/ops/_op_impl/aicpu/sqrt_grad.py +35 -0
  865. mindspore/ops/_op_impl/aicpu/square.py +35 -0
  866. mindspore/ops/_op_impl/aicpu/squared_difference.py +37 -0
  867. mindspore/ops/_op_impl/aicpu/squeeze.py +42 -0
  868. mindspore/ops/_op_impl/aicpu/sspaddmm.py +97 -0
  869. mindspore/ops/_op_impl/aicpu/stack.py +45 -0
  870. mindspore/ops/_op_impl/aicpu/stack_push_pop.py +87 -0
  871. mindspore/ops/_op_impl/aicpu/standard_laplace.py +34 -0
  872. mindspore/ops/_op_impl/aicpu/standard_normal.py +34 -0
  873. mindspore/ops/_op_impl/aicpu/stateless_dropout_genmask.py +37 -0
  874. mindspore/ops/_op_impl/aicpu/stft.py +70 -0
  875. mindspore/ops/_op_impl/aicpu/strided_slice.py +43 -0
  876. mindspore/ops/_op_impl/aicpu/strided_slice_grad.py +50 -0
  877. mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +93 -0
  878. mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +66 -0
  879. mindspore/ops/_op_impl/aicpu/sub.py +41 -0
  880. mindspore/ops/_op_impl/aicpu/sub_and_filter.py +36 -0
  881. mindspore/ops/_op_impl/aicpu/tan.py +34 -0
  882. mindspore/ops/_op_impl/aicpu/tanh.py +34 -0
  883. mindspore/ops/_op_impl/aicpu/tanh_grad.py +35 -0
  884. mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
  885. mindspore/ops/_op_impl/aicpu/tile.py +56 -0
  886. mindspore/ops/_op_impl/aicpu/topk.py +34 -0
  887. mindspore/ops/_op_impl/aicpu/trace.py +40 -0
  888. mindspore/ops/_op_impl/aicpu/tracegrad.py +41 -0
  889. mindspore/ops/_op_impl/aicpu/trans_data.py +35 -0
  890. mindspore/ops/_op_impl/aicpu/transpose.py +58 -0
  891. mindspore/ops/_op_impl/aicpu/tridiagonal_matmul.py +42 -0
  892. mindspore/ops/_op_impl/aicpu/tridiagonal_solve.py +35 -0
  893. mindspore/ops/_op_impl/aicpu/tril.py +42 -0
  894. mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
  895. mindspore/ops/_op_impl/aicpu/triplet_margin_loss.py +62 -0
  896. mindspore/ops/_op_impl/aicpu/triu.py +43 -0
  897. mindspore/ops/_op_impl/aicpu/triu_indices.py +34 -0
  898. mindspore/ops/_op_impl/aicpu/truncated_normal.py +39 -0
  899. mindspore/ops/_op_impl/aicpu/uniform.py +36 -0
  900. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +41 -0
  901. mindspore/ops/_op_impl/aicpu/uniform_int.py +36 -0
  902. mindspore/ops/_op_impl/aicpu/uniform_real.py +33 -0
  903. mindspore/ops/_op_impl/aicpu/unique.py +31 -0
  904. mindspore/ops/_op_impl/aicpu/unique_consecutive.py +47 -0
  905. mindspore/ops/_op_impl/aicpu/unique_with_pad.py +32 -0
  906. mindspore/ops/_op_impl/aicpu/unravel_index.py +32 -0
  907. mindspore/ops/_op_impl/aicpu/unsorted_segment_prod.py +53 -0
  908. mindspore/ops/_op_impl/aicpu/unsorted_segment_sum.py +57 -0
  909. mindspore/ops/_op_impl/aicpu/unstack.py +45 -0
  910. mindspore/ops/_op_impl/aicpu/update_cache.py +44 -0
  911. mindspore/ops/_op_impl/aicpu/upper_bound.py +47 -0
  912. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d.py +42 -0
  913. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d_grad.py +49 -0
  914. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d.py +40 -0
  915. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d_grad.py +50 -0
  916. mindspore/ops/_op_impl/aicpu/xdivy.py +35 -0
  917. mindspore/ops/_op_impl/aicpu/xlogy.py +33 -0
  918. mindspore/ops/_op_impl/aicpu/zeros_like.py +42 -0
  919. mindspore/ops/_op_impl/aicpu/zeta.py +31 -0
  920. mindspore/ops/_op_impl/akg/__init__.py +19 -0
  921. mindspore/ops/_op_impl/akg/ascend/__init__.py +48 -0
  922. mindspore/ops/_op_impl/akg/ascend/abs.py +35 -0
  923. mindspore/ops/_op_impl/akg/ascend/add.py +42 -0
  924. mindspore/ops/_op_impl/akg/ascend/add_n.py +37 -0
  925. mindspore/ops/_op_impl/akg/ascend/batchmatmul.py +33 -0
  926. mindspore/ops/_op_impl/akg/ascend/cast.py +46 -0
  927. mindspore/ops/_op_impl/akg/ascend/equal.py +35 -0
  928. mindspore/ops/_op_impl/akg/ascend/exp.py +35 -0
  929. mindspore/ops/_op_impl/akg/ascend/expand_dims.py +33 -0
  930. mindspore/ops/_op_impl/akg/ascend/greater.py +34 -0
  931. mindspore/ops/_op_impl/akg/ascend/greater_equal.py +35 -0
  932. mindspore/ops/_op_impl/akg/ascend/less.py +31 -0
  933. mindspore/ops/_op_impl/akg/ascend/less_equal.py +35 -0
  934. mindspore/ops/_op_impl/akg/ascend/load_im2col.py +33 -0
  935. mindspore/ops/_op_impl/akg/ascend/log.py +34 -0
  936. mindspore/ops/_op_impl/akg/ascend/maximum.py +36 -0
  937. mindspore/ops/_op_impl/akg/ascend/minimum.py +39 -0
  938. mindspore/ops/_op_impl/akg/ascend/mul.py +41 -0
  939. mindspore/ops/_op_impl/akg/ascend/neg.py +37 -0
  940. mindspore/ops/_op_impl/akg/ascend/pow.py +35 -0
  941. mindspore/ops/_op_impl/akg/ascend/prod_force_se_a.py +33 -0
  942. mindspore/ops/_op_impl/akg/ascend/real_div.py +36 -0
  943. mindspore/ops/_op_impl/akg/ascend/reciprocal.py +32 -0
  944. mindspore/ops/_op_impl/akg/ascend/reduce_max.py +32 -0
  945. mindspore/ops/_op_impl/akg/ascend/reduce_min.py +32 -0
  946. mindspore/ops/_op_impl/akg/ascend/reduce_sum.py +37 -0
  947. mindspore/ops/_op_impl/akg/ascend/rsqrt.py +35 -0
  948. mindspore/ops/_op_impl/akg/ascend/select.py +37 -0
  949. mindspore/ops/_op_impl/akg/ascend/sqrt.py +35 -0
  950. mindspore/ops/_op_impl/akg/ascend/square.py +35 -0
  951. mindspore/ops/_op_impl/akg/ascend/sub.py +42 -0
  952. mindspore/ops/_op_impl/akg/cpu/__init__.py +23 -0
  953. mindspore/ops/_op_impl/akg/cpu/coo2csr.py +29 -0
  954. mindspore/ops/_op_impl/akg/cpu/csr2coo.py +29 -0
  955. mindspore/ops/_op_impl/akg/cpu/csr_gather.py +33 -0
  956. mindspore/ops/_op_impl/akg/cpu/csr_mm.py +34 -0
  957. mindspore/ops/_op_impl/akg/cpu/csr_mul.py +33 -0
  958. mindspore/ops/_op_impl/akg/cpu/csr_mv.py +33 -0
  959. mindspore/ops/_op_impl/akg/cpu/csr_reduce_sum.py +31 -0
  960. mindspore/ops/_op_impl/akg/gpu/__init__.py +24 -0
  961. mindspore/ops/_op_impl/akg/gpu/coo2csr.py +29 -0
  962. mindspore/ops/_op_impl/akg/gpu/csr2coo.py +29 -0
  963. mindspore/ops/_op_impl/akg/gpu/csr_div.py +36 -0
  964. mindspore/ops/_op_impl/akg/gpu/csr_gather.py +33 -0
  965. mindspore/ops/_op_impl/akg/gpu/csr_mm.py +37 -0
  966. mindspore/ops/_op_impl/akg/gpu/csr_mul.py +36 -0
  967. mindspore/ops/_op_impl/akg/gpu/csr_mv.py +36 -0
  968. mindspore/ops/_op_impl/akg/gpu/csr_reduce_sum.py +33 -0
  969. mindspore/ops/_op_impl/cpu/__init__.py +78 -0
  970. mindspore/ops/_op_impl/cpu/adam.py +49 -0
  971. mindspore/ops/_op_impl/cpu/adam_weight_decay.py +47 -0
  972. mindspore/ops/_op_impl/cpu/arg_max.py +30 -0
  973. mindspore/ops/_op_impl/cpu/arg_max_with_value.py +31 -0
  974. mindspore/ops/_op_impl/cpu/arg_min_with_value.py +31 -0
  975. mindspore/ops/_op_impl/cpu/buffer_append.py +28 -0
  976. mindspore/ops/_op_impl/cpu/buffer_get.py +28 -0
  977. mindspore/ops/_op_impl/cpu/buffer_sample.py +28 -0
  978. mindspore/ops/_op_impl/cpu/cast.py +171 -0
  979. mindspore/ops/_op_impl/cpu/concat_offset.py +38 -0
  980. mindspore/ops/_op_impl/cpu/conv2d.py +30 -0
  981. mindspore/ops/_op_impl/cpu/conv3d.py +30 -0
  982. mindspore/ops/_op_impl/cpu/div.py +32 -0
  983. mindspore/ops/_op_impl/cpu/dropout.py +31 -0
  984. mindspore/ops/_op_impl/cpu/dropout_grad.py +30 -0
  985. mindspore/ops/_op_impl/cpu/dynamic_shape.py +42 -0
  986. mindspore/ops/_op_impl/cpu/dynamic_stitch.py +41 -0
  987. mindspore/ops/_op_impl/cpu/equal_count.py +30 -0
  988. mindspore/ops/_op_impl/cpu/gather_d.py +49 -0
  989. mindspore/ops/_op_impl/cpu/gather_d_grad.py +38 -0
  990. mindspore/ops/_op_impl/cpu/gather_d_grad_v2.py +40 -0
  991. mindspore/ops/_op_impl/cpu/gather_v2.py +40 -0
  992. mindspore/ops/_op_impl/cpu/hsigmoid.py +33 -0
  993. mindspore/ops/_op_impl/cpu/hsigmoid_grad.py +34 -0
  994. mindspore/ops/_op_impl/cpu/hswish.py +32 -0
  995. mindspore/ops/_op_impl/cpu/hswish_grad.py +33 -0
  996. mindspore/ops/_op_impl/cpu/identity_n.py +40 -0
  997. mindspore/ops/_op_impl/cpu/is_finite.py +39 -0
  998. mindspore/ops/_op_impl/cpu/l2loss.py +30 -0
  999. mindspore/ops/_op_impl/cpu/layer_norm.py +36 -0
  1000. mindspore/ops/_op_impl/cpu/layer_norm_grad.py +38 -0
  1001. mindspore/ops/_op_impl/cpu/maximum.py +35 -0
  1002. mindspore/ops/_op_impl/cpu/maximum_grad.py +47 -0
  1003. mindspore/ops/_op_impl/cpu/minimum.py +40 -0
  1004. mindspore/ops/_op_impl/cpu/minimum_grad.py +51 -0
  1005. mindspore/ops/_op_impl/cpu/mirror_pad.py +36 -0
  1006. mindspore/ops/_op_impl/cpu/mirror_pad_grad.py +36 -0
  1007. mindspore/ops/_op_impl/cpu/mul.py +32 -0
  1008. mindspore/ops/_op_impl/cpu/one_hot.py +31 -0
  1009. mindspore/ops/_op_impl/cpu/pad.py +32 -0
  1010. mindspore/ops/_op_impl/cpu/pow.py +32 -0
  1011. mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +42 -0
  1012. mindspore/ops/_op_impl/cpu/pyexecute.py +29 -0
  1013. mindspore/ops/_op_impl/cpu/pyfunc.py +29 -0
  1014. mindspore/ops/_op_impl/cpu/range.py +34 -0
  1015. mindspore/ops/_op_impl/cpu/real_div.py +33 -0
  1016. mindspore/ops/_op_impl/cpu/reduce_all.py +29 -0
  1017. mindspore/ops/_op_impl/cpu/reduce_any.py +29 -0
  1018. mindspore/ops/_op_impl/cpu/reduce_max.py +32 -0
  1019. mindspore/ops/_op_impl/cpu/reduce_mean.py +40 -0
  1020. mindspore/ops/_op_impl/cpu/reduce_min.py +32 -0
  1021. mindspore/ops/_op_impl/cpu/reduce_prod.py +40 -0
  1022. mindspore/ops/_op_impl/cpu/reduce_std.py +31 -0
  1023. mindspore/ops/_op_impl/cpu/reduce_sum.py +41 -0
  1024. mindspore/ops/_op_impl/cpu/space_to_batch_nd.py +38 -0
  1025. mindspore/ops/_op_impl/cpu/sparse_slice.py +62 -0
  1026. mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +60 -0
  1027. mindspore/ops/_op_impl/cpu/split.py +34 -0
  1028. mindspore/ops/_op_impl/cpu/sspaddmm.py +95 -0
  1029. mindspore/ops/_op_impl/cpu/stack.py +38 -0
  1030. mindspore/ops/_op_impl/cpu/sub.py +32 -0
  1031. mindspore/ops/_op_impl/cpu/tensor_copy_slices.py +41 -0
  1032. mindspore/ops/_op_impl/cpu/tile.py +37 -0
  1033. mindspore/ops/_op_impl/cpu/top_k.py +31 -0
  1034. mindspore/ops/_op_impl/cpu/transpose.py +39 -0
  1035. mindspore/ops/_primitive_cache.py +90 -0
  1036. mindspore/ops/_register_for_op.py +73 -0
  1037. mindspore/ops/_utils/__init__.py +20 -0
  1038. mindspore/ops/_utils/utils.py +147 -0
  1039. mindspore/ops/_vmap/__init__.py +25 -0
  1040. mindspore/ops/_vmap/vmap_array_ops.py +2151 -0
  1041. mindspore/ops/_vmap/vmap_base.py +533 -0
  1042. mindspore/ops/_vmap/vmap_convolution_ops.py +441 -0
  1043. mindspore/ops/_vmap/vmap_debug_ops.py +50 -0
  1044. mindspore/ops/_vmap/vmap_grad_math_ops.py +274 -0
  1045. mindspore/ops/_vmap/vmap_grad_nn_ops.py +806 -0
  1046. mindspore/ops/_vmap/vmap_image_ops.py +194 -0
  1047. mindspore/ops/_vmap/vmap_math_ops.py +977 -0
  1048. mindspore/ops/_vmap/vmap_nn_ops.py +2209 -0
  1049. mindspore/ops/_vmap/vmap_other_ops.py +105 -0
  1050. mindspore/ops/_vmap/vmap_random_ops.py +122 -0
  1051. mindspore/ops/_vmap/vmap_sparse_ops.py +89 -0
  1052. mindspore/ops/auto_generate/__init__.py +31 -0
  1053. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +231 -0
  1054. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +250 -0
  1055. mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
  1056. mindspore/ops/auto_generate/gen_extend_func.py +980 -0
  1057. mindspore/ops/auto_generate/gen_ops_def.py +6443 -0
  1058. mindspore/ops/auto_generate/gen_ops_prim.py +13167 -0
  1059. mindspore/ops/auto_generate/pyboost_inner_prim.py +429 -0
  1060. mindspore/ops/composite/__init__.py +71 -0
  1061. mindspore/ops/composite/base.py +1281 -0
  1062. mindspore/ops/composite/env_ops.py +41 -0
  1063. mindspore/ops/composite/math_ops.py +125 -0
  1064. mindspore/ops/composite/multitype_ops/__init__.py +77 -0
  1065. mindspore/ops/composite/multitype_ops/_compile_utils.py +1458 -0
  1066. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +897 -0
  1067. mindspore/ops/composite/multitype_ops/add_impl.py +606 -0
  1068. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +56 -0
  1069. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +56 -0
  1070. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +56 -0
  1071. mindspore/ops/composite/multitype_ops/div_impl.py +189 -0
  1072. mindspore/ops/composite/multitype_ops/equal_impl.py +335 -0
  1073. mindspore/ops/composite/multitype_ops/floordiv_impl.py +88 -0
  1074. mindspore/ops/composite/multitype_ops/getitem_impl.py +400 -0
  1075. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +109 -0
  1076. mindspore/ops/composite/multitype_ops/greater_impl.py +110 -0
  1077. mindspore/ops/composite/multitype_ops/in_impl.py +196 -0
  1078. mindspore/ops/composite/multitype_ops/left_shift_impl.py +37 -0
  1079. mindspore/ops/composite/multitype_ops/less_equal_impl.py +111 -0
  1080. mindspore/ops/composite/multitype_ops/less_impl.py +112 -0
  1081. mindspore/ops/composite/multitype_ops/logic_not_impl.py +113 -0
  1082. mindspore/ops/composite/multitype_ops/logical_and_impl.py +60 -0
  1083. mindspore/ops/composite/multitype_ops/logical_or_impl.py +61 -0
  1084. mindspore/ops/composite/multitype_ops/mod_impl.py +86 -0
  1085. mindspore/ops/composite/multitype_ops/mul_impl.py +294 -0
  1086. mindspore/ops/composite/multitype_ops/negative_impl.py +79 -0
  1087. mindspore/ops/composite/multitype_ops/not_equal_impl.py +290 -0
  1088. mindspore/ops/composite/multitype_ops/not_in_impl.py +196 -0
  1089. mindspore/ops/composite/multitype_ops/ones_like_impl.py +96 -0
  1090. mindspore/ops/composite/multitype_ops/pow_impl.py +87 -0
  1091. mindspore/ops/composite/multitype_ops/right_shift_impl.py +37 -0
  1092. mindspore/ops/composite/multitype_ops/setitem_impl.py +884 -0
  1093. mindspore/ops/composite/multitype_ops/sub_impl.py +116 -0
  1094. mindspore/ops/composite/multitype_ops/uadd_impl.py +29 -0
  1095. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +228 -0
  1096. mindspore/ops/deprecated.py +315 -0
  1097. mindspore/ops/extend/__init__.py +53 -0
  1098. mindspore/ops/extend/array_func.py +218 -0
  1099. mindspore/ops/extend/math_func.py +76 -0
  1100. mindspore/ops/extend/nn_func.py +308 -0
  1101. mindspore/ops/function/__init__.py +760 -0
  1102. mindspore/ops/function/array_func.py +6889 -0
  1103. mindspore/ops/function/clip_func.py +384 -0
  1104. mindspore/ops/function/debug_func.py +69 -0
  1105. mindspore/ops/function/fft_func.py +31 -0
  1106. mindspore/ops/function/grad/__init__.py +34 -0
  1107. mindspore/ops/function/grad/grad_func.py +1424 -0
  1108. mindspore/ops/function/image_func.py +292 -0
  1109. mindspore/ops/function/linalg_func.py +416 -0
  1110. mindspore/ops/function/math_func.py +11877 -0
  1111. mindspore/ops/function/nn_func.py +8175 -0
  1112. mindspore/ops/function/other_func.py +114 -0
  1113. mindspore/ops/function/parameter_func.py +134 -0
  1114. mindspore/ops/function/random_func.py +1539 -0
  1115. mindspore/ops/function/reshard_func.py +102 -0
  1116. mindspore/ops/function/sparse_func.py +884 -0
  1117. mindspore/ops/function/sparse_unary_func.py +2422 -0
  1118. mindspore/ops/function/spectral_func.py +150 -0
  1119. mindspore/ops/function/vmap_func.py +116 -0
  1120. mindspore/ops/functional.py +454 -0
  1121. mindspore/ops/op_info_register.py +1572 -0
  1122. mindspore/ops/operations/__init__.py +717 -0
  1123. mindspore/ops/operations/_csr_ops.py +403 -0
  1124. mindspore/ops/operations/_custom_grad.py +181 -0
  1125. mindspore/ops/operations/_embedding_cache_ops.py +307 -0
  1126. mindspore/ops/operations/_grad_ops.py +3052 -0
  1127. mindspore/ops/operations/_infer_ops.py +19 -0
  1128. mindspore/ops/operations/_inner_ops.py +2567 -0
  1129. mindspore/ops/operations/_map_tensor_ops.py +112 -0
  1130. mindspore/ops/operations/_ms_kernel.py +601 -0
  1131. mindspore/ops/operations/_ocr_ops.py +379 -0
  1132. mindspore/ops/operations/_opaque_predicate_registry.py +41 -0
  1133. mindspore/ops/operations/_pyfunc_registry.py +58 -0
  1134. mindspore/ops/operations/_quant_ops.py +1844 -0
  1135. mindspore/ops/operations/_rl_inner_ops.py +1231 -0
  1136. mindspore/ops/operations/_scalar_ops.py +106 -0
  1137. mindspore/ops/operations/_sequence_ops.py +1155 -0
  1138. mindspore/ops/operations/_sparse_grad_ops.py +56 -0
  1139. mindspore/ops/operations/_tensor_array.py +359 -0
  1140. mindspore/ops/operations/_thor_ops.py +807 -0
  1141. mindspore/ops/operations/array_ops.py +6258 -0
  1142. mindspore/ops/operations/comm_ops.py +1996 -0
  1143. mindspore/ops/operations/control_ops.py +127 -0
  1144. mindspore/ops/operations/custom_ops.py +1065 -0
  1145. mindspore/ops/operations/debug_ops.py +646 -0
  1146. mindspore/ops/operations/image_ops.py +1041 -0
  1147. mindspore/ops/operations/inner_ops.py +697 -0
  1148. mindspore/ops/operations/linalg_ops.py +95 -0
  1149. mindspore/ops/operations/manually_defined/__init__.py +24 -0
  1150. mindspore/ops/operations/manually_defined/_inner.py +61 -0
  1151. mindspore/ops/operations/manually_defined/ops_def.py +2016 -0
  1152. mindspore/ops/operations/math_ops.py +5306 -0
  1153. mindspore/ops/operations/nn_ops.py +9669 -0
  1154. mindspore/ops/operations/other_ops.py +871 -0
  1155. mindspore/ops/operations/random_ops.py +1243 -0
  1156. mindspore/ops/operations/reshard_ops.py +53 -0
  1157. mindspore/ops/operations/rl_ops.py +288 -0
  1158. mindspore/ops/operations/sparse_ops.py +2753 -0
  1159. mindspore/ops/operations/spectral_ops.py +111 -0
  1160. mindspore/ops/primitive.py +1034 -0
  1161. mindspore/ops/signature.py +54 -0
  1162. mindspore/ops/silent_check.py +162 -0
  1163. mindspore/ops/vm_impl_registry.py +91 -0
  1164. mindspore/ops_generate/__init__.py +27 -0
  1165. mindspore/ops_generate/arg_dtype_cast.py +250 -0
  1166. mindspore/ops_generate/arg_handler.py +197 -0
  1167. mindspore/ops_generate/gen_aclnn_implement.py +263 -0
  1168. mindspore/ops_generate/gen_ops.py +1084 -0
  1169. mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
  1170. mindspore/ops_generate/gen_pyboost_func.py +968 -0
  1171. mindspore/ops_generate/gen_utils.py +209 -0
  1172. mindspore/ops_generate/op_proto.py +138 -0
  1173. mindspore/ops_generate/pyboost_utils.py +354 -0
  1174. mindspore/ops_generate/template.py +239 -0
  1175. mindspore/parallel/__init__.py +28 -0
  1176. mindspore/parallel/_auto_parallel_context.py +1466 -0
  1177. mindspore/parallel/_cell_wrapper.py +91 -0
  1178. mindspore/parallel/_cost_model_context.py +700 -0
  1179. mindspore/parallel/_dp_allreduce_fusion.py +159 -0
  1180. mindspore/parallel/_offload_context.py +275 -0
  1181. mindspore/parallel/_parallel_serialization.py +533 -0
  1182. mindspore/parallel/_ps_context.py +242 -0
  1183. mindspore/parallel/_recovery_context.py +110 -0
  1184. mindspore/parallel/_tensor.py +660 -0
  1185. mindspore/parallel/_transformer/__init__.py +35 -0
  1186. mindspore/parallel/_transformer/layers.py +765 -0
  1187. mindspore/parallel/_transformer/loss.py +251 -0
  1188. mindspore/parallel/_transformer/moe.py +693 -0
  1189. mindspore/parallel/_transformer/op_parallel_config.py +222 -0
  1190. mindspore/parallel/_transformer/transformer.py +3119 -0
  1191. mindspore/parallel/_utils.py +600 -0
  1192. mindspore/parallel/algo_parameter_config.py +400 -0
  1193. mindspore/parallel/checkpoint_transform.py +643 -0
  1194. mindspore/parallel/cluster/__init__.py +15 -0
  1195. mindspore/parallel/cluster/process_entity/__init__.py +18 -0
  1196. mindspore/parallel/cluster/process_entity/_api.py +344 -0
  1197. mindspore/parallel/cluster/process_entity/_utils.py +126 -0
  1198. mindspore/parallel/cluster/run.py +136 -0
  1199. mindspore/parallel/mpi/__init__.py +14 -0
  1200. mindspore/parallel/mpi/_mpi_config.py +116 -0
  1201. mindspore/parallel/parameter_broadcast.py +152 -0
  1202. mindspore/parallel/shard.py +350 -0
  1203. mindspore/perf_msvcbuildinsights.dll +0 -0
  1204. mindspore/pgodb140.dll +0 -0
  1205. mindspore/pgort140.dll +0 -0
  1206. mindspore/profiler/__init__.py +27 -0
  1207. mindspore/profiler/common/__init__.py +14 -0
  1208. mindspore/profiler/common/exceptions/__init__.py +14 -0
  1209. mindspore/profiler/common/exceptions/error_code.py +83 -0
  1210. mindspore/profiler/common/exceptions/exceptions.py +286 -0
  1211. mindspore/profiler/common/process_pool.py +41 -0
  1212. mindspore/profiler/common/singleton.py +28 -0
  1213. mindspore/profiler/common/struct_type.py +118 -0
  1214. mindspore/profiler/common/util.py +444 -0
  1215. mindspore/profiler/common/validator/__init__.py +14 -0
  1216. mindspore/profiler/common/validator/validate_path.py +84 -0
  1217. mindspore/profiler/envprofiling.py +256 -0
  1218. mindspore/profiler/parser/__init__.py +14 -0
  1219. mindspore/profiler/parser/aicpu_data_parser.py +272 -0
  1220. mindspore/profiler/parser/ascend_analysis/__init__.py +14 -0
  1221. mindspore/profiler/parser/ascend_analysis/constant.py +53 -0
  1222. mindspore/profiler/parser/ascend_analysis/file_manager.py +159 -0
  1223. mindspore/profiler/parser/ascend_analysis/function_event.py +161 -0
  1224. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +131 -0
  1225. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +85 -0
  1226. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +57 -0
  1227. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +116 -0
  1228. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
  1229. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +68 -0
  1230. mindspore/profiler/parser/ascend_cluster_generator.py +116 -0
  1231. mindspore/profiler/parser/ascend_communicate_generator.py +314 -0
  1232. mindspore/profiler/parser/ascend_flops_generator.py +116 -0
  1233. mindspore/profiler/parser/ascend_fpbp_generator.py +82 -0
  1234. mindspore/profiler/parser/ascend_hccl_generator.py +271 -0
  1235. mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
  1236. mindspore/profiler/parser/ascend_memory_generator.py +185 -0
  1237. mindspore/profiler/parser/ascend_msprof_exporter.py +281 -0
  1238. mindspore/profiler/parser/ascend_msprof_generator.py +187 -0
  1239. mindspore/profiler/parser/ascend_op_generator.py +334 -0
  1240. mindspore/profiler/parser/ascend_steptrace_generator.py +94 -0
  1241. mindspore/profiler/parser/ascend_timeline_generator.py +543 -0
  1242. mindspore/profiler/parser/base_timeline_generator.py +489 -0
  1243. mindspore/profiler/parser/container.py +229 -0
  1244. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +684 -0
  1245. mindspore/profiler/parser/flops_parser.py +531 -0
  1246. mindspore/profiler/parser/framework_enum.py +111 -0
  1247. mindspore/profiler/parser/framework_parser.py +854 -0
  1248. mindspore/profiler/parser/framework_struct.py +61 -0
  1249. mindspore/profiler/parser/hccl_parser.py +573 -0
  1250. mindspore/profiler/parser/hwts_log_parser.py +122 -0
  1251. mindspore/profiler/parser/integrator.py +526 -0
  1252. mindspore/profiler/parser/memory_usage_parser.py +431 -0
  1253. mindspore/profiler/parser/minddata_analyzer.py +800 -0
  1254. mindspore/profiler/parser/minddata_parser.py +186 -0
  1255. mindspore/profiler/parser/minddata_pipeline_parser.py +299 -0
  1256. mindspore/profiler/parser/msadvisor_analyzer.py +82 -0
  1257. mindspore/profiler/parser/msadvisor_parser.py +240 -0
  1258. mindspore/profiler/parser/op_intermediate_parser.py +149 -0
  1259. mindspore/profiler/parser/optime_parser.py +250 -0
  1260. mindspore/profiler/parser/profiler_info.py +141 -0
  1261. mindspore/profiler/parser/step_trace_parser.py +666 -0
  1262. mindspore/profiler/profiling.py +2054 -0
  1263. mindspore/rewrite/__init__.py +29 -0
  1264. mindspore/rewrite/api/__init__.py +17 -0
  1265. mindspore/rewrite/api/node.py +519 -0
  1266. mindspore/rewrite/api/node_type.py +53 -0
  1267. mindspore/rewrite/api/pattern_engine.py +490 -0
  1268. mindspore/rewrite/api/scoped_value.py +181 -0
  1269. mindspore/rewrite/api/symbol_tree.py +497 -0
  1270. mindspore/rewrite/ast_helpers/__init__.py +25 -0
  1271. mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
  1272. mindspore/rewrite/ast_helpers/ast_finder.py +404 -0
  1273. mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
  1274. mindspore/rewrite/ast_helpers/ast_modifier.py +605 -0
  1275. mindspore/rewrite/ast_helpers/ast_replacer.py +79 -0
  1276. mindspore/rewrite/common/__init__.py +19 -0
  1277. mindspore/rewrite/common/config.py +24 -0
  1278. mindspore/rewrite/common/error_log.py +39 -0
  1279. mindspore/rewrite/common/event.py +28 -0
  1280. mindspore/rewrite/common/namer.py +271 -0
  1281. mindspore/rewrite/common/namespace.py +118 -0
  1282. mindspore/rewrite/common/observable.py +44 -0
  1283. mindspore/rewrite/common/observer.py +54 -0
  1284. mindspore/rewrite/node/__init__.py +22 -0
  1285. mindspore/rewrite/node/call_function.py +95 -0
  1286. mindspore/rewrite/node/cell_container.py +139 -0
  1287. mindspore/rewrite/node/control_flow.py +113 -0
  1288. mindspore/rewrite/node/node.py +1428 -0
  1289. mindspore/rewrite/node/node_manager.py +283 -0
  1290. mindspore/rewrite/node/node_topological_manager.py +223 -0
  1291. mindspore/rewrite/parsers/__init__.py +29 -0
  1292. mindspore/rewrite/parsers/arguments_parser.py +63 -0
  1293. mindspore/rewrite/parsers/assign_parser.py +852 -0
  1294. mindspore/rewrite/parsers/attribute_parser.py +57 -0
  1295. mindspore/rewrite/parsers/class_def_parser.py +289 -0
  1296. mindspore/rewrite/parsers/constant_parser.py +104 -0
  1297. mindspore/rewrite/parsers/container_parser.py +88 -0
  1298. mindspore/rewrite/parsers/expr_parser.py +55 -0
  1299. mindspore/rewrite/parsers/for_parser.py +61 -0
  1300. mindspore/rewrite/parsers/function_def_parser.py +84 -0
  1301. mindspore/rewrite/parsers/if_parser.py +85 -0
  1302. mindspore/rewrite/parsers/module_parser.py +117 -0
  1303. mindspore/rewrite/parsers/parser.py +43 -0
  1304. mindspore/rewrite/parsers/parser_register.py +86 -0
  1305. mindspore/rewrite/parsers/return_parser.py +37 -0
  1306. mindspore/rewrite/parsers/while_parser.py +59 -0
  1307. mindspore/rewrite/sparsify/__init__.py +0 -0
  1308. mindspore/rewrite/sparsify/sparse_transformer.py +457 -0
  1309. mindspore/rewrite/sparsify/sparsify.py +112 -0
  1310. mindspore/rewrite/sparsify/utils.py +179 -0
  1311. mindspore/rewrite/symbol_tree/__init__.py +20 -0
  1312. mindspore/rewrite/symbol_tree/symbol_tree.py +1819 -0
  1313. mindspore/rewrite/symbol_tree/symbol_tree_builder.py +76 -0
  1314. mindspore/rewrite/symbol_tree/symbol_tree_dumper.py +142 -0
  1315. mindspore/run_check/__init__.py +20 -0
  1316. mindspore/run_check/_check_version.py +574 -0
  1317. mindspore/run_check/run_check.py +66 -0
  1318. mindspore/safeguard/__init__.py +18 -0
  1319. mindspore/safeguard/rewrite_obfuscation.py +531 -0
  1320. mindspore/swresample-4.dll +0 -0
  1321. mindspore/swscale-6.dll +0 -0
  1322. mindspore/tbbmalloc.dll +0 -0
  1323. mindspore/tinyxml2.dll +0 -0
  1324. mindspore/train/__init__.py +47 -0
  1325. mindspore/train/_utils.py +439 -0
  1326. mindspore/train/amp.py +817 -0
  1327. mindspore/train/anf_ir_pb2.py +1517 -0
  1328. mindspore/train/callback/__init__.py +44 -0
  1329. mindspore/train/callback/_backup_and_restore.py +117 -0
  1330. mindspore/train/callback/_callback.py +613 -0
  1331. mindspore/train/callback/_checkpoint.py +751 -0
  1332. mindspore/train/callback/_cluster_monitor.py +201 -0
  1333. mindspore/train/callback/_dataset_graph.py +150 -0
  1334. mindspore/train/callback/_early_stop.py +239 -0
  1335. mindspore/train/callback/_flops_collector.py +238 -0
  1336. mindspore/train/callback/_history.py +92 -0
  1337. mindspore/train/callback/_lambda_callback.py +80 -0
  1338. mindspore/train/callback/_landscape.py +1049 -0
  1339. mindspore/train/callback/_loss_monitor.py +107 -0
  1340. mindspore/train/callback/_lr_scheduler_callback.py +76 -0
  1341. mindspore/train/callback/_mindio_ttp.py +443 -0
  1342. mindspore/train/callback/_on_request_exit.py +195 -0
  1343. mindspore/train/callback/_reduce_lr_on_plateau.py +226 -0
  1344. mindspore/train/callback/_summary_collector.py +1184 -0
  1345. mindspore/train/callback/_time_monitor.py +141 -0
  1346. mindspore/train/checkpoint_pb2.py +233 -0
  1347. mindspore/train/data_sink.py +219 -0
  1348. mindspore/train/dataset_helper.py +688 -0
  1349. mindspore/train/lineage_pb2.py +1260 -0
  1350. mindspore/train/loss_scale_manager.py +213 -0
  1351. mindspore/train/memory_profiling_pb2.py +298 -0
  1352. mindspore/train/metrics/__init__.py +175 -0
  1353. mindspore/train/metrics/accuracy.py +133 -0
  1354. mindspore/train/metrics/auc.py +129 -0
  1355. mindspore/train/metrics/bleu_score.py +170 -0
  1356. mindspore/train/metrics/confusion_matrix.py +700 -0
  1357. mindspore/train/metrics/cosine_similarity.py +109 -0
  1358. mindspore/train/metrics/dice.py +116 -0
  1359. mindspore/train/metrics/error.py +175 -0
  1360. mindspore/train/metrics/fbeta.py +167 -0
  1361. mindspore/train/metrics/hausdorff_distance.py +333 -0
  1362. mindspore/train/metrics/loss.py +97 -0
  1363. mindspore/train/metrics/mean_surface_distance.py +189 -0
  1364. mindspore/train/metrics/metric.py +373 -0
  1365. mindspore/train/metrics/occlusion_sensitivity.py +225 -0
  1366. mindspore/train/metrics/perplexity.py +133 -0
  1367. mindspore/train/metrics/precision.py +160 -0
  1368. mindspore/train/metrics/recall.py +159 -0
  1369. mindspore/train/metrics/roc.py +223 -0
  1370. mindspore/train/metrics/root_mean_square_surface_distance.py +191 -0
  1371. mindspore/train/metrics/topk.py +167 -0
  1372. mindspore/train/mind_ir_pb2.py +1903 -0
  1373. mindspore/train/model.py +2176 -0
  1374. mindspore/train/node_strategy_pb2.py +653 -0
  1375. mindspore/train/print_pb2.py +184 -0
  1376. mindspore/train/profiling_parallel_pb2.py +151 -0
  1377. mindspore/train/serialization.py +3101 -0
  1378. mindspore/train/summary/__init__.py +23 -0
  1379. mindspore/train/summary/_lineage_adapter.py +41 -0
  1380. mindspore/train/summary/_summary_adapter.py +496 -0
  1381. mindspore/train/summary/_writer_pool.py +207 -0
  1382. mindspore/train/summary/enums.py +56 -0
  1383. mindspore/train/summary/summary_record.py +581 -0
  1384. mindspore/train/summary/writer.py +167 -0
  1385. mindspore/train/summary_pb2.py +1165 -0
  1386. mindspore/train/train_thor/__init__.py +20 -0
  1387. mindspore/train/train_thor/convert_utils.py +268 -0
  1388. mindspore/train/train_thor/dataset_helper.py +192 -0
  1389. mindspore/train/train_thor/model_thor.py +257 -0
  1390. mindspore/turbojpeg.dll +0 -0
  1391. mindspore/vcmeta.dll +0 -0
  1392. mindspore/vcomp140.dll +0 -0
  1393. mindspore/vcruntime140.dll +0 -0
  1394. mindspore/vcruntime140_1.dll +0 -0
  1395. mindspore/version.py +1 -0
  1396. mindspore-2.3.0.dist-info/METADATA +351 -0
  1397. mindspore-2.3.0.dist-info/RECORD +1400 -0
  1398. mindspore-2.3.0.dist-info/WHEEL +5 -0
  1399. mindspore-2.3.0.dist-info/entry_points.txt +4 -0
  1400. mindspore-2.3.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1655 @@
1
+ # Copyright 2020-2022 Huawei Technologies Co., Ltd
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ============================================================================
15
+ """activation"""
16
+ from __future__ import absolute_import
17
+
18
+ import numpy as np
19
+
20
+ from mindspore import _checkparam as validator
21
+ from mindspore._extends import cell_attr_register
22
+ from mindspore.common import dtype as mstype
23
+ from mindspore.common.parameter import Parameter
24
+ from mindspore.common.tensor import Tensor
25
+ from mindspore.ops import functional as F
26
+ from mindspore.ops import operations as P
27
+ from mindspore.ops.operations import nn_ops as NN_OPS
28
+ from mindspore.nn.cell import Cell
29
+ from mindspore import ops
30
+ from mindspore.ops.primitive import _primexpr
31
+
32
+ __all__ = ['Softmin',
33
+ 'Softmax',
34
+ 'Softmax2d',
35
+ 'LogSoftmax',
36
+ 'ReLU',
37
+ 'ReLU6',
38
+ 'RReLU',
39
+ 'SeLU',
40
+ 'SiLU',
41
+ 'Tanh',
42
+ 'Tanhshrink',
43
+ 'Hardtanh',
44
+ 'GELU',
45
+ 'FastGelu',
46
+ 'Sigmoid',
47
+ 'Softsign',
48
+ 'PReLU',
49
+ 'get_activation',
50
+ 'LeakyReLU',
51
+ 'HSigmoid',
52
+ 'HSwish',
53
+ 'ELU',
54
+ 'LogSigmoid',
55
+ 'LRN',
56
+ 'SoftShrink',
57
+ 'HShrink',
58
+ 'CELU',
59
+ 'Threshold',
60
+ 'Mish',
61
+ 'GLU'
62
+ ]
63
+
64
+
65
+ class CELU(Cell):
66
+ r"""
67
+ CELU Activation Operator.
68
+
69
+ Applies the continuously differentiable exponential linear units function element-wise.
70
+
71
+ .. math::
72
+
73
+ \text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))
74
+
75
+ For more details, refer to `CELU <https://arxiv.org/abs/1704.07483>`_ .
76
+
77
+ CELU Activation Function Graph:
78
+
79
+ .. image:: ../images/CELU.png
80
+ :align: center
81
+
82
+ Args:
83
+ alpha (float): The :math:`\alpha` value for the Celu formulation. Default: ``1.0`` .
84
+
85
+ Inputs:
86
+ - **x** (Tensor) - The input of CELU. The required dtype is float16 or float32.
87
+ The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.
88
+
89
+ Outputs:
90
+ Tensor, with the same type and shape as the `x`.
91
+
92
+ Raises:
93
+ TypeError: If `alpha` is not a float.
94
+ ValueError: If `alpha` has the value of 0.
95
+ TypeError: If `x` is not a Tensor.
96
+ TypeError: If the dtype of `x` is neither float16 nor float32.
97
+
98
+ Supported Platforms:
99
+ ``Ascend`` ``GPU`` ``CPU``
100
+
101
+ Examples:
102
+ >>> import mindspore
103
+ >>> from mindspore import Tensor, nn
104
+ >>> import numpy as np
105
+ >>> x = Tensor(np.array([-2.0, -1.0, 1.0, 2.0]), mindspore.float32)
106
+ >>> celu = nn.CELU()
107
+ >>> output = celu(x)
108
+ >>> print(output)
109
+ [-0.86466473 -0.63212055 1. 2. ]
110
+ """
111
+
112
+ def __init__(self, alpha=1.0):
113
+ """Initialize CELU."""
114
+ super(CELU, self).__init__()
115
+ self.celu = P.CeLU(alpha=alpha)
116
+
117
+ def construct(self, x):
118
+ return self.celu(x)
119
+
120
+
121
+ class Softmin(Cell):
122
+ r"""
123
+ Softmin activation function. It generalizes the two-category function :class:`mindspore.nn.Sigmoid` to
124
+ multi-class classification, presenting the multi-class result in the form of a probability distribution.
125
+
126
+ It computes the exponential function for the elements of the input Tensor along the `axis`, and then
127
+ normalizes them so that they lie in the range [0, 1] and sum to 1.
128
+
129
+ Softmin is defined as:
130
+
131
+ .. math::
132
+ \text{softmin}(x_{i}) = \frac{\exp(-x_i)}{\sum_{j=0}^{n-1}\exp(-x_j)},
133
+
134
+ where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.
135
+
136
+ Args:
137
+ axis (Union[int, tuple[int]]): The axis to apply Softmin operation, if the dimension of input `x` is x.ndim,
138
+ the range of axis is `[-x.ndim, x.ndim)`. -1 means the last dimension. Default: ``-1`` .
139
+
140
+ Inputs:
141
+ - **x** (Tensor) - Tensor for computing Softmin functions with data type of float16 or float32.
142
+
143
+ Outputs:
144
+ Tensor, which has the same type and shape as `x` with values in the range [0,1].
145
+
146
+ Raises:
147
+ TypeError: If `axis` is neither an int nor a tuple.
148
+ TypeError: If dtype of `x` is neither float16 nor float32.
149
+ ValueError: If `axis` is a tuple whose length is less than 1.
150
+ ValueError: If `axis` is a tuple whose elements are not all in the range [-x.ndim, x.ndim).
151
+
152
+ Supported Platforms:
153
+ ``Ascend`` ``GPU`` ``CPU``
154
+
155
+ Examples:
156
+ >>> import mindspore
157
+ >>> from mindspore import Tensor, nn
158
+ >>> import numpy as np
159
+ >>> # axis = -1(default), and the sum of return value is 1.0.
160
+ >>> x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
161
+ >>> softmin = nn.Softmin()
162
+ >>> output = softmin(x)
163
+ >>> print(output)
164
+ [0.2341 0.636 0.0862 0.01165 0.03168 ]
165
+ """
166
+
167
+ def __init__(self, axis=-1):
168
+ """Initialize Softmin."""
169
+ super(Softmin, self).__init__()
170
+ self.axis = axis
171
+
172
+ def construct(self, x):
173
+ return ops.function.softmin(x, self.axis)
174
+
175
+
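Since softmin(x) is exactly softmax(-x), a quick way to sanity-check this class is to compare the two outputs (a minimal sketch, assuming a working MindSpore install):

    # Sketch: Softmin(x) should coincide with Softmax(-x), per the formula above.
    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    x = Tensor(np.array([-1.0, -2.0, 0.0, 2.0, 1.0]), mindspore.float32)
    softmin_out = nn.Softmin()(x)
    softmax_out = nn.Softmax()(-x)
    print(np.allclose(softmin_out.asnumpy(), softmax_out.asnumpy()))  # expected: True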
176
+ class Softmax2d(Cell):
177
+ r"""
178
+ Softmax function applied to 2D feature data.
179
+
180
+ Applies `Softmax` to each location :math:`(c, h, w)` with an input Tensor of shape :math:`(C, H, W)` .
181
+
182
+ Inputs:
183
+ - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`.
184
+ The input of Softmax with data type of float16 or float32.
185
+
186
+ Outputs:
187
+ Tensor, which has the same type and shape as `x` with values in the range[0,1].
188
+
189
+ Raises:
190
+ TypeError: If dtype of `x` is neither float16 nor float32.
191
+ ValueError: If `data_format` is neither 'NCHW' nor 'CHW'.
192
+
193
+ Supported Platforms:
194
+ ``Ascend`` ``GPU`` ``CPU``
195
+
196
+ Examples:
197
+ >>> import mindspore
198
+ >>> from mindspore import Tensor, nn
199
+ >>> import numpy as np
200
+ >>> x = Tensor(np.array([[[[0.1, 0.2]], [[0.3, 0.4]], [[0.6, 0.5]]]]), mindspore.float32)
201
+ >>> softmax2d = nn.Softmax2d()
202
+ >>> output = softmax2d(x)
203
+ >>> print(output)
204
+ [[[[0.25838965 0.28001308]]
205
+ [[0.31559783 0.34200877]]
206
+ [[0.42601252 0.37797815]]]]
207
+ """
208
+
209
+ def __init__(self):
210
+ """Initialize Softmax2d."""
211
+ super(Softmax2d, self).__init__()
212
+ self.softmax = P.Softmax(axis=-3)
213
+ self.shape = P.Shape()
214
+
215
+ @staticmethod
216
+ @_primexpr
217
+ def _check_input_dim(shape, cls_name):
218
+ dim = len(shape)
219
+ if dim not in (3, 4):
220
+ raise ValueError(f"For '{cls_name}', the in_shape must have 3 or 4 dims, but got {dim}.")
221
+
222
+ def construct(self, x):
223
+ x_shape = self.shape(x)
224
+ self._check_input_dim(x_shape, self.cls_name)
225
+ return self.softmax(x)
226
+
227
+
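Because Softmax2d normalizes over the channel axis, summing the output along that axis should give 1 at every spatial location; a small check (a sketch, assuming a working MindSpore install):

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    # Sketch: channel-wise probabilities at each (h, w) location sum to 1.
    x = Tensor(np.random.rand(1, 3, 2, 2).astype(np.float32))
    out = nn.Softmax2d()(x)
    print(np.allclose(out.asnumpy().sum(axis=1), 1.0))  # expected: True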
228
+ class Softmax(Cell):
229
+ r"""
230
+ Softmax activation function. It generalizes the two-category function :class:`mindspore.nn.Sigmoid` to
231
+ multi-class classification, presenting the multi-class result in the form of a probability distribution.
232
+
233
+ It computes the exponential function for the elements of the input Tensor along the `axis`, and then
234
+ normalizes them so that they lie in the range [0, 1] and sum to 1.
235
+
236
+ Softmax is defined as:
237
+
238
+ .. math::
239
+ \text{softmax}(input_{i}) = \frac{\exp(input_i)}{\sum_{j=0}^{n-1}\exp(input_j)},
240
+
241
+ where :math:`input_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.
242
+
243
+ Args:
244
+ axis (int, optional): The axis to apply Softmax operation, if the dimension of `input` is input.ndim,
245
+ the range of axis is `[-input.ndim, input.ndim)`, -1 means the last dimension. Default: ``-1`` .
246
+
247
+ Inputs:
248
+ - **input** (Tensor) - The input of Softmax.
249
+
250
+ Outputs:
251
+ Tensor, which has the same type and shape as `input` with values in the range[0, 1].
252
+
253
+ Raises:
254
+ TypeError: If `axis` is neither an int nor a tuple.
255
+ ValueError: If `axis` is a tuple whose length is less than 1.
256
+ ValueError: If `axis` is a tuple whose elements are not all in range `[-input.ndim, input.ndim)`.
257
+
258
+ Supported Platforms:
259
+ ``Ascend`` ``GPU`` ``CPU``
260
+
261
+ Examples:
262
+ >>> import mindspore
263
+ >>> from mindspore import Tensor, nn
264
+ >>> import numpy as np
265
+ >>> # axis = -1(default), and the sum of return value is 1.0.
266
+ >>> input = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
267
+ >>> softmax = nn.Softmax()
268
+ >>> output = softmax(input)
269
+ >>> print(output)
270
+ [0.03168 0.01166 0.0861 0.636 0.2341 ]
271
+ """
272
+
273
+ def __init__(self, axis=-1):
274
+ """Initialize Softmax."""
275
+ super(Softmax, self).__init__()
276
+ self.softmax = P.Softmax(axis)
277
+
278
+ def construct(self, input):
279
+ return self.softmax(input)
280
+
281
+
282
+ class LogSoftmax(Cell):
283
+ r"""
284
+ Applies the LogSoftmax function to n-dimensional input tensor element-wise.
285
+
286
+ The input is transformed by the Softmax function and then by the log function to lie in range[-inf,0).
287
+
288
+ Logsoftmax is defined as:
289
+
290
+ .. math::
291
+
292
+ \text{logsoftmax}(x_i) = \log \left(\frac{\exp(x_i)}{\sum_{j=0}^{n-1} \exp(x_j)}\right)
293
+
294
+ Args:
295
+ axis (int): The axis to apply LogSoftmax operation, -1 means the last dimension. Default: ``-1`` .
296
+
297
+ Inputs:
298
+ - **x** (Tensor) - The input of LogSoftmax, with float16 or float32 data type.
299
+
300
+ Outputs:
301
+ Tensor, which has the same type and shape as `x` with output values in the range[-inf,0).
302
+
303
+ Raises:
304
+ TypeError: If `axis` is not an int.
305
+ TypeError: If dtype of `x` is neither float16 nor float32.
306
+ ValueError: If `axis` is not in range [-len(x), len(x)).
307
+
308
+ Supported Platforms:
309
+ ``Ascend`` ``GPU`` ``CPU``
310
+
311
+ Examples:
312
+ >>> import mindspore
313
+ >>> from mindspore import Tensor, nn
314
+ >>> import numpy as np
315
+ >>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
316
+ >>> log_softmax = nn.LogSoftmax()
317
+ >>> output = log_softmax(x)
318
+ >>> print(output)
319
+ [[-5.00672150e+00 -6.72150636e-03 -1.20067215e+01]
320
+ [-7.00091219e+00 -1.40009127e+01 -9.12250078e-04]]
321
+ """
322
+
323
+ def __init__(self, axis=-1):
324
+ """Initialize LogSoftmax."""
325
+ super(LogSoftmax, self).__init__()
326
+ self.log_softmax = P.LogSoftmax(axis)
327
+
328
+ def construct(self, x):
329
+ return self.log_softmax(x)
330
+
331
+
332
+ class ELU(Cell):
333
+ r"""
334
+ Applies the exponential linear unit function element-wise.
335
+
336
+ The activation function is defined as:
337
+
338
+ .. math::
339
+ E_{i} =
340
+ \begin{cases}
341
+ x_i, &\text{if } x_i \geq 0; \cr
342
+ \alpha * (\exp(x_i) - 1), &\text{otherwise.}
343
+ \end{cases}
344
+
345
+ where :math:`x_i` represents the element of the input and :math:`\alpha` represents the `alpha` parameter.
346
+
347
+ ELU Activation Function Graph:
348
+
349
+ .. image:: ../images/ELU.png
350
+ :align: center
351
+
352
+ Args:
353
+ alpha (float): The alpha value of ELU, the data type is float. Default: ``1.0`` .
354
+ Only alpha equal to ``1.0`` is supported currently.
355
+
356
+ Inputs:
357
+ - **input_x** (Tensor) - The input of ELU is a Tensor of any dimension with data type of float16 or float32.
358
+
359
+ Outputs:
360
+ Tensor, with the same type and shape as the `input_x`.
361
+
362
+ Raises:
363
+ TypeError: If `alpha` is not a float.
364
+ TypeError: If dtype of `input_x` is neither float16 nor float32.
365
+ ValueError: If `alpha` is not equal to 1.0.
366
+
367
+ Supported Platforms:
368
+ ``Ascend`` ``GPU`` ``CPU``
369
+
370
+ Examples:
371
+ >>> import mindspore
372
+ >>> from mindspore import Tensor, nn
373
+ >>> import numpy as np
374
+ >>> x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float32)
375
+ >>> elu = nn.ELU()
376
+ >>> result = elu(x)
377
+ >>> print(result)
378
+ [-0.63212055 -0.86466473 0. 2. 1.]
379
+ """
380
+
381
+ def __init__(self, alpha=1.0):
382
+ """Initialize ELU."""
383
+ super(ELU, self).__init__()
384
+ self.elu = P.Elu(alpha)
385
+
386
+ def construct(self, x):
387
+ return self.elu(x)
388
+
389
+
390
+ class ReLU(Cell):
391
+ r"""
392
+ Applies ReLU (Rectified Linear Unit activation function) element-wise.
393
+
394
+ .. math::
395
+
396
+ \text{ReLU}(input) = (input)^+ = \max(0, input),
397
+
398
+ It returns element-wise :math:`\max(0, input)`.
399
+
400
+ .. note::
401
+ Neurons with a negative output are suppressed,
403
+ while active neurons are left unchanged.
403
+
404
+ ReLU Activation Function Graph:
405
+
406
+ .. image:: ../images/ReLU.png
407
+ :align: center
408
+
409
+ Inputs:
410
+ - **input** (Tensor) - The input of ReLU is a Tensor of any dimension.
411
+
412
+ Outputs:
413
+ Tensor, with the same type and shape as the `input`.
414
+
415
+ Raises:
416
+ TypeError: If dtype of `input` is not supported.
417
+
418
+ Supported Platforms:
419
+ ``Ascend`` ``GPU`` ``CPU``
420
+
421
+ Examples:
422
+ >>> import numpy as np
423
+ >>> import mindspore
424
+ >>> from mindspore import Tensor, nn
425
+ >>> input = Tensor(np.array([-1, 2, -3, 2, -1]), mindspore.float16)
426
+ >>> relu = nn.ReLU()
427
+ >>> output = relu(input)
428
+ >>> print(output)
429
+ [0. 2. 0. 2. 0.]
430
+ """
431
+
432
+ def __init__(self):
433
+ """Initialize ReLU."""
434
+ super(ReLU, self).__init__()
435
+ self.relu = P.ReLU()
436
+
437
+ def construct(self, x):
438
+ return self.relu(x)
439
+
440
+
441
+ class ReLU6(Cell):
442
+ r"""
443
+ Compute the ReLU6 activation function element-wise.
444
+
445
+ ReLU6 is similar to ReLU but has an upper limit of 6: inputs greater than 6 have their outputs
446
+ clipped to 6.
447
+ It is computed element-wise as
448
+
449
+ .. math::
450
+
451
+ Y = \min(\max(0, x), 6)
452
+
453
+ ReLU6 Activation Function Graph:
454
+
455
+ .. image:: ../images/ReLU6.png
456
+ :align: center
457
+
458
+ Inputs:
459
+ - **x** (Tensor) - The input of ReLU6 with data type of float16 or float32 and that
460
+ is a Tensor of any valid shape.
461
+
462
+ Outputs:
463
+ Tensor, which has the same type as `x`.
464
+
465
+ Raises:
466
+ TypeError: If dtype of `x` is neither float16 nor float32.
467
+
468
+ Supported Platforms:
469
+ ``Ascend`` ``GPU`` ``CPU``
470
+
471
+ Examples:
472
+ >>> import mindspore
473
+ >>> from mindspore import Tensor, nn
474
+ >>> import numpy as np
475
+ >>> x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
476
+ >>> relu6 = nn.ReLU6()
477
+ >>> output = relu6(x)
478
+ >>> print(output)
479
+ [0. 0. 0. 2. 1.]
480
+ """
481
+
482
+ def __init__(self):
483
+ """Initialize ReLU6."""
484
+ super(ReLU6, self).__init__()
485
+ self.relu6 = P.ReLU6()
486
+
487
+ def construct(self, x):
488
+ return self.relu6(x)
489
+
490
+
491
+ class LeakyReLU(Cell):
492
+ r"""
493
+ Leaky ReLU activation function.
494
+
495
+ The activation function is defined as:
496
+
497
+ .. math::
498
+ \text{leaky_relu}(x) = \begin{cases}x, &\text{if } x \geq 0; \cr
499
+ {\alpha} * x, &\text{otherwise.}\end{cases}
500
+
501
+ where :math:`\alpha` represents the `alpha` parameter.
502
+
503
+ For more details, see `Rectifier Nonlinearities Improve Neural Network Acoustic Models
504
+ <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`_.
505
+
506
+ LeakyReLU Activation Function Graph:
507
+
508
+ .. image:: ../images/LeakyReLU.png
509
+ :align: center
510
+
511
+ Args:
512
+ alpha (Union[int, float]): Slope of the activation function at x < 0. Default: ``0.2`` .
513
+
514
+ Inputs:
515
+ - **x** (Tensor) - The input of LeakyReLU is a Tensor of any dimension.
516
+
517
+ Outputs:
518
+ Tensor, has the same type and shape as the `x`.
519
+
520
+ Raises:
521
+ TypeError: If `alpha` is not a float or an int.
522
+
523
+ Supported Platforms:
524
+ ``Ascend`` ``GPU`` ``CPU``
525
+
526
+ Examples:
527
+ >>> import mindspore
528
+ >>> from mindspore import Tensor, nn
529
+ >>> import numpy as np
530
+ >>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
531
+ >>> leaky_relu = nn.LeakyReLU()
532
+ >>> output = leaky_relu(x)
533
+ >>> print(output)
534
+ [[-0.2 4. -1.6]
535
+ [ 2. -1. 9. ]]
536
+ """
537
+
538
+ def __init__(self, alpha=0.2):
539
+ """Initialize LeakyReLU."""
540
+ super(LeakyReLU, self).__init__()
541
+ self.alpha = alpha
542
+
543
+ def construct(self, x):
544
+ out = ops.leaky_relu(x, self.alpha)
545
+ return out
546
+
547
+
548
+ class RReLU(Cell):
549
+ r"""
550
+ Applies RReLU (Randomized Leaky ReLU activation function) element-wise.
551
+
552
+ The activation function is defined as:
553
+
554
+ .. math::
555
+ \text{RReLU}(x_{ji}) = \begin{cases}x_{ji}, &\text{if } x_{ji} \geq 0; \cr
556
+ {\alpha_{ji}} * x_{ji}, &\text{otherwise.}\end{cases}
557
+
558
+ where :math:`\alpha_{ji} \sim U(l, u)`, :math:`l \le u`.
559
+
560
+ Applies the RReLU function element-wise, as described in the paper:
561
+ `Empirical Evaluation of Rectified Activations in Convolution Network <https://arxiv.org/pdf/1505.00853.pdf>`_ .
562
+
563
+ Args:
564
+ lower (Union[int, float]): Lower bound of the uniform distribution used to sample the negative slope. Default: ``1 / 8`` .
565
+ upper (Union[int, float]): Upper bound of the uniform distribution used to sample the negative slope. Default: ``1 / 3`` .
566
+
567
+ Inputs:
568
+ - **x** (Tensor) - The input of RReLU is a Tensor of any dimension.
569
+
570
+ Outputs:
571
+ Tensor, after RReLU, has the same type and shape as the `x`.
572
+
573
+ Raises:
574
+ TypeError: If `lower` is not a float or an int.
575
+ TypeError: If `upper` is not a float or an int.
576
+ TypeError: If `x` is not a Tensor.
577
+ TypeError: If `x` is not a Tensor of mindspore.float16 or mindspore.float32.
578
+ ValueError: If `lower` is greater than `upper`.
579
+
580
+ Supported Platforms:
581
+ ``Ascend`` ``GPU`` ``CPU``
582
+
583
+ Examples:
584
+ >>> import mindspore
585
+ >>> from mindspore import Tensor, nn
586
+ >>> import numpy as np
587
+ >>> x = Tensor(np.array([[-1.0, 4.0], [2.0, 0]]), mindspore.float32)
588
+ >>> r_relu = nn.RReLU()
589
+ >>> output = r_relu(x)
590
+ >>> print(output)
591
+ [[-0.31465699 4. ]
592
+ [ 2. 0. ]]
593
+ """
594
+
595
+ def __init__(self, lower=1 / 8, upper=1 / 3):
596
+ super(RReLU, self).__init__()
597
+ validator.check_value_type('upper', upper, [float, int], self.cls_name)
598
+ validator.check_value_type('lower', lower, [float, int], self.cls_name)
599
+ if lower > upper:
600
+ raise ValueError(f"For {self.cls_name}, the value of 'upper' must be greater than or equal to 'lower', "
601
+ f"but got upper: {upper}, lower: {lower}. ")
602
+ self.lower = Tensor(lower, dtype=mstype.float32)
603
+ self.upper = Tensor(upper, dtype=mstype.float32)
604
+ self.sign = P.Sign()
605
+
606
+ def construct(self, x):
607
+ if not isinstance(x, Tensor):
608
+ raise TypeError(f"For 'rrelu', the input must be a Tensor, but got {type(x)}.")
609
+ _size = x.shape
610
+ _dtype = x.dtype
611
+ sign_matrix = self.sign(x)
612
+ negative_filter = sign_matrix.clip(None, 0)
613
+ positive_filter = sign_matrix.clip(0, None)
614
+ mask = ops.uniform(_size, self.lower, self.upper).astype(_dtype)
615
+ negative_mask = negative_filter * mask * -1
616
+ total_mask = negative_mask + positive_filter
617
+ out = total_mask * x
618
+ return out
619
+
620
+
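The construct above builds the output from sign/clip masks plus a uniformly sampled slope; the same idea in plain NumPy (a reference sketch only, not the packaged implementation):

    import numpy as np

    def rrelu_reference(x, lower=1 / 8, upper=1 / 3):
        # Positions with x >= 0 pass through; negative positions are scaled by a
        # slope drawn element-wise from U(lower, upper).
        slope = np.random.uniform(lower, upper, size=x.shape).astype(x.dtype)
        return np.where(x >= 0, x, slope * x)

    x = np.array([[-1.0, 4.0], [2.0, 0.0]], dtype=np.float32)
    print(rrelu_reference(x))  # negative entries get a random slope in [1/8, 1/3]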
621
+ class SeLU(Cell):
622
+ r"""
623
+ Applies the SeLU (Scaled Exponential Linear Unit) activation function element-wise.
624
+
625
+ SeLU Activation Function Graph:
626
+
627
+ .. image:: ../images/SeLU.png
628
+ :align: center
629
+
630
+ Refer to :func:`mindspore.ops.selu` for more details.
631
+
632
+ Supported Platforms:
633
+ ``Ascend`` ``GPU`` ``CPU``
634
+
635
+ Examples:
636
+ >>> import mindspore
637
+ >>> from mindspore import Tensor, nn
638
+ >>> import numpy as np
639
+ >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
640
+ >>> selu = nn.SeLU()
641
+ >>> output = selu(input_x)
642
+ >>> print(output)
643
+ [[-1.1113307 4.202804 -1.7575096]
644
+ [ 2.101402 -1.7462534 9.456309 ]]
645
+ """
646
+
647
+ def __init__(self):
648
+ """Initialize SeLU"""
649
+ super(SeLU, self).__init__()
650
+ self.selu = P.SeLU()
651
+
652
+ def construct(self, input_x):
653
+ return self.selu(input_x)
654
+
655
+
656
+ class SiLU(Cell):
657
+ r"""
658
+ Applies the SiLU (Sigmoid Linear Unit) function element-wise.
659
+
660
+ .. math::
661
+
662
+ \text{SiLU}(x) = x * \sigma(x),
663
+
664
+ where :math:`x_i` is an element of the input and :math:`\sigma` is the Sigmoid function, defined as:
665
+
666
+ .. math::
667
+
668
+ \text{sigmoid}(x_i) = \frac{1}{1 + \exp(-x_i)},
669
+
670
+ SiLU Activation Function Graph:
671
+
672
+ .. image:: ../images/SiLU.png
673
+ :align: center
674
+
675
+ Inputs:
676
+ - **input** (Tensor) - `input` is :math:`x` in the preceding formula.
677
+ Input with the data type float16 or float32. Tensor of any dimension.
678
+
679
+ Outputs:
680
+ Tensor, with the same type and shape as the `input`.
681
+
682
+ Raises:
683
+ TypeError: If dtype of `input` is neither float16 nor float32.
684
+
685
+ Supported Platforms:
686
+ ``Ascend`` ``GPU`` ``CPU``
687
+
688
+ Examples:
689
+ >>> import mindspore
690
+ >>> from mindspore import Tensor, nn
691
+ >>> import numpy as np
692
+ >>> input = Tensor(np.array([-1, 2, -3, 2, -1]), mindspore.float16)
693
+ >>> silu = nn.SiLU()
694
+ >>> output = silu(input)
695
+ >>> print(output)
696
+ [-0.269 1.762 -0.1423 1.762 -0.269]
697
+ """
698
+
699
+ def __init__(self):
700
+ """Initialize SiLU."""
701
+ super(SiLU, self).__init__()
702
+
703
+ def construct(self, x):
704
+ return ops.function.silu(x)
705
+
706
+
707
+ class Tanh(Cell):
708
+ r"""
709
+ Applies the Tanh function element-wise, returning a new tensor with the hyperbolic tangent of the elements of the input.
710
+ The input is a Tensor with any valid shape.
711
+
712
+ Tanh function is defined as:
713
+
714
+ .. math::
715
+ tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},
716
+
717
+ where :math:`x_i` is an element of the input Tensor.
718
+
719
+ Tanh Activation Function Graph:
720
+
721
+ .. image:: ../images/Tanh.png
722
+ :align: center
723
+
724
+ Inputs:
725
+ - **x** (Tensor) - Tensor of any dimension, input with data type of float16 or float32.
726
+
727
+ Outputs:
728
+ Tensor, with the same type and shape as the `x`.
729
+
730
+ Raises:
731
+ TypeError: If dtype of `x` is neither float16 nor float32.
732
+
733
+ Supported Platforms:
734
+ ``Ascend`` ``GPU`` ``CPU``
735
+
736
+ Examples:
737
+ >>> import mindspore
738
+ >>> from mindspore import Tensor, nn
739
+ >>> import numpy as np
740
+ >>> x = Tensor(np.array([1, 2, 3, 2, 1]), mindspore.float16)
741
+ >>> tanh = nn.Tanh()
742
+ >>> output = tanh(x)
743
+ >>> print(output)
744
+ [0.7617 0.964 0.995 0.964 0.7617]
745
+ """
746
+
747
+ def __init__(self):
748
+ """Initialize Tanh."""
749
+ super(Tanh, self).__init__()
750
+ self.tanh = P.Tanh()
751
+
752
+ def construct(self, x):
753
+ return self.tanh(x)
754
+
755
+
756
+ class Tanhshrink(Cell):
757
+ r"""
758
+ Applies Tanhshrink activation function element-wise and returns a new tensor.
759
+
760
+ Tanhshrink is defined as:
761
+
762
+ .. math::
763
+ tanhshrink(x_i) =x_i- \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)}
764
+ = x_i-\frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},
765
+
766
+ where :math:`x_i` is an element of the input Tensor.
767
+
768
+ Inputs:
769
+ - **x** (Tensor) - Tensor of any dimension.
770
+
771
+ Outputs:
772
+ Tensor, with the same shape as the `x`.
773
+
774
+ Raises:
775
+ TypeError: If `x` is not a Tensor.
776
+
777
+ Supported Platforms:
778
+ ``Ascend`` ``GPU`` ``CPU``
779
+
780
+ Examples:
781
+ >>> import mindspore as ms
782
+ >>> from mindspore import Tensor, nn
783
+ >>> import numpy as np
784
+ >>> x = Tensor(np.array([1, 2, 3, 2, 1]), ms.float16)
785
+ >>> tanhshrink = nn.Tanhshrink()
786
+ >>> output = tanhshrink(x)
787
+ >>> print(output)
788
+ [0.2383 1.036 2.004 1.036 0.2383]
789
+ """
790
+
791
+ def __init__(self):
792
+ """Initialize Tanhshrink."""
793
+ super(Tanhshrink, self).__init__()
794
+
795
+ def construct(self, x):
796
+ return F.tanhshrink(x)
797
+
798
+
799
+ class Hardtanh(Cell):
800
+ r"""
801
+ Applies the Hardtanh function element-wise. The activation function is defined as:
802
+
803
+ .. math::
804
+ \text{Hardtanh}(x) = \begin{cases}
805
+ 1, & \text{ if } x > 1; \\
806
+ -1, & \text{ if } x < -1; \\
807
+ x, & \text{ otherwise. }
808
+ \end{cases}
809
+
810
+ Linear region range :math:`[-1, 1]` can be adjusted using `min_val` and `max_val`.
811
+
812
+ Hardtanh Activation Function Graph:
813
+
814
+ .. image:: ../images/Hardtanh.png
815
+ :align: center
816
+
817
+ Note:
818
+ On Ascend, the float16 data type may lead to accuracy problems.
819
+
820
+ Args:
821
+ min_val (Union[int, float]): Minimum value of the linear region range. Default: ``-1.0`` .
822
+ max_val (Union[int, float]): Maximum value of the linear region range. Default: ``1.0`` .
823
+
824
+ Inputs:
825
+ - **x** (Tensor) - Input Tensor with data type of float16 or float32.
826
+ On CPU and Ascend support dimension 0-7D. On GPU support dimension 0-4D.
827
+
828
+ Outputs:
829
+ Tensor, with the same dtype and shape as `x`.
830
+
831
+ Raises:
832
+ TypeError: If `x` is not a Tensor.
833
+ TypeError: If dtype of `x` is neither float16 nor float32.
834
+ TypeError: If dtype of `min_val` is neither float nor int.
835
+ TypeError: If dtype of `max_val` is neither float nor int.
836
+ ValueError: If `min_val` is not less than `max_val`.
837
+
838
+ Supported Platforms:
839
+ ``Ascend`` ``GPU`` ``CPU``
840
+
841
+ Examples:
842
+ >>> import mindspore
843
+ >>> from mindspore import Tensor, nn
844
+ >>> import numpy as np
845
+ >>> x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
846
+ >>> hardtanh = nn.Hardtanh(min_val=-1.0, max_val=1.0)
847
+ >>> output = hardtanh(x)
848
+ >>> print(output)
849
+ [-1. -1. 0. 1. 1.]
850
+ """
851
+
852
+ def __init__(self, min_val=-1.0, max_val=1.0):
853
+ """Initialize Hardtanh."""
854
+ super(Hardtanh, self).__init__()
855
+ self.min_val = min_val
856
+ self.max_val = max_val
857
+ if self.min_val >= self.max_val:
858
+ raise ValueError(f"For Hardtanh, min_val should be less than max_val,"
859
+ f"but got {self.min_val} and {self.max_val}")
860
+
861
+ def construct(self, x):
862
+ return F.hardtanh(x, self.min_val, self.max_val)
863
+
864
+
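With custom bounds, Hardtanh behaves like clipping to [min_val, max_val]; a small check (a sketch, assuming a working MindSpore install):

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn, ops

    x = Tensor(np.array([-3.0, -0.5, 0.0, 0.5, 3.0]), mindspore.float32)
    out = nn.Hardtanh(min_val=-2.0, max_val=2.0)(x)
    # Sketch: the result should match a plain clip to the same range.
    clipped = ops.clip_by_value(x, -2.0, 2.0)
    print(np.allclose(out.asnumpy(), clipped.asnumpy()))  # expected: True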
865
+ class GELU(Cell):
866
+ r"""
867
+ Applies GELU function to each element of the input. The input is a Tensor with any valid shape.
868
+
869
+ GELU is defined as:
870
+
871
+ .. math::
872
+
873
+ GELU(x_i) = x_i*P(X < x_i),
874
+
875
+ where :math:`P` is the cumulative distribution function
876
+ of standard Gaussian distribution and :math:`x_i` is the element of the input.
877
+
878
+ GELU Activation Function Graph:
879
+
880
+ .. image:: ../images/GELU.png
881
+ :align: center
882
+
883
+ Args:
884
+ approximate (bool): Whether to enable approximation. Default: ``True`` .
885
+
886
+ If `approximate` is ``True``, the Gaussian error linear activation is:
888
+
889
+ :math:`0.5 * x * (1 + \tanh(\sqrt{2 / \pi} * (x + 0.044715 * x^3)))`
890
+
891
+ else, it is:
892
+
893
+ :math:`x * P(X <= x) = 0.5 * x * (1 + \operatorname{erf}(x / \sqrt{2}))`, where :math:`X \sim N(0, 1)`.
893
+
894
+ Inputs:
895
+ - **x** (Tensor) - The input of GELU with data type of float16, float32, or float64.
896
+ The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.
897
+
898
+ Outputs:
899
+ Tensor, with the same type and shape as the `x`.
900
+
901
+ Raises:
902
+ TypeError: If dtype of `x` is not one of float16, float32, or float64.
903
+
904
+ Supported Platforms:
905
+ ``Ascend`` ``GPU`` ``CPU``
906
+
907
+ Examples:
908
+ >>> import mindspore
909
+ >>> from mindspore import Tensor, nn
910
+ >>> import numpy as np
911
+ >>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
912
+ >>> gelu = nn.GELU()
913
+ >>> output = gelu(x)
914
+ >>> print(output)
915
+ [[-1.5880802e-01 3.9999299e+00 -3.1077917e-21]
916
+ [ 1.9545976e+00 -2.2918017e-07 9.0000000e+00]]
917
+ >>> gelu = nn.GELU(approximate=False)
918
+ >>> # CPU does not support "approximate=False"; "approximate=True" is used instead
919
+ >>> output = gelu(x)
920
+ >>> print(output)
921
+ [[-1.5865526e-01 3.9998732e+00 -0.0000000e+00]
922
+ [ 1.9544997e+00 -1.4901161e-06 9.0000000e+00]]
923
+ """
924
+
925
+ def __init__(self, approximate=True):
926
+ """Initialize GELU."""
927
+ super(GELU, self).__init__()
928
+ validator.check_bool(approximate, 'approximate', self.cls_name)
929
+ self.approximate = 'tanh'
930
+ if not approximate:
931
+ self.approximate = 'none'
932
+
933
+ def construct(self, x):
934
+ return ops.gelu(x, approximate=self.approximate)
935
+
936
+
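The default approximate=True branch follows the tanh formula quoted in the docstring, so a NumPy re-implementation of that formula should track nn.GELU() closely (a sketch, assuming a working MindSpore install):

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    def gelu_tanh_reference(x):
        # Sketch of the tanh-based approximation quoted in the docstring.
        return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x ** 3)))

    x_np = np.array([-1.0, 0.0, 1.0, 2.0], dtype=np.float32)
    out = nn.GELU()(Tensor(x_np, mindspore.float32))
    print(np.allclose(out.asnumpy(), gelu_tanh_reference(x_np), atol=1e-5))  # expected: True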
937
+ class FastGelu(Cell):
938
+ r"""
939
+ Applies FastGelu function to each element of the input. The input is a Tensor with any valid shape.
940
+
941
+ FastGelu is defined as:
942
+
943
+ .. math::
944
+ FastGelu(x_i) = \frac {x_i} {1 + \exp(-1.702 * \left| x_i \right|)} *
945
+ \exp(0.851 * (x_i - \left| x_i \right|))
946
+
947
+ where :math:`x_i` is the element of the input.
948
+
949
+ FastGelu Activation Function Graph:
950
+
951
+ .. image:: ../images/FastGelu.png
952
+ :align: center
953
+
954
+ Inputs:
955
+ - **x** (Tensor) - The input of FastGelu with data type of float16 or float32.
956
+ The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.
957
+
958
+ Outputs:
959
+ Tensor, with the same type and shape as the `x`.
960
+
961
+ Raises:
962
+ TypeError: If dtype of `x` is neither float16 nor float32.
963
+
964
+ Supported Platforms:
965
+ ``Ascend`` ``GPU`` ``CPU``
966
+
967
+ Examples:
968
+ >>> import mindspore
969
+ >>> from mindspore import Tensor, nn
970
+ >>> import numpy as np
971
+ >>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
972
+ >>> fast_gelu = nn.FastGelu()
973
+ >>> output = fast_gelu(x)
974
+ >>> print(output)
975
+ [[-1.5418735e-01 3.9921875e+00 -9.7473649e-06]
976
+ [ 1.9375000e+00 -1.0052517e-03 8.9824219e+00]]
977
+ """
978
+
979
+ def __init__(self):
980
+ """Initialize FastGelu."""
981
+ super(FastGelu, self).__init__()
982
+ self.fast_gelu = P.FastGeLU()
983
+
984
+ def construct(self, x):
985
+ return self.fast_gelu(x)
986
+
987
+
988
+ class Sigmoid(Cell):
989
+ r"""
990
+ Applies sigmoid activation function element-wise.
991
+
992
+ Sigmoid function is defined as:
993
+
994
+ .. math::
995
+
996
+ \text{sigmoid}(x_i) = \frac{1}{1 + \exp(-x_i)},
997
+
998
+ where :math:`x_i` is the element of `x`.
999
+
1000
+ Sigmoid Activation Function Graph:
1001
+
1002
+ .. image:: ../images/Sigmoid.png
1003
+ :align: center
1004
+
1005
+ Inputs:
1006
+ - **input** (Tensor) - `input` is :math:`x` in the preceding formula. Tensor of any dimension,
1007
+ the data type is float16, float32, float64, complex64 or complex128.
1008
+
1009
+ Outputs:
1010
+ Tensor, with the same type and shape as the `input`.
1011
+
1012
+ Raises:
1013
+ TypeError: If dtype of `input` is not float16, float32, float64, complex64 or complex128.
1014
+ TypeError: If `input` is not a Tensor.
1015
+
1016
+ Supported Platforms:
1017
+ ``Ascend`` ``GPU`` ``CPU``
1018
+
1019
+ Examples:
1020
+ >>> import mindspore
1021
+ >>> from mindspore import Tensor, nn
1022
+ >>> import numpy as np
1023
+ >>> input = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
1024
+ >>> sigmoid = nn.Sigmoid()
1025
+ >>> output = sigmoid(input)
1026
+ >>> print(output)
1027
+ [0.2688 0.11914 0.5 0.881 0.7305 ]
1028
+ """
1029
+
1030
+ def __init__(self):
1031
+ """Initialize Sigmoid."""
1032
+ super(Sigmoid, self).__init__()
1033
+ self.sigmoid = P.Sigmoid()
1034
+
1035
+ def construct(self, x):
1036
+ return self.sigmoid(x)
1037
+
1038
+
1039
+ class Softsign(Cell):
1040
+ r"""
1041
+ Applies softsign activation function element-wise.
1042
+
1043
+ Softsign Activation Function Graph:
1044
+
1045
+ .. image:: ../images/Softsign.png
1046
+ :align: center
1047
+
1048
+ Refer to :func:`mindspore.ops.softsign` for more details.
1049
+
1050
+ Supported Platforms:
1051
+ ``Ascend`` ``GPU`` ``CPU``
1052
+
1053
+ Examples:
1054
+ >>> import mindspore
1055
+ >>> from mindspore import Tensor, nn
1056
+ >>> import numpy as np
1057
+ >>> x = Tensor(np.array([0, -1, 2, 30, -30]), mindspore.float32)
1058
+ >>> softsign = nn.Softsign()
1059
+ >>> output = softsign(x)
1060
+ >>> print(output)
1061
+ [ 0. -0.5 0.6666667 0.9677419 -0.9677419]
1062
+ """
1063
+
1064
+ def __init__(self):
1065
+ """Initialize Softsign."""
1066
+ super(Softsign, self).__init__()
1067
+ self.softsign = P.Softsign()
1068
+
1069
+ def construct(self, x):
1070
+ return self.softsign(x)
1071
+
1072
+
1073
+ class PReLU(Cell):
1074
+ r"""
1075
+ Applies PReLU activation function element-wise.
1076
+
1077
+ PReLU is defined as:
1078
+
1079
+ .. math::
1080
+
1081
+ PReLU(x_i)= \max(0, x_i) + w * \min(0, x_i),
1082
+
1083
+ where :math:`x_i` is an element of a channel of the input.
1084
+
1085
+ Here :math:`w` is a learnable parameter with a default initial value 0.25.
1086
+ Parameter :math:`w` has as many elements as the `channel` argument. If called without the
1087
+ `channel` argument, a single parameter :math:`w` is shared across all channels.
1088
+
1089
+ PReLU Activation Function Graph:
1090
+
1091
+ .. image:: ../images/PReLU.png
1092
+ :align: center
1093
+
1094
+ Args:
1095
+ channel (int): The number of elements of parameter :math:`w`.
1096
+ It must be an int equal to 1 or to the number of channels of the input tensor `x`. Default: ``1`` .
1097
+ w (Union[float, list, Tensor]): The initial value of the parameter. It can be a float, a list of floats, or
1098
+ a Tensor that has the same dtype as the input tensor `x`. Default: ``0.25`` .
1099
+
1100
+ Inputs:
1101
+ - **x** (Tensor) - The input of PReLU with data type of float16 or float32.
1102
+ The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
1103
+
1104
+ Outputs:
1105
+ Tensor, with the same dtype and shape as the `x`.
1106
+
1107
+ Raises:
1108
+ TypeError: If `channel` is not an int.
1109
+ TypeError: If `w` is not one of a float, a list of floats, or a float Tensor.
1110
+ TypeError: If dtype of `x` is neither float16 nor float32.
1111
+ ValueError: If `x` is a 0-D or 1-D Tensor on Ascend.
1112
+ ValueError: If `channel` is less than 1.
1113
+
1114
+ Supported Platforms:
1115
+ ``Ascend`` ``GPU`` ``CPU``
1116
+
1117
+ Examples:
1118
+ >>> import mindspore
1119
+ >>> from mindspore import Tensor, nn
1120
+ >>> import numpy as np
1121
+ >>> x = Tensor(np.array([[[[0.1, 0.6], [0.9, 0.9]]]]), mindspore.float32)
1122
+ >>> prelu = nn.PReLU()
1123
+ >>> output = prelu(x)
1124
+ >>> print(output)
1125
+ [[[[0.1 0.6]
1126
+ [0.9 0.9]]]]
1127
+
1128
+ """
1129
+
1130
+ @cell_attr_register(attrs="")
1131
+ def __init__(self, channel=1, w=0.25):
1132
+ """Initialize PReLU."""
1133
+ super(PReLU, self).__init__()
1134
+ validator.check_positive_int(channel, 'channel', self.cls_name)
1135
+ if isinstance(w, (float, np.float32)):
1136
+ tmp = np.empty((channel,), dtype=np.float32)
1137
+ tmp.fill(w)
1138
+ w = Tensor(tmp, dtype=mstype.float32)
1139
+ elif isinstance(w, list):
1140
+ if len(w) != channel:
1141
+ raise ValueError(f"For '{self.cls_name}', the length of 'w' must be equal to the 'channel' when "
1142
+ f"the 'w' is a list, but got the length of 'w': {len(w)}, the 'channel': {channel}.")
1143
+
1144
+ for i in w:
1145
+ if not isinstance(i, (float, np.float32)):
1146
+ raise ValueError(f"For '{self.cls_name}', all elements in 'w' must be "
1147
+ f"float when the 'w' is a list, but got {i}.")
1148
+ w = Tensor(w, dtype=mstype.float32)
1149
+ elif isinstance(w, Tensor):
1150
+ if w.dtype not in (mstype.float16, mstype.float32):
1151
+ raise ValueError(f"For '{self.cls_name}', the dtype of 'w' must be float16 or "
1152
+ f"float32 when the 'w' is a tensor, but got {w.dtype}.")
1153
+ if len(w.shape) != 1 or w.shape[0] != channel:
1154
+ raise ValueError(f"For '{self.cls_name}', the dimension of 'w' must be 1, and the elements number "
1155
+ f"should be equal to the 'channel' when the 'w' is a tensor, "
1156
+ f"but got 'w' shape {w.shape}, the 'channel' {channel}.")
1157
+ else:
1158
+ raise TypeError(f"For '{self.cls_name}', the 'w' only supported float, list and tensor, "
1159
+ f"but got {type(w).__name__}.")
1160
+ self.w = Parameter(w, name='a')
1161
+ self.prelu = P.PReLU()
1162
+
1163
+ def construct(self, x):
1164
+ return self.prelu(x, F.cast(self.w, x.dtype))
1165
+
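A minimal NumPy sketch of the per-channel formula above (illustrative only), assuming an NCHW layout so that `w` broadcasts along axis 1:

    import numpy as np

    x = np.array([[[[-1.0, 0.6], [0.9, -0.9]]]], dtype=np.float32)   # shape (1, 1, 2, 2)
    w = np.full((1,), 0.25, dtype=np.float32).reshape(1, -1, 1, 1)   # one weight per channel
    print(np.maximum(0, x) + w * np.minimum(0, x))                   # negatives scaled by 0.25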
1166
+
1167
+ class HSwish(Cell):
1168
+ r"""
1169
+ Applies the Hard Swish (hswish) activation function element-wise.
1170
+
1171
+ Hard swish is defined as:
1172
+
1173
+ .. math::
1174
+ \text{hswish}(x_{i}) = x_{i} * \frac{ReLU6(x_{i} + 3)}{6},
1175
+
1176
+ HSwish Activation Function Graph:
1177
+
1178
+ .. image:: ../images/HSwish.png
1179
+ :align: center
1180
+
1181
+ Inputs:
1182
+ - **x** (Tensor) - The input of HSwish, data type must be float16 or float32.
1183
+ The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.
1184
+
1185
+ Outputs:
1186
+ Tensor, with the same type and shape as the `x`.
1187
+
1188
+ Raises:
1189
+ TypeError: If dtype of `x` is neither float16 nor float32.
1190
+
1191
+ Supported Platforms:
1192
+ ``Ascend`` ``GPU`` ``CPU``
1193
+
1194
+ Examples:
1195
+ >>> import mindspore
1196
+ >>> from mindspore import Tensor, nn
1197
+ >>> import numpy as np
1198
+ >>> x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
1199
+ >>> hswish = nn.HSwish()
1200
+ >>> result = hswish(x)
1201
+ >>> print(result)
1202
+ [-0.3333 -0.3333 0. 1.667 0.6665]
1203
+ """
1204
+
1205
+ def __init__(self):
1206
+ """Initialize HSwish."""
1207
+ super(HSwish, self).__init__()
1208
+ self.hswish = P.HSwish()
1209
+
1210
+ def construct(self, x):
1211
+ return self.hswish(x)
1212
+
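The formula above can be sanity-checked with a short NumPy sketch (illustrative only), using `np.clip` for the ReLU6 term:

    import numpy as np

    x = np.array([-1, -2, 0, 2, 1], dtype=np.float32)
    print(x * np.clip(x + 3, 0, 6) / 6)   # ~[-0.3333 -0.3333  0.  1.6667  0.6667]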
1213
+
1214
+ class HSigmoid(Cell):
1215
+ r"""
1216
+ Applies the hard sigmoid activation function element-wise.
1217
+
1218
+ Hard sigmoid is defined as:
1219
+
1220
+ .. math::
1221
+ \text{hsigmoid}(x_{i}) = \max(0, \min(1, \frac{x_{i} + 3}{6})),
1222
+
1223
+ HSigmoid Activation Function Graph:
1224
+
1225
+ .. image:: ../images/HSigmoid.png
1226
+ :align: center
1227
+
1228
+ Inputs:
1229
+ - **input_x** (Tensor) - The input of HSigmoid. Tensor of any dimension.
1230
+
1231
+ Outputs:
1232
+ Tensor, with the same type and shape as the `input_x`.
1233
+
1234
+ Raises:
1235
+ TypeError: If `input_x` is not a Tensor.
1236
+
1237
+ Supported Platforms:
1238
+ ``Ascend`` ``GPU`` ``CPU``
1239
+
1240
+ Examples:
1241
+ >>> import mindspore
1242
+ >>> from mindspore import Tensor, nn
1243
+ >>> import numpy as np
1244
+ >>> x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
1245
+ >>> hsigmoid = nn.HSigmoid()
1246
+ >>> result = hsigmoid(x)
1247
+ >>> print(result)
1248
+ [0.3333 0.1666 0.5 0.8335 0.6665]
1249
+ """
1250
+
1251
+ def __init__(self):
1252
+ """Initialize HSigmoid."""
1253
+ super(HSigmoid, self).__init__()
1254
+ self.hsigmoid = P.HSigmoid()
1255
+
1256
+ def construct(self, input_x):
1257
+ return self.hsigmoid(input_x)
1258
+
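Equivalently, the hard sigmoid above is a clipped linear ramp; a one-line NumPy sketch (illustrative only) reproduces the example values:

    import numpy as np

    x = np.array([-1, -2, 0, 2, 1], dtype=np.float32)
    print(np.clip((x + 3) / 6, 0, 1))     # ~[0.3333 0.1667 0.5  0.8333 0.6667]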
1259
+
1260
+ class LogSigmoid(Cell):
1261
+ r"""
1262
+ Applies the logsigmoid activation function element-wise. The input is a Tensor of any valid shape.
1263
+
1264
+ Logsigmoid is defined as:
1265
+
1266
+ .. math::
1267
+ \text{logsigmoid}(x_{i}) = \log(\frac{1}{1 + \exp(-x_i)}),
1268
+
1269
+ where :math:`x_{i}` is the element of the input.
1270
+
1271
+ LogSigmoid Activation Function Graph:
1272
+
1273
+ .. image:: ../images/LogSigmoid.png
1274
+ :align: center
1275
+
1276
+ Inputs:
1277
+ - **x** (Tensor) - The input of LogSigmoid with data type of float16 or float32.
1278
+ The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.
1279
+
1280
+ Outputs:
1281
+ Tensor, with the same type and shape as the `x`.
1282
+
1283
+ Raises:
1284
+ TypeError: If dtype of `x` is neither float16 nor float32.
1285
+
1286
+ Supported Platforms:
1287
+ ``Ascend`` ``GPU`` ``CPU``
1288
+
1289
+ Examples:
1290
+ >>> import mindspore
1291
+ >>> from mindspore import Tensor, nn
1292
+ >>> import numpy as np
1293
+ >>> net = nn.LogSigmoid()
1294
+ >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
1295
+ >>> output = net(x)
1296
+ >>> print(output)
1297
+ [-0.31326166 -0.12692806 -0.04858734]
1298
+ """
1299
+
1300
+ def __init__(self):
1301
+ """Initialize LogSigmoid."""
1302
+ super(LogSigmoid, self).__init__()
1303
+ self.mul = P.Mul()
1304
+ self.exp = P.Exp()
1305
+ self.add = P.Add()
1306
+ self.rec = P.Reciprocal()
1307
+ self.log = P.Log()
1308
+
1309
+ def construct(self, input_x):
1310
+ neg_input = self.mul(input_x, -1)
1311
+ exp_neg_input = self.exp(neg_input)
1312
+ exp_neg_input_1 = self.add(exp_neg_input, 1)
1313
+ rec_exp_neg_input_1 = self.rec(exp_neg_input_1)
1314
+ ret = self.log(rec_exp_neg_input_1)
1315
+ return ret
1316
+
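The `construct` above chains Mul, Exp, Add, Reciprocal and Log to compute exactly :math:`\log(1 / (1 + \exp(-x)))`; a direct NumPy sketch (illustrative only) gives the same result:

    import numpy as np

    x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    print(np.log(1.0 / (1.0 + np.exp(-x))))   # ~[-0.3133 -0.1269 -0.0486], matching the example above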
1317
+
1318
+ class LRN(Cell):
1319
+ r"""
1320
+ Local Response Normalization.
1321
+
1322
+ .. warning::
1323
+ LRN is deprecated on Ascend due to a potential accuracy problem. It is recommended to use other
1324
+ normalization methods, e.g. :class:`mindspore.nn.BatchNorm1d` ,
1325
+ :class:`mindspore.nn.BatchNorm2d` , :class:`mindspore.nn.BatchNorm3d`.
1326
+
1327
+ Refer to :func:`mindspore.ops.lrn` for more details.
1328
+
1329
+ Supported Platforms:
1330
+ ``GPU`` ``CPU``
1331
+
1332
+ Examples:
1333
+ >>> import mindspore
1334
+ >>> from mindspore import Tensor, nn
1335
+ >>> import numpy as np
1336
+ >>> input_x = Tensor(np.array([[[[0.1], [0.2]],
1337
+ ... [[0.3], [0.4]]]]), mindspore.float32)
1338
+ >>> output = nn.LRN()(input_x)
1339
+ >>> print(output)
1340
+ [[[[0.09534626]
1341
+ [0.1825742 ]]
1342
+ [[0.2860388 ]
1343
+ [0.3651484 ]]]]
1344
+ """
1345
+
1346
+ def __init__(self, depth_radius=5, bias=1.0, alpha=1.0, beta=0.5, norm_region="ACROSS_CHANNELS"):
1347
+ """Initialize LRN."""
1348
+ super(LRN, self).__init__()
1349
+ self.lrn_op = NN_OPS.LRN(depth_radius, bias, alpha, beta, norm_region)
1350
+
1351
+ def construct(self, input_x):
1352
+ return self.lrn_op(input_x)
1353
+
1354
+
1355
+ class SoftShrink(Cell):
1356
+ r"""
1357
+ Applies the SoftShrink function element-wise.
1358
+
1359
+ .. math::
1360
+ \text{SoftShrink}(x) =
1361
+ \begin{cases}
1362
+ x - \lambda, & \text{ if } x > \lambda \\
1363
+ x + \lambda, & \text{ if } x < -\lambda \\
1364
+ 0, & \text{ otherwise }
1365
+ \end{cases}
1366
+
1367
+ SoftShrink Activation Function Graph:
1368
+
1369
+ .. image:: ../images/Softshrink.png
1370
+ :align: center
1371
+
1372
+ Args:
1373
+ lambd (float): The :math:`\lambda` in the SoftShrink formula above; it must be no less than zero.
1374
+ Default: ``0.5`` .
1375
+
1376
+ Inputs:
1377
+ - **input_x** (Tensor) - The input of SoftShrink with data type of float16 or float32.
1378
+ It can have any number of dimensions.
1379
+
1380
+ Outputs:
1381
+ Tensor, has the same shape and data type as `input_x`.
1382
+
1383
+ Raises:
1384
+ TypeError: If `lambd` is not a float.
1385
+ TypeError: If `input_x` is not a Tensor.
1386
+ TypeError: If dtype of `input_x` is neither float16 nor float32.
1387
+ ValueError: If `lambd` is less than 0.
1388
+
1389
+ Supported Platforms:
1390
+ ``Ascend`` ``GPU`` ``CPU``
1391
+
1392
+ Examples:
1393
+ >>> import mindspore
1394
+ >>> from mindspore import Tensor, nn
1395
+ >>> import numpy as np
1396
+ >>> input_x = Tensor(np.array([[ 0.5297, 0.7871, 1.1754], [ 0.7836, 0.6218, -1.1542]]), mindspore.float16)
1397
+ >>> softshrink = nn.SoftShrink()
1398
+ >>> output = softshrink(input_x)
1399
+ >>> print(output)
1400
+ [[ 0.02979 0.287 0.676 ]
1401
+ [ 0.2837 0.1216 -0.6543 ]]
1402
+ """
1403
+
1404
+ def __init__(self, lambd=0.5):
1405
+ super(SoftShrink, self).__init__()
1406
+ self.softshrink = P.SoftShrink(lambd)
1407
+
1408
+ def construct(self, input_x):
1409
+ output = self.softshrink(input_x)
1410
+ return output
1411
+
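A compact NumPy sketch of the piecewise SoftShrink definition (illustrative only), using the default lambd of 0.5:

    import numpy as np

    lambd = 0.5
    x = np.array([[0.5297, 0.7871, 1.1754], [0.7836, 0.6218, -1.1542]], dtype=np.float32)
    y = np.where(x > lambd, x - lambd, np.where(x < -lambd, x + lambd, 0.0))
    print(y)   # ~[[0.0297 0.2871 0.6754] [0.2836 0.1218 -0.6542]]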
1412
+
1413
+ class HShrink(Cell):
1414
+ r"""
1415
+ Applies the Hard Shrink activation function element-wise.
1416
+
1417
+ The formula is defined as follows:
1418
+
1419
+ .. math::
1420
+ \text{HardShrink}(x) =
1421
+ \begin{cases}
1422
+ x, & \text{ if } x > \lambda \\
1423
+ x, & \text{ if } x < -\lambda \\
1424
+ 0, & \text{ otherwise }
1425
+ \end{cases}
1426
+
1427
+ HShrink Activation Function Graph:
1428
+
1429
+ .. image:: ../images/HShrink.png
1430
+ :align: center
1431
+
1432
+ Args:
1433
+ lambd (float): The threshold :math:`\lambda` defined by the Hard Shrink formula. Default: ``0.5`` .
1434
+
1435
+ Inputs:
1436
+ - **input_x** (Tensor) - The input of Hard Shrink with data type of float16 or float32.
1437
+
1438
+ Outputs:
1439
+ Tensor, the same shape and data type as the input.
1440
+
1441
+ Raises:
1442
+ TypeError: If `lambd` is not a float.
1443
+ TypeError: If dtype of `input_x` is neither float16 nor float32.
1444
+
1445
+ Supported Platforms:
1446
+ ``Ascend`` ``GPU`` ``CPU``
1447
+
1448
+ Examples:
1449
+ >>> import mindspore
1450
+ >>> from mindspore import Tensor, nn
1451
+ >>> import numpy as np
1452
+ >>> input_x = Tensor(np.array([[ 0.5, 1, 2.0], [0.0533,0.0776,-2.1233]]), mindspore.float32)
1453
+ >>> hshrink = nn.HShrink()
1454
+ >>> output = hshrink(input_x)
1455
+ >>> print(output)
1456
+ [[ 0. 1. 2. ]
1457
+ [ 0. 0. -2.1233]]
1458
+ """
1459
+
1460
+ def __init__(self, lambd=0.5):
1461
+ super(HShrink, self).__init__()
1462
+ self.hshrink = P.HShrink(lambd)
1463
+
1464
+ def construct(self, input_x):
1465
+ return self.hshrink(input_x)
1466
+
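Because the first two branches of the HardShrink definition both return x, the formula collapses to keeping values whose magnitude exceeds lambd; a NumPy sketch (illustrative only):

    import numpy as np

    lambd = 0.5
    x = np.array([[0.5, 1.0, 2.0], [0.0533, 0.0776, -2.1233]], dtype=np.float32)
    print(np.where(np.abs(x) > lambd, x, 0.0))   # [[0. 1. 2.] [0. 0. -2.1233]]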
1467
+
1468
+ class Threshold(Cell):
1469
+ r"""
1470
+ Thresholds each element of the input Tensor.
1471
+
1472
+ The formula is defined as follows:
1473
+
1474
+ .. math::
1475
+ y =
1476
+ \begin{cases}
1477
+ x, &\text{ if } x > \text{threshold} \\
1478
+ \text{value}, &\text{ otherwise }
1479
+ \end{cases}
1480
+
1481
+ Args:
1482
+ threshold (Union[int, float]): The value to threshold at.
1483
+ value (Union[int, float]): The value to replace with when the element is not greater than `threshold`.
1484
+
1485
+ Inputs:
1486
+ - **input_x** (Tensor) - The input of Threshold with data type of float16 or float32.
1487
+
1488
+ Outputs:
1489
+ Tensor, the same shape and data type as the `input_x`.
1490
+
1491
+ Raises:
1492
+ TypeError: If `threshold` is not a float or an int.
1493
+ TypeError: If `value` is not a float or an int.
1494
+
1495
+ Supported Platforms:
1496
+ ``Ascend`` ``GPU`` ``CPU``
1497
+
1498
+ Examples:
1499
+ >>> import mindspore
1500
+ >>> from mindspore import Tensor, nn
1501
+ >>> m = nn.Threshold(0.1, 20)
1502
+ >>> inputs = Tensor([0.1, 0.2, 0.3], mindspore.float32)
1503
+ >>> outputs = m(inputs)
1504
+ >>> print(outputs)
1505
+ [ 20.0 0.2 0.3]
1506
+ """
1507
+
1508
+ def __init__(self, threshold, value):
1509
+ """Initialize Threshold."""
1510
+ super(Threshold, self).__init__()
1511
+ self.threshold = threshold
1512
+ self.value = value
1513
+
1514
+ def construct(self, input_x):
1515
+ return F.threshold(input_x, self.threshold, self.value)
1516
+
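The `construct` above simply forwards to `F.threshold`; the piecewise rule can be sketched in NumPy (illustrative only) with `np.where`:

    import numpy as np

    threshold, value = 0.1, 20.0
    x = np.array([0.1, 0.2, 0.3], dtype=np.float32)
    print(np.where(x > threshold, x, value))     # [20.  0.2  0.3]; 0.1 is not > 0.1, so it is replaced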
1517
+
1518
+ class Mish(Cell):
1519
+ r"""
1520
+ Computes MISH (A Self Regularized Non-Monotonic Neural Activation Function)
1521
+ of input tensors element-wise.
1522
+
1523
+ Refer to :func:`mindspore.ops.mish` for more details.
1524
+
1525
+ Mish Activation Function Graph:
1526
+
1527
+ .. image:: ../images/Mish.png
1528
+ :align: center
1529
+
1530
+ Supported Platforms:
1531
+ ``Ascend`` ``GPU`` ``CPU``
1532
+
1533
+ Examples:
1534
+ >>> import mindspore
1535
+ >>> from mindspore import Tensor, nn
1536
+ >>> import numpy as np
1537
+ >>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
1538
+ >>> mish = nn.Mish()
1539
+ >>> output = mish(x)
1540
+ >>> print(output)
1541
+ [[-3.03401530e-01 3.99741292e+00 -2.68321624e-03]
1542
+ [ 1.94395900e+00 -3.35762873e-02 9.00000000e+00]]
1543
+ """
1544
+
1545
+ def __init__(self):
1546
+ """Initialize Mish."""
1547
+ super().__init__("Mish")
1548
+ self.mish = NN_OPS.Mish()
1549
+
1550
+ def construct(self, input_x):
1551
+ return self.mish(input_x)
1552
+
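The docstring defers to :func:`mindspore.ops.mish` for the definition. Assuming the standard Mish form :math:`x \cdot \tanh(\text{softplus}(x))` (an assumption, not restated here), a NumPy sketch (illustrative only) matches the example output to float32 precision:

    import numpy as np

    x = np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]], dtype=np.float32)
    print(x * np.tanh(np.log1p(np.exp(x))))   # softplus(x) = log(1 + exp(x))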
1553
+
1554
+ class GLU(Cell):
1555
+ r"""
1556
+ The gated linear unit function.
1557
+
1558
+ .. math::
1559
+ {GLU}(a, b)= a \otimes \sigma(b)
1560
+
1561
+ where :math:`a` is the first half of the input matrices and :math:`b` is the second half.
1562
+
1563
+ Here :math:`\sigma` is the sigmoid function, and :math:`\otimes` is the Hadamard product.
1564
+
1565
+ Args:
1566
+ axis (int): The axis along which to split the input. Default: ``-1`` , the last axis of `x`.
1567
+
1568
+ Inputs:
1569
+ - **x** (Tensor) - :math:`(\ast_1, N, \ast_2)` where :math:`\ast` means any number of additional dimensions.
1570
+
1571
+ Outputs:
1572
+ Tensor, the same dtype as the `x`, with the shape :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2`.
1573
+
1574
+ Supported Platforms:
1575
+ ``Ascend`` ``GPU`` ``CPU``
1576
+
1577
+ Examples:
1578
+ >>> import mindspore as ms
1579
+ >>> m = ms.nn.GLU()
1580
+ >>> input = ms.Tensor([[0.1,0.2,0.3,0.4],[0.5,0.6,0.7,0.8]])
1581
+ >>> output = m(input)
1582
+ >>> print(output)
1583
+ [[0.05744425 0.11973753]
1584
+ [0.33409387 0.41398472]]
1585
+ """
1586
+
1587
+ def __init__(self, axis=-1):
1588
+ """Initialize GLU."""
1589
+ super().__init__("GLU")
1590
+ self.dim = axis
1591
+ self.split = P.Split(axis=axis, output_num=2)
1592
+ self.sigmoid = P.Sigmoid()
1593
+
1594
+ def construct(self, x):
1595
+ x1, x2 = self.split(x)
1596
+ x2 = self.sigmoid(x2)
1597
+ return x1 * x2
1598
+
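A NumPy sketch of the gating above (illustrative only): split the last axis in half and use the second half, passed through a sigmoid, to gate the first half:

    import numpy as np

    x = np.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]], dtype=np.float32)
    a, b = np.split(x, 2, axis=-1)             # first and second halves along the last axis
    print(a * (1.0 / (1.0 + np.exp(-b))))      # ~[[0.0574 0.1197] [0.3341 0.4140]]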
1599
+
1600
+ _activation = {
1601
+ 'softmin': Softmin,
1602
+ 'softmax': Softmax,
1603
+ 'softmax2d': Softmax2d,
1604
+ 'logsoftmax': LogSoftmax,
1605
+ 'relu': ReLU,
1606
+ 'relu6': ReLU6,
1607
+ 'rrelu': RReLU,
1608
+ 'silu': SiLU,
1609
+ 'tanh': Tanh,
1610
+ 'tanhshrink': Tanhshrink,
1611
+ 'hardtanh': Hardtanh,
1612
+ 'gelu': GELU,
1613
+ 'fast_gelu': FastGelu,
1614
+ 'elu': ELU,
1615
+ 'sigmoid': Sigmoid,
1616
+ 'softsign': Softsign,
1617
+ 'prelu': PReLU,
1618
+ 'leakyrelu': LeakyReLU,
1619
+ 'hswish': HSwish,
1620
+ 'hsigmoid': HSigmoid,
1621
+ 'logsigmoid': LogSigmoid,
1622
+ 'softshrink': SoftShrink,
1623
+ 'hshrink': HShrink,
1624
+ 'threshold': Threshold,
1625
+ 'mish': Mish,
1626
+ }
1627
+
1628
+
1629
+ def get_activation(name, prim_name=None):
1630
+ """
1631
+ Gets the activation function.
1632
+
1633
+ Args:
1634
+ name (str): The name of the activation function.
1635
+ prim_name (Union[str, None]): The name of the primitive, used only as a prefix in error messages. Default: ``None`` .
1636
+
1637
+ Returns:
1638
+ Cell, an instance of the requested activation function.
1639
+
1640
+ Supported Platforms:
1641
+ ``Ascend`` ``GPU`` ``CPU``
1642
+
1643
+ Examples:
1644
+ >>> import mindspore.nn as nn
1645
+ >>> sigmoid = nn.get_activation('sigmoid')
1646
+ >>> print(sigmoid)
1647
+ Sigmoid<>
1648
+ """
1649
+ msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
1650
+ if name is None:
1651
+ return None
1652
+
1653
+ if name not in _activation:
1654
+ raise KeyError(f"{msg_prefix} 'name' must be in {list(_activation.keys())}, but got {name}.")
1655
+ return _activation[name]()
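From the registry and the checks above: a ``None`` name is passed through unchanged, a known name returns a freshly constructed Cell, and an unknown name raises a KeyError listing the valid keys (prefixed with `prim_name` when it is given). A short usage sketch (illustrative only):

    import mindspore.nn as nn

    act = nn.get_activation('hswish')     # instantiates HSwish from the _activation registry
    print(type(act).__name__)             # HSwish
    print(nn.get_activation(None))        # None is passed through unchanged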