mindspore-2.4.0-cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
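
As context for how such a file-level diff can be produced: a wheel is a zip archive, so a listing like the one below can be reproduced locally by comparing the member lists of two wheel files. The following is a minimal sketch using only the Python standard library; the two wheel filenames in the usage line are placeholders, not files referenced by this page.

```python
import zipfile

def wheel_diff(old_whl: str, new_whl: str):
    """Compare the member lists of two wheel archives (wheels are zip files)."""
    with zipfile.ZipFile(old_whl) as old, zipfile.ZipFile(new_whl) as new:
        old_names = set(old.namelist())
        new_names = set(new.namelist())
    # Files present only in the new wheel were added; only in the old, removed.
    return sorted(new_names - old_names), sorted(old_names - new_names)

# Hypothetical usage: both filenames are placeholders for locally downloaded wheels.
added, removed = wheel_diff("mindspore-2.3.1-cp311-cp311-win_amd64.whl",
                            "mindspore-2.4.0-cp311-cp311-win_amd64.whl")
print(f"Files added: {len(added)}, removed: {len(removed)}")
```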

Potentially problematic release: this version of mindspore might be problematic.

Files changed (1406)
  1. mindspore/.commit_id +1 -0
  2. mindspore/ConcurrencyCheck.dll +0 -0
  3. mindspore/CppBuildInsights.dll +0 -0
  4. mindspore/CppCoreCheck.dll +0 -0
  5. mindspore/EnumIndex.dll +0 -0
  6. mindspore/EspXEngine.dll +0 -0
  7. mindspore/HResultCheck.dll +0 -0
  8. mindspore/KernelTraceControl.dll +0 -0
  9. mindspore/LocalESPC.dll +0 -0
  10. mindspore/Microsoft.Diagnostics.Tracing.EventSource.dll +0 -0
  11. mindspore/Microsoft.VisualStudio.RemoteControl.dll +0 -0
  12. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  13. mindspore/Microsoft.VisualStudio.Utilities.Internal.dll +0 -0
  14. mindspore/Newtonsoft.Json.dll +0 -0
  15. mindspore/System.Runtime.CompilerServices.Unsafe.dll +0 -0
  16. mindspore/VariantClear.dll +0 -0
  17. mindspore/__init__.py +53 -0
  18. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  19. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  20. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  21. mindspore/_check_jit_forbidden_api.py +106 -0
  22. mindspore/_checkparam.py +1419 -0
  23. mindspore/_extends/__init__.py +23 -0
  24. mindspore/_extends/builtin_operations.py +224 -0
  25. mindspore/_extends/graph_kernel/__init__.py +17 -0
  26. mindspore/_extends/graph_kernel/model/__init__.py +19 -0
  27. mindspore/_extends/graph_kernel/model/graph_parallel.py +311 -0
  28. mindspore/_extends/graph_kernel/model/graph_split.py +1348 -0
  29. mindspore/_extends/graph_kernel/model/model.py +553 -0
  30. mindspore/_extends/graph_kernel/model/model_builder.py +216 -0
  31. mindspore/_extends/graph_kernel/parallel_estimate.py +60 -0
  32. mindspore/_extends/graph_kernel/splitter.py +140 -0
  33. mindspore/_extends/graph_kernel/utils.py +28 -0
  34. mindspore/_extends/parallel_compile/__init__.py +19 -0
  35. mindspore/_extends/parallel_compile/akg_compiler/__init__.py +19 -0
  36. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +269 -0
  37. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +529 -0
  38. mindspore/_extends/parallel_compile/akg_compiler/compiler.py +56 -0
  39. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
  40. mindspore/_extends/parallel_compile/akg_compiler/get_file_path.py +36 -0
  41. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +556 -0
  42. mindspore/_extends/parallel_compile/akg_compiler/util.py +159 -0
  43. mindspore/_extends/parse/__init__.py +49 -0
  44. mindspore/_extends/parse/compile_config.py +299 -0
  45. mindspore/_extends/parse/namespace.py +136 -0
  46. mindspore/_extends/parse/parser.py +1448 -0
  47. mindspore/_extends/parse/resources.py +213 -0
  48. mindspore/_extends/parse/standard_method.py +4475 -0
  49. mindspore/_extends/parse/trope.py +97 -0
  50. mindspore/_extends/pijit/__init__.py +23 -0
  51. mindspore/_extends/pijit/pijit_func_white_list.py +669 -0
  52. mindspore/_extends/remote/__init__.py +19 -0
  53. mindspore/_extends/remote/kernel_build_server.py +199 -0
  54. mindspore/_extends/remote/kernel_build_server_akg.py +55 -0
  55. mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
  56. mindspore/_extends/remote/kernel_build_server_ascend.py +75 -0
  57. mindspore/_extends/utils.py +68 -0
  58. mindspore/_install_custom.py +43 -0
  59. mindspore/_profiler.py +30 -0
  60. mindspore/amp.py +433 -0
  61. mindspore/atlprov.dll +0 -0
  62. mindspore/avcodec-59.dll +0 -0
  63. mindspore/avdevice-59.dll +0 -0
  64. mindspore/avfilter-8.dll +0 -0
  65. mindspore/avformat-59.dll +0 -0
  66. mindspore/avutil-57.dll +0 -0
  67. mindspore/boost/__init__.py +42 -0
  68. mindspore/boost/adasum.py +319 -0
  69. mindspore/boost/base.py +535 -0
  70. mindspore/boost/boost.py +400 -0
  71. mindspore/boost/boost_cell_wrapper.py +790 -0
  72. mindspore/boost/dim_reduce.py +323 -0
  73. mindspore/boost/grad_accumulation.py +79 -0
  74. mindspore/boost/grad_freeze.py +382 -0
  75. mindspore/boost/group_loss_scale_manager.py +166 -0
  76. mindspore/boost/less_batch_normalization.py +174 -0
  77. mindspore/c1.dll +0 -0
  78. mindspore/c1xx.dll +0 -0
  79. mindspore/c2.dll +0 -0
  80. mindspore/cfgpersist.dll +0 -0
  81. mindspore/clang_rt.asan_dbg_dynamic-x86_64.dll +0 -0
  82. mindspore/clang_rt.asan_dynamic-x86_64.dll +0 -0
  83. mindspore/common/__init__.py +86 -0
  84. mindspore/common/_auto_dynamic.py +68 -0
  85. mindspore/common/_decorator.py +50 -0
  86. mindspore/common/_jit_fallback_utils.py +110 -0
  87. mindspore/common/_monad.py +25 -0
  88. mindspore/common/_pijit_context.py +190 -0
  89. mindspore/common/_register_for_adapter.py +74 -0
  90. mindspore/common/_register_for_recompute.py +48 -0
  91. mindspore/common/_register_for_tensor.py +46 -0
  92. mindspore/common/_stub_tensor.py +210 -0
  93. mindspore/common/_tensor_overload.py +139 -0
  94. mindspore/common/_utils.py +122 -0
  95. mindspore/common/api.py +2064 -0
  96. mindspore/common/auto_dynamic_shape.py +507 -0
  97. mindspore/common/dtype.py +422 -0
  98. mindspore/common/dump.py +130 -0
  99. mindspore/common/file_system.py +48 -0
  100. mindspore/common/generator.py +254 -0
  101. mindspore/common/hook_handle.py +143 -0
  102. mindspore/common/initializer.py +880 -0
  103. mindspore/common/jit_config.py +98 -0
  104. mindspore/common/lazy_inline.py +240 -0
  105. mindspore/common/mindir_util.py +111 -0
  106. mindspore/common/mutable.py +234 -0
  107. mindspore/common/no_inline.py +54 -0
  108. mindspore/common/np_dtype.py +25 -0
  109. mindspore/common/parameter.py +1081 -0
  110. mindspore/common/recompute.py +292 -0
  111. mindspore/common/seed.py +260 -0
  112. mindspore/common/sparse_tensor.py +1175 -0
  113. mindspore/common/symbol.py +122 -0
  114. mindspore/common/tensor.py +5039 -0
  115. mindspore/communication/__init__.py +37 -0
  116. mindspore/communication/_comm_helper.py +501 -0
  117. mindspore/communication/_hccl_management.py +297 -0
  118. mindspore/communication/comm_func.py +1395 -0
  119. mindspore/communication/management.py +673 -0
  120. mindspore/config/op_info.config +533 -0
  121. mindspore/context.py +2077 -0
  122. mindspore/d3dcompiler_47.dll +0 -0
  123. mindspore/dataset/__init__.py +90 -0
  124. mindspore/dataset/audio/__init__.py +61 -0
  125. mindspore/dataset/audio/transforms.py +3690 -0
  126. mindspore/dataset/audio/utils.py +386 -0
  127. mindspore/dataset/audio/validators.py +1172 -0
  128. mindspore/dataset/callback/__init__.py +20 -0
  129. mindspore/dataset/callback/ds_callback.py +368 -0
  130. mindspore/dataset/callback/validators.py +32 -0
  131. mindspore/dataset/core/__init__.py +13 -0
  132. mindspore/dataset/core/config.py +1095 -0
  133. mindspore/dataset/core/datatypes.py +101 -0
  134. mindspore/dataset/core/py_util_helpers.py +65 -0
  135. mindspore/dataset/core/validator_helpers.py +781 -0
  136. mindspore/dataset/debug/__init__.py +21 -0
  137. mindspore/dataset/debug/debug_hook.py +97 -0
  138. mindspore/dataset/debug/pre_defined_hook.py +67 -0
  139. mindspore/dataset/engine/__init__.py +124 -0
  140. mindspore/dataset/engine/cache_admin.py +47 -0
  141. mindspore/dataset/engine/cache_client.py +129 -0
  142. mindspore/dataset/engine/datasets.py +4582 -0
  143. mindspore/dataset/engine/datasets_audio.py +911 -0
  144. mindspore/dataset/engine/datasets_standard_format.py +543 -0
  145. mindspore/dataset/engine/datasets_text.py +2161 -0
  146. mindspore/dataset/engine/datasets_user_defined.py +1184 -0
  147. mindspore/dataset/engine/datasets_vision.py +4816 -0
  148. mindspore/dataset/engine/iterators.py +371 -0
  149. mindspore/dataset/engine/obs/__init__.py +23 -0
  150. mindspore/dataset/engine/obs/config_loader.py +68 -0
  151. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +508 -0
  152. mindspore/dataset/engine/obs/util.py +482 -0
  153. mindspore/dataset/engine/offload.py +596 -0
  154. mindspore/dataset/engine/queue.py +304 -0
  155. mindspore/dataset/engine/samplers.py +895 -0
  156. mindspore/dataset/engine/serializer_deserializer.py +159 -0
  157. mindspore/dataset/engine/validators.py +2895 -0
  158. mindspore/dataset/text/__init__.py +51 -0
  159. mindspore/dataset/text/transforms.py +1703 -0
  160. mindspore/dataset/text/utils.py +715 -0
  161. mindspore/dataset/text/validators.py +642 -0
  162. mindspore/dataset/transforms/__init__.py +45 -0
  163. mindspore/dataset/transforms/c_transforms.py +638 -0
  164. mindspore/dataset/transforms/py_transforms.py +393 -0
  165. mindspore/dataset/transforms/py_transforms_util.py +255 -0
  166. mindspore/dataset/transforms/transforms.py +1260 -0
  167. mindspore/dataset/transforms/validators.py +410 -0
  168. mindspore/dataset/utils/__init__.py +19 -0
  169. mindspore/dataset/utils/browse_dataset.py +190 -0
  170. mindspore/dataset/utils/line_reader.py +126 -0
  171. mindspore/dataset/vision/__init__.py +65 -0
  172. mindspore/dataset/vision/c_transforms.py +2641 -0
  173. mindspore/dataset/vision/py_transforms.py +2120 -0
  174. mindspore/dataset/vision/py_transforms_util.py +1660 -0
  175. mindspore/dataset/vision/transforms.py +7295 -0
  176. mindspore/dataset/vision/utils.py +863 -0
  177. mindspore/dataset/vision/validators.py +1483 -0
  178. mindspore/default_config.py +2 -0
  179. mindspore/dnnl.dll +0 -0
  180. mindspore/dpcmi.dll +0 -0
  181. mindspore/experimental/__init__.py +20 -0
  182. mindspore/experimental/es/__init__.py +22 -0
  183. mindspore/experimental/es/embedding_service.py +883 -0
  184. mindspore/experimental/es/embedding_service_layer.py +581 -0
  185. mindspore/experimental/llm_boost/__init__.py +21 -0
  186. mindspore/experimental/llm_boost/atb/__init__.py +23 -0
  187. mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
  188. mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
  189. mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
  190. mindspore/experimental/llm_boost/register.py +129 -0
  191. mindspore/experimental/llm_boost/utils.py +31 -0
  192. mindspore/experimental/map_parameter.py +309 -0
  193. mindspore/experimental/optim/__init__.py +40 -0
  194. mindspore/experimental/optim/adadelta.py +161 -0
  195. mindspore/experimental/optim/adagrad.py +168 -0
  196. mindspore/experimental/optim/adam.py +193 -0
  197. mindspore/experimental/optim/adamax.py +170 -0
  198. mindspore/experimental/optim/adamw.py +290 -0
  199. mindspore/experimental/optim/asgd.py +153 -0
  200. mindspore/experimental/optim/lr_scheduler.py +1371 -0
  201. mindspore/experimental/optim/nadam.py +157 -0
  202. mindspore/experimental/optim/optimizer.py +262 -0
  203. mindspore/experimental/optim/radam.py +194 -0
  204. mindspore/experimental/optim/rmsprop.py +154 -0
  205. mindspore/experimental/optim/rprop.py +164 -0
  206. mindspore/experimental/optim/sgd.py +156 -0
  207. mindspore/hal/__init__.py +40 -0
  208. mindspore/hal/_ascend.py +57 -0
  209. mindspore/hal/_base.py +57 -0
  210. mindspore/hal/_cpu.py +56 -0
  211. mindspore/hal/_gpu.py +57 -0
  212. mindspore/hal/contiguous_tensors_handle.py +175 -0
  213. mindspore/hal/device.py +356 -0
  214. mindspore/hal/event.py +179 -0
  215. mindspore/hal/memory.py +326 -0
  216. mindspore/hal/stream.py +357 -0
  217. mindspore/include/OWNERS +7 -0
  218. mindspore/include/api/allocator.h +97 -0
  219. mindspore/include/api/callback/callback.h +93 -0
  220. mindspore/include/api/callback/ckpt_saver.h +41 -0
  221. mindspore/include/api/callback/loss_monitor.h +33 -0
  222. mindspore/include/api/callback/lr_scheduler.h +51 -0
  223. mindspore/include/api/callback/time_monitor.h +34 -0
  224. mindspore/include/api/callback/train_accuracy.h +37 -0
  225. mindspore/include/api/cell.h +90 -0
  226. mindspore/include/api/cfg.h +82 -0
  227. mindspore/include/api/context.h +602 -0
  228. mindspore/include/api/data_type.h +47 -0
  229. mindspore/include/api/delegate.h +178 -0
  230. mindspore/include/api/delegate_api.h +75 -0
  231. mindspore/include/api/dual_abi_helper.h +208 -0
  232. mindspore/include/api/format.h +28 -0
  233. mindspore/include/api/graph.h +46 -0
  234. mindspore/include/api/kernel.h +58 -0
  235. mindspore/include/api/kernel_api.h +168 -0
  236. mindspore/include/api/metrics/accuracy.h +36 -0
  237. mindspore/include/api/metrics/metrics.h +41 -0
  238. mindspore/include/api/model.h +438 -0
  239. mindspore/include/api/model_group.h +91 -0
  240. mindspore/include/api/model_parallel_runner.h +168 -0
  241. mindspore/include/api/serialization.h +185 -0
  242. mindspore/include/api/status.h +192 -0
  243. mindspore/include/api/types.h +431 -0
  244. mindspore/include/api/visible.h +41 -0
  245. mindspore/include/c_api/context_c.h +179 -0
  246. mindspore/include/c_api/data_type_c.h +52 -0
  247. mindspore/include/c_api/format_c.h +46 -0
  248. mindspore/include/c_api/model_c.h +347 -0
  249. mindspore/include/c_api/status_c.h +79 -0
  250. mindspore/include/c_api/tensor_c.h +146 -0
  251. mindspore/include/c_api/types_c.h +67 -0
  252. mindspore/include/dataset/config.h +163 -0
  253. mindspore/include/dataset/constants.h +363 -0
  254. mindspore/include/dataset/execute.h +196 -0
  255. mindspore/include/dataset/text.h +1092 -0
  256. mindspore/include/dataset/transforms.h +638 -0
  257. mindspore/include/dataset/vision.h +2129 -0
  258. mindspore/include/dataset/vision_ascend.h +206 -0
  259. mindspore/include/dataset/vision_lite.h +625 -0
  260. mindspore/jpeg62.dll +0 -0
  261. mindspore/log.py +633 -0
  262. mindspore/mindrecord/__init__.py +43 -0
  263. mindspore/mindrecord/common/__init__.py +17 -0
  264. mindspore/mindrecord/common/constant.py +20 -0
  265. mindspore/mindrecord/common/enums.py +44 -0
  266. mindspore/mindrecord/common/exceptions.py +311 -0
  267. mindspore/mindrecord/config.py +809 -0
  268. mindspore/mindrecord/filereader.py +174 -0
  269. mindspore/mindrecord/filewriter.py +722 -0
  270. mindspore/mindrecord/mindpage.py +210 -0
  271. mindspore/mindrecord/shardheader.py +141 -0
  272. mindspore/mindrecord/shardindexgenerator.py +74 -0
  273. mindspore/mindrecord/shardreader.py +117 -0
  274. mindspore/mindrecord/shardsegment.py +128 -0
  275. mindspore/mindrecord/shardutils.py +185 -0
  276. mindspore/mindrecord/shardwriter.py +237 -0
  277. mindspore/mindrecord/tools/__init__.py +17 -0
  278. mindspore/mindrecord/tools/cifar10.py +140 -0
  279. mindspore/mindrecord/tools/cifar100.py +153 -0
  280. mindspore/mindrecord/tools/cifar100_to_mr.py +185 -0
  281. mindspore/mindrecord/tools/cifar10_to_mr.py +177 -0
  282. mindspore/mindrecord/tools/csv_to_mr.py +200 -0
  283. mindspore/mindrecord/tools/imagenet_to_mr.py +206 -0
  284. mindspore/mindrecord/tools/mnist_to_mr.py +259 -0
  285. mindspore/mindrecord/tools/tfrecord_to_mr.py +360 -0
  286. mindspore/mindspore_backend.dll +0 -0
  287. mindspore/mindspore_common.dll +0 -0
  288. mindspore/mindspore_core.dll +0 -0
  289. mindspore/mindspore_glog.dll +0 -0
  290. mindspore/mindspore_np_dtype.dll +0 -0
  291. mindspore/mindspore_ops.dll +0 -0
  292. mindspore/mint/__init__.py +1586 -0
  293. mindspore/mint/distributed/__init__.py +31 -0
  294. mindspore/mint/distributed/distributed.py +254 -0
  295. mindspore/mint/linalg/__init__.py +22 -0
  296. mindspore/mint/nn/__init__.py +757 -0
  297. mindspore/mint/nn/functional.py +679 -0
  298. mindspore/mint/nn/layer/__init__.py +39 -0
  299. mindspore/mint/nn/layer/activation.py +133 -0
  300. mindspore/mint/nn/layer/normalization.py +477 -0
  301. mindspore/mint/nn/layer/pooling.py +110 -0
  302. mindspore/mint/optim/__init__.py +24 -0
  303. mindspore/mint/optim/adamw.py +206 -0
  304. mindspore/mint/special/__init__.py +63 -0
  305. mindspore/msobj140.dll +0 -0
  306. mindspore/mspdb140.dll +0 -0
  307. mindspore/mspdbcore.dll +0 -0
  308. mindspore/mspdbst.dll +0 -0
  309. mindspore/mspft140.dll +0 -0
  310. mindspore/msvcdis140.dll +0 -0
  311. mindspore/msvcp140.dll +0 -0
  312. mindspore/msvcp140_1.dll +0 -0
  313. mindspore/msvcp140_2.dll +0 -0
  314. mindspore/msvcp140_atomic_wait.dll +0 -0
  315. mindspore/msvcp140_codecvt_ids.dll +0 -0
  316. mindspore/multiprocessing/__init__.py +73 -0
  317. mindspore/nn/__init__.py +47 -0
  318. mindspore/nn/cell.py +2787 -0
  319. mindspore/nn/dynamic_lr.py +482 -0
  320. mindspore/nn/grad/__init__.py +21 -0
  321. mindspore/nn/grad/cell_grad.py +196 -0
  322. mindspore/nn/layer/__init__.py +63 -0
  323. mindspore/nn/layer/activation.py +1822 -0
  324. mindspore/nn/layer/basic.py +1629 -0
  325. mindspore/nn/layer/channel_shuffle.py +90 -0
  326. mindspore/nn/layer/combined.py +248 -0
  327. mindspore/nn/layer/container.py +734 -0
  328. mindspore/nn/layer/conv.py +1505 -0
  329. mindspore/nn/layer/dense.py +204 -0
  330. mindspore/nn/layer/embedding.py +869 -0
  331. mindspore/nn/layer/image.py +661 -0
  332. mindspore/nn/layer/math.py +1069 -0
  333. mindspore/nn/layer/normalization.py +1273 -0
  334. mindspore/nn/layer/padding.py +880 -0
  335. mindspore/nn/layer/pooling.py +2302 -0
  336. mindspore/nn/layer/rnn_cells.py +388 -0
  337. mindspore/nn/layer/rnns.py +849 -0
  338. mindspore/nn/layer/thor_layer.py +963 -0
  339. mindspore/nn/layer/timedistributed.py +155 -0
  340. mindspore/nn/layer/transformer.py +823 -0
  341. mindspore/nn/learning_rate_schedule.py +512 -0
  342. mindspore/nn/loss/__init__.py +36 -0
  343. mindspore/nn/loss/loss.py +2924 -0
  344. mindspore/nn/metrics.py +53 -0
  345. mindspore/nn/optim/__init__.py +45 -0
  346. mindspore/nn/optim/_dist_optimizer_registry.py +111 -0
  347. mindspore/nn/optim/ada_grad.py +217 -0
  348. mindspore/nn/optim/adadelta.py +206 -0
  349. mindspore/nn/optim/adafactor.py +448 -0
  350. mindspore/nn/optim/adam.py +1297 -0
  351. mindspore/nn/optim/adamax.py +220 -0
  352. mindspore/nn/optim/adasum.py +548 -0
  353. mindspore/nn/optim/asgd.py +216 -0
  354. mindspore/nn/optim/ftrl.py +401 -0
  355. mindspore/nn/optim/lamb.py +296 -0
  356. mindspore/nn/optim/lars.py +202 -0
  357. mindspore/nn/optim/lazyadam.py +533 -0
  358. mindspore/nn/optim/momentum.py +239 -0
  359. mindspore/nn/optim/optimizer.py +1034 -0
  360. mindspore/nn/optim/proximal_ada_grad.py +242 -0
  361. mindspore/nn/optim/rmsprop.py +264 -0
  362. mindspore/nn/optim/rprop.py +251 -0
  363. mindspore/nn/optim/sgd.py +237 -0
  364. mindspore/nn/optim/tft_wrapper.py +127 -0
  365. mindspore/nn/optim/thor.py +1310 -0
  366. mindspore/nn/probability/__init__.py +22 -0
  367. mindspore/nn/probability/bijector/__init__.py +35 -0
  368. mindspore/nn/probability/bijector/bijector.py +337 -0
  369. mindspore/nn/probability/bijector/exp.py +65 -0
  370. mindspore/nn/probability/bijector/gumbel_cdf.py +144 -0
  371. mindspore/nn/probability/bijector/invert.py +126 -0
  372. mindspore/nn/probability/bijector/power_transform.py +196 -0
  373. mindspore/nn/probability/bijector/scalar_affine.py +167 -0
  374. mindspore/nn/probability/bijector/softplus.py +189 -0
  375. mindspore/nn/probability/bnn_layers/__init__.py +29 -0
  376. mindspore/nn/probability/bnn_layers/_util.py +46 -0
  377. mindspore/nn/probability/bnn_layers/bnn_cell_wrapper.py +112 -0
  378. mindspore/nn/probability/bnn_layers/conv_variational.py +267 -0
  379. mindspore/nn/probability/bnn_layers/dense_variational.py +302 -0
  380. mindspore/nn/probability/bnn_layers/layer_distribution.py +123 -0
  381. mindspore/nn/probability/distribution/__init__.py +56 -0
  382. mindspore/nn/probability/distribution/_utils/__init__.py +34 -0
  383. mindspore/nn/probability/distribution/_utils/custom_ops.py +96 -0
  384. mindspore/nn/probability/distribution/_utils/utils.py +362 -0
  385. mindspore/nn/probability/distribution/bernoulli.py +334 -0
  386. mindspore/nn/probability/distribution/beta.py +391 -0
  387. mindspore/nn/probability/distribution/categorical.py +435 -0
  388. mindspore/nn/probability/distribution/cauchy.py +383 -0
  389. mindspore/nn/probability/distribution/distribution.py +827 -0
  390. mindspore/nn/probability/distribution/exponential.py +350 -0
  391. mindspore/nn/probability/distribution/gamma.py +391 -0
  392. mindspore/nn/probability/distribution/geometric.py +335 -0
  393. mindspore/nn/probability/distribution/gumbel.py +257 -0
  394. mindspore/nn/probability/distribution/half_normal.py +133 -0
  395. mindspore/nn/probability/distribution/laplace.py +128 -0
  396. mindspore/nn/probability/distribution/log_normal.py +272 -0
  397. mindspore/nn/probability/distribution/logistic.py +379 -0
  398. mindspore/nn/probability/distribution/normal.py +336 -0
  399. mindspore/nn/probability/distribution/poisson.py +288 -0
  400. mindspore/nn/probability/distribution/student_t.py +149 -0
  401. mindspore/nn/probability/distribution/transformed_distribution.py +235 -0
  402. mindspore/nn/probability/distribution/uniform.py +375 -0
  403. mindspore/nn/reinforcement/__init__.py +24 -0
  404. mindspore/nn/reinforcement/_batch_read_write.py +142 -0
  405. mindspore/nn/reinforcement/_tensors_queue.py +152 -0
  406. mindspore/nn/reinforcement/tensor_array.py +145 -0
  407. mindspore/nn/sparse/__init__.py +23 -0
  408. mindspore/nn/sparse/sparse.py +147 -0
  409. mindspore/nn/wrap/__init__.py +49 -0
  410. mindspore/nn/wrap/cell_wrapper.py +968 -0
  411. mindspore/nn/wrap/grad_reducer.py +608 -0
  412. mindspore/nn/wrap/loss_scale.py +694 -0
  413. mindspore/numpy/__init__.py +121 -0
  414. mindspore/numpy/array_creations.py +2731 -0
  415. mindspore/numpy/array_ops.py +2629 -0
  416. mindspore/numpy/dtypes.py +185 -0
  417. mindspore/numpy/fft.py +966 -0
  418. mindspore/numpy/logic_ops.py +936 -0
  419. mindspore/numpy/math_ops.py +5911 -0
  420. mindspore/numpy/utils.py +214 -0
  421. mindspore/numpy/utils_const.py +565 -0
  422. mindspore/opencv_core452.dll +0 -0
  423. mindspore/opencv_imgcodecs452.dll +0 -0
  424. mindspore/opencv_imgproc452.dll +0 -0
  425. mindspore/ops/__init__.py +56 -0
  426. mindspore/ops/_constants.py +30 -0
  427. mindspore/ops/_grad_experimental/__init__.py +31 -0
  428. mindspore/ops/_grad_experimental/grad_array_ops.py +830 -0
  429. mindspore/ops/_grad_experimental/grad_base.py +143 -0
  430. mindspore/ops/_grad_experimental/grad_comm_ops.py +714 -0
  431. mindspore/ops/_grad_experimental/grad_debug_ops.py +31 -0
  432. mindspore/ops/_grad_experimental/grad_implementations.py +203 -0
  433. mindspore/ops/_grad_experimental/grad_inner_ops.py +79 -0
  434. mindspore/ops/_grad_experimental/grad_math_ops.py +802 -0
  435. mindspore/ops/_grad_experimental/grad_nn_ops.py +231 -0
  436. mindspore/ops/_grad_experimental/grad_quant_ops.py +238 -0
  437. mindspore/ops/_grad_experimental/grad_sparse.py +342 -0
  438. mindspore/ops/_grad_experimental/grad_sparse_ops.py +399 -0
  439. mindspore/ops/_grad_experimental/taylor_rule.py +220 -0
  440. mindspore/ops/_op_impl/__init__.py +23 -0
  441. mindspore/ops/_op_impl/_custom_op/__init__.py +39 -0
  442. mindspore/ops/_op_impl/_custom_op/_basic.py +158 -0
  443. mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +279 -0
  444. mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +156 -0
  445. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +109 -0
  446. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +125 -0
  447. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +105 -0
  448. mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +124 -0
  449. mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +116 -0
  450. mindspore/ops/_op_impl/_custom_op/correction_mul.py +89 -0
  451. mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +196 -0
  452. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +366 -0
  453. mindspore/ops/_op_impl/_custom_op/dsd_impl.py +162 -0
  454. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +136 -0
  455. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +206 -0
  456. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +88 -0
  457. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +128 -0
  458. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +199 -0
  459. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +88 -0
  460. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +156 -0
  461. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +184 -0
  462. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +143 -0
  463. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +169 -0
  464. mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +548 -0
  465. mindspore/ops/_op_impl/_custom_op/img2col_impl.py +881 -0
  466. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +278 -0
  467. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +200 -0
  468. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +334 -0
  469. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +255 -0
  470. mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +222 -0
  471. mindspore/ops/_op_impl/_custom_op/matmul_dds_grad_impl.py +644 -0
  472. mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +488 -0
  473. mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +87 -0
  474. mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +129 -0
  475. mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +121 -0
  476. mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +352 -0
  477. mindspore/ops/_op_impl/aicpu/__init__.py +441 -0
  478. mindspore/ops/_op_impl/aicpu/abs.py +36 -0
  479. mindspore/ops/_op_impl/aicpu/acos.py +32 -0
  480. mindspore/ops/_op_impl/aicpu/acos_grad.py +33 -0
  481. mindspore/ops/_op_impl/aicpu/acosh.py +34 -0
  482. mindspore/ops/_op_impl/aicpu/acosh_grad.py +35 -0
  483. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d.py +34 -0
  484. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
  485. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d.py +39 -0
  486. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d_grad.py +39 -0
  487. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d.py +37 -0
  488. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d_grad.py +37 -0
  489. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d.py +42 -0
  490. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d_grad.py +152 -0
  491. mindspore/ops/_op_impl/aicpu/add.py +43 -0
  492. mindspore/ops/_op_impl/aicpu/add_n.py +41 -0
  493. mindspore/ops/_op_impl/aicpu/add_v2.py +40 -0
  494. mindspore/ops/_op_impl/aicpu/addcdiv.py +41 -0
  495. mindspore/ops/_op_impl/aicpu/addcmul.py +47 -0
  496. mindspore/ops/_op_impl/aicpu/adjust_contrastv2.py +32 -0
  497. mindspore/ops/_op_impl/aicpu/adjust_hue.py +31 -0
  498. mindspore/ops/_op_impl/aicpu/adjust_saturation.py +32 -0
  499. mindspore/ops/_op_impl/aicpu/affine_grid.py +33 -0
  500. mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
  501. mindspore/ops/_op_impl/aicpu/angle.py +31 -0
  502. mindspore/ops/_op_impl/aicpu/arg_max.py +75 -0
  503. mindspore/ops/_op_impl/aicpu/arg_min.py +75 -0
  504. mindspore/ops/_op_impl/aicpu/argmax_with_value.py +43 -0
  505. mindspore/ops/_op_impl/aicpu/argmin_with_value.py +43 -0
  506. mindspore/ops/_op_impl/aicpu/asin.py +32 -0
  507. mindspore/ops/_op_impl/aicpu/asin_grad.py +33 -0
  508. mindspore/ops/_op_impl/aicpu/asinh.py +34 -0
  509. mindspore/ops/_op_impl/aicpu/asinh_grad.py +35 -0
  510. mindspore/ops/_op_impl/aicpu/atanh.py +34 -0
  511. mindspore/ops/_op_impl/aicpu/avgpool_grad_v1.py +37 -0
  512. mindspore/ops/_op_impl/aicpu/avgpool_v1.py +36 -0
  513. mindspore/ops/_op_impl/aicpu/bartlett_window.py +36 -0
  514. mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -0
  515. mindspore/ops/_op_impl/aicpu/batch_norm_grad_grad.py +49 -0
  516. mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
  517. mindspore/ops/_op_impl/aicpu/bessel_i0.py +31 -0
  518. mindspore/ops/_op_impl/aicpu/betainc.py +31 -0
  519. mindspore/ops/_op_impl/aicpu/bias_add.py +44 -0
  520. mindspore/ops/_op_impl/aicpu/bias_add_grad.py +42 -0
  521. mindspore/ops/_op_impl/aicpu/bincount.py +33 -0
  522. mindspore/ops/_op_impl/aicpu/blackman_window.py +36 -0
  523. mindspore/ops/_op_impl/aicpu/broadcast_to.py +58 -0
  524. mindspore/ops/_op_impl/aicpu/bucketize.py +34 -0
  525. mindspore/ops/_op_impl/aicpu/cache_swap_table.py +102 -0
  526. mindspore/ops/_op_impl/aicpu/cast.py +225 -0
  527. mindspore/ops/_op_impl/aicpu/cauchy.py +33 -0
  528. mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
  529. mindspore/ops/_op_impl/aicpu/check_numerics.py +33 -0
  530. mindspore/ops/_op_impl/aicpu/cholesky.py +32 -0
  531. mindspore/ops/_op_impl/aicpu/cholesky_inverse.py +31 -0
  532. mindspore/ops/_op_impl/aicpu/cholesky_solve.py +33 -0
  533. mindspore/ops/_op_impl/aicpu/choleskygrad.py +32 -0
  534. mindspore/ops/_op_impl/aicpu/coalesce.py +37 -0
  535. mindspore/ops/_op_impl/aicpu/col2im.py +38 -0
  536. mindspore/ops/_op_impl/aicpu/combined_non_max_suppression.py +42 -0
  537. mindspore/ops/_op_impl/aicpu/compare_and_bitpack.py +37 -0
  538. mindspore/ops/_op_impl/aicpu/complex.py +32 -0
  539. mindspore/ops/_op_impl/aicpu/complex_abs.py +31 -0
  540. mindspore/ops/_op_impl/aicpu/compute_accidental_hits.py +44 -0
  541. mindspore/ops/_op_impl/aicpu/concat.py +57 -0
  542. mindspore/ops/_op_impl/aicpu/concat_offset.py +42 -0
  543. mindspore/ops/_op_impl/aicpu/concat_offset_v1.py +31 -0
  544. mindspore/ops/_op_impl/aicpu/conj.py +42 -0
  545. mindspore/ops/_op_impl/aicpu/conjugate_transpose.py +58 -0
  546. mindspore/ops/_op_impl/aicpu/cos.py +34 -0
  547. mindspore/ops/_op_impl/aicpu/cosh.py +34 -0
  548. mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
  549. mindspore/ops/_op_impl/aicpu/crop_and_resize.py +69 -0
  550. mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_boxes.py +68 -0
  551. mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_image.py +38 -0
  552. mindspore/ops/_op_impl/aicpu/cross.py +42 -0
  553. mindspore/ops/_op_impl/aicpu/csr_sparse_matrix_to_dense.py +48 -0
  554. mindspore/ops/_op_impl/aicpu/csr_sparse_matrix_to_sparse_tensor.py +51 -0
  555. mindspore/ops/_op_impl/aicpu/ctc_greedy_decoder.py +35 -0
  556. mindspore/ops/_op_impl/aicpu/ctc_loss_v2.py +43 -0
  557. mindspore/ops/_op_impl/aicpu/ctc_loss_v2_grad.py +45 -0
  558. mindspore/ops/_op_impl/aicpu/ctcloss.py +38 -0
  559. mindspore/ops/_op_impl/aicpu/cummax.py +41 -0
  560. mindspore/ops/_op_impl/aicpu/cumprod.py +58 -0
  561. mindspore/ops/_op_impl/aicpu/cumsum.py +58 -0
  562. mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +36 -0
  563. mindspore/ops/_op_impl/aicpu/data_format_vec_permute.py +32 -0
  564. mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
  565. mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
  566. mindspore/ops/_op_impl/aicpu/dense_to_csr_sparse_matrix.py +49 -0
  567. mindspore/ops/_op_impl/aicpu/dense_to_dense_set_operation.py +45 -0
  568. mindspore/ops/_op_impl/aicpu/dense_to_sparse_set_operation.py +48 -0
  569. mindspore/ops/_op_impl/aicpu/depth_to_space.py +44 -0
  570. mindspore/ops/_op_impl/aicpu/diag.py +36 -0
  571. mindspore/ops/_op_impl/aicpu/diag_part.py +36 -0
  572. mindspore/ops/_op_impl/aicpu/diagonal.py +35 -0
  573. mindspore/ops/_op_impl/aicpu/digamma.py +31 -0
  574. mindspore/ops/_op_impl/aicpu/div.py +41 -0
  575. mindspore/ops/_op_impl/aicpu/div_no_nan.py +35 -0
  576. mindspore/ops/_op_impl/aicpu/dropout2d.py +42 -0
  577. mindspore/ops/_op_impl/aicpu/dropout3d.py +42 -0
  578. mindspore/ops/_op_impl/aicpu/dropout_genmask.py +41 -0
  579. mindspore/ops/_op_impl/aicpu/dropout_genmask_v3.py +32 -0
  580. mindspore/ops/_op_impl/aicpu/dynamic_stitch.py +42 -0
  581. mindspore/ops/_op_impl/aicpu/edit_distance.py +56 -0
  582. mindspore/ops/_op_impl/aicpu/eig.py +35 -0
  583. mindspore/ops/_op_impl/aicpu/embedding_lookup.py +102 -0
  584. mindspore/ops/_op_impl/aicpu/end_of_sequence.py +30 -0
  585. mindspore/ops/_op_impl/aicpu/environ_create.py +28 -0
  586. mindspore/ops/_op_impl/aicpu/environ_destroy_all.py +28 -0
  587. mindspore/ops/_op_impl/aicpu/environ_get.py +41 -0
  588. mindspore/ops/_op_impl/aicpu/environ_set.py +40 -0
  589. mindspore/ops/_op_impl/aicpu/eps.py +32 -0
  590. mindspore/ops/_op_impl/aicpu/equal.py +41 -0
  591. mindspore/ops/_op_impl/aicpu/exp.py +37 -0
  592. mindspore/ops/_op_impl/aicpu/expand.py +45 -0
  593. mindspore/ops/_op_impl/aicpu/expand_dims.py +42 -0
  594. mindspore/ops/_op_impl/aicpu/expm1.py +34 -0
  595. mindspore/ops/_op_impl/aicpu/extract_glimpse.py +35 -0
  596. mindspore/ops/_op_impl/aicpu/eye.py +44 -0
  597. mindspore/ops/_op_impl/aicpu/fft_with_size.py +47 -0
  598. mindspore/ops/_op_impl/aicpu/fill_diagonal.py +39 -0
  599. mindspore/ops/_op_impl/aicpu/fill_v2.py +58 -0
  600. mindspore/ops/_op_impl/aicpu/flatten.py +43 -0
  601. mindspore/ops/_op_impl/aicpu/floor_div.py +38 -0
  602. mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
  603. mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
  604. mindspore/ops/_op_impl/aicpu/fractional_avg_pool.py +41 -0
  605. mindspore/ops/_op_impl/aicpu/fractional_avg_pool_grad.py +41 -0
  606. mindspore/ops/_op_impl/aicpu/fractional_max_pool.py +41 -0
  607. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_grad_with_fixed_ksize.py +43 -0
  608. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +65 -0
  609. mindspore/ops/_op_impl/aicpu/fractional_max_pool_grad.py +42 -0
  610. mindspore/ops/_op_impl/aicpu/fractional_max_pool_grad_with_fixed_ksize.py +42 -0
  611. mindspore/ops/_op_impl/aicpu/fractional_max_pool_with_fixed_ksize.py +49 -0
  612. mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
  613. mindspore/ops/_op_impl/aicpu/fused_sparse_adam.py +46 -0
  614. mindspore/ops/_op_impl/aicpu/fused_sparse_ftrl.py +41 -0
  615. mindspore/ops/_op_impl/aicpu/fused_sparse_lazy_adam.py +46 -0
  616. mindspore/ops/_op_impl/aicpu/fused_sparse_proximal_adagrad.py +39 -0
  617. mindspore/ops/_op_impl/aicpu/gamma.py +38 -0
  618. mindspore/ops/_op_impl/aicpu/gather.py +46 -0
  619. mindspore/ops/_op_impl/aicpu/gather_d.py +79 -0
  620. mindspore/ops/_op_impl/aicpu/gather_d_grad_v2.py +79 -0
  621. mindspore/ops/_op_impl/aicpu/gather_grad.py +54 -0
  622. mindspore/ops/_op_impl/aicpu/gather_nd.py +56 -0
  623. mindspore/ops/_op_impl/aicpu/gcd.py +32 -0
  624. mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +38 -0
  625. mindspore/ops/_op_impl/aicpu/geqrf.py +32 -0
  626. mindspore/ops/_op_impl/aicpu/get_next.py +39 -0
  627. mindspore/ops/_op_impl/aicpu/glu.py +33 -0
  628. mindspore/ops/_op_impl/aicpu/glu_grad.py +34 -0
  629. mindspore/ops/_op_impl/aicpu/greater.py +41 -0
  630. mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
  631. mindspore/ops/_op_impl/aicpu/grid_sampler_2d.py +35 -0
  632. mindspore/ops/_op_impl/aicpu/grid_sampler_2d_grad.py +38 -0
  633. mindspore/ops/_op_impl/aicpu/grid_sampler_3d.py +34 -0
  634. mindspore/ops/_op_impl/aicpu/grid_sampler_3d_grad.py +38 -0
  635. mindspore/ops/_op_impl/aicpu/hamming_window.py +57 -0
  636. mindspore/ops/_op_impl/aicpu/hard_sigmoid.py +32 -0
  637. mindspore/ops/_op_impl/aicpu/hard_sigmoid_grad.py +33 -0
  638. mindspore/ops/_op_impl/aicpu/heaviside.py +40 -0
  639. mindspore/ops/_op_impl/aicpu/histogram.py +35 -0
  640. mindspore/ops/_op_impl/aicpu/hsv_to_rgb.py +32 -0
  641. mindspore/ops/_op_impl/aicpu/hypot.py +32 -0
  642. mindspore/ops/_op_impl/aicpu/identity.py +42 -0
  643. mindspore/ops/_op_impl/aicpu/identity_n.py +41 -0
  644. mindspore/ops/_op_impl/aicpu/igamma.py +30 -0
  645. mindspore/ops/_op_impl/aicpu/igammac.py +30 -0
  646. mindspore/ops/_op_impl/aicpu/igammagrada.py +30 -0
  647. mindspore/ops/_op_impl/aicpu/im2col.py +43 -0
  648. mindspore/ops/_op_impl/aicpu/imag.py +31 -0
  649. mindspore/ops/_op_impl/aicpu/index_fill.py +54 -0
  650. mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
  651. mindspore/ops/_op_impl/aicpu/init_data_set_queue.py +27 -0
  652. mindspore/ops/_op_impl/aicpu/inplace_index_add.py +39 -0
  653. mindspore/ops/_op_impl/aicpu/instance_norm_v2.py +41 -0
  654. mindspore/ops/_op_impl/aicpu/instance_norm_v2_grad.py +44 -0
  655. mindspore/ops/_op_impl/aicpu/is_finite.py +40 -0
  656. mindspore/ops/_op_impl/aicpu/is_inf.py +31 -0
  657. mindspore/ops/_op_impl/aicpu/is_nan.py +31 -0
  658. mindspore/ops/_op_impl/aicpu/kldivloss.py +34 -0
  659. mindspore/ops/_op_impl/aicpu/kldivlossgrad.py +35 -0
  660. mindspore/ops/_op_impl/aicpu/layer_norm_grad_grad.py +47 -0
  661. mindspore/ops/_op_impl/aicpu/lcm.py +32 -0
  662. mindspore/ops/_op_impl/aicpu/left_shift.py +38 -0
  663. mindspore/ops/_op_impl/aicpu/less.py +41 -0
  664. mindspore/ops/_op_impl/aicpu/less_equal.py +41 -0
  665. mindspore/ops/_op_impl/aicpu/lgamma.py +33 -0
  666. mindspore/ops/_op_impl/aicpu/linear_sum_assignment.py +57 -0
  667. mindspore/ops/_op_impl/aicpu/linspace.py +33 -0
  668. mindspore/ops/_op_impl/aicpu/list_diff.py +50 -0
  669. mindspore/ops/_op_impl/aicpu/log.py +37 -0
  670. mindspore/ops/_op_impl/aicpu/log1p.py +34 -0
  671. mindspore/ops/_op_impl/aicpu/log_matrix_determinant.py +31 -0
  672. mindspore/ops/_op_impl/aicpu/log_normal_reverse.py +33 -0
  673. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +37 -0
  674. mindspore/ops/_op_impl/aicpu/logical_xor.py +30 -0
  675. mindspore/ops/_op_impl/aicpu/logit.py +33 -0
  676. mindspore/ops/_op_impl/aicpu/logit_grad.py +34 -0
  677. mindspore/ops/_op_impl/aicpu/logspace.py +36 -0
  678. mindspore/ops/_op_impl/aicpu/lower_bound.py +47 -0
  679. mindspore/ops/_op_impl/aicpu/lstsq.py +34 -0
  680. mindspore/ops/_op_impl/aicpu/lu.py +39 -0
  681. mindspore/ops/_op_impl/aicpu/lu_solve.py +32 -0
  682. mindspore/ops/_op_impl/aicpu/lu_unpack.py +114 -0
  683. mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +49 -0
  684. mindspore/ops/_op_impl/aicpu/masked_fill.py +42 -0
  685. mindspore/ops/_op_impl/aicpu/masked_scatter.py +40 -0
  686. mindspore/ops/_op_impl/aicpu/masked_select.py +31 -0
  687. mindspore/ops/_op_impl/aicpu/masked_select_grad.py +35 -0
  688. mindspore/ops/_op_impl/aicpu/matmul.py +39 -0
  689. mindspore/ops/_op_impl/aicpu/matrix_band_part.py +59 -0
  690. mindspore/ops/_op_impl/aicpu/matrix_determinant.py +30 -0
  691. mindspore/ops/_op_impl/aicpu/matrix_diag_part_v3.py +54 -0
  692. mindspore/ops/_op_impl/aicpu/matrix_diag_v3.py +56 -0
  693. mindspore/ops/_op_impl/aicpu/matrix_exp.py +34 -0
  694. mindspore/ops/_op_impl/aicpu/matrix_inverse.py +31 -0
  695. mindspore/ops/_op_impl/aicpu/matrix_logarithm.py +31 -0
  696. mindspore/ops/_op_impl/aicpu/matrix_power.py +37 -0
  697. mindspore/ops/_op_impl/aicpu/matrix_set_diag_v3.py +54 -0
  698. mindspore/ops/_op_impl/aicpu/matrix_solve.py +35 -0
  699. mindspore/ops/_op_impl/aicpu/matrix_solve_ls.py +36 -0
  700. mindspore/ops/_op_impl/aicpu/matrix_triangular_solve.py +36 -0
  701. mindspore/ops/_op_impl/aicpu/max_pool3d_grad_with_argmax.py +60 -0
  702. mindspore/ops/_op_impl/aicpu/max_pool3d_with_argmax.py +59 -0
  703. mindspore/ops/_op_impl/aicpu/max_unpool2d.py +57 -0
  704. mindspore/ops/_op_impl/aicpu/max_unpool2d_grad.py +58 -0
  705. mindspore/ops/_op_impl/aicpu/max_unpool3d.py +57 -0
  706. mindspore/ops/_op_impl/aicpu/max_unpool3d_grad.py +58 -0
  707. mindspore/ops/_op_impl/aicpu/maximum_grad_grad.py +40 -0
  708. mindspore/ops/_op_impl/aicpu/maxpool_grad_v1.py +46 -0
  709. mindspore/ops/_op_impl/aicpu/maxpool_v1.py +42 -0
  710. mindspore/ops/_op_impl/aicpu/median.py +39 -0
  711. mindspore/ops/_op_impl/aicpu/median_grad.py +45 -0
  712. mindspore/ops/_op_impl/aicpu/meshgrid.py +41 -0
  713. mindspore/ops/_op_impl/aicpu/minimum_grad_grad.py +40 -0
  714. mindspore/ops/_op_impl/aicpu/mirror_pad.py +50 -0
  715. mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +48 -0
  716. mindspore/ops/_op_impl/aicpu/mul.py +43 -0
  717. mindspore/ops/_op_impl/aicpu/mul_no_nan.py +42 -0
  718. mindspore/ops/_op_impl/aicpu/multi_margin_loss.py +37 -0
  719. mindspore/ops/_op_impl/aicpu/multi_margin_loss_grad.py +41 -0
  720. mindspore/ops/_op_impl/aicpu/multilabel_margin_loss_grad.py +37 -0
  721. mindspore/ops/_op_impl/aicpu/multinomial.py +47 -0
  722. mindspore/ops/_op_impl/aicpu/multinomial_with_replacement.py +35 -0
  723. mindspore/ops/_op_impl/aicpu/mvlgamma.py +32 -0
  724. mindspore/ops/_op_impl/aicpu/mvlgamma_grad.py +33 -0
  725. mindspore/ops/_op_impl/aicpu/nan_to_num.py +34 -0
  726. mindspore/ops/_op_impl/aicpu/neg.py +36 -0
  727. mindspore/ops/_op_impl/aicpu/nextafter.py +32 -0
  728. mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
  729. mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
  730. mindspore/ops/_op_impl/aicpu/no_repeat_ngram.py +34 -0
  731. mindspore/ops/_op_impl/aicpu/non_deterministic_ints.py +33 -0
  732. mindspore/ops/_op_impl/aicpu/non_max_suppression.py +36 -0
  733. mindspore/ops/_op_impl/aicpu/non_max_suppression_with_overlaps.py +35 -0
  734. mindspore/ops/_op_impl/aicpu/non_zero.py +43 -0
  735. mindspore/ops/_op_impl/aicpu/not_equal.py +39 -0
  736. mindspore/ops/_op_impl/aicpu/nth_element.py +39 -0
  737. mindspore/ops/_op_impl/aicpu/nuclear_norm.py +33 -0
  738. mindspore/ops/_op_impl/aicpu/one_hot.py +116 -0
  739. mindspore/ops/_op_impl/aicpu/ones_like.py +39 -0
  740. mindspore/ops/_op_impl/aicpu/orgqr.py +34 -0
  741. mindspore/ops/_op_impl/aicpu/pad_and_shift.py +33 -0
  742. mindspore/ops/_op_impl/aicpu/pad_v3.py +61 -0
  743. mindspore/ops/_op_impl/aicpu/pad_v3_grad.py +59 -0
  744. mindspore/ops/_op_impl/aicpu/padding.py +41 -0
  745. mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +54 -0
  746. mindspore/ops/_op_impl/aicpu/pdist_grad.py +33 -0
  747. mindspore/ops/_op_impl/aicpu/poisson.py +37 -0
  748. mindspore/ops/_op_impl/aicpu/polar.py +32 -0
  749. mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
  750. mindspore/ops/_op_impl/aicpu/pow.py +39 -0
  751. mindspore/ops/_op_impl/aicpu/print_tensor.py +39 -0
  752. mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +113 -0
  753. mindspore/ops/_op_impl/aicpu/qr.py +36 -0
  754. mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
  755. mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
  756. mindspore/ops/_op_impl/aicpu/ragged_range.py +49 -0
  757. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
  758. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_tensor.py +74 -0
  759. mindspore/ops/_op_impl/aicpu/random_categorical.py +68 -0
  760. mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +36 -0
  761. mindspore/ops/_op_impl/aicpu/random_gamma.py +38 -0
  762. mindspore/ops/_op_impl/aicpu/random_poisson.py +134 -0
  763. mindspore/ops/_op_impl/aicpu/random_shuffle.py +47 -0
  764. mindspore/ops/_op_impl/aicpu/randperm.py +38 -0
  765. mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
  766. mindspore/ops/_op_impl/aicpu/range.py +36 -0
  767. mindspore/ops/_op_impl/aicpu/range_v2.py +35 -0
  768. mindspore/ops/_op_impl/aicpu/real.py +31 -0
  769. mindspore/ops/_op_impl/aicpu/real_div.py +40 -0
  770. mindspore/ops/_op_impl/aicpu/reciprocal.py +34 -0
  771. mindspore/ops/_op_impl/aicpu/reciprocal_grad.py +35 -0
  772. mindspore/ops/_op_impl/aicpu/reduce_mean.py +57 -0
  773. mindspore/ops/_op_impl/aicpu/reduce_prod.py +57 -0
  774. mindspore/ops/_op_impl/aicpu/reduce_sum.py +57 -0
  775. mindspore/ops/_op_impl/aicpu/relu_grad_v3.py +41 -0
  776. mindspore/ops/_op_impl/aicpu/relu_v3.py +38 -0
  777. mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +96 -0
  778. mindspore/ops/_op_impl/aicpu/reshape.py +42 -0
  779. mindspore/ops/_op_impl/aicpu/resize_area.py +40 -0
  780. mindspore/ops/_op_impl/aicpu/resize_bicubic.py +20 -0
  781. mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +19 -0
  782. mindspore/ops/_op_impl/aicpu/resize_bilinear.py +32 -0
  783. mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py +32 -0
  784. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2.py +36 -0
  785. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2_grad.py +35 -0
  786. mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
  787. mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
  788. mindspore/ops/_op_impl/aicpu/reverse_sequence.py +55 -0
  789. mindspore/ops/_op_impl/aicpu/reversev2.py +54 -0
  790. mindspore/ops/_op_impl/aicpu/rgb_to_hsv.py +32 -0
  791. mindspore/ops/_op_impl/aicpu/right_shift.py +38 -0
  792. mindspore/ops/_op_impl/aicpu/rnnt_loss.py +35 -0
  793. mindspore/ops/_op_impl/aicpu/round.py +34 -0
  794. mindspore/ops/_op_impl/aicpu/rsqrt.py +33 -0
  795. mindspore/ops/_op_impl/aicpu/rsqrt_grad.py +36 -0
  796. mindspore/ops/_op_impl/aicpu/sample_distorted_bounding_box_v2.py +49 -0
  797. mindspore/ops/_op_impl/aicpu/scale_and_translate.py +52 -0
  798. mindspore/ops/_op_impl/aicpu/scale_and_translate_grad.py +36 -0
  799. mindspore/ops/_op_impl/aicpu/scatter.py +79 -0
  800. mindspore/ops/_op_impl/aicpu/scatter_add_with_axis.py +53 -0
  801. mindspore/ops/_op_impl/aicpu/scatter_elements.py +39 -0
  802. mindspore/ops/_op_impl/aicpu/scatter_nd.py +59 -0
  803. mindspore/ops/_op_impl/aicpu/scatter_nd_max.py +54 -0
  804. mindspore/ops/_op_impl/aicpu/scatter_nd_min.py +54 -0
  805. mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +59 -0
  806. mindspore/ops/_op_impl/aicpu/search_sorted.py +44 -0
  807. mindspore/ops/_op_impl/aicpu/segment_max.py +52 -0
  808. mindspore/ops/_op_impl/aicpu/segment_mean.py +56 -0
  809. mindspore/ops/_op_impl/aicpu/segment_min.py +52 -0
  810. mindspore/ops/_op_impl/aicpu/segment_prod.py +56 -0
  811. mindspore/ops/_op_impl/aicpu/segment_sum.py +56 -0
  812. mindspore/ops/_op_impl/aicpu/select.py +45 -0
  813. mindspore/ops/_op_impl/aicpu/self_adjoint_eig.py +34 -0
  814. mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
  815. mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
  816. mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
  817. mindspore/ops/_op_impl/aicpu/sequence_concat.py +40 -0
  818. mindspore/ops/_op_impl/aicpu/sequence_stack.py +40 -0
  819. mindspore/ops/_op_impl/aicpu/set_size.py +38 -0
  820. mindspore/ops/_op_impl/aicpu/sign.py +36 -0
  821. mindspore/ops/_op_impl/aicpu/sin.py +34 -0
  822. mindspore/ops/_op_impl/aicpu/sinc.py +43 -0
  823. mindspore/ops/_op_impl/aicpu/sinh.py +34 -0
  824. mindspore/ops/_op_impl/aicpu/slice.py +59 -0
  825. mindspore/ops/_op_impl/aicpu/slice_grad.py +76 -0
  826. mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
  827. mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
  828. mindspore/ops/_op_impl/aicpu/sort.py +39 -0
  829. mindspore/ops/_op_impl/aicpu/space_to_depth.py +44 -0
  830. mindspore/ops/_op_impl/aicpu/sparse_addmm.py +87 -0
  831. mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +80 -0
  832. mindspore/ops/_op_impl/aicpu/sparse_apply_centered_rms_prop.py +105 -0
  833. mindspore/ops/_op_impl/aicpu/sparse_apply_momentum.py +80 -0
  834. mindspore/ops/_op_impl/aicpu/sparse_apply_proximal_gradient_descent.py +79 -0
  835. mindspore/ops/_op_impl/aicpu/sparse_concat.py +59 -0
  836. mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
  837. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_add.py +58 -0
  838. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_div.py +58 -0
  839. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_mul.py +58 -0
  840. mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows.py +63 -0
  841. mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows_grad.py +45 -0
  842. mindspore/ops/_op_impl/aicpu/sparse_matrix_mat_mul.py +56 -0
  843. mindspore/ops/_op_impl/aicpu/sparse_matrix_nnz.py +81 -0
  844. mindspore/ops/_op_impl/aicpu/sparse_matrix_transpose.py +116 -0
  845. mindspore/ops/_op_impl/aicpu/sparse_reorder.py +56 -0
  846. mindspore/ops/_op_impl/aicpu/sparse_reshape.py +34 -0
  847. mindspore/ops/_op_impl/aicpu/sparse_segment_mean_grad.py +36 -0
  848. mindspore/ops/_op_impl/aicpu/sparse_segment_mean_with_num_segments.py +44 -0
  849. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n.py +43 -0
  850. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n_grad.py +38 -0
  851. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n_with_num_segments.py +44 -0
  852. mindspore/ops/_op_impl/aicpu/sparse_segment_sum.py +49 -0
  853. mindspore/ops/_op_impl/aicpu/sparse_segment_sum_with_num_segments.py +68 -0
  854. mindspore/ops/_op_impl/aicpu/sparse_slice.py +63 -0
  855. mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +61 -0
  856. mindspore/ops/_op_impl/aicpu/sparse_softmax.py +33 -0
  857. mindspore/ops/_op_impl/aicpu/sparse_softmax_cross_entropy_with_logits_v2.py +35 -0
  858. mindspore/ops/_op_impl/aicpu/sparse_sparse_maximum.py +53 -0
  859. mindspore/ops/_op_impl/aicpu/sparse_sparse_minimum.py +53 -0
  860. mindspore/ops/_op_impl/aicpu/sparse_tensor_dense_add.py +84 -0
  861. mindspore/ops/_op_impl/aicpu/sparse_tensor_dense_mat_mul.py +190 -0
  862. mindspore/ops/_op_impl/aicpu/sparse_tensor_to_csr_sparse_matrix.py +51 -0
  863. mindspore/ops/_op_impl/aicpu/sparse_to_dense_v2.py +73 -0
  864. mindspore/ops/_op_impl/aicpu/split.py +45 -0
  865. mindspore/ops/_op_impl/aicpu/sqrt.py +34 -0
  866. mindspore/ops/_op_impl/aicpu/sqrt_grad.py +35 -0
  867. mindspore/ops/_op_impl/aicpu/square.py +35 -0
  868. mindspore/ops/_op_impl/aicpu/squared_difference.py +37 -0
  869. mindspore/ops/_op_impl/aicpu/squeeze.py +42 -0
  870. mindspore/ops/_op_impl/aicpu/sspaddmm.py +97 -0
  871. mindspore/ops/_op_impl/aicpu/stack.py +45 -0
  872. mindspore/ops/_op_impl/aicpu/stack_push_pop.py +87 -0
  873. mindspore/ops/_op_impl/aicpu/standard_laplace.py +34 -0
  874. mindspore/ops/_op_impl/aicpu/standard_normal.py +34 -0
  875. mindspore/ops/_op_impl/aicpu/stateless_dropout_genmask.py +37 -0
  876. mindspore/ops/_op_impl/aicpu/stft.py +70 -0
  877. mindspore/ops/_op_impl/aicpu/strided_slice.py +43 -0
  878. mindspore/ops/_op_impl/aicpu/strided_slice_grad.py +50 -0
  879. mindspore/ops/_op_impl/aicpu/sub.py +41 -0
  880. mindspore/ops/_op_impl/aicpu/sub_and_filter.py +36 -0
  881. mindspore/ops/_op_impl/aicpu/tan.py +34 -0
  882. mindspore/ops/_op_impl/aicpu/tanh.py +34 -0
  883. mindspore/ops/_op_impl/aicpu/tanh_grad.py +35 -0
  884. mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
  885. mindspore/ops/_op_impl/aicpu/tile.py +56 -0
  886. mindspore/ops/_op_impl/aicpu/topk.py +34 -0
  887. mindspore/ops/_op_impl/aicpu/trace.py +40 -0
  888. mindspore/ops/_op_impl/aicpu/tracegrad.py +41 -0
  889. mindspore/ops/_op_impl/aicpu/trans_data.py +35 -0
  890. mindspore/ops/_op_impl/aicpu/transpose.py +58 -0
  891. mindspore/ops/_op_impl/aicpu/tridiagonal_matmul.py +42 -0
  892. mindspore/ops/_op_impl/aicpu/tridiagonal_solve.py +35 -0
  893. mindspore/ops/_op_impl/aicpu/tril.py +42 -0
  894. mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
  895. mindspore/ops/_op_impl/aicpu/triplet_margin_loss.py +62 -0
  896. mindspore/ops/_op_impl/aicpu/triu.py +43 -0
  897. mindspore/ops/_op_impl/aicpu/triu_indices.py +34 -0
  898. mindspore/ops/_op_impl/aicpu/truncated_normal.py +39 -0
  899. mindspore/ops/_op_impl/aicpu/uniform.py +36 -0
  900. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +41 -0
  901. mindspore/ops/_op_impl/aicpu/uniform_int.py +36 -0
  902. mindspore/ops/_op_impl/aicpu/uniform_real.py +33 -0
  903. mindspore/ops/_op_impl/aicpu/unique.py +31 -0
  904. mindspore/ops/_op_impl/aicpu/unique_consecutive.py +47 -0
  905. mindspore/ops/_op_impl/aicpu/unique_with_pad.py +32 -0
  906. mindspore/ops/_op_impl/aicpu/unravel_index.py +32 -0
  907. mindspore/ops/_op_impl/aicpu/unsorted_segment_prod.py +53 -0
  908. mindspore/ops/_op_impl/aicpu/unsorted_segment_sum.py +57 -0
  909. mindspore/ops/_op_impl/aicpu/unstack.py +45 -0
  910. mindspore/ops/_op_impl/aicpu/update_cache.py +44 -0
  911. mindspore/ops/_op_impl/aicpu/upper_bound.py +47 -0
  912. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d.py +42 -0
  913. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d_grad.py +49 -0
  914. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d.py +40 -0
  915. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d_grad.py +50 -0
  916. mindspore/ops/_op_impl/aicpu/xdivy.py +35 -0
  917. mindspore/ops/_op_impl/aicpu/xlogy.py +33 -0
  918. mindspore/ops/_op_impl/aicpu/zeros_like.py +42 -0
  919. mindspore/ops/_op_impl/aicpu/zeta.py +31 -0
  920. mindspore/ops/_op_impl/akg/__init__.py +19 -0
  921. mindspore/ops/_op_impl/akg/ascend/__init__.py +48 -0
  922. mindspore/ops/_op_impl/akg/ascend/abs.py +35 -0
  923. mindspore/ops/_op_impl/akg/ascend/add.py +42 -0
  924. mindspore/ops/_op_impl/akg/ascend/add_n.py +37 -0
  925. mindspore/ops/_op_impl/akg/ascend/batchmatmul.py +33 -0
  926. mindspore/ops/_op_impl/akg/ascend/cast.py +46 -0
  927. mindspore/ops/_op_impl/akg/ascend/equal.py +35 -0
  928. mindspore/ops/_op_impl/akg/ascend/exp.py +35 -0
  929. mindspore/ops/_op_impl/akg/ascend/expand_dims.py +33 -0
  930. mindspore/ops/_op_impl/akg/ascend/greater.py +34 -0
  931. mindspore/ops/_op_impl/akg/ascend/greater_equal.py +35 -0
  932. mindspore/ops/_op_impl/akg/ascend/less.py +31 -0
  933. mindspore/ops/_op_impl/akg/ascend/less_equal.py +35 -0
  934. mindspore/ops/_op_impl/akg/ascend/load_im2col.py +33 -0
  935. mindspore/ops/_op_impl/akg/ascend/log.py +34 -0
  936. mindspore/ops/_op_impl/akg/ascend/maximum.py +36 -0
  937. mindspore/ops/_op_impl/akg/ascend/minimum.py +39 -0
  938. mindspore/ops/_op_impl/akg/ascend/mul.py +41 -0
  939. mindspore/ops/_op_impl/akg/ascend/neg.py +37 -0
  940. mindspore/ops/_op_impl/akg/ascend/pow.py +35 -0
  941. mindspore/ops/_op_impl/akg/ascend/prod_force_se_a.py +33 -0
  942. mindspore/ops/_op_impl/akg/ascend/real_div.py +36 -0
  943. mindspore/ops/_op_impl/akg/ascend/reciprocal.py +32 -0
  944. mindspore/ops/_op_impl/akg/ascend/reduce_max.py +32 -0
  945. mindspore/ops/_op_impl/akg/ascend/reduce_min.py +32 -0
  946. mindspore/ops/_op_impl/akg/ascend/reduce_sum.py +37 -0
  947. mindspore/ops/_op_impl/akg/ascend/rsqrt.py +35 -0
  948. mindspore/ops/_op_impl/akg/ascend/select.py +37 -0
  949. mindspore/ops/_op_impl/akg/ascend/sqrt.py +35 -0
  950. mindspore/ops/_op_impl/akg/ascend/square.py +35 -0
  951. mindspore/ops/_op_impl/akg/ascend/sub.py +42 -0
  952. mindspore/ops/_op_impl/akg/cpu/__init__.py +23 -0
  953. mindspore/ops/_op_impl/akg/cpu/coo2csr.py +29 -0
  954. mindspore/ops/_op_impl/akg/cpu/csr2coo.py +29 -0
  955. mindspore/ops/_op_impl/akg/cpu/csr_gather.py +33 -0
  956. mindspore/ops/_op_impl/akg/cpu/csr_mm.py +34 -0
  957. mindspore/ops/_op_impl/akg/cpu/csr_mul.py +33 -0
  958. mindspore/ops/_op_impl/akg/cpu/csr_mv.py +33 -0
  959. mindspore/ops/_op_impl/akg/cpu/csr_reduce_sum.py +31 -0
  960. mindspore/ops/_op_impl/akg/gpu/__init__.py +24 -0
  961. mindspore/ops/_op_impl/akg/gpu/coo2csr.py +29 -0
  962. mindspore/ops/_op_impl/akg/gpu/csr2coo.py +29 -0
  963. mindspore/ops/_op_impl/akg/gpu/csr_div.py +36 -0
  964. mindspore/ops/_op_impl/akg/gpu/csr_gather.py +33 -0
  965. mindspore/ops/_op_impl/akg/gpu/csr_mm.py +37 -0
  966. mindspore/ops/_op_impl/akg/gpu/csr_mul.py +36 -0
  967. mindspore/ops/_op_impl/akg/gpu/csr_mv.py +36 -0
  968. mindspore/ops/_op_impl/akg/gpu/csr_reduce_sum.py +33 -0
  969. mindspore/ops/_op_impl/cpu/__init__.py +78 -0
  970. mindspore/ops/_op_impl/cpu/adam.py +49 -0
  971. mindspore/ops/_op_impl/cpu/adam_weight_decay.py +47 -0
  972. mindspore/ops/_op_impl/cpu/arg_max.py +30 -0
  973. mindspore/ops/_op_impl/cpu/arg_max_with_value.py +31 -0
  974. mindspore/ops/_op_impl/cpu/arg_min_with_value.py +31 -0
  975. mindspore/ops/_op_impl/cpu/buffer_append.py +28 -0
  976. mindspore/ops/_op_impl/cpu/buffer_get.py +28 -0
  977. mindspore/ops/_op_impl/cpu/buffer_sample.py +28 -0
  978. mindspore/ops/_op_impl/cpu/cast.py +171 -0
  979. mindspore/ops/_op_impl/cpu/concat_offset.py +38 -0
  980. mindspore/ops/_op_impl/cpu/conv2d.py +30 -0
  981. mindspore/ops/_op_impl/cpu/conv3d.py +30 -0
  982. mindspore/ops/_op_impl/cpu/div.py +32 -0
  983. mindspore/ops/_op_impl/cpu/dropout.py +31 -0
  984. mindspore/ops/_op_impl/cpu/dropout_grad.py +30 -0
  985. mindspore/ops/_op_impl/cpu/dynamic_shape.py +42 -0
  986. mindspore/ops/_op_impl/cpu/dynamic_stitch.py +41 -0
  987. mindspore/ops/_op_impl/cpu/equal_count.py +30 -0
  988. mindspore/ops/_op_impl/cpu/gather_d.py +49 -0
  989. mindspore/ops/_op_impl/cpu/gather_d_grad.py +38 -0
  990. mindspore/ops/_op_impl/cpu/gather_d_grad_v2.py +40 -0
  991. mindspore/ops/_op_impl/cpu/gather_v2.py +40 -0
  992. mindspore/ops/_op_impl/cpu/hsigmoid.py +33 -0
  993. mindspore/ops/_op_impl/cpu/hsigmoid_grad.py +34 -0
  994. mindspore/ops/_op_impl/cpu/hswish.py +32 -0
  995. mindspore/ops/_op_impl/cpu/hswish_grad.py +33 -0
  996. mindspore/ops/_op_impl/cpu/identity_n.py +40 -0
  997. mindspore/ops/_op_impl/cpu/is_finite.py +39 -0
  998. mindspore/ops/_op_impl/cpu/l2loss.py +30 -0
  999. mindspore/ops/_op_impl/cpu/layer_norm.py +36 -0
  1000. mindspore/ops/_op_impl/cpu/layer_norm_grad.py +38 -0
  1001. mindspore/ops/_op_impl/cpu/maximum.py +35 -0
  1002. mindspore/ops/_op_impl/cpu/maximum_grad.py +47 -0
  1003. mindspore/ops/_op_impl/cpu/minimum.py +40 -0
  1004. mindspore/ops/_op_impl/cpu/minimum_grad.py +51 -0
  1005. mindspore/ops/_op_impl/cpu/mirror_pad.py +36 -0
  1006. mindspore/ops/_op_impl/cpu/mirror_pad_grad.py +36 -0
  1007. mindspore/ops/_op_impl/cpu/mul.py +32 -0
  1008. mindspore/ops/_op_impl/cpu/one_hot.py +31 -0
  1009. mindspore/ops/_op_impl/cpu/pad.py +32 -0
  1010. mindspore/ops/_op_impl/cpu/pow.py +32 -0
  1011. mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +42 -0
  1012. mindspore/ops/_op_impl/cpu/pyexecute.py +29 -0
  1013. mindspore/ops/_op_impl/cpu/pyfunc.py +29 -0
  1014. mindspore/ops/_op_impl/cpu/range.py +34 -0
  1015. mindspore/ops/_op_impl/cpu/real_div.py +33 -0
  1016. mindspore/ops/_op_impl/cpu/reduce_all.py +29 -0
  1017. mindspore/ops/_op_impl/cpu/reduce_any.py +29 -0
  1018. mindspore/ops/_op_impl/cpu/reduce_max.py +32 -0
  1019. mindspore/ops/_op_impl/cpu/reduce_mean.py +40 -0
  1020. mindspore/ops/_op_impl/cpu/reduce_min.py +32 -0
  1021. mindspore/ops/_op_impl/cpu/reduce_prod.py +40 -0
  1022. mindspore/ops/_op_impl/cpu/reduce_std.py +31 -0
  1023. mindspore/ops/_op_impl/cpu/reduce_sum.py +41 -0
  1024. mindspore/ops/_op_impl/cpu/space_to_batch_nd.py +38 -0
  1025. mindspore/ops/_op_impl/cpu/sparse_slice.py +62 -0
  1026. mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +60 -0
  1027. mindspore/ops/_op_impl/cpu/split.py +34 -0
  1028. mindspore/ops/_op_impl/cpu/sspaddmm.py +95 -0
  1029. mindspore/ops/_op_impl/cpu/stack.py +38 -0
  1030. mindspore/ops/_op_impl/cpu/sub.py +32 -0
  1031. mindspore/ops/_op_impl/cpu/tensor_copy_slices.py +41 -0
  1032. mindspore/ops/_op_impl/cpu/tile.py +37 -0
  1033. mindspore/ops/_op_impl/cpu/top_k.py +31 -0
  1034. mindspore/ops/_op_impl/cpu/transpose.py +39 -0
  1035. mindspore/ops/_primitive_cache.py +90 -0
  1036. mindspore/ops/_register_for_op.py +73 -0
  1037. mindspore/ops/_utils/__init__.py +20 -0
  1038. mindspore/ops/_utils/utils.py +147 -0
  1039. mindspore/ops/_vmap/__init__.py +25 -0
  1040. mindspore/ops/_vmap/vmap_array_ops.py +2149 -0
  1041. mindspore/ops/_vmap/vmap_base.py +533 -0
  1042. mindspore/ops/_vmap/vmap_convolution_ops.py +441 -0
  1043. mindspore/ops/_vmap/vmap_debug_ops.py +50 -0
  1044. mindspore/ops/_vmap/vmap_grad_math_ops.py +274 -0
  1045. mindspore/ops/_vmap/vmap_grad_nn_ops.py +806 -0
  1046. mindspore/ops/_vmap/vmap_image_ops.py +194 -0
  1047. mindspore/ops/_vmap/vmap_math_ops.py +993 -0
  1048. mindspore/ops/_vmap/vmap_nn_ops.py +2250 -0
  1049. mindspore/ops/_vmap/vmap_other_ops.py +105 -0
  1050. mindspore/ops/_vmap/vmap_random_ops.py +122 -0
  1051. mindspore/ops/_vmap/vmap_sparse_ops.py +89 -0
  1052. mindspore/ops/auto_generate/__init__.py +31 -0
  1053. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +309 -0
  1054. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +252 -0
  1055. mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
  1056. mindspore/ops/auto_generate/gen_extend_func.py +1701 -0
  1057. mindspore/ops/auto_generate/gen_ops_def.py +8482 -0
  1058. mindspore/ops/auto_generate/gen_ops_prim.py +16704 -0
  1059. mindspore/ops/auto_generate/pyboost_inner_prim.py +549 -0
  1060. mindspore/ops/composite/__init__.py +71 -0
  1061. mindspore/ops/composite/base.py +1318 -0
  1062. mindspore/ops/composite/env_ops.py +41 -0
  1063. mindspore/ops/composite/math_ops.py +125 -0
  1064. mindspore/ops/composite/multitype_ops/__init__.py +77 -0
  1065. mindspore/ops/composite/multitype_ops/_compile_utils.py +1459 -0
  1066. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +897 -0
  1067. mindspore/ops/composite/multitype_ops/add_impl.py +606 -0
  1068. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +56 -0
  1069. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +56 -0
  1070. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +56 -0
  1071. mindspore/ops/composite/multitype_ops/div_impl.py +189 -0
  1072. mindspore/ops/composite/multitype_ops/equal_impl.py +335 -0
  1073. mindspore/ops/composite/multitype_ops/floordiv_impl.py +88 -0
  1074. mindspore/ops/composite/multitype_ops/getitem_impl.py +400 -0
  1075. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +109 -0
  1076. mindspore/ops/composite/multitype_ops/greater_impl.py +110 -0
  1077. mindspore/ops/composite/multitype_ops/in_impl.py +196 -0
  1078. mindspore/ops/composite/multitype_ops/left_shift_impl.py +37 -0
  1079. mindspore/ops/composite/multitype_ops/less_equal_impl.py +111 -0
  1080. mindspore/ops/composite/multitype_ops/less_impl.py +112 -0
  1081. mindspore/ops/composite/multitype_ops/logic_not_impl.py +113 -0
  1082. mindspore/ops/composite/multitype_ops/logical_and_impl.py +60 -0
  1083. mindspore/ops/composite/multitype_ops/logical_or_impl.py +61 -0
  1084. mindspore/ops/composite/multitype_ops/mod_impl.py +86 -0
  1085. mindspore/ops/composite/multitype_ops/mul_impl.py +294 -0
  1086. mindspore/ops/composite/multitype_ops/negative_impl.py +79 -0
  1087. mindspore/ops/composite/multitype_ops/not_equal_impl.py +290 -0
  1088. mindspore/ops/composite/multitype_ops/not_in_impl.py +196 -0
  1089. mindspore/ops/composite/multitype_ops/ones_like_impl.py +96 -0
  1090. mindspore/ops/composite/multitype_ops/pow_impl.py +87 -0
  1091. mindspore/ops/composite/multitype_ops/right_shift_impl.py +37 -0
  1092. mindspore/ops/composite/multitype_ops/setitem_impl.py +884 -0
  1093. mindspore/ops/composite/multitype_ops/sub_impl.py +116 -0
  1094. mindspore/ops/composite/multitype_ops/uadd_impl.py +29 -0
  1095. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +228 -0
  1096. mindspore/ops/deprecated.py +315 -0
  1097. mindspore/ops/function/__init__.py +782 -0
  1098. mindspore/ops/function/array_func.py +7226 -0
  1099. mindspore/ops/function/clip_func.py +384 -0
  1100. mindspore/ops/function/debug_func.py +181 -0
  1101. mindspore/ops/function/fft_func.py +44 -0
  1102. mindspore/ops/function/grad/__init__.py +34 -0
  1103. mindspore/ops/function/grad/grad_func.py +1425 -0
  1104. mindspore/ops/function/image_func.py +292 -0
  1105. mindspore/ops/function/linalg_func.py +416 -0
  1106. mindspore/ops/function/math_func.py +12228 -0
  1107. mindspore/ops/function/nn_func.py +8609 -0
  1108. mindspore/ops/function/other_func.py +115 -0
  1109. mindspore/ops/function/parameter_func.py +134 -0
  1110. mindspore/ops/function/random_func.py +1715 -0
  1111. mindspore/ops/function/reshard_func.py +104 -0
  1112. mindspore/ops/function/sparse_func.py +884 -0
  1113. mindspore/ops/function/sparse_unary_func.py +2422 -0
  1114. mindspore/ops/function/spectral_func.py +150 -0
  1115. mindspore/ops/function/vmap_func.py +117 -0
  1116. mindspore/ops/functional.py +464 -0
  1117. mindspore/ops/op_info_register.py +1572 -0
  1118. mindspore/ops/operations/__init__.py +722 -0
  1119. mindspore/ops/operations/_csr_ops.py +403 -0
  1120. mindspore/ops/operations/_custom_grad.py +181 -0
  1121. mindspore/ops/operations/_embedding_cache_ops.py +307 -0
  1122. mindspore/ops/operations/_grad_ops.py +2978 -0
  1123. mindspore/ops/operations/_infer_ops.py +19 -0
  1124. mindspore/ops/operations/_inner_ops.py +2544 -0
  1125. mindspore/ops/operations/_map_tensor_ops.py +112 -0
  1126. mindspore/ops/operations/_ms_kernel.py +601 -0
  1127. mindspore/ops/operations/_ocr_ops.py +379 -0
  1128. mindspore/ops/operations/_opaque_predicate_registry.py +41 -0
  1129. mindspore/ops/operations/_pyfunc_registry.py +58 -0
  1130. mindspore/ops/operations/_quant_ops.py +1844 -0
  1131. mindspore/ops/operations/_rl_inner_ops.py +1231 -0
  1132. mindspore/ops/operations/_scalar_ops.py +106 -0
  1133. mindspore/ops/operations/_sequence_ops.py +1155 -0
  1134. mindspore/ops/operations/_sparse_grad_ops.py +56 -0
  1135. mindspore/ops/operations/_tensor_array.py +359 -0
  1136. mindspore/ops/operations/_thor_ops.py +807 -0
  1137. mindspore/ops/operations/array_ops.py +6124 -0
  1138. mindspore/ops/operations/comm_ops.py +1985 -0
  1139. mindspore/ops/operations/control_ops.py +127 -0
  1140. mindspore/ops/operations/custom_ops.py +1129 -0
  1141. mindspore/ops/operations/debug_ops.py +678 -0
  1142. mindspore/ops/operations/image_ops.py +1041 -0
  1143. mindspore/ops/operations/inner_ops.py +697 -0
  1144. mindspore/ops/operations/linalg_ops.py +95 -0
  1145. mindspore/ops/operations/manually_defined/__init__.py +24 -0
  1146. mindspore/ops/operations/manually_defined/_inner.py +73 -0
  1147. mindspore/ops/operations/manually_defined/ops_def.py +2271 -0
  1148. mindspore/ops/operations/math_ops.py +5095 -0
  1149. mindspore/ops/operations/nn_ops.py +9575 -0
  1150. mindspore/ops/operations/other_ops.py +874 -0
  1151. mindspore/ops/operations/random_ops.py +1288 -0
  1152. mindspore/ops/operations/reshard_ops.py +53 -0
  1153. mindspore/ops/operations/rl_ops.py +288 -0
  1154. mindspore/ops/operations/sparse_ops.py +2753 -0
  1155. mindspore/ops/operations/spectral_ops.py +111 -0
  1156. mindspore/ops/primitive.py +1046 -0
  1157. mindspore/ops/signature.py +54 -0
  1158. mindspore/ops/vm_impl_registry.py +91 -0
  1159. mindspore/ops_generate/__init__.py +27 -0
  1160. mindspore/ops_generate/arg_dtype_cast.py +252 -0
  1161. mindspore/ops_generate/arg_handler.py +197 -0
  1162. mindspore/ops_generate/gen_aclnn_implement.py +263 -0
  1163. mindspore/ops_generate/gen_constants.py +36 -0
  1164. mindspore/ops_generate/gen_ops.py +1099 -0
  1165. mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
  1166. mindspore/ops_generate/gen_pyboost_func.py +1052 -0
  1167. mindspore/ops_generate/gen_utils.py +209 -0
  1168. mindspore/ops_generate/op_proto.py +145 -0
  1169. mindspore/ops_generate/pyboost_utils.py +367 -0
  1170. mindspore/ops_generate/template.py +261 -0
  1171. mindspore/parallel/__init__.py +30 -0
  1172. mindspore/parallel/_auto_parallel_context.py +1486 -0
  1173. mindspore/parallel/_cell_wrapper.py +174 -0
  1174. mindspore/parallel/_cost_model_context.py +700 -0
  1175. mindspore/parallel/_dp_allreduce_fusion.py +159 -0
  1176. mindspore/parallel/_offload_context.py +275 -0
  1177. mindspore/parallel/_parallel_serialization.py +561 -0
  1178. mindspore/parallel/_ps_context.py +242 -0
  1179. mindspore/parallel/_recovery_context.py +110 -0
  1180. mindspore/parallel/_tensor.py +730 -0
  1181. mindspore/parallel/_transformer/__init__.py +35 -0
  1182. mindspore/parallel/_transformer/layers.py +765 -0
  1183. mindspore/parallel/_transformer/loss.py +251 -0
  1184. mindspore/parallel/_transformer/moe.py +693 -0
  1185. mindspore/parallel/_transformer/op_parallel_config.py +222 -0
  1186. mindspore/parallel/_transformer/transformer.py +3119 -0
  1187. mindspore/parallel/_utils.py +612 -0
  1188. mindspore/parallel/algo_parameter_config.py +400 -0
  1189. mindspore/parallel/checkpoint_transform.py +650 -0
  1190. mindspore/parallel/cluster/__init__.py +15 -0
  1191. mindspore/parallel/cluster/process_entity/__init__.py +18 -0
  1192. mindspore/parallel/cluster/process_entity/_api.py +352 -0
  1193. mindspore/parallel/cluster/process_entity/_utils.py +101 -0
  1194. mindspore/parallel/cluster/run.py +136 -0
  1195. mindspore/parallel/mpi/__init__.py +14 -0
  1196. mindspore/parallel/mpi/_mpi_config.py +116 -0
  1197. mindspore/parallel/parameter_broadcast.py +151 -0
  1198. mindspore/parallel/shard.py +481 -0
  1199. mindspore/parallel/transform_safetensors.py +993 -0
  1200. mindspore/perf_msvcbuildinsights.dll +0 -0
  1201. mindspore/pgodb140.dll +0 -0
  1202. mindspore/pgort140.dll +0 -0
  1203. mindspore/profiler/__init__.py +28 -0
  1204. mindspore/profiler/common/__init__.py +14 -0
  1205. mindspore/profiler/common/constant.py +29 -0
  1206. mindspore/profiler/common/exceptions/__init__.py +14 -0
  1207. mindspore/profiler/common/exceptions/error_code.py +83 -0
  1208. mindspore/profiler/common/exceptions/exceptions.py +286 -0
  1209. mindspore/profiler/common/process_pool.py +41 -0
  1210. mindspore/profiler/common/registry.py +47 -0
  1211. mindspore/profiler/common/singleton.py +28 -0
  1212. mindspore/profiler/common/struct_type.py +118 -0
  1213. mindspore/profiler/common/util.py +472 -0
  1214. mindspore/profiler/common/validator/__init__.py +14 -0
  1215. mindspore/profiler/common/validator/validate_path.py +84 -0
  1216. mindspore/profiler/dynamic_profiler.py +694 -0
  1217. mindspore/profiler/envprofiling.py +254 -0
  1218. mindspore/profiler/parser/__init__.py +14 -0
  1219. mindspore/profiler/parser/aicpu_data_parser.py +272 -0
  1220. mindspore/profiler/parser/ascend_analysis/__init__.py +14 -0
  1221. mindspore/profiler/parser/ascend_analysis/constant.py +71 -0
  1222. mindspore/profiler/parser/ascend_analysis/file_manager.py +180 -0
  1223. mindspore/profiler/parser/ascend_analysis/function_event.py +185 -0
  1224. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +136 -0
  1225. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +131 -0
  1226. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +104 -0
  1227. mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
  1228. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +123 -0
  1229. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
  1230. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +75 -0
  1231. mindspore/profiler/parser/ascend_cluster_generator.py +116 -0
  1232. mindspore/profiler/parser/ascend_communicate_generator.py +314 -0
  1233. mindspore/profiler/parser/ascend_flops_generator.py +116 -0
  1234. mindspore/profiler/parser/ascend_fpbp_generator.py +82 -0
  1235. mindspore/profiler/parser/ascend_hccl_generator.py +271 -0
  1236. mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
  1237. mindspore/profiler/parser/ascend_memory_generator.py +185 -0
  1238. mindspore/profiler/parser/ascend_msprof_exporter.py +282 -0
  1239. mindspore/profiler/parser/ascend_msprof_generator.py +187 -0
  1240. mindspore/profiler/parser/ascend_op_generator.py +334 -0
  1241. mindspore/profiler/parser/ascend_steptrace_generator.py +94 -0
  1242. mindspore/profiler/parser/ascend_timeline_generator.py +545 -0
  1243. mindspore/profiler/parser/base_timeline_generator.py +483 -0
  1244. mindspore/profiler/parser/container.py +229 -0
  1245. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +697 -0
  1246. mindspore/profiler/parser/flops_parser.py +531 -0
  1247. mindspore/profiler/parser/framework_enum.py +111 -0
  1248. mindspore/profiler/parser/framework_parser.py +464 -0
  1249. mindspore/profiler/parser/framework_struct.py +61 -0
  1250. mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
  1251. mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
  1252. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
  1253. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
  1254. mindspore/profiler/parser/hccl_parser.py +573 -0
  1255. mindspore/profiler/parser/hwts_log_parser.py +122 -0
  1256. mindspore/profiler/parser/integrator.py +526 -0
  1257. mindspore/profiler/parser/memory_usage_parser.py +277 -0
  1258. mindspore/profiler/parser/minddata_analyzer.py +800 -0
  1259. mindspore/profiler/parser/minddata_parser.py +186 -0
  1260. mindspore/profiler/parser/minddata_pipeline_parser.py +299 -0
  1261. mindspore/profiler/parser/op_intermediate_parser.py +149 -0
  1262. mindspore/profiler/parser/optime_parser.py +250 -0
  1263. mindspore/profiler/parser/profiler_info.py +213 -0
  1264. mindspore/profiler/parser/step_trace_parser.py +666 -0
  1265. mindspore/profiler/profiler.py +153 -0
  1266. mindspore/profiler/profiling.py +1922 -0
  1267. mindspore/rewrite/__init__.py +28 -0
  1268. mindspore/rewrite/api/__init__.py +17 -0
  1269. mindspore/rewrite/api/node.py +519 -0
  1270. mindspore/rewrite/api/node_type.py +53 -0
  1271. mindspore/rewrite/api/pattern_engine.py +490 -0
  1272. mindspore/rewrite/api/scoped_value.py +181 -0
  1273. mindspore/rewrite/api/symbol_tree.py +497 -0
  1274. mindspore/rewrite/ast_helpers/__init__.py +25 -0
  1275. mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
  1276. mindspore/rewrite/ast_helpers/ast_finder.py +404 -0
  1277. mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
  1278. mindspore/rewrite/ast_helpers/ast_modifier.py +605 -0
  1279. mindspore/rewrite/ast_helpers/ast_replacer.py +79 -0
  1280. mindspore/rewrite/common/__init__.py +19 -0
  1281. mindspore/rewrite/common/config.py +24 -0
  1282. mindspore/rewrite/common/error_log.py +39 -0
  1283. mindspore/rewrite/common/event.py +28 -0
  1284. mindspore/rewrite/common/namer.py +271 -0
  1285. mindspore/rewrite/common/namespace.py +118 -0
  1286. mindspore/rewrite/common/observable.py +44 -0
  1287. mindspore/rewrite/common/observer.py +54 -0
  1288. mindspore/rewrite/node/__init__.py +22 -0
  1289. mindspore/rewrite/node/call_function.py +95 -0
  1290. mindspore/rewrite/node/cell_container.py +139 -0
  1291. mindspore/rewrite/node/control_flow.py +113 -0
  1292. mindspore/rewrite/node/node.py +1428 -0
  1293. mindspore/rewrite/node/node_manager.py +283 -0
  1294. mindspore/rewrite/node/node_topological_manager.py +223 -0
  1295. mindspore/rewrite/parsers/__init__.py +29 -0
  1296. mindspore/rewrite/parsers/arguments_parser.py +63 -0
  1297. mindspore/rewrite/parsers/assign_parser.py +852 -0
  1298. mindspore/rewrite/parsers/attribute_parser.py +57 -0
  1299. mindspore/rewrite/parsers/class_def_parser.py +289 -0
  1300. mindspore/rewrite/parsers/constant_parser.py +104 -0
  1301. mindspore/rewrite/parsers/container_parser.py +88 -0
  1302. mindspore/rewrite/parsers/expr_parser.py +55 -0
  1303. mindspore/rewrite/parsers/for_parser.py +61 -0
  1304. mindspore/rewrite/parsers/function_def_parser.py +84 -0
  1305. mindspore/rewrite/parsers/if_parser.py +85 -0
  1306. mindspore/rewrite/parsers/module_parser.py +117 -0
  1307. mindspore/rewrite/parsers/parser.py +43 -0
  1308. mindspore/rewrite/parsers/parser_register.py +86 -0
  1309. mindspore/rewrite/parsers/return_parser.py +37 -0
  1310. mindspore/rewrite/parsers/while_parser.py +59 -0
  1311. mindspore/rewrite/sparsify/__init__.py +0 -0
  1312. mindspore/rewrite/sparsify/sparse_transformer.py +457 -0
  1313. mindspore/rewrite/sparsify/sparsify.py +112 -0
  1314. mindspore/rewrite/sparsify/utils.py +179 -0
  1315. mindspore/rewrite/symbol_tree/__init__.py +20 -0
  1316. mindspore/rewrite/symbol_tree/symbol_tree.py +1819 -0
  1317. mindspore/rewrite/symbol_tree/symbol_tree_builder.py +76 -0
  1318. mindspore/rewrite/symbol_tree/symbol_tree_dumper.py +142 -0
  1319. mindspore/run_check/__init__.py +20 -0
  1320. mindspore/run_check/_check_version.py +507 -0
  1321. mindspore/run_check/run_check.py +66 -0
  1322. mindspore/safeguard/__init__.py +18 -0
  1323. mindspore/safeguard/rewrite_obfuscation.py +875 -0
  1324. mindspore/swresample-4.dll +0 -0
  1325. mindspore/swscale-6.dll +0 -0
  1326. mindspore/tbbmalloc.dll +0 -0
  1327. mindspore/tinyxml2.dll +0 -0
  1328. mindspore/train/__init__.py +48 -0
  1329. mindspore/train/_utils.py +465 -0
  1330. mindspore/train/amp.py +935 -0
  1331. mindspore/train/anf_ir_pb2.py +1517 -0
  1332. mindspore/train/callback/__init__.py +44 -0
  1333. mindspore/train/callback/_backup_and_restore.py +117 -0
  1334. mindspore/train/callback/_callback.py +613 -0
  1335. mindspore/train/callback/_checkpoint.py +814 -0
  1336. mindspore/train/callback/_cluster_monitor.py +201 -0
  1337. mindspore/train/callback/_dataset_graph.py +150 -0
  1338. mindspore/train/callback/_early_stop.py +239 -0
  1339. mindspore/train/callback/_flops_collector.py +239 -0
  1340. mindspore/train/callback/_history.py +92 -0
  1341. mindspore/train/callback/_lambda_callback.py +80 -0
  1342. mindspore/train/callback/_landscape.py +1049 -0
  1343. mindspore/train/callback/_loss_monitor.py +107 -0
  1344. mindspore/train/callback/_lr_scheduler_callback.py +76 -0
  1345. mindspore/train/callback/_on_request_exit.py +298 -0
  1346. mindspore/train/callback/_reduce_lr_on_plateau.py +226 -0
  1347. mindspore/train/callback/_summary_collector.py +1184 -0
  1348. mindspore/train/callback/_tft_register.py +352 -0
  1349. mindspore/train/callback/_time_monitor.py +141 -0
  1350. mindspore/train/checkpoint_pb2.py +233 -0
  1351. mindspore/train/data_sink.py +219 -0
  1352. mindspore/train/dataset_helper.py +692 -0
  1353. mindspore/train/lineage_pb2.py +1260 -0
  1354. mindspore/train/loss_scale_manager.py +213 -0
  1355. mindspore/train/memory_profiling_pb2.py +298 -0
  1356. mindspore/train/metrics/__init__.py +175 -0
  1357. mindspore/train/metrics/accuracy.py +133 -0
  1358. mindspore/train/metrics/auc.py +129 -0
  1359. mindspore/train/metrics/bleu_score.py +170 -0
  1360. mindspore/train/metrics/confusion_matrix.py +700 -0
  1361. mindspore/train/metrics/cosine_similarity.py +109 -0
  1362. mindspore/train/metrics/dice.py +116 -0
  1363. mindspore/train/metrics/error.py +175 -0
  1364. mindspore/train/metrics/fbeta.py +167 -0
  1365. mindspore/train/metrics/hausdorff_distance.py +333 -0
  1366. mindspore/train/metrics/loss.py +97 -0
  1367. mindspore/train/metrics/mean_surface_distance.py +189 -0
  1368. mindspore/train/metrics/metric.py +373 -0
  1369. mindspore/train/metrics/occlusion_sensitivity.py +225 -0
  1370. mindspore/train/metrics/perplexity.py +133 -0
  1371. mindspore/train/metrics/precision.py +160 -0
  1372. mindspore/train/metrics/recall.py +159 -0
  1373. mindspore/train/metrics/roc.py +223 -0
  1374. mindspore/train/metrics/root_mean_square_surface_distance.py +191 -0
  1375. mindspore/train/metrics/topk.py +167 -0
  1376. mindspore/train/mind_ir_pb2.py +1908 -0
  1377. mindspore/train/model.py +2252 -0
  1378. mindspore/train/node_strategy_pb2.py +653 -0
  1379. mindspore/train/print_pb2.py +184 -0
  1380. mindspore/train/profiling_parallel_pb2.py +151 -0
  1381. mindspore/train/serialization.py +3325 -0
  1382. mindspore/train/summary/__init__.py +23 -0
  1383. mindspore/train/summary/_lineage_adapter.py +41 -0
  1384. mindspore/train/summary/_summary_adapter.py +496 -0
  1385. mindspore/train/summary/_writer_pool.py +207 -0
  1386. mindspore/train/summary/enums.py +56 -0
  1387. mindspore/train/summary/summary_record.py +581 -0
  1388. mindspore/train/summary/writer.py +167 -0
  1389. mindspore/train/summary_pb2.py +1165 -0
  1390. mindspore/train/train_thor/__init__.py +20 -0
  1391. mindspore/train/train_thor/convert_utils.py +268 -0
  1392. mindspore/train/train_thor/dataset_helper.py +192 -0
  1393. mindspore/train/train_thor/model_thor.py +257 -0
  1394. mindspore/turbojpeg.dll +0 -0
  1395. mindspore/utils/__init__.py +21 -0
  1396. mindspore/utils/utils.py +60 -0
  1397. mindspore/vcmeta.dll +0 -0
  1398. mindspore/vcomp140.dll +0 -0
  1399. mindspore/vcruntime140.dll +0 -0
  1400. mindspore/vcruntime140_1.dll +0 -0
  1401. mindspore/version.py +1 -0
  1402. mindspore-2.4.0.dist-info/METADATA +352 -0
  1403. mindspore-2.4.0.dist-info/RECORD +1406 -0
  1404. mindspore-2.4.0.dist-info/WHEEL +5 -0
  1405. mindspore-2.4.0.dist-info/entry_points.txt +3 -0
  1406. mindspore-2.4.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1505 @@
+ # Copyright 2020-2021 Huawei Technologies Co., Ltd
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ============================================================================
+ """conv"""
+ from __future__ import absolute_import
+
+ import math
+ import numpy as np
+
+ from mindspore import context
+ from mindspore.ops import operations as P
+ import mindspore.common.dtype as mstype
+ from mindspore.common.parameter import Parameter
+ from mindspore.common.initializer import initializer, HeUniform, Uniform, _calculate_fan_in_and_fan_out
+ from mindspore.common.tensor import Tensor
+ from mindspore import _checkparam as Validator
+ from mindspore._checkparam import twice, _check_3d_int_or_tuple
+ from mindspore._extends import cell_attr_register
+ from mindspore.nn.cell import Cell
+ from mindspore.ops.primitive import _primexpr
+
+ __all__ = ['Conv2d', 'Conv2dTranspose', 'Conv1d', 'Conv1dTranspose', 'Conv3d', 'Conv3dTranspose']
+
+
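For orientation: the six names in `__all__` above are the public convolution layers exposed through `mindspore.nn`. A quick smoke test of the most common one, mirroring the docstring examples further down (an editor's illustration, not part of the diff):

import mindspore
import numpy as np
from mindspore import Tensor, nn

# nn.Conv2d defaults to pad_mode='same', so H and W are preserved at stride 1.
net = nn.Conv2d(3, 8, 3)
x = Tensor(np.ones([1, 3, 16, 16]), mindspore.float32)
print(net(x).shape)  # expected (1, 8, 16, 16)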
+ class _Conv(Cell):
+     """
+     Applies an N-D convolution over an input signal composed of several input planes.
+     """
+
+     def __init__(self,
+                  in_channels,
+                  out_channels,
+                  kernel_size,
+                  stride,
+                  pad_mode,
+                  padding,
+                  dilation,
+                  group,
+                  has_bias,
+                  weight_init,
+                  bias_init,
+                  data_format='NCHW',
+                  transposed=False,
+                  dtype=mstype.float32):
+         """Initialize _Conv."""
+         super(_Conv, self).__init__()
+         self.in_channels = Validator.check_positive_int(in_channels, 'in_channels', self.cls_name)
+         self.out_channels = Validator.check_positive_int(out_channels, 'out_channels', self.cls_name)
+         self.kernel_size = kernel_size
+         self.stride = stride
+         self.pad_mode = pad_mode
+         self.data_format = Validator.check_string(data_format, ['NCHW', 'NHWC', 'NCDHW'], 'format', self.cls_name)
+         if context.get_context("device_target") != "GPU" and self.data_format == "NHWC":
+             raise ValueError(f"For '{self.cls_name}', the \"NHWC\" format is only supported on the GPU target, "
+                              f"but got 'format': {self.data_format} and "
+                              f"platform: {context.get_context('device_target')}.")
+         if isinstance(padding, int):
+             Validator.check_non_negative_int(padding, 'padding', self.cls_name)
+             self.padding = padding
+         elif isinstance(padding, tuple):
+             for pad in padding:
+                 Validator.check_non_negative_int(pad, 'padding item', self.cls_name)
+             self.padding = padding
+         else:
+             raise TypeError(f"For '{self.cls_name}', the type of 'padding' must be int or tuple(int), "
+                             f"but got {type(padding).__name__}.")
+
+         self.dilation = dilation
+         self.group = Validator.check_positive_int(group)
+         self.has_bias = has_bias
+         for kernel_size_elem in kernel_size:
+             Validator.check_positive_int(kernel_size_elem, 'kernel_size item', self.cls_name)
+         for stride_elem in stride:
+             Validator.check_positive_int(stride_elem, 'stride item', self.cls_name)
+         for dilation_elem in dilation:
+             Validator.check_positive_int(dilation_elem, 'dilation item', self.cls_name)
+         if in_channels % group != 0:
+             raise ValueError(f"For '{self.cls_name}', the attr 'in_channels' must be divisible by attr 'group', "
+                              f"but got 'in_channels': {in_channels} and 'group': {group}.")
+         if out_channels % group != 0:
+             raise ValueError(f"For '{self.cls_name}', the attr 'out_channels' must be divisible by attr 'group', "
+                              f"but got 'out_channels': {out_channels} and 'group': {group}.")
+         if transposed:
+             shape = [in_channels, out_channels // group, *kernel_size]
+         else:
+             shape = [out_channels, *kernel_size, in_channels // group] if self.data_format == "NHWC" else \
+                 [out_channels, in_channels // group, *kernel_size]
+         if weight_init is None:
+             weight_init = HeUniform(math.sqrt(5))
+         self.weight_init = weight_init
+         self.weight = Parameter(initializer(self.weight_init, shape, dtype=dtype), name='weight')
+
+         self.bias_init = bias_init
+         if Validator.check_bool(has_bias, "has_bias", self.cls_name):
+             if bias_init is None:
+                 fan_in, _ = _calculate_fan_in_and_fan_out(shape)
+                 if fan_in != 0:
+                     bound = 1 / math.sqrt(fan_in)
+                     bias_init = Uniform(bound)
+                 else:
+                     bias_init = 'zeros'
+                 self.bias_init = bias_init
+             self.bias = Parameter(initializer(self.bias_init, [out_channels], dtype=dtype), name='bias')
+         else:
+             self.bias = None
+
+     def construct(self, *inputs):
+         """Must be overridden by all subclasses."""
+         raise NotImplementedError
+
+     def extend_repr(self):
+         s = 'input_channels={}, output_channels={}, kernel_size={}, ' \
+             'stride={}, pad_mode={}, padding={}, dilation={}, ' \
+             'group={}, has_bias={}, ' \
+             'weight_init={}, bias_init={}, format={}'.format(
+                 self.in_channels,
+                 self.out_channels,
+                 self.kernel_size,
+                 self.stride,
+                 self.pad_mode,
+                 self.padding,
+                 self.dilation,
+                 self.group,
+                 self.has_bias,
+                 self.weight_init,
+                 self.bias_init,
+                 self.data_format)
+         return s
+
+
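The default initializers in `_Conv.__init__` above are worth unpacking: when `weight_init` is ``None`` the weight uses ``HeUniform(math.sqrt(5))``, and when `bias_init` is ``None`` the bias is drawn from a uniform distribution whose bound comes from the weight's fan-in. A minimal sketch of that bias-bound arithmetic (an editor's illustration; the shapes and numbers are hypothetical, not from the diff):

import math

# For a non-transposed NCHW conv weight of shape
# (out_channels, in_channels // group, kH, kW), fan_in is
# in_channels // group * kH * kW, which is what
# _calculate_fan_in_and_fan_out returns for this shape.
out_channels, in_channels, group = 240, 120, 1
k_h, k_w = 4, 4
fan_in = (in_channels // group) * k_h * k_w

# The `fan_in != 0` branch above sets bias_init = Uniform(1 / sqrt(fan_in)).
bound = 1 / math.sqrt(fan_in)
print(fan_in, round(bound, 6))  # 1920 0.022822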
+ class Conv2d(_Conv):
+     r"""
+     2D convolution layer.
+
+     Applies a 2D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, H_{in}, W_{in})`,
+     where :math:`N` is batch size, :math:`C` is channel number, :math:`H` is feature height, :math:`W` is feature width.
+
+     The output is calculated based on formula:
+
+     .. math::
+
+         \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
+         \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
+
+     where :math:`bias` is the output channel bias, :math:`ccor` is
+     the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
+     :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
+
+     Here are the indices' meanings:
+
+     - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
+       where :math:`N` is the batch size of the input.
+
+     - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
+       where :math:`C_{out}` is the number of
+       output channels, which is also equal to the number of kernels.
+
+     - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
+       where :math:`C_{in}` is the number of
+       input channels, which is also equal to the number of channels in the convolutional kernels.
+
+     Therefore, in the above formula, :math:`{bias}(C_{\text{out}_j})` represents the bias of the :math:`j`-th
+     output channel, :math:`{weight}(C_{\text{out}_j}, k)` represents the slice of the :math:`j`-th convolutional
+     kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+     channel in the :math:`i`-th batch of the input feature map.
+
+     The shape of the convolutional kernel is given by :math:`(\text{kernel_size[0]},\text{kernel_size[1]})`,
+     where :math:`\text{kernel_size[0]}`
+     and :math:`\text{kernel_size[1]}` are the height and width of the kernel, respectively.
+     If we consider the input and output channels as well as the `group` parameter, the complete kernel shape
+     will be :math:`(C_{out}, C_{in} / \text{group}, \text{kernel_size[0]}, \text{kernel_size[1]})`,
+     where `group` is the number of groups dividing `x`'s input channel when applying group convolution.
+
+     For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
+     <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
+
+     Note:
+         On the Ascend platform, only group convolution in depthwise convolution scenarios is supported.
+         That is, when `group>1`, the condition `in\_channels` = `out\_channels` = `group` must be satisfied.
+
+     Args:
+         in_channels (int): The channel number of the input tensor of the Conv2d layer.
+         out_channels (int): The channel number of the output tensor of the Conv2d layer.
+         kernel_size (Union[int, tuple[int]]): Specifies the height and width of the 2D convolution kernel.
+             The data type is an integer or a tuple of two integers. An integer represents the height
+             and width of the convolution kernel. A tuple of two integers represents the height
+             and width of the convolution kernel respectively.
+         stride (Union[int, tuple[int]], optional): The movement stride of the 2D convolution kernel.
+             The data type is an integer or a tuple of two or four integers. An integer represents the movement step size
+             in both height and width directions. A tuple of two integers represents the movement step size in the height
+             and width directions respectively. Default: ``1`` .
+         pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+             ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"same"`` .
+
+             - ``"same"``: Pad the input around its edges so that the shape of input and output
+               are the same when `stride` is set to ``1``.
+               The amount of padding is calculated by the operator internally. If the amount is even, it is
+               uniformly distributed around the input; if it is odd, the excess amount goes to the right/bottom side.
+               If this mode is set, `padding` must be 0.
+             - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+               possible height and width. Extra pixels that could not complete a full stride will
+               be discarded. If this mode is set, `padding` must be 0.
+             - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+               in the height and width directions is determined by the `padding` parameter.
+               If this mode is set, `padding` must be greater than or equal to 0.
+
+         padding (Union[int, tuple[int]], optional): The number of padding
+             on the height and width directions of the input.
+             The data type is an integer or a tuple of four integers. If `padding` is an integer,
+             then the top, bottom, left, and right padding are all equal to `padding`.
+             If `padding` is a tuple of 4 integers, then the top, bottom, left, and right padding
+             is equal to `padding[0]`, `padding[1]`, `padding[2]`, and `padding[3]` respectively.
+             The value should be greater than or equal to 0. Default: ``0`` .
+         dilation (Union(int, tuple[int]), optional): Specifies the dilation rate to use for dilated convolution.
+             It can be a single int or a tuple of 2 or 4 integers. A single int means the dilation size is the same
+             in both the height and width directions. A tuple of two ints represents the dilation size in
+             the height and width directions, respectively. For a tuple of four ints, the two ints corresponding
+             to the (N, C) dimensions are treated as 1, and the two corresponding to the (H, W) dimensions are the
+             dilation sizes in the height and width directions respectively.
+             Assuming :math:`dilation=(d0, d1)`, the convolutional kernel samples the input with a
+             spacing of :math:`d0-1` elements in the height direction and :math:`d1-1` elements in the width direction.
+             The values in the height and width dimensions are in the ranges [1, H] and [1, W], respectively.
+             Default: ``1`` .
+         group (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
+             divisible by `group`. If the group is equal to `in_channels` and `out_channels`,
+             this 2D convolution layer can also be called a 2D depthwise convolution layer. Default: ``1`` .
+         has_bias (bool, optional): Whether the Conv2d layer has a bias parameter. Default: ``False`` .
+         weight_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initialization method of
+             weight parameter.
+             It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
+             values from ``'TruncatedNormal'`` , ``'Normal'`` , ``'Uniform'`` , ``'HeUniform'`` and ``'XavierUniform'``
+             distributions as well as constant ``'One'`` and ``'Zero'`` distributions are possible. Alias
+             ``'xavier_uniform'`` , ``'he_uniform'`` , ``'ones'`` and ``'zeros'`` are acceptable. Uppercase and
+             lowercase are both acceptable. Refer to the values of
+             `Initializer <https://www.mindspore.cn/docs/en/master/api_python/mindspore.common.initializer.html>`_,
+             for more details. Default: ``None`` , weight will be initialized using ``'HeUniform'``.
+         bias_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initialization method of bias parameter.
+             Available initialization methods are the same as 'weight_init'. Refer to the values of
+             `Initializer <https://www.mindspore.cn/docs/en/master/api_python/mindspore.common.initializer.html>`_,
+             for more details. Default: ``None`` , bias will be initialized using ``'Uniform'`` .
+         data_format (str, optional): The optional value for data format, is ``'NHWC'`` or ``'NCHW'`` .
+             Default: ``'NCHW'`` . (NHWC is only supported in GPU now.)
+         dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
+
+     Inputs:
+         - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` \
+           or :math:`(N, H_{in}, W_{in}, C_{in})`.
+
+     Outputs:
+         Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})` or :math:`(N, H_{out}, W_{out}, C_{out})`.
+
+         pad_mode is ``'same'``:
+
+         .. math::
+             \begin{array}{ll} \\
+                 H_{out} = \left \lceil{\frac{H_{in}}{\text{stride[0]}}} \right \rceil \\
+                 W_{out} = \left \lceil{\frac{W_{in}}{\text{stride[1]}}} \right \rceil \\
+             \end{array}
+
+         pad_mode is ``'valid'``:
+
+         .. math::
+             \begin{array}{ll} \\
+                 H_{out} = \left \lceil{\frac{H_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) }
+                 {\text{stride[0]}}} \right \rceil \\
+                 W_{out} = \left \lceil{\frac{W_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) }
+                 {\text{stride[1]}}} \right \rceil \\
+             \end{array}
+
+         pad_mode is ``'pad'``:
+
+         .. math::
+             \begin{array}{ll} \\
+                 H_{out} = \left \lfloor{\frac{H_{in} + padding[0] + padding[1] - (\text{kernel_size[0]} - 1) \times
+                 \text{dilation[0]} - 1 }{\text{stride[0]}} + 1} \right \rfloor \\
+                 W_{out} = \left \lfloor{\frac{W_{in} + padding[2] + padding[3] - (\text{kernel_size[1]} - 1) \times
+                 \text{dilation[1]} - 1 }{\text{stride[1]}} + 1} \right \rfloor \\
+             \end{array}
+
+     Raises:
+         TypeError: If `in_channels`, `out_channels` or `group` is not an int.
+         TypeError: If `kernel_size`, `stride`, `padding` or `dilation` is neither an int nor a tuple.
+         ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
+         ValueError: If `padding` is less than 0.
+         ValueError: If `pad_mode` is not one of 'same', 'valid', 'pad'.
+         ValueError: If `padding` is a tuple whose length is not equal to 4.
+         ValueError: If `pad_mode` is not equal to 'pad' and `padding` is not equal to (0, 0, 0, 0).
+         ValueError: If `data_format` is neither 'NCHW' nor 'NHWC'.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore
+         >>> from mindspore import Tensor, nn
+         >>> import numpy as np
+         >>> net = nn.Conv2d(120, 240, 4, has_bias=False, weight_init='normal')
+         >>> x = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
+         >>> output = net(x).shape
+         >>> print(output)
+         (1, 240, 1024, 640)
+     """
+
+     @cell_attr_register
+     def __init__(self,
+                  in_channels,
+                  out_channels,
+                  kernel_size,
+                  stride=1,
+                  pad_mode='same',
+                  padding=0,
+                  dilation=1,
+                  group=1,
+                  has_bias=False,
+                  weight_init=None,
+                  bias_init=None,
+                  data_format='NCHW',
+                  dtype=mstype.float32):
+         """Initialize Conv2d."""
+         kernel_size = twice(kernel_size)
+         stride = twice(stride)
+         self._dilation = dilation
+         dilation = twice(dilation)
+         Validator.check_positive_int(group, 'group', self.cls_name)
+         if not (in_channels % group == 0 and out_channels % group == 0):
+             raise ValueError(f"The arguments 'in_channels' and 'out_channels' should both be divisible " \
+                              f"by 'group', but got group:{group}, in_channels:{in_channels}, " \
+                              f"out_channels:{out_channels}.")
+         super(Conv2d, self).__init__(
+             in_channels,
+             out_channels,
+             kernel_size,
+             stride,
+             pad_mode,
+             padding,
+             dilation,
+             group,
+             has_bias,
+             weight_init,
+             bias_init,
+             data_format,
+             dtype=dtype)
+         self.conv2d = P.Conv2D(out_channel=self.out_channels,
+                                kernel_size=self.kernel_size,
+                                mode=1,
+                                pad_mode=self.pad_mode,
+                                pad=self.padding,
+                                stride=self.stride,
+                                dilation=self.dilation,
+                                group=self.group,
+                                data_format=self.data_format)
+         self.bias_add = P.BiasAdd(data_format=self.data_format)
+
+     def construct(self, x):
+         output = self.conv2d(x, self.weight)
+         if self.has_bias:
+             output = self.bias_add(output, self.bias)
+         return output
+
+
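The three output-shape formulas in the `Conv2d` docstring are easy to sanity-check numerically. A short sketch of the ``'pad'`` and ``'same'`` cases for the height axis (an editor's illustration; the helper name is invented for this example):

import math

def conv_out_len(size, kernel, stride, dilation, pad_before, pad_after):
    # floor((size + pads - (kernel - 1) * dilation - 1) / stride) + 1,
    # i.e. the pad_mode='pad' formula from the docstring.
    return (size + pad_before + pad_after - (kernel - 1) * dilation - 1) // stride + 1

# H_in=32, kernel=3, stride=2, dilation=1, padding=(1, 1):
print(conv_out_len(32, 3, 2, 1, 1, 1))  # 16
# pad_mode='same' gives ceil(H_in / stride) regardless of kernel size:
print(math.ceil(32 / 2))                # 16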
+ class Conv1d(_Conv):
+     r"""
+     1D convolution layer.
+
+     Applies a 1D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, L_{in})`,
+     where :math:`N` is batch size, :math:`C` is channel number, :math:`L` is input sequence width.
+
+     The output is calculated based on formula:
+
+     .. math::
+
+         \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
+         \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
+
+     where :math:`bias` is the output channel bias, :math:`ccor` is
+     the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
+     :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
+
+     Here are the indices' meanings:
+
+     - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
+       where :math:`N` is the batch size of the input.
+
+     - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
+       where :math:`C_{out}` is the number of
+       output channels, which is also equal to the number of kernels.
+
+     - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
+       where :math:`C_{in}` is the number of
+       input channels, which is also equal to the number of channels in the convolutional kernels.
+
+     Therefore, in the above formula, :math:`{bias}(C_{\text{out}_j})` represents the bias of the :math:`j`-th
+     output channel, :math:`{weight}(C_{\text{out}_j}, k)` represents the slice of the :math:`j`-th convolutional
+     kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+     channel in the :math:`i`-th batch of the input feature map.
+
+     The shape of the convolutional kernel is given by :math:`(\text{kernel_size})`,
+     where :math:`\text{kernel_size}` is the width of the kernel.
+     If we consider the input and output channels as well as the `group` parameter, the complete kernel shape
+     will be :math:`(C_{out}, C_{in} / \text{group}, \text{kernel_size})`,
+     where `group` is the number of groups dividing `x`'s input channel when applying group convolution.
+
+     For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
+     <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_
+     and `ConvNets <http://cs231n.github.io/convolutional-networks/>`_ .
+
+     Note:
+         On the Ascend platform, only group convolution in depthwise convolution scenarios is supported.
+         That is, when `group>1`, the condition `in\_channels` = `out\_channels` = `group` must be satisfied.
+
+     Args:
+         in_channels (int): The channel number of the input tensor of the Conv1d layer.
+         out_channels (int): The channel number of the output tensor of the Conv1d layer.
+         kernel_size (int): Specifies the width of the 1D convolution kernel.
+         stride (int, optional): The movement stride of the 1D convolution kernel. Default: ``1`` .
+         pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+             ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"same"`` .
+
+             - ``"same"``: Pad the input at the beginning and end so that the shape of input and output
+               are the same when `stride` is set to ``1``.
+               The amount of padding is calculated by the operator internally. If the amount is even, it is
+               uniformly distributed around the input; if it is odd, the excess padding goes to the right side.
+               If this mode is set, `padding` must be 0.
+             - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+               possible length. Extra pixels that could not complete a full stride will
+               be discarded. If this mode is set, `padding` must be 0.
+             - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+               at the beginning and end is determined by the `padding` parameter.
+               If this mode is set, `padding` must be greater than or equal to 0.
+
+         padding (Union(int, tuple[int], list[int]), optional): Specifies the amount of padding to apply on
+             both sides of `input` when `pad_mode` is set to ``"pad"``. The
+             paddings of left and right are the same, equal to padding or padding[0] when padding is a tuple of
+             1 integer. Default: ``0`` .
+         dilation (Union(int, tuple[int]), optional): Specifies the dilation rate to use for dilated convolution.
+             It can be a single int or a tuple of 1 integer.
+             Assuming :math:`dilation=(d0,)`, the convolutional kernel samples the input with a
+             spacing of :math:`d0-1` elements in the width direction.
+             The value should be in the range [1, L].
+             Default: ``1`` .
+         group (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
+             divisible by `group`. Default: ``1`` .
+         has_bias (bool, optional): Whether the Conv1d layer has a bias parameter. Default: ``False`` .
+         weight_init (Union[Tensor, str, Initializer, numbers.Number], optional):
+             Initialization method of weight parameter.
+             It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
+             values from ``'TruncatedNormal'`` , ``'Normal'`` , ``'Uniform'`` , ``'HeUniform'`` and ``'XavierUniform'``
+             distributions as well as constant 'One' and 'Zero' distributions are possible. Alias ``'xavier_uniform'`` ,
+             ``'he_uniform'`` , ``'ones'`` and ``'zeros'`` are acceptable. Uppercase and lowercase are both acceptable.
+             Refer to the values of
+             `Initializer <https://www.mindspore.cn/docs/en/master/api_python/mindspore.common.initializer.html>`_,
+             for more details. Default: ``None`` , weight will be initialized using ``'HeUniform'``.
+         bias_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initialization method of bias parameter.
+             Available initialization methods are the same as 'weight_init'. Refer to the values of
+             `Initializer <https://www.mindspore.cn/docs/en/master/api_python/mindspore.common.initializer.html>`_,
+             for more details. Default: ``None`` , bias will be initialized using ``'Uniform'``.
+         dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
+
+     Inputs:
+         - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})` .
+
+     Outputs:
+         Tensor of shape :math:`(N, C_{out}, L_{out})`.
+
+         pad_mode is ``'same'``:
+
+         .. math::
+             L_{out} = \left \lceil{\frac{L_{in}}{\text{stride}}} \right \rceil
+
+         pad_mode is ``'valid'``:
+
+         .. math::
+             L_{out} = \left \lceil{\frac{L_{in} - \text{dilation} \times (\text{kernel_size} - 1) }
+             {\text{stride}}} \right \rceil
+
+         pad_mode is ``'pad'``:
+
+         .. math::
+             L_{out} = \left \lfloor{\frac{L_{in} + 2 \times padding - (\text{kernel_size} - 1) \times
+             \text{dilation} - 1 }{\text{stride}} + 1} \right \rfloor
+
+     Raises:
+         TypeError: If `in_channels`, `out_channels`, `kernel_size`, `stride`, `padding` or `dilation` is not an int.
+         ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
+         ValueError: If `padding` is less than 0.
+         ValueError: If `pad_mode` is not one of 'same', 'valid', 'pad'.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore
+         >>> from mindspore import Tensor, nn
+         >>> import numpy as np
+         >>> net = nn.Conv1d(120, 240, 4, has_bias=False, weight_init='normal')
+         >>> x = Tensor(np.ones([1, 120, 640]), mindspore.float32)
+         >>> output = net(x).shape
+         >>> print(output)
+         (1, 240, 640)
+     """
+
+     @cell_attr_register
+     def __init__(self,
+                  in_channels,
+                  out_channels,
+                  kernel_size,
+                  stride=1,
+                  pad_mode='same',
+                  padding=0,
+                  dilation=1,
+                  group=1,
+                  has_bias=False,
+                  weight_init=None,
+                  bias_init=None,
+                  dtype=mstype.float32):
+         """Initialize Conv1d."""
+         Validator.check_value_type("kernel_size", kernel_size, [int], self.cls_name)
+         Validator.check_value_type("stride", stride, [int], self.cls_name)
+         Validator.check_value_type("padding", padding, [int], self.cls_name)
+         Validator.check_value_type("dilation", dilation, [int], self.cls_name)
+         Validator.check_int(kernel_size, 1, Validator.GE, 'kernel_size', self.cls_name)
+         Validator.check_int(stride, 1, Validator.GE, 'stride', self.cls_name)
+         Validator.check_non_negative_int(padding, 'padding', self.cls_name)
+         Validator.check_int(dilation, 1, Validator.GE, 'dilation', self.cls_name)
+         Validator.check_positive_int(group, 'group', self.cls_name)
+         if not (in_channels % group == 0 and out_channels % group == 0):
+             raise ValueError(f"The arguments 'in_channels' and 'out_channels' should both be divisible " \
+                              f"by 'group', but got group:{group}, in_channels:{in_channels}, " \
+                              f"out_channels:{out_channels}.")
+         kernel_size = (1, kernel_size)
+         stride = (1, stride)
+         dilation = (1, dilation)
+         get_shape = P.Shape()
+         get_dtype = P.DType()
+         if isinstance(weight_init, Tensor):
+             weight_init_shape = get_shape(weight_init)
+             Validator.check_equal_int(len(weight_init_shape), 3, 'weight_init_shape', self.cls_name)
+             weight_init_dtype = get_dtype(weight_init)
+             weight_init_value = weight_init.asnumpy()
+             weight_init_value = np.expand_dims(weight_init_value, 2)
+             weight_init = Tensor(weight_init_value, weight_init_dtype)
+
+         super(Conv1d, self).__init__(
+             in_channels,
+             out_channels,
+             kernel_size,
+             stride,
+             pad_mode,
+             padding,
+             dilation,
+             group,
+             has_bias,
+             weight_init,
+             bias_init,
+             dtype=dtype)
+         self.padding = (0, 0, padding, padding)
+         Validator.check_string(pad_mode, ['valid', 'same', 'pad'], 'pad_mode', self.cls_name)
+         self.conv2d = P.Conv2D(out_channel=self.out_channels,
+                                kernel_size=self.kernel_size,
+                                mode=1,
+                                pad_mode=self.pad_mode,
+                                pad=self.padding,
+                                stride=self.stride,
+                                dilation=self.dilation,
+                                group=self.group)
+         self.bias_add = P.BiasAdd()
+         self.expand_dims = P.ExpandDims()
+         self.squeeze = P.Squeeze(2)
+         self.shape = P.Shape()
+
+     def construct(self, x):
+         x = self.expand_dims(x, 2)
+         output = self.conv2d(x, self.weight)
+         if self.has_bias:
+             output = self.bias_add(output, self.bias)
+
+         output = self.squeeze(output)
+         return output
+
+
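Note the implementation trick in `Conv1d` above: there is no dedicated 1D kernel. The input of shape :math:`(N, C, L)` gets a height axis of size 1 inserted (`expand_dims(x, 2)`), a `P.Conv2D` with a :math:`(1, k)` kernel runs over it, and the height axis is squeezed back out. A hedged shape check under pad_mode='pad' (an editor's illustration; the expected output follows from the docstring formula rather than from running this particular build):

import mindspore
import numpy as np
from mindspore import Tensor, nn

# padding=1 with kernel 3, stride 1 and dilation 1 preserves the length:
# (10 + 2*1 - (3 - 1) - 1) // 1 + 1 = 10
net = nn.Conv1d(8, 16, 3, pad_mode='pad', padding=1)
x = Tensor(np.ones([2, 8, 10]), mindspore.float32)
print(net(x).shape)  # expected (2, 16, 10)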
592
+ class Conv3d(_Conv):
593
+ r"""
594
+ 3D convolution layer.
595
+
596
+ Applies a 3D convolution over an input tensor which is typically of shape
597
+ :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`, where :math:`N` is batch size, :math:`C` is channel number,
598
+ :math:`D, H, W` are the depth, height and width of the feature map, respectively.
599
+
600
+ The output is calculated based on formula:
601
+
602
+ .. math::
603
+
604
+ \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
605
+ \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
606
+
607
+ where :math:`bias` is the output channel bias, :math:`ccor` is
608
+ the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
609
+ :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
610
+
611
+ Here are the indices' meanings:
612
+
613
+ - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
614
+ where :math:`N` is the batch size of the input.
615
+
616
+ - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
617
+ where :math:`C_{out}` is the number of
618
+ output channels, which is also equal to the number of kernels.
619
+
620
+ - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
621
+ where :math:`C_{in}` is the number of
622
+ input channels, which is also equal to the number of channels in the convolutional kernels.
623
+
624
+ Therefore, in the above formula, :math:`{bias}(C_{out_j})` represents the bias of the :math:`j`-th
625
+ output channel, :math:`{weight}(C_{out_j}, k)` represents the slice of the :math:`j`-th convolutional
626
+ kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
627
+ channel in the :math:`i`-th batch of the input feature map.
628
+
629
+ The shape of the convolutional kernel is given by
630
+ :math:`(\text{kernel_size[0]}, \text{kernel_size[1]}, \text{kernel_size[2]})`
631
+ where :math:`\text{kernel_size[0]}` , :math:`\text{kernel_size[1]}` and :math:`\text{kernel_size[2]}` are the depth,
632
+ height and width of the kernel, respectively.
633
+ If we consider the input and output channels as well as the `group` parameter, the complete kernel shape
634
+ will be :math:`(C_{out}, C_{in} / \text{group}, \text{kernel_size[0]},
635
+ \text{kernel_size[1]}, \text{kernel_size[2]})`,
636
+ where `group` is the number of groups dividing `x`'s input channel when applying group convolution.
637
+
638
+ For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
639
+ <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
640
+
641
+ Note:
642
+ On Ascend platform, only group convolution in depthwise convolution scenarios is supported.
643
+ That is, when `group>1`, condition `in\_channels` = `out\_channels` = `group` must be satisfied.
644
+
645
+ Args:
646
+ in_channels (int): The channel number of the input tensor of the Conv3d layer.
647
+ out_channels (int): The channel number of the output tensor of the Conv3d layer.
648
+ kernel_size (Union[int, tuple[int]]): Specifies the depth, height and width of the 3D convolution kernel.
649
+ It can be a single int or a tuple of 3 integers. A single int means the value is for depth, height
650
+ and the width. A tuple of 3 ints means the first value is
651
+ for depth and the rest is for the height and width.
652
+ stride (Union[int, tuple[int]], optional): The movement stride of the 3D convolution kernel.
653
+ The data type is an integer or a tuple of three integers. An integer represents the movement step size
654
+ in depth, height and width directions. A tuple of three integers represents the movement step size
655
+ in the depth, height and width directions respectively. Default: ``1`` .
656
+ pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
657
+ ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"same"`` .
658
+
659
+ - ``"same"``: Pad the input around its depth/height/width dimension so that the shape of input and output
660
+ are the same when `stride` is set to ``1``.
661
+ The amount of padding is calculated internally by the operator. If the amount is even,
662
+ it is uniformly distributed around the input; if it is odd, the excess amount goes
663
+ to the front/right/bottom side.
664
+ If this mode is set, `padding` must be 0.
665
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
666
+ possible depth, height and width. Extra pixels that could not complete a full stride will
667
+ be discarded. If this mode is set, `padding` must be 0.
668
+ - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
669
+ in the depth, height and width dimension is determined by the `padding` parameter.
670
+ If this mode is set, `padding` must be greater than or equal to 0.
671
+
672
+ padding (Union(int, tuple[int]), optional): The number of padding on the depth,
673
+ height and width directions of the input.
674
+ The data type is an integer or a tuple of six integers. If `padding` is an integer,
675
+ then the head, tail, top, bottom, left, and right padding are all equal to `padding`.
676
+ If `padding` is a tuple of six integers, then the head, tail, top, bottom, left, and right padding
677
+ is equal to `padding[0]`, `padding[1]`, `padding[2]`, `padding[3]`, `padding[4]` and `padding[5]`
678
+ respectively. The value should be greater than or equal to 0. Default: ``0`` .
679
+ dilation (Union[int, tuple[int]], optional): Specifies the dilation rate to use for dilated convolution.
680
+ It can be a single int or a tuple of 3 integers. A single int means the dilation size is the same
681
+ in the depth, height and width directions. A tuple of 3 ints represents the dilation size in
682
+ the depth, height and width directions, respectively.
683
+ Assuming :math:`dilation=(d0, d1, d2)`, the convolutional kernel samples the input with a
684
+ spacing of :math:`d0-1` elements in the depth direction, :math:`d1-1` elements in the height direction,
685
+ :math:`d2-1` elements in the width direction respectively.
686
+ The values in the depth, height and width dimensions are in
687
+ the ranges [1, D], [1, H] and [1, W], respectively.
688
+ Default: ``1`` .
689
+ group (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
690
+ divisible by `group`. Default: ``1`` .
691
+ has_bias (bool, optional): Whether the Conv3d layer has a bias parameter. Default: ``False`` .
692
+ weight_init (Union[Tensor, str, Initializer, numbers.Number], optional):
693
+ Initialization method of weight parameter.
694
+ It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
695
+ values from ``'TruncatedNormal'`` , ``'Normal'`` , ``'Uniform'`` , ``'HeUniform'`` and ``'XavierUniform'``
696
+ distributions as well as constant ``'One'`` and ``'Zero'`` distributions are possible. Alias
697
+ ``'xavier_uniform'`` , ``'he_uniform'`` , ``'ones'`` and ``'zeros'`` are acceptable. Uppercase and
698
+ lowercase are both acceptable. Refer to the values of
699
+ `Initializer <https://www.mindspore.cn/docs/en/master/api_python/mindspore.common.initializer.html>`_,
700
+ for more details. Default: ``None`` , weight will be initialized using ``'HeUniform'``.
701
+ bias_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initialization method of bias parameter.
702
+ Available initialization methods are the same as 'weight_init'. Refer to the values of
703
+ `Initializer <https://www.mindspore.cn/docs/en/master/api_python/mindspore.common.initializer.html>`_,
704
+ for more details. Default: ``None`` , bias will be initialized using ``'Uniform'`` .
705
+ data_format (str, optional): The optional value for data format. Currently only ``'NCDHW'`` is supported. Default: ``'NCDHW'`` .
706
+ dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
707
+
708
+
709
+ Inputs:
710
+ - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`.
711
+ Currently, the input data type supports float16 and float32 on CPU and GPU, and only float16 on Ascend.
712
+
713
+ Outputs:
714
+ Tensor of shape :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`.
715
+
716
+ pad_mode is ``'same'`` :
717
+
718
+ .. math::
719
+ \begin{array}{ll} \\
720
+ D_{out} = \left \lceil{\frac{D_{in}}{\text{stride[0]}}} \right \rceil \\
721
+ H_{out} = \left \lceil{\frac{H_{in}}{\text{stride[1]}}} \right \rceil \\
722
+ W_{out} = \left \lceil{\frac{W_{in}}{\text{stride[2]}}} \right \rceil \\
723
+ \end{array}
724
+
725
+
726
+ pad_mode is ``'valid'`` :
727
+
728
+ .. math::
729
+ \begin{array}{ll} \\
730
+ D_{out} = \left \lfloor{\frac{D_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) - 1 }
731
+ {\text{stride[0]}} + 1} \right \rfloor \\
732
+ H_{out} = \left \lfloor{\frac{H_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) - 1 }
733
+ {\text{stride[1]}} + 1} \right \rfloor \\
734
+ W_{out} = \left \lfloor{\frac{W_{in} - \text{dilation[2]} \times (\text{kernel_size[2]} - 1) - 1 }
735
+ {\text{stride[2]}} + 1} \right \rfloor \\
736
+ \end{array}
737
+
738
+ pad_mode is ``'pad'`` :
739
+
740
+ .. math::
741
+ \begin{array}{ll} \\
742
+ D_{out} = \left \lfloor{\frac{D_{in} + padding[0] + padding[1] - \text{dilation[0]} \times
743
+ (\text{kernel_size[0]} - 1) - 1 }{\text{stride[0]}} + 1} \right \rfloor \\
744
+ H_{out} = \left \lfloor{\frac{H_{in} + padding[2] + padding[3] - \text{dilation[1]} \times
745
+ (\text{kernel_size[1]} - 1) - 1 }{\text{stride[1]}} + 1} \right \rfloor \\
746
+ W_{out} = \left \lfloor{\frac{W_{in} + padding[4] + padding[5] - \text{dilation[2]} \times
747
+ (\text{kernel_size[2]} - 1) - 1 }{\text{stride[2]}} + 1} \right \rfloor \\
748
+ \end{array}
749
+
750
+ Raises:
751
+ TypeError: If `in_channels`, `out_channels` or `group` is not an int.
752
+ TypeError: If `kernel_size`, `stride`, `padding` or `dilation` is neither an int nor a tuple.
753
+ ValueError: If `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
754
+ ValueError: If `padding` is less than 0.
755
+ ValueError: If `pad_mode` is not one of 'same', 'valid', 'pad'.
756
+ ValueError: If `padding` is a tuple whose length is not equal to 6.
757
+ ValueError: If `pad_mode` is not equal to 'pad' and `padding` is not equal to (0, 0, 0, 0, 0, 0).
758
+ ValueError: If `data_format` is not 'NCDHW'.
759
+
760
+ Supported Platforms:
761
+ ``Ascend`` ``GPU`` ``CPU``
762
+
763
+ Examples:
764
+ >>> import mindspore
765
+ >>> from mindspore import Tensor, nn
766
+ >>> import numpy as np
767
+ >>> x = Tensor(np.ones([16, 3, 10, 32, 32]), mindspore.float32)
768
+ >>> conv3d = nn.Conv3d(in_channels=3, out_channels=32, kernel_size=(4, 3, 3))
769
+ >>> output = conv3d(x)
770
+ >>> print(output.shape)
771
+ (16, 32, 10, 32, 32)
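+
+ A further sketch with ``pad_mode='pad'`` (the layer arguments here are
+ illustrative; the expected shape follows the ``'pad'`` formulas above):
+
+ >>> conv3d_pad = nn.Conv3d(in_channels=3, out_channels=32, kernel_size=3,
+ ...                        stride=2, pad_mode='pad', padding=1)
+ >>> output = conv3d_pad(x)
+ >>> print(output.shape)
+ (16, 32, 5, 16, 16)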
772
+ """
773
+
774
+ @cell_attr_register
775
+ def __init__(self,
776
+ in_channels,
777
+ out_channels,
778
+ kernel_size,
779
+ stride=1,
780
+ pad_mode='same',
781
+ padding=0,
782
+ dilation=1,
783
+ group=1,
784
+ has_bias=False,
785
+ weight_init=None,
786
+ bias_init=None,
787
+ data_format='NCDHW',
788
+ dtype=mstype.float32):
789
+ """Initialize Conv3d."""
790
+ if not (in_channels % group == 0 and out_channels % group == 0):
791
+ raise ValueError("The argument 'group' should be divisible by 'in_channels' " \
792
+ "and 'out_channels'")
793
+
794
+ kernel_size = _check_3d_int_or_tuple("kernel_size", kernel_size, self.cls_name)
795
+ stride = _check_3d_int_or_tuple("stride", stride, self.cls_name)
796
+ dilation = _check_3d_int_or_tuple("dilation", dilation, self.cls_name)
797
+ Validator.check_value_type('padding', padding, (int, tuple), self.cls_name)
798
+ if isinstance(padding, tuple):
799
+ Validator.check_equal_int(len(padding), 6, 'padding size', self.cls_name)
800
+ super(Conv3d, self).__init__(
801
+ in_channels,
802
+ out_channels,
803
+ kernel_size,
804
+ stride,
805
+ pad_mode,
806
+ padding,
807
+ dilation,
808
+ group,
809
+ has_bias,
810
+ weight_init,
811
+ bias_init,
812
+ data_format,
813
+ dtype=dtype)
814
+ out_channels = self.out_channels // group
815
+ self.conv3d = P.Conv3D(out_channel=out_channels,
816
+ kernel_size=self.kernel_size,
817
+ mode=1,
818
+ pad_mode=self.pad_mode,
819
+ pad=self.padding,
820
+ stride=self.stride,
821
+ dilation=self.dilation,
822
+ group=1,
823
+ data_format=self.data_format)
824
+ self.bias_add = P.BiasAdd(data_format=self.data_format)
825
+ self.shape = P.Shape()
826
+ self.concat = P.Concat(1)
827
+ self.split_0 = P.Split(0, self.group)
828
+ self.split_1 = P.Split(1, self.group)
829
+
830
+ def construct(self, x):
831
+ if self.group == 1:
832
+ out = self.conv3d(x, self.weight)
833
+ if self.has_bias:
834
+ out = self.bias_add(out, self.bias)
835
+ else:
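+ # Group convolution: split the input along the channel axis and the kernels
+ # along the output-channel axis, convolve each pair with a group=1 Conv3D,
+ # then concatenate the per-group outputs back along the channel axis.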
836
+ features = self.split_1(x)
837
+ weights = self.split_0(self.weight)
838
+ outputs = ()
839
+ for i in range(self.group):
840
+ output = self.conv3d(features[i], weights[i])
841
+ outputs = outputs + (output,)
842
+ out = self.concat(outputs)
843
+ if self.bias is not None:
844
+ new_shape = [1 for _ in range(out.ndim)]
845
+ new_shape[1] = self.out_channels
846
+ out = out + self.bias.reshape(new_shape)
847
+ return out
848
+
849
+
850
+ class Conv3dTranspose(_Conv):
851
+ r"""
852
+ Calculates a 3D transposed convolution, which can be regarded as Conv3d for the gradient of the input.
853
+ It is also called deconvolution (although it is not an actual deconvolution).
854
+
855
+ The input is typically of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`,
856
+ where :math:`N` is batch size, :math:`C_{in}` is the number of
857
+ channels, :math:`D_{in}, H_{in}, W_{in}` are the depth, height and width of the feature layer respectively.
858
+
859
+ When Conv3d and Conv3dTranspose are initialized with the same parameters, and `pad_mode` is set to 'pad',
860
+ :math:`dilation * (kernel\_size - 1) - padding` zeros will be padded to the depth, height and width
861
+ directions of the input, so they are inverses of each other in regard to the input and output shapes in this case.
862
+ However, when `stride` > 1, Conv3d maps multiple input shapes to the same output shape. For deconvolutional networks,
863
+ refer to `Deconvolutional Networks <https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf>`_.
864
+
865
+ Args:
866
+ in_channels (int): The channel number of the input tensor of the Conv3dTranspose layer.
867
+ out_channels (int): The channel number of the output tensor of the Conv3dTranspose layer.
868
+ kernel_size (Union[int, tuple[int]]): Specifies the depth, height and width of the 3D convolution kernel.
869
+ The data type is an integer or a tuple of three integers. An integer represents the depth, height
870
+ and width of the convolution kernel. A tuple of three integers represents the depth, height
871
+ and width of the convolution kernel respectively.
872
+ stride (Union[int, tuple[int]]): The movement stride of the 3D convolution kernel.
873
+ The data type is an integer or a tuple of three integers. An integer represents the movement step size
874
+ in depth, height and width directions. A tuple of three integers represents the movement step size
875
+ in the depth, height and width directions respectively. Default: ``1`` .
876
+ pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
877
+ ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"same"`` .
878
+
879
+ - ``"same"``: Pad the input around its depth/height/width dimension so that the shape of input and output
880
+ are the same when `stride` is set to ``1``.
881
+ The amount of padding is calculated internally by the operator. If the amount is even,
882
+ it is uniformly distributed around the input; if it is odd, the excess amount goes
883
+ to the front/right/bottom side.
884
+ If this mode is set, `padding` must be 0.
885
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
886
+ possible depth, height and width. Extra pixels that could not complete a full stride will
887
+ be discarded. If this mode is set, `padding` must be 0.
888
+ - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
889
+ in the depth, height and width dimension is determined by the `padding` parameter.
890
+ If this mode is set, `padding` must be greater than or equal to 0.
891
+
892
+ padding (Union(int, tuple[int])): The number of padding on the depth, height and width directions of the input.
893
+ The data type is an integer or a tuple of six integers. If `padding` is an integer,
894
+ then the head, tail, top, bottom, left, and right padding are all equal to `padding`.
895
+ If `padding` is a tuple of six integers, then the head, tail, top, bottom, left, and right padding
896
+ is equal to `padding[0]`, `padding[1]`, `padding[2]`, `padding[3]`, `padding[4]` and `padding[5]`
897
+ respectively. The value should be greater than or equal to 0. Default: ``0`` .
898
+ dilation (Union[int, tuple[int]]): Specifies the dilation rate to use for dilated convolution. The data type
899
+ can be a single int or a tuple of 3 integers. A single int means the dilation size is the same in the
900
+ depth, height and width directions. A tuple of 3 ints represents the dilation size in the depth, height
901
+ and width directions, respectively.
902
+ Assuming :math:`dilation=(d0, d1, d2)`, the convolutional kernel samples the input with a
903
+ spacing of :math:`d0-1` elements in the depth direction, :math:`d1-1` elements in the height direction,
904
+ :math:`d2-1` elements in the width direction respectively.
905
+ The values in the depth, height and width dimensions are in
906
+ the ranges [1, D], [1, H] and [1, W], respectively.
907
+ Default: ``1`` .
908
+ group (int): Splits filter into groups, `in_channels` and `out_channels` must be
909
+ divisible by `group`. Default: ``1`` .
910
+ output_padding (Union(int, tuple[int])): The number of padding on the depth, height and width directions of
911
+ the output. The data type is an integer or a tuple of three integers. If `output_padding` is an integer,
912
+ then the depth, height, and width dimension padding are all equal to `output_padding`.
913
+ If `output_padding` is a tuple of three integers, then the depth, height, and width padding is equal to
914
+ `output_padding[0]`, `output_padding[1]` and `output_padding[2]` respectively.
915
+ The value should be greater than or equal to 0.
916
+ Default: ``0`` .
917
+ has_bias (bool): Whether the Conv3dTranspose layer has a bias parameter. Default: ``False`` .
918
+ weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of weight parameter.
919
+ It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
920
+ values from ``'TruncatedNormal'`` , ``'Normal'`` , ``'Uniform'`` , ``'HeUniform'`` and ``'XavierUniform'``
921
+ distributions as well as constant ``'One'`` and ``'Zero'`` distributions are possible. Alias
922
+ ``'xavier_uniform'`` , ``'he_uniform'`` , ``'ones'`` and ``'zeros'`` are acceptable. Uppercase and
923
+ lowercase are both acceptable. Refer to the values of Initializer for more details. Default: ``None`` ,
924
+ weight will be initialized using HeUniform.
925
+ bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of bias parameter.
926
+ Available initialization methods are the same as 'weight_init'. Refer to the values of
927
+ Initializer for more details. Default: ``None`` , bias will be initialized using Uniform.
928
+ data_format (str): The optional value for data format. Currently only support ``'NCDHW'`` .
929
+ Default: ``'NCDHW'`` .
930
+ dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
931
+
932
+ Inputs:
933
+ - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`.
934
+ Currently, the input data type only supports float16 and float32.
935
+
936
+ Outputs:
937
+ Tensor, the shape is :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`.
938
+
939
+ pad_mode is ``'same'`` :
940
+
941
+ .. math::
942
+ \begin{array}{ll} \\
943
+ D_{out} = D_{in} \times \text{stride[0]} \\
944
+ H_{out} = H_{in} \times \text{stride[1]} \\
945
+ W_{out} = W_{in} \times \text{stride[2]} \\
946
+ \end{array}
947
+
948
+
949
+ pad_mode is ``'valid'`` :
950
+
951
+ .. math::
952
+ \begin{array}{ll} \\
953
+ D_{out} = (D_{in} - 1) \times \text{stride[0]} + \text{dilation[0]} \times
954
+ (\text{kernel_size[0]} - 1) + 1 \\
955
+ H_{out} = (H_{in} - 1) \times \text{stride[1]} + \text{dilation[1]} \times
956
+ (\text{kernel_size[1]} - 1) + 1 \\
957
+ W_{out} = (W_{in} - 1) \times \text{stride[2]} + \text{dilation[2]} \times
958
+ (\text{kernel_size[2]} - 1) + 1 \\
959
+ \end{array}
960
+
961
+ pad_mode is ``'pad'`` :
962
+
963
+ .. math::
964
+ \begin{array}{ll} \\
965
+ D_{out} = (D_{in} - 1) \times \text{stride[0]} - (padding[0] + padding[1]) + \text{dilation[0]} \times
966
+ (\text{kernel_size[0]} - 1) + \text{output_padding[0]} + 1 \\
967
+ H_{out} = (H_{in} - 1) \times \text{stride[1]} - (padding[2] + padding[3]) + \text{dilation[1]} \times
968
+ (\text{kernel_size[1]} - 1) + \text{output_padding[1]} + 1 \\
969
+ W_{out} = (W_{in} - 1) \times \text{stride[2]} - (padding[4] + padding[5]) + \text{dilation[2]} \times
970
+ (\text{kernel_size[2]} - 1) + \text{output_padding[2]} + 1 \\
971
+ \end{array}
972
+
973
+ Raises:
974
+ TypeError: If `in_channels`, `out_channels` or `group` is not an int.
975
+ TypeError: If `kernel_size`, `stride`, `padding` , `dilation` or `output_padding`
976
+ is neither an int nor a tuple of three ints.
977
+ TypeError: If input data type is not float16 or float32.
978
+ ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
979
+ ValueError: If `padding` is less than 0.
980
+ ValueError: If `pad_mode` is not one of 'same', 'valid', 'pad'.
981
+ ValueError: If `padding` is a tuple whose length is not equal to 6.
982
+ ValueError: If `pad_mode` is not equal to 'pad' and `padding` is not equal to (0, 0, 0, 0, 0, 0).
983
+ ValueError: If `data_format` is not 'NCDHW'.
984
+
985
+ Supported Platforms:
986
+ ``Ascend`` ``GPU`` ``CPU``
987
+
988
+ Examples:
989
+ >>> import mindspore
990
+ >>> from mindspore import Tensor, nn
991
+ >>> import numpy as np
992
+ >>> x = Tensor(np.ones([32, 16, 10, 32, 32]), mindspore.float32)
993
+ >>> conv3d_transpose = nn.Conv3dTranspose(in_channels=16, out_channels=3, kernel_size=(4, 6, 2),
994
+ ... pad_mode='pad')
995
+ >>> output = conv3d_transpose(x)
996
+ >>> print(output.shape)
997
+ (32, 3, 13, 37, 33)
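+
+ As a check against the ``'pad'`` formulas above (with ``stride=1``, ``padding=0``,
+ ``dilation=1`` and ``output_padding=0``):
+ :math:`D_{out} = (10 - 1) \times 1 + 1 \times (4 - 1) + 1 = 13`, and similarly
+ :math:`H_{out} = 31 + 5 + 1 = 37`, :math:`W_{out} = 31 + 1 + 1 = 33`.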
998
+ """
999
+
1000
+ def __init__(self,
1001
+ in_channels,
1002
+ out_channels,
1003
+ kernel_size,
1004
+ stride=1,
1005
+ pad_mode="same",
1006
+ padding=0,
1007
+ dilation=1,
1008
+ group=1,
1009
+ output_padding=0,
1010
+ has_bias=False,
1011
+ weight_init=None,
1012
+ bias_init=None,
1013
+ data_format='NCDHW',
1014
+ dtype=mstype.float32):
1015
+ """Initialize Conv3dTranspose."""
1016
+ if not (in_channels % group == 0 and out_channels % group == 0):
1017
+ raise ValueError("The argument 'group' should be divisible by 'in_channels' " \
1018
+ "and 'out_channels'")
1019
+
1020
+ kernel_size = _check_3d_int_or_tuple("kernel_size", kernel_size, self.cls_name)
1021
+ stride = _check_3d_int_or_tuple("stride", stride, self.cls_name)
1022
+ dilation = _check_3d_int_or_tuple("dilation", dilation, self.cls_name)
1023
+ Validator.check_value_type('padding', padding, (int, tuple), self.cls_name)
1024
+ if isinstance(padding, tuple):
1025
+ Validator.check_equal_int(len(padding), 6, 'padding size', self.cls_name)
1026
+ self.output_padding = _check_3d_int_or_tuple("output_padding", output_padding, self.cls_name,
1027
+ greater_zero=False)
1028
+ super(Conv3dTranspose, self).__init__(
1029
+ in_channels,
1030
+ out_channels,
1031
+ kernel_size,
1032
+ stride,
1033
+ pad_mode,
1034
+ padding,
1035
+ dilation,
1036
+ group,
1037
+ has_bias,
1038
+ weight_init,
1039
+ bias_init,
1040
+ data_format,
1041
+ transposed=True,
1042
+ dtype=dtype)
1043
+ self.conv3d_transpose = P.Conv3DTranspose(in_channel=self.in_channels,
1044
+ out_channel=self.out_channels,
1045
+ kernel_size=self.kernel_size,
1046
+ mode=1,
1047
+ pad_mode=self.pad_mode,
1048
+ pad=self.padding,
1049
+ stride=self.stride,
1050
+ dilation=self.dilation,
1051
+ group=self.group,
1052
+ output_padding=self.output_padding,
1053
+ data_format=self.data_format)
1054
+ self.bias_add = P.BiasAdd(data_format=self.data_format)
1055
+ self.shape = P.Shape()
1056
+
1057
+ def construct(self, x):
1058
+ output = self.conv3d_transpose(x, self.weight)
1059
+ if self.has_bias:
1060
+ output = self.bias_add(output, self.bias)
1061
+ return output
1062
+
1063
+
1064
+ def _deconv_output_length(is_valid, is_same, is_pad, input_length, filter_size, stride_size, dilation_size, padding):
1065
+ """Calculate the width and height of output."""
1066
+ length = 0
1067
+ filter_size = filter_size + (filter_size - 1) * (dilation_size - 1)
1068
+ if is_valid:
1069
+ if filter_size - stride_size > 0:
1070
+ length = input_length * stride_size + filter_size - stride_size
1071
+ else:
1072
+ length = input_length * stride_size
1073
+ elif is_same:
1074
+ length = input_length * stride_size
1075
+ elif is_pad:
1076
+ length = input_length * stride_size - padding + filter_size - stride_size
1077
+
1078
+ return length
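+
+ # Illustrative traces of _deconv_output_length (a sketch; the numbers match
+ # the Conv2dTranspose example below, where kernel=4, stride=1, dilation=1):
+ #   effective filter size: 4 + (4 - 1) * (1 - 1) = 4
+ #   'pad'  : 16 * 1 - 0 + 4 - 1 = 19   (the example's H_out)
+ #   'same' : 16 * 1 = 16
+ #   'valid': 16 * 1 + (4 - 1) = 19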
1079
+
1080
+
1081
+ class Conv2dTranspose(_Conv):
1082
+ r"""
1083
+ Calculates a 2D transposed convolution, which can be regarded as Conv2d for the gradient of the input,
1084
+ also called deconvolution (although it is not an actual deconvolution).
1085
+
1086
+ The input is typically of shape :math:`(N, C_{in}, H_{in}, W_{in})`,
1087
+ where :math:`N` is batch size, :math:`C_{in}` is the number of channels,
1088
+ :math:`H_{in}, W_{in}` are the height and width of the feature layer respectively.
1089
+
1090
+ When Conv2d and Conv2dTranspose are initialized with the same parameters, and `pad_mode` is set to 'pad',
1091
+ :math:`dilation * (kernel\_size - 1) - padding` zeros will be padded to the height and width
1092
+ directions of the input, so they are inverses of each other in regard to the input and output shapes in this case.
1093
+ However, when `stride` > 1, Conv2d maps multiple input shapes to the same output shape. For deconvolutional networks,
1094
+ refer to `Deconvolutional Networks <https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf>`_.
1095
+
1096
+ Args:
1097
+ in_channels (int): The channel number of the input tensor of the Conv2dTranspose layer.
1098
+ out_channels (int): The channel number of the output tensor of the Conv2dTranspose layer.
1099
+ kernel_size (Union[int, tuple[int]]): Specifies the height and width of the 2D convolution kernel.
1100
+ The data type is an integer or a tuple of two integers. An integer represents the height
1101
+ and width of the convolution kernel. A tuple of two integers represents the height
1102
+ and width of the convolution kernel respectively.
1103
+ stride (Union[int, tuple[int]]): The movement stride of the 2D convolution kernel.
1104
+ The data type is an integer or a tuple of two integers. An integer represents the movement step size
1105
+ in both height and width directions. A tuple of two integers represents the movement step size in the height
1106
+ and width directions respectively. Default: ``1`` .
1107
+ pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
1108
+ ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"same"`` .
1109
+
1110
+ - ``"same"``: Pad the input around its edges so that the shape of input and output
1111
+ are the same when `stride` is set to ``1``.
1112
+ The amount of padding is calculated internally by the operator. If the amount is even, it is
1113
+ uniformly distributed around the input; if it is odd, the excess amount goes to the right/bottom side.
1114
+ If this mode is set, `padding` must be 0.
1115
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
1116
+ possible height and width. Extra pixels that could not complete a full stride will
1117
+ be discarded. If this mode is set, `padding` must be 0.
1118
+ - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
1119
+ in the height and width directions is determined by the `padding` parameter.
1120
+ If this mode is set, `padding` must be greater than or equal to 0.
1121
+
1122
+ padding (Union[int, tuple[int]]): The number of padding on the height and width directions of the input.
1123
+ The data type is an integer or a tuple of four integers. If `padding` is an integer,
1124
+ then the top, bottom, left, and right padding are all equal to `padding`.
1125
+ If `padding` is a tuple of 4 integers, then the top, bottom, left, and right padding
1126
+ is equal to `padding[0]`, `padding[1]`, `padding[2]`, and `padding[3]` respectively.
1127
+ The value should be greater than or equal to 0. Default: ``0`` .
1128
+ output_padding (Union[int, tuple[int]]): The number of padding on the height and width directions of the output.
1129
+ The data type is an integer or a tuple of two integers. If `output_padding` is an integer,
1130
+ then the bottom and right padding are all equal to `output_padding`. If `output_padding` is a tuple of
1131
+ 2 integers, then the bottom and right padding is equal to `output_padding[0]`, `output_padding[1]`
1132
+ respectively. If `output_padding` is not equal to 0, `pad_mode` must be `pad`.
1133
+ The value should be in range of `[0, max(stride, dilation))` . Default: ``0`` .
1134
+ dilation (Union[int, tuple[int]]): Dilation size of 2D convolution kernel.
1135
+ It can be a single int or a tuple of 2 integers. A single int means the dilation size is the same
1136
+ in both the height and width directions. A tuple of two ints represents the dilation size in
1137
+ the height and width directions, respectively.
1138
+ Assuming :math:`dilation=(d0, d1)`, the convolutional kernel samples the input with a
1139
+ spacing of :math:`d0-1` elements in the height direction and :math:`d1-1` elements in the width direction.
1140
+ The values in the height and width dimensions are in the ranges [1, H] and [1, W], respectively.
1141
+ Default: ``1`` .
1142
+ group (int): Splits filter into groups, `in_channels` and `out_channels` must be divisible by `group`.
1143
+ Default: ``1`` .
1144
+ has_bias (bool): Whether the Conv2dTranspose layer has a bias parameter. Default: ``False`` .
1145
+ weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of weight parameter.
1146
+ It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
1147
+ values from ``'TruncatedNormal'`` , ``'Normal'`` , ``'Uniform'`` , ``'HeUniform'`` and ``'XavierUniform'``
1148
+ distributions as well as constant ``'One'`` and ``'Zero'`` distributions are possible. Alias
1149
+ ``'xavier_uniform'`` , ``'he_uniform'`` , ``'ones'`` and ``'zeros'`` are acceptable. Uppercase and
1150
+ lowercase are both acceptable. Refer to the values of Initializer for more details. Default: ``None`` ,
1151
+ weight will be initialized using HeUniform.
1152
+ bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of bias parameter.
1153
+ Available initialization methods are the same as 'weight_init'. Refer to the values of
1154
+ Initializer for more details. Default: ``None`` , bias will be initialized using Uniform.
1155
+ dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
1156
+
1157
+ Inputs:
1158
+ - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
1159
+
1160
+ Outputs:
1161
+ Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
1162
+
1163
+ pad_mode is ``'same'``:
1164
+
1165
+ .. math::
1166
+ \begin{array}{ll} \\
1167
+ H_{out} = H_{in} \times \text{stride[0]} \\
1168
+ W_{out} = W_{in} \times \text{stride[1]} \\
1169
+ \end{array}
1170
+
1171
+ pad_mode is ``'valid'``:
1172
+
1173
+ .. math::
1174
+ \begin{array}{ll} \\
1175
+ H_{out} = H_{in} \times \text{stride[0]} + \max\{\text{kernel_size[0]} + (\text{dilation[0]} - 1) \times
1176
+ (\text{kernel_size[0]} - 1) - \text{stride[0]}, 0 \} \\
1177
+ W_{out} = W_{in} \times \text{stride[1]} + \max\{\text{kernel_size[1]} + (\text{dilation[1]} - 1) \times
1178
+ (\text{kernel_size[1]} - 1) - \text{stride[1]}, 0 \} \\
1179
+ \end{array}
1180
+
1181
+ pad_mode is ``'pad'``:
1182
+
1183
+ .. math::
1184
+ \begin{array}{ll} \\
1185
+ H_{out} = H_{in} \times \text{stride[0]} - (padding[0] + padding[1])
1186
+ + \text{kernel_size[0]} + (\text{dilation[0]} - 1) \times
1187
+ (\text{kernel_size[0]} - 1) - \text{stride[0]} + \text{output_padding[0]} \\
1188
+ W_{out} = W_{in} \times \text{stride[1]} - (padding[2] + padding[3])
1189
+ + \text{kernel_size[1]} + (\text{dilation[1]} - 1) \times
1190
+ (\text{kernel_size[1]} - 1) - \text{stride[1]} + \text{output_padding[1]} \\
1191
+ \end{array}
1192
+
1193
+ Raises:
1194
+ TypeError: If `in_channels`, `out_channels` or `group` is not an int.
1195
+ TypeError: If `kernel_size`, `stride`, `padding` or `dilation` is neither an int nor a tuple.
1196
+ ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
1197
+ ValueError: If `padding` is less than 0.
1198
+ ValueError: If `pad_mode` is not one of 'same', 'valid', 'pad'.
1199
+ ValueError: If `padding` is a tuple whose length is not equal to 4.
1200
+ ValueError: If `pad_mode` is not equal to 'pad' and `padding` is not equal to (0, 0, 0, 0).
1201
+
1202
+ Supported Platforms:
1203
+ ``Ascend`` ``GPU`` ``CPU``
1204
+
1205
+ Examples:
1206
+ >>> import mindspore
1207
+ >>> from mindspore import Tensor, nn
1208
+ >>> import numpy as np
1209
+ >>> net = nn.Conv2dTranspose(3, 64, 4, has_bias=False, weight_init='normal', pad_mode='pad')
1210
+ >>> x = Tensor(np.ones([1, 3, 16, 50]), mindspore.float32)
1211
+ >>> output = net(x).shape
1212
+ >>> print(output)
1213
+ (1, 64, 19, 53)
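+
+ As a check against the ``'pad'`` formulas above (with ``stride=1``, ``padding=0``,
+ ``dilation=1`` and ``output_padding=0``):
+ :math:`H_{out} = 16 \times 1 + 4 - 1 = 19` and :math:`W_{out} = 50 \times 1 + 4 - 1 = 53`.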
1214
+ """
1215
+
1216
+ def __init__(self,
1217
+ in_channels,
1218
+ out_channels,
1219
+ kernel_size,
1220
+ stride=1,
1221
+ pad_mode='same',
1222
+ padding=0,
1223
+ output_padding=0,
1224
+ dilation=1,
1225
+ group=1,
1226
+ has_bias=False,
1227
+ weight_init=None,
1228
+ bias_init=None,
1229
+ dtype=mstype.float32):
1230
+ """Initialize Conv2dTranspose."""
1231
+ kernel_size = twice(kernel_size)
1232
+ stride = twice(stride)
1233
+ dilation = twice(dilation)
1234
+ Validator.check_value_type('padding', padding, (int, tuple), self.cls_name)
1235
+ if isinstance(padding, tuple):
1236
+ Validator.check_equal_int(len(padding), 4, 'padding size', self.cls_name)
1237
+ Validator.check_value_type('output_padding', output_padding, (int, tuple), self.cls_name)
1238
+ if isinstance(output_padding, tuple):
1239
+ Validator.check_equal_int(len(output_padding), 2, 'output_padding size', self.cls_name)
1240
+ # out_channels and in_channels swap.
1241
+ # because Conv2DBackpropInput's out_channel refers to Conv2D's out_channel,
1242
+ # then Conv2dTranspose's out_channel refers to Conv2DBackpropInput's in_channel.
1243
+ super(Conv2dTranspose, self).__init__(
1244
+ in_channels,
1245
+ out_channels,
1246
+ kernel_size,
1247
+ stride,
1248
+ pad_mode,
1249
+ padding,
1250
+ dilation,
1251
+ group,
1252
+ has_bias,
1253
+ weight_init,
1254
+ bias_init,
1255
+ transposed=True,
1256
+ dtype=dtype)
1257
+
1258
+ self.in_channels = in_channels
1259
+ self.out_channels = out_channels
1260
+ self.shape = P.Shape()
1261
+ Validator.check_string(pad_mode, ['valid', 'same', 'pad'], 'pad_mode', self.cls_name)
1262
+ self.is_valid = self.pad_mode == 'valid'
1263
+ self.is_same = self.pad_mode == 'same'
1264
+ self.is_pad = self.pad_mode == 'pad'
1265
+ self.output_padding = output_padding
1266
+
1267
+ # because Conv2DTranspose's out_channel refers to Conv2D's out_channel.
1268
+ self.conv2d_transpose = P.Conv2DTranspose(out_channel=in_channels,
1269
+ kernel_size=kernel_size,
1270
+ mode=1,
1271
+ pad_mode=pad_mode,
1272
+ pad=padding,
1273
+ stride=stride,
1274
+ dilation=dilation,
1275
+ group=group)
1276
+ self.bias_add = P.BiasAdd()
1277
+ if isinstance(self.padding, int):
1278
+ self.padding_top, self.padding_bottom, self.padding_left, self.padding_right = (self.padding,) * 4
1279
+ else:
1280
+ self.padding_top, self.padding_bottom, self.padding_left, self.padding_right = self.padding
1281
+
1282
+ def shard(self, strategy):
1283
+ self.conv2d_transpose.shard(strategy)
1284
+ return self
1285
+
1286
+ def construct(self, x):
1287
+ n, _, h, w = self.shape(x)
1288
+ h_out = _deconv_output_length(self.is_valid, self.is_same, self.is_pad, h, self.kernel_size[0],
1289
+ self.stride[0], self.dilation[0], self.padding_top + self.padding_bottom)
1290
+ w_out = _deconv_output_length(self.is_valid, self.is_same, self.is_pad, w, self.kernel_size[1],
1291
+ self.stride[1], self.dilation[1], self.padding_left + self.padding_right)
1292
+ conv2d_trans_ret = self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out))
1293
+ if self.has_bias:
1294
+ conv2d_trans_ret = self.bias_add(conv2d_trans_ret, self.bias)
1295
+ if isinstance(self.output_padding, tuple):
1296
+ if self.output_padding[0] < 0 or self.output_padding[0] >= max(self.dilation[0], self.stride[0]):
1297
+ raise ValueError("output_padding[0] must be in range of [0, max(stride_h, dilation_h)).")
1298
+ if self.output_padding[1] < 0 or self.output_padding[1] >= max(self.dilation[1], self.stride[1]):
1299
+ raise ValueError("output_padding[1] must be in range of [0, max(stride_w, dilation_w)).")
1300
+ if not self.is_pad and (self.output_padding[0] > 0 or self.output_padding[1] > 0):
1301
+ raise ValueError("when output_padding is not zero, pad_mode must be 'pad'")
1302
+
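+ # output_padding is applied by padding extra rows/columns onto the
+ # bottom/right of the result, consistent with the 'pad' formulas above.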
1303
+ pad = P.Pad(paddings=((0, 0), (0, 0), (0, self.output_padding[0]), (0, self.output_padding[1])))
1304
+ return pad(conv2d_trans_ret)
1305
+
1306
+ if self.output_padding == 0:
1307
+ return conv2d_trans_ret
1308
+
1309
+ if self.output_padding < 0 or self.output_padding >= max(self.dilation[0], self.stride[0]):
1310
+ raise ValueError("output_padding must be in range of [0, max(stride_h, dilation_h)).")
1311
+ if self.output_padding < 0 or self.output_padding >= max(self.dilation[1], self.stride[1]):
1312
+ raise ValueError("output_padding must be in range of [0, max(stride_w, dilation_w)).")
1313
+ if not self.is_pad and self.output_padding > 0:
1314
+ raise ValueError("when output_padding is not zero, pad_mode must be 'pad'")
1315
+ pad = P.Pad(paddings=((0, 0), (0, 0), (0, self.output_padding), (0, self.output_padding)))
1316
+ return pad(conv2d_trans_ret)
1317
+
1318
+
1319
+ @_primexpr
1320
+ def _check_input_3d(input_shape, op_name):
1321
+ if len(input_shape) != 3:
1322
+ raise ValueError(f"For '{op_name}', the dimension of input must be 3d, but got {len(input_shape)}.")
1323
+
1324
+
1325
+ class Conv1dTranspose(_Conv):
1326
+ r"""
1327
+ Calculates a 1D transposed convolution, which can be regarded as Conv1d for the gradient of the input,
1328
+ also called deconvolution (although it is not an actual deconvolution).
1329
+
1330
+ The input is typically of shape :math:`(N, C_{in}, L_{in})`, where :math:`N` is batch size,
1331
+ :math:`C_{in}` is the number of channels
1333
+ and :math:`L_{in}` is the length of the sequence.
1333
+
1334
+ When Conv1d and Conv1dTranspose are initialized with the same parameters, and `pad_mode` is set to 'pad',
1335
+ :math:`dilation * (kernel\_size - 1) - padding` zeros will be padded to both sides of the input,
1336
+ so they are inverses of each other in regard to the input and output shapes in this case.
1337
+ However, when `stride` > 1, Conv1d maps multiple input shapes to the same output shape. For deconvolutional networks,
1338
+ refer to `Deconvolutional Networks <https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf>`_.
1339
+
1340
+ Args:
1341
+ in_channels (int): The channel number of the input tensor of the Conv1dTranspose layer.
1342
+ out_channels (int): The channel number of the output tensor of the Conv1dTranspose layer.
1343
+ kernel_size (int): Specifies the width of the 1D convolution kernel.
1344
+ stride (int): The movement stride of the 1D convolution kernel. Default: ``1`` .
1345
+ pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
1346
+ ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"same"`` .
1347
+
1348
+ - ``"same"``: Pad the input at the begin and end so that the shape of input and output
1349
+ are the same when `stride` is set to ``1``.
1350
+ The amount of padding is calculated internally by the operator. If the amount is even, it is
1351
+ uniformly distributed around the input; if it is odd, the excess padding goes to the right side.
1352
+ If this mode is set, `padding` must be 0.
1353
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
1354
+ possible length. Extra pixels that could not complete a full stride will
1355
+ be discarded. If this mode is set, `padding` must be 0.
1356
+ - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
1357
+ at the begin and end is determined by the `padding` parameter.
1358
+ If this mode is set, `padding` must be greater than or equal to 0.
1359
+
1360
+ padding (int): The number of padding on both sides of input.
1361
+ The value should be greater than or equal to 0. Default: ``0`` .
1362
+ dilation (int): Dilation size of the 1D convolution kernel. If :math:`d > 1`, the kernel is sampled
1363
+ every :math:`d` elements. The value of :math:`d` is in the range of [1, L]. Default: ``1`` .
1364
+ group (int): Splits filter into groups, `in_channels` and `out_channels` must be
1365
+ divisible by `group`. When `group` > 1, the Ascend platform is not supported yet. Default: ``1`` .
1366
+ has_bias (bool): Whether the Conv1dTranspose layer has a bias parameter. Default: ``False``.
1367
+ weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of weight parameter.
1368
+ It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
1369
+ values from ``'TruncatedNormal'`` , ``'Normal'`` , ``'Uniform'`` , ``'HeUniform'`` and ``'XavierUniform'``
1370
+ distributions as well as constant ``'One'`` and ``'Zero'`` distributions are possible. Alias
1371
+ ``'xavier_uniform'`` , ``'he_uniform'``, ``'ones'`` and ``'zeros'`` are acceptable. Uppercase and lowercase
1372
+ are both acceptable. Refer to the values of Initializer for more details. Default: ``None`` ,
1373
+ weight will be initialized using HeUniform.
1374
+ bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initialization method of bias parameter.
1375
+ Available initialization methods are the same as 'weight_init'. Refer to the values of
1376
+ Initializer for more details. Default: ``None`` , bias will be initialized using Uniform.
1377
+ dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
1378
+
1379
+ Inputs:
1380
+ - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})`.
1381
+
1382
+ Outputs:
1383
+ Tensor of shape :math:`(N, C_{out}, L_{out})`.
1384
+
1385
+ pad_mode is ``'same'``: :math:`L_{out} = L_{in} \times \text{stride}`
1386
+
1387
+ pad_mode is ``'valid'``:
1388
+ :math:`L_{out} = (L_{in} - 1) \times \text{stride} + \text{dilation} \times (\text{kernel_size} - 1) + 1`
1389
+
1390
+ pad_mode is ``'pad'``:
1391
+ :math:`L_{out} = (L_{in} - 1) \times \text{stride} - 2 \times \text{padding}
1392
+ + \text{dilation} \times (\text{kernel_size} - 1) + 1`
1393
+
1394
+ Raises:
1395
+ TypeError: If `in_channels`, `out_channels`, `kernel_size`, `stride`, `padding` or `dilation` is not an int.
1396
+ ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
1397
+ ValueError: If `padding` is less than 0.
1398
+ ValueError: If `pad_mode` is not one of 'same', 'valid', 'pad'.
1399
+
1400
+ Supported Platforms:
1401
+ ``Ascend`` ``GPU`` ``CPU``
1402
+
1403
+ Examples:
1404
+ >>> import mindspore
1405
+ >>> from mindspore import Tensor, nn
1406
+ >>> import numpy as np
1407
+ >>> net = nn.Conv1dTranspose(3, 64, 4, has_bias=False, weight_init='normal', pad_mode='pad')
1408
+ >>> x = Tensor(np.ones([1, 3, 50]), mindspore.float32)
1409
+ >>> output = net(x).shape
1410
+ >>> print(output)
1411
+ (1, 64, 53)
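+
+ As a check against the ``'pad'`` formula above:
+ :math:`L_{out} = (50 - 1) \times 1 - 2 \times 0 + 1 \times (4 - 1) + 1 = 53`.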
1412
+ """
1413
+
1414
+ def __init__(self,
1415
+ in_channels,
1416
+ out_channels,
1417
+ kernel_size,
1418
+ stride=1,
1419
+ pad_mode='same',
1420
+ padding=0,
1421
+ dilation=1,
1422
+ group=1,
1423
+ has_bias=False,
1424
+ weight_init=None,
1425
+ bias_init=None,
1426
+ dtype=mstype.float32):
1427
+ """Initialize Conv1dTranspose."""
1428
+ Validator.check_value_type("kernel_size", kernel_size, [int], self.cls_name)
1429
+ Validator.check_value_type("stride", stride, [int], self.cls_name)
1430
+ Validator.check_value_type("padding", padding, [int], self.cls_name)
1431
+ Validator.check_value_type("dilation", dilation, [int], self.cls_name)
1432
+ Validator.check_int(kernel_size, 1, Validator.GE, 'kernel_size', self.cls_name)
1433
+ Validator.check_int(stride, 1, Validator.GE, 'stride', self.cls_name)
1434
+ Validator.check_non_negative_int(padding, 'padding', self.cls_name)
1435
+ Validator.check_int(dilation, 1, Validator.GE, 'dilation', self.cls_name)
1436
+ kernel_size = (1, kernel_size)
1437
+ stride = (1, stride)
1438
+ dilation = (1, dilation)
1439
+ get_shape = P.Shape()
1440
+ get_dtype = P.DType()
1441
+ if isinstance(weight_init, Tensor):
1442
+ weight_init_shape = get_shape(weight_init)
1443
+ Validator.check_equal_int(len(weight_init_shape), 3, 'weight_init_shape', self.cls_name)
1444
+ weight_init_dtype = get_dtype(weight_init)
1445
+ weight_init_value = weight_init.asnumpy()
1446
+ weight_init_value = np.expand_dims(weight_init_value, 2)
1447
+ weight_init = Tensor(weight_init_value, weight_init_dtype)
1448
+ # out_channels and in_channels swap.
1449
+ # because Conv2DBackpropInput's out_channel refers to Conv2D's out_channel,
1450
+ # then Conv1dTranspose's out_channel refers to Conv2DBackpropInput's in_channel.
1451
+ super(Conv1dTranspose, self).__init__(
1452
+ in_channels,
1453
+ out_channels,
1454
+ kernel_size,
1455
+ stride,
1456
+ pad_mode,
1457
+ padding,
1458
+ dilation,
1459
+ group,
1460
+ has_bias,
1461
+ weight_init,
1462
+ bias_init,
1463
+ transposed=True,
1464
+ dtype=dtype)
1465
+ self.padding = (0, 0, padding, padding)
1466
+ self.in_channels = in_channels
1467
+ self.out_channels = out_channels
1468
+ self.shape = P.Shape()
1469
+ Validator.check_string(pad_mode, ['valid', 'same', 'pad'], 'pad_mode', self.cls_name)
1470
+ self.is_valid = self.pad_mode == 'valid'
1471
+ self.is_same = self.pad_mode == 'same'
1472
+ self.is_pad = self.pad_mode == 'pad'
1473
+
1474
+ # because Conv2DBackpropInput's out_channel refers to Conv2D's out_channel.
1475
+ self.conv2d_transpose = P.Conv2DBackpropInput(out_channel=in_channels,
1476
+ kernel_size=kernel_size,
1477
+ mode=1,
1478
+ pad_mode=pad_mode,
1479
+ pad=self.padding,
1480
+ stride=stride,
1481
+ dilation=dilation,
1482
+ group=group)
1483
+ self.bias_add = P.BiasAdd()
1484
+ self.expand_dims = P.ExpandDims()
1485
+ self.squeeze = P.Squeeze(2)
1486
+
1487
+ def shard(self, strategy):
1488
+ self.conv2d_transpose.shard(strategy)
1489
+ return self
1490
+
1491
+ def construct(self, x):
1492
+ x_shape = self.shape(x)
1493
+ _check_input_3d(x_shape, self.cls_name)
1494
+ x = self.expand_dims(x, 2)
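+ # Compute the 1D transposed convolution as a 2D one over a (N, C, 1, L)
+ # view of the input; the dummy axis is squeezed out below.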
1495
+ n, _, h, w = self.shape(x)
1496
+ h_out = _deconv_output_length(self.is_valid, self.is_same, self.is_pad, h, self.kernel_size[0],
1497
+ self.stride[0], self.dilation[0], self.padding[0] + self.padding[1])
1498
+ w_out = _deconv_output_length(self.is_valid, self.is_same, self.is_pad, w, self.kernel_size[1],
1499
+ self.stride[1], self.dilation[1], self.padding[2] + self.padding[3])
1500
+ output = self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out))
1501
+ if self.has_bias:
1502
+ output = self.bias_add(output, self.bias)
1503
+
1504
+ output = self.squeeze(output)
1505
+ return output