mindspore-2.4.10-cp310-cp310-win_amd64.whl → mindspore-2.6.0rc1-cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (602)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +13 -6
  5. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  8. mindspore/_check_jit_forbidden_api.py +3 -0
  9. mindspore/_checkparam.py +3 -38
  10. mindspore/_deprecated/__init__.py +17 -0
  11. mindspore/_deprecated/jit.py +198 -0
  12. mindspore/_extends/builtin_operations.py +1 -1
  13. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  14. mindspore/_extends/parse/__init__.py +6 -7
  15. mindspore/_extends/parse/compile_config.py +83 -0
  16. mindspore/_extends/parse/deprecated/__init__.py +0 -0
  17. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +394 -0
  18. mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
  19. mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
  20. mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
  21. mindspore/_extends/parse/parser.py +46 -197
  22. mindspore/_extends/parse/resources.py +1 -5
  23. mindspore/_extends/parse/standard_method.py +217 -98
  24. mindspore/_extends/pijit/__init__.py +2 -2
  25. mindspore/_extends/pijit/pijit_func_white_list.py +17 -12
  26. mindspore/_extends/pijit/tensor_func_list.py +27 -0
  27. mindspore/_extends/utils.py +1 -1
  28. mindspore/amp.py +11 -5
  29. mindspore/atlprov.dll +0 -0
  30. mindspore/avcodec-59.dll +0 -0
  31. mindspore/avdevice-59.dll +0 -0
  32. mindspore/avfilter-8.dll +0 -0
  33. mindspore/avformat-59.dll +0 -0
  34. mindspore/avutil-57.dll +0 -0
  35. mindspore/boost/__init__.py +2 -2
  36. mindspore/boost/base.py +3 -7
  37. mindspore/boost/boost_cell_wrapper.py +138 -43
  38. mindspore/c1.dll +0 -0
  39. mindspore/c1xx.dll +0 -0
  40. mindspore/c2.dll +0 -0
  41. mindspore/common/__init__.py +6 -3
  42. mindspore/common/_grad_function.py +56 -0
  43. mindspore/common/_pijit_context.py +14 -5
  44. mindspore/common/_register_for_tensor.py +1 -2
  45. mindspore/common/_stub_tensor.py +30 -14
  46. mindspore/common/_tensor_cpp_method.py +17 -0
  47. mindspore/common/_tensor_docs.py +4760 -0
  48. mindspore/common/api.py +435 -371
  49. mindspore/common/auto_dynamic_shape.py +41 -44
  50. mindspore/common/dtype.py +39 -36
  51. mindspore/common/dump.py +9 -6
  52. mindspore/common/file_system.py +9 -1
  53. mindspore/common/generator.py +2 -0
  54. mindspore/common/hook_handle.py +6 -2
  55. mindspore/common/initializer.py +13 -10
  56. mindspore/common/jit_begin_end.py +94 -0
  57. mindspore/common/jit_config.py +6 -1
  58. mindspore/common/jit_context.py +76 -0
  59. mindspore/common/jit_trace.py +378 -0
  60. mindspore/common/lazy_inline.py +9 -3
  61. mindspore/common/mindir_util.py +10 -2
  62. mindspore/common/mutable.py +5 -4
  63. mindspore/common/parameter.py +135 -52
  64. mindspore/common/seed.py +2 -2
  65. mindspore/common/sparse_tensor.py +23 -17
  66. mindspore/common/tensor.py +951 -1992
  67. mindspore/communication/__init__.py +7 -5
  68. mindspore/communication/_comm_helper.py +52 -2
  69. mindspore/communication/comm_func.py +240 -181
  70. mindspore/communication/management.py +95 -26
  71. mindspore/context.py +314 -566
  72. mindspore/dataset/__init__.py +65 -37
  73. mindspore/dataset/audio/__init__.py +2 -8
  74. mindspore/dataset/audio/transforms.py +3 -17
  75. mindspore/dataset/callback/ds_callback.py +2 -1
  76. mindspore/dataset/core/config.py +87 -6
  77. mindspore/dataset/engine/cache_admin.py +3 -3
  78. mindspore/dataset/engine/cache_client.py +6 -5
  79. mindspore/dataset/engine/datasets.py +292 -267
  80. mindspore/dataset/engine/datasets_audio.py +22 -8
  81. mindspore/dataset/engine/datasets_standard_format.py +46 -27
  82. mindspore/dataset/engine/datasets_text.py +78 -48
  83. mindspore/dataset/engine/datasets_user_defined.py +182 -116
  84. mindspore/dataset/engine/datasets_vision.py +120 -44
  85. mindspore/dataset/engine/iterators.py +283 -63
  86. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
  87. mindspore/dataset/engine/obs/util.py +8 -0
  88. mindspore/dataset/engine/queue.py +40 -0
  89. mindspore/dataset/engine/samplers.py +289 -43
  90. mindspore/dataset/engine/serializer_deserializer.py +3 -2
  91. mindspore/dataset/engine/validators.py +53 -11
  92. mindspore/dataset/text/__init__.py +7 -6
  93. mindspore/dataset/text/transforms.py +6 -5
  94. mindspore/dataset/text/utils.py +3 -3
  95. mindspore/dataset/transforms/__init__.py +0 -9
  96. mindspore/dataset/transforms/py_transforms_util.py +17 -0
  97. mindspore/dataset/transforms/transforms.py +31 -14
  98. mindspore/dataset/utils/browse_dataset.py +1 -1
  99. mindspore/dataset/vision/__init__.py +2 -9
  100. mindspore/dataset/vision/transforms.py +202 -158
  101. mindspore/dataset/vision/utils.py +7 -5
  102. mindspore/dataset/vision/validators.py +1 -2
  103. mindspore/device_context/__init__.py +21 -0
  104. mindspore/device_context/ascend/__init__.py +25 -0
  105. mindspore/device_context/ascend/device.py +72 -0
  106. mindspore/device_context/ascend/op_debug.py +153 -0
  107. mindspore/device_context/ascend/op_precision.py +193 -0
  108. mindspore/device_context/ascend/op_tuning.py +123 -0
  109. mindspore/{ops_generate/gen_constants.py → device_context/cpu/__init__.py} +6 -17
  110. mindspore/device_context/cpu/device.py +62 -0
  111. mindspore/device_context/cpu/op_tuning.py +43 -0
  112. mindspore/device_context/gpu/__init__.py +21 -0
  113. mindspore/device_context/gpu/device.py +70 -0
  114. mindspore/device_context/gpu/op_precision.py +67 -0
  115. mindspore/device_context/gpu/op_tuning.py +175 -0
  116. mindspore/device_manager.py +170 -0
  117. mindspore/dnnl.dll +0 -0
  118. mindspore/dpcmi.dll +0 -0
  119. mindspore/experimental/es/embedding_service.py +35 -27
  120. mindspore/experimental/llm_boost/__init__.py +1 -0
  121. mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
  122. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
  123. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
  124. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  125. mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
  126. mindspore/experimental/llm_boost/register.py +1 -0
  127. mindspore/experimental/map_parameter.py +4 -4
  128. mindspore/experimental/optim/adadelta.py +6 -6
  129. mindspore/experimental/optim/adagrad.py +4 -4
  130. mindspore/experimental/optim/adam.py +7 -0
  131. mindspore/experimental/optim/adamax.py +4 -4
  132. mindspore/experimental/optim/adamw.py +4 -0
  133. mindspore/experimental/optim/asgd.py +1 -1
  134. mindspore/experimental/optim/lr_scheduler.py +73 -46
  135. mindspore/experimental/optim/radam.py +34 -31
  136. mindspore/experimental/optim/rprop.py +1 -1
  137. mindspore/experimental/optim/sgd.py +1 -1
  138. mindspore/hal/contiguous_tensors_handle.py +6 -10
  139. mindspore/hal/device.py +55 -53
  140. mindspore/hal/event.py +52 -52
  141. mindspore/hal/memory.py +157 -117
  142. mindspore/hal/stream.py +150 -109
  143. mindspore/include/api/context.h +0 -1
  144. mindspore/include/dataset/constants.h +7 -4
  145. mindspore/include/dataset/execute.h +2 -2
  146. mindspore/jpeg62.dll +0 -0
  147. mindspore/log.py +50 -0
  148. mindspore/mindrecord/__init__.py +21 -8
  149. mindspore/mindrecord/config.py +17 -316
  150. mindspore/mindrecord/filereader.py +1 -9
  151. mindspore/mindrecord/filewriter.py +5 -15
  152. mindspore/mindrecord/mindpage.py +1 -9
  153. mindspore/mindspore_backend_common.dll +0 -0
  154. mindspore/mindspore_backend_manager.dll +0 -0
  155. mindspore/mindspore_common.dll +0 -0
  156. mindspore/mindspore_core.dll +0 -0
  157. mindspore/mindspore_dump.dll +0 -0
  158. mindspore/mindspore_frontend.dll +0 -0
  159. mindspore/mindspore_glog.dll +0 -0
  160. mindspore/mindspore_memory_pool.dll +0 -0
  161. mindspore/mindspore_ms_backend.dll +0 -0
  162. mindspore/mindspore_ops.dll +0 -0
  163. mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
  164. mindspore/mindspore_ops_kernel_common.dll +0 -0
  165. mindspore/mindspore_profiler.dll +0 -0
  166. mindspore/mindspore_pyboost.dll +0 -0
  167. mindspore/mindspore_pynative.dll +0 -0
  168. mindspore/mindspore_res_manager.dll +0 -0
  169. mindspore/mindspore_runtime_pipeline.dll +0 -0
  170. mindspore/mint/__init__.py +796 -759
  171. mindspore/mint/distributed/__init__.py +70 -4
  172. mindspore/mint/distributed/distributed.py +2679 -44
  173. mindspore/mint/linalg/__init__.py +8 -0
  174. mindspore/mint/nn/__init__.py +743 -22
  175. mindspore/mint/nn/functional.py +716 -23
  176. mindspore/mint/nn/layer/__init__.py +21 -4
  177. mindspore/mint/nn/layer/_functions.py +334 -0
  178. mindspore/mint/nn/layer/activation.py +276 -1
  179. mindspore/mint/nn/layer/basic.py +123 -0
  180. mindspore/mint/nn/layer/conv.py +921 -0
  181. mindspore/mint/nn/layer/normalization.py +223 -28
  182. mindspore/mint/nn/layer/padding.py +797 -0
  183. mindspore/mint/nn/layer/pooling.py +235 -0
  184. mindspore/mint/optim/__init__.py +3 -1
  185. mindspore/mint/optim/adam.py +223 -0
  186. mindspore/mint/optim/adamw.py +26 -19
  187. mindspore/mint/optim/sgd.py +171 -0
  188. mindspore/mint/special/__init__.py +2 -1
  189. mindspore/msobj140.dll +0 -0
  190. mindspore/mspdb140.dll +0 -0
  191. mindspore/mspdbcore.dll +0 -0
  192. mindspore/mspdbst.dll +0 -0
  193. mindspore/mspft140.dll +0 -0
  194. mindspore/msvcdis140.dll +0 -0
  195. mindspore/msvcp140_1.dll +0 -0
  196. mindspore/msvcp140_2.dll +0 -0
  197. mindspore/msvcp140_atomic_wait.dll +0 -0
  198. mindspore/msvcp140_codecvt_ids.dll +0 -0
  199. mindspore/multiprocessing/__init__.py +5 -0
  200. mindspore/nn/__init__.py +4 -1
  201. mindspore/nn/cell.py +1370 -189
  202. mindspore/nn/dynamic_lr.py +2 -1
  203. mindspore/nn/layer/activation.py +29 -27
  204. mindspore/nn/layer/basic.py +51 -35
  205. mindspore/nn/layer/channel_shuffle.py +3 -3
  206. mindspore/nn/layer/container.py +1 -1
  207. mindspore/nn/layer/conv.py +22 -17
  208. mindspore/nn/layer/embedding.py +12 -11
  209. mindspore/nn/layer/normalization.py +56 -49
  210. mindspore/nn/layer/padding.py +4 -3
  211. mindspore/nn/layer/pooling.py +120 -42
  212. mindspore/nn/layer/rnn_cells.py +1 -1
  213. mindspore/nn/layer/rnns.py +2 -1
  214. mindspore/nn/layer/timedistributed.py +5 -5
  215. mindspore/nn/layer/transformer.py +59 -36
  216. mindspore/nn/learning_rate_schedule.py +8 -4
  217. mindspore/nn/loss/loss.py +58 -55
  218. mindspore/nn/optim/ada_grad.py +7 -5
  219. mindspore/nn/optim/adadelta.py +11 -9
  220. mindspore/nn/optim/adafactor.py +1 -1
  221. mindspore/nn/optim/adam.py +17 -13
  222. mindspore/nn/optim/adamax.py +8 -7
  223. mindspore/nn/optim/adasum.py +5 -5
  224. mindspore/nn/optim/asgd.py +1 -1
  225. mindspore/nn/optim/ftrl.py +11 -9
  226. mindspore/nn/optim/lamb.py +1 -1
  227. mindspore/nn/optim/lars.py +1 -4
  228. mindspore/nn/optim/lazyadam.py +12 -10
  229. mindspore/nn/optim/momentum.py +7 -6
  230. mindspore/nn/optim/optimizer.py +3 -3
  231. mindspore/nn/optim/proximal_ada_grad.py +12 -10
  232. mindspore/nn/optim/rmsprop.py +13 -12
  233. mindspore/nn/optim/rprop.py +11 -9
  234. mindspore/nn/optim/sgd.py +9 -6
  235. mindspore/nn/optim/tft_wrapper.py +5 -2
  236. mindspore/nn/optim/thor.py +2 -1
  237. mindspore/nn/probability/bijector/bijector.py +17 -11
  238. mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
  239. mindspore/nn/probability/bijector/invert.py +2 -2
  240. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  241. mindspore/nn/probability/bijector/softplus.py +3 -2
  242. mindspore/nn/probability/distribution/beta.py +3 -3
  243. mindspore/nn/probability/distribution/categorical.py +1 -1
  244. mindspore/nn/probability/distribution/cauchy.py +4 -2
  245. mindspore/nn/probability/distribution/exponential.py +6 -7
  246. mindspore/nn/probability/distribution/gamma.py +2 -2
  247. mindspore/nn/probability/distribution/gumbel.py +2 -2
  248. mindspore/nn/probability/distribution/half_normal.py +5 -3
  249. mindspore/nn/probability/distribution/logistic.py +5 -3
  250. mindspore/nn/probability/distribution/poisson.py +1 -1
  251. mindspore/nn/probability/distribution/uniform.py +5 -3
  252. mindspore/nn/reinforcement/_tensors_queue.py +1 -1
  253. mindspore/nn/reinforcement/tensor_array.py +1 -1
  254. mindspore/nn/utils/init.py +13 -11
  255. mindspore/nn/wrap/__init__.py +6 -6
  256. mindspore/nn/wrap/cell_wrapper.py +181 -122
  257. mindspore/nn/wrap/grad_reducer.py +45 -36
  258. mindspore/nn/wrap/loss_scale.py +6 -7
  259. mindspore/numpy/array_creations.py +63 -65
  260. mindspore/numpy/array_ops.py +149 -144
  261. mindspore/numpy/logic_ops.py +41 -42
  262. mindspore/numpy/math_ops.py +365 -363
  263. mindspore/numpy/utils.py +17 -18
  264. mindspore/numpy/utils_const.py +5 -6
  265. mindspore/opencv_core452.dll +0 -0
  266. mindspore/opencv_imgcodecs452.dll +0 -0
  267. mindspore/opencv_imgproc452.dll +0 -0
  268. mindspore/ops/__init__.py +5 -3
  269. mindspore/ops/_grad_experimental/grad_comm_ops.py +112 -16
  270. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -2
  271. mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
  272. mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
  273. mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
  274. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  275. mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
  276. mindspore/ops/_register_for_op.py +0 -11
  277. mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
  278. mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -65
  279. mindspore/ops/_vmap/vmap_array_ops.py +27 -25
  280. mindspore/ops/_vmap/vmap_base.py +0 -2
  281. mindspore/ops/_vmap/vmap_grad_nn_ops.py +21 -14
  282. mindspore/ops/_vmap/vmap_math_ops.py +15 -16
  283. mindspore/ops/_vmap/vmap_nn_ops.py +29 -42
  284. mindspore/ops/auto_generate/__init__.py +4 -3
  285. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +236 -46
  286. mindspore/ops/auto_generate/gen_extend_func.py +764 -124
  287. mindspore/ops/auto_generate/gen_ops_def.py +4018 -2264
  288. mindspore/ops/auto_generate/gen_ops_prim.py +15463 -5037
  289. mindspore/ops/auto_generate/pyboost_inner_prim.py +221 -87
  290. mindspore/ops/composite/__init__.py +2 -1
  291. mindspore/ops/composite/base.py +20 -25
  292. mindspore/ops/composite/math_ops.py +6 -16
  293. mindspore/ops/composite/multitype_ops/__init__.py +5 -2
  294. mindspore/ops/composite/multitype_ops/_compile_utils.py +228 -30
  295. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
  296. mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
  297. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  298. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  299. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
  300. mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
  301. mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
  302. mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
  303. mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
  304. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
  305. mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
  306. mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
  307. mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
  308. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
  309. mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
  310. mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
  311. mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
  312. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
  313. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  314. mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
  315. mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
  316. mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
  317. mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
  318. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
  319. mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
  320. mindspore/ops/composite/multitype_ops/pow_impl.py +2 -30
  321. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
  322. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  323. mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
  324. mindspore/ops/function/__init__.py +40 -2
  325. mindspore/ops/function/_add_attr_func.py +58 -0
  326. mindspore/ops/function/array_func.py +2089 -2403
  327. mindspore/ops/function/clip_func.py +80 -23
  328. mindspore/ops/function/debug_func.py +57 -57
  329. mindspore/ops/function/grad/__init__.py +1 -0
  330. mindspore/ops/function/grad/grad_func.py +104 -71
  331. mindspore/ops/function/image_func.py +2 -2
  332. mindspore/ops/function/linalg_func.py +47 -78
  333. mindspore/ops/function/math_func.py +4501 -3802
  334. mindspore/ops/function/nn_func.py +1726 -620
  335. mindspore/ops/function/other_func.py +159 -1
  336. mindspore/ops/function/parameter_func.py +18 -84
  337. mindspore/ops/function/random_func.py +440 -387
  338. mindspore/ops/function/reshard_func.py +4 -70
  339. mindspore/ops/function/sparse_func.py +3 -3
  340. mindspore/ops/function/sparse_unary_func.py +6 -6
  341. mindspore/ops/function/spectral_func.py +25 -58
  342. mindspore/ops/function/vmap_func.py +24 -17
  343. mindspore/ops/functional.py +22 -7
  344. mindspore/ops/functional_overload.py +1440 -0
  345. mindspore/ops/op_info_register.py +32 -244
  346. mindspore/ops/operations/__init__.py +13 -7
  347. mindspore/ops/operations/_custom_ops_utils.py +247 -0
  348. mindspore/ops/operations/_embedding_cache_ops.py +4 -4
  349. mindspore/ops/operations/_grad_ops.py +2 -43
  350. mindspore/ops/operations/_infer_ops.py +2 -1
  351. mindspore/ops/operations/_inner_ops.py +43 -84
  352. mindspore/ops/operations/_ms_kernel.py +4 -10
  353. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  354. mindspore/ops/operations/_scalar_ops.py +3 -2
  355. mindspore/ops/operations/_sequence_ops.py +1 -1
  356. mindspore/ops/operations/_tensor_array.py +1 -1
  357. mindspore/ops/operations/array_ops.py +81 -324
  358. mindspore/ops/operations/comm_ops.py +154 -108
  359. mindspore/ops/operations/custom_ops.py +232 -78
  360. mindspore/ops/operations/debug_ops.py +153 -59
  361. mindspore/ops/operations/inner_ops.py +7 -5
  362. mindspore/ops/operations/linalg_ops.py +1 -57
  363. mindspore/ops/operations/manually_defined/_inner.py +1 -1
  364. mindspore/ops/operations/manually_defined/ops_def.py +928 -180
  365. mindspore/ops/operations/math_ops.py +32 -234
  366. mindspore/ops/operations/nn_ops.py +210 -498
  367. mindspore/ops/operations/other_ops.py +62 -9
  368. mindspore/ops/operations/random_ops.py +13 -7
  369. mindspore/ops/operations/reshard_ops.py +1 -1
  370. mindspore/ops/operations/sparse_ops.py +2 -2
  371. mindspore/ops/primitive.py +66 -53
  372. mindspore/ops/tensor_method.py +1888 -0
  373. mindspore/ops_generate/__init__.py +0 -5
  374. mindspore/ops_generate/aclnn/__init__.py +0 -0
  375. mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +135 -0
  376. mindspore/ops_generate/aclnn/gen_aclnn_implement.py +257 -0
  377. mindspore/ops_generate/api/__init__.py +0 -0
  378. mindspore/ops_generate/api/add_tensor_docs_generator.py +56 -0
  379. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +105 -0
  380. mindspore/ops_generate/api/functional_map_cpp_generator.py +504 -0
  381. mindspore/ops_generate/api/functional_overload_py_generator.py +112 -0
  382. mindspore/ops_generate/api/functions_cc_generator.py +237 -0
  383. mindspore/ops_generate/api/gen_api.py +103 -0
  384. mindspore/ops_generate/api/op_api_proto.py +235 -0
  385. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +461 -0
  386. mindspore/ops_generate/common/__init__.py +0 -0
  387. mindspore/ops_generate/common/base_generator.py +11 -0
  388. mindspore/ops_generate/common/gen_constants.py +91 -0
  389. mindspore/ops_generate/common/gen_utils.py +348 -0
  390. mindspore/ops_generate/common/op_proto.py +473 -0
  391. mindspore/ops_generate/common/template.py +523 -0
  392. mindspore/ops_generate/gen_ops.py +22 -1069
  393. mindspore/ops_generate/op_def/__init__.py +0 -0
  394. mindspore/ops_generate/op_def/gen_op_def.py +90 -0
  395. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +191 -0
  396. mindspore/ops_generate/op_def/ops_def_cc_generator.py +299 -0
  397. mindspore/ops_generate/op_def/ops_def_h_generator.py +74 -0
  398. mindspore/ops_generate/op_def/ops_name_h_generator.py +83 -0
  399. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
  400. mindspore/ops_generate/op_def_py/__init__.py +0 -0
  401. mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
  402. mindspore/ops_generate/op_def_py/op_def_py_generator.py +132 -0
  403. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +489 -0
  404. mindspore/ops_generate/pyboost/__init__.py +0 -0
  405. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +139 -0
  406. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +93 -0
  407. mindspore/ops_generate/pyboost/gen_pyboost_func.py +175 -0
  408. mindspore/ops_generate/pyboost/op_template_parser.py +517 -0
  409. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +407 -0
  410. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +100 -0
  411. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +148 -0
  412. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +155 -0
  413. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +132 -0
  414. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +272 -0
  415. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +938 -0
  416. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +357 -0
  417. mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +179 -36
  418. mindspore/ops_generate/resources/__init__.py +0 -0
  419. mindspore/ops_generate/resources/resource_list.py +30 -0
  420. mindspore/ops_generate/resources/resource_loader.py +36 -0
  421. mindspore/ops_generate/resources/resource_manager.py +64 -0
  422. mindspore/ops_generate/resources/yaml_loader.py +88 -0
  423. mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
  424. mindspore/parallel/__init__.py +7 -3
  425. mindspore/parallel/_auto_parallel_context.py +152 -34
  426. mindspore/parallel/_cell_wrapper.py +130 -15
  427. mindspore/parallel/_parallel_serialization.py +107 -5
  428. mindspore/parallel/_ps_context.py +1 -1
  429. mindspore/parallel/_recovery_context.py +7 -2
  430. mindspore/parallel/_tensor.py +142 -18
  431. mindspore/parallel/_utils.py +199 -23
  432. mindspore/parallel/algo_parameter_config.py +4 -4
  433. mindspore/parallel/auto_parallel.py +732 -0
  434. mindspore/parallel/checkpoint_convert.py +159 -0
  435. mindspore/parallel/checkpoint_transform.py +698 -35
  436. mindspore/parallel/cluster/process_entity/_api.py +276 -50
  437. mindspore/parallel/cluster/process_entity/_utils.py +41 -6
  438. mindspore/parallel/cluster/run.py +21 -4
  439. mindspore/parallel/function/__init__.py +24 -0
  440. mindspore/parallel/function/reshard_func.py +259 -0
  441. mindspore/parallel/nn/__init__.py +25 -0
  442. mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
  443. mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
  444. mindspore/parallel/parameter_broadcast.py +25 -14
  445. mindspore/parallel/shard.py +137 -58
  446. mindspore/parallel/transform_safetensors.py +363 -305
  447. mindspore/pgodb140.dll +0 -0
  448. mindspore/pgort140.dll +0 -0
  449. mindspore/profiler/__init__.py +22 -5
  450. mindspore/profiler/analysis/__init__.py +0 -0
  451. mindspore/profiler/analysis/parser/__init__.py +0 -0
  452. mindspore/profiler/analysis/parser/ascend_cann_parser.py +170 -0
  453. mindspore/profiler/analysis/parser/base_parser.py +158 -0
  454. mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
  455. mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
  456. mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
  457. mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
  458. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +264 -0
  459. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
  460. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +106 -0
  461. mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
  462. mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
  463. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
  464. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
  465. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
  466. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
  467. mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
  468. mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
  469. mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
  470. mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
  471. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +415 -0
  472. mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
  473. mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
  474. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
  475. mindspore/profiler/analysis/task_manager.py +131 -0
  476. mindspore/profiler/analysis/time_converter.py +84 -0
  477. mindspore/profiler/analysis/viewer/__init__.py +0 -0
  478. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +372 -0
  479. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
  480. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +250 -0
  481. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +320 -0
  482. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +327 -0
  483. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +376 -0
  484. mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
  485. mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
  486. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +96 -0
  487. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
  488. mindspore/profiler/analysis/work_flow.py +73 -0
  489. mindspore/profiler/common/ascend_msprof_exporter.py +139 -0
  490. mindspore/profiler/common/command_executor.py +90 -0
  491. mindspore/profiler/common/constant.py +186 -3
  492. mindspore/profiler/common/file_manager.py +208 -0
  493. mindspore/profiler/common/log.py +130 -0
  494. mindspore/profiler/common/msprof_cmd_tool.py +221 -0
  495. mindspore/profiler/common/path_manager.py +395 -0
  496. mindspore/profiler/common/process_bar.py +168 -0
  497. mindspore/profiler/common/process_pool.py +9 -3
  498. mindspore/profiler/common/profiler_context.py +500 -0
  499. mindspore/profiler/common/profiler_info.py +304 -0
  500. mindspore/profiler/common/profiler_meta_data.py +74 -0
  501. mindspore/profiler/common/profiler_output_path.py +284 -0
  502. mindspore/profiler/common/profiler_parameters.py +251 -0
  503. mindspore/profiler/common/profiler_path_manager.py +179 -0
  504. mindspore/profiler/common/record_function.py +76 -0
  505. mindspore/profiler/common/tlv_decoder.py +76 -0
  506. mindspore/profiler/common/util.py +75 -2
  507. mindspore/profiler/dynamic_profiler.py +341 -75
  508. mindspore/profiler/envprofiler.py +163 -0
  509. mindspore/profiler/experimental_config.py +197 -0
  510. mindspore/profiler/mstx.py +242 -0
  511. mindspore/profiler/platform/__init__.py +21 -0
  512. mindspore/profiler/platform/base_profiler.py +40 -0
  513. mindspore/profiler/platform/cpu_profiler.py +124 -0
  514. mindspore/profiler/platform/gpu_profiler.py +74 -0
  515. mindspore/profiler/platform/npu_profiler.py +335 -0
  516. mindspore/profiler/profiler.py +1073 -90
  517. mindspore/profiler/profiler_action_controller.py +187 -0
  518. mindspore/profiler/profiler_interface.py +118 -0
  519. mindspore/profiler/schedule.py +243 -0
  520. mindspore/rewrite/api/node.py +15 -13
  521. mindspore/rewrite/api/symbol_tree.py +2 -3
  522. mindspore/run_check/_check_version.py +27 -20
  523. mindspore/run_check/run_check.py +1 -1
  524. mindspore/runtime/__init__.py +37 -0
  525. mindspore/runtime/device.py +27 -0
  526. mindspore/runtime/event.py +209 -0
  527. mindspore/runtime/executor.py +177 -0
  528. mindspore/runtime/memory.py +409 -0
  529. mindspore/runtime/stream.py +460 -0
  530. mindspore/runtime/thread_bind_core.py +401 -0
  531. mindspore/safeguard/rewrite_obfuscation.py +12 -9
  532. mindspore/swresample-4.dll +0 -0
  533. mindspore/swscale-6.dll +0 -0
  534. mindspore/tbbmalloc.dll +0 -0
  535. mindspore/tinyxml2.dll +0 -0
  536. mindspore/train/__init__.py +8 -8
  537. mindspore/train/_utils.py +88 -25
  538. mindspore/train/amp.py +9 -5
  539. mindspore/train/callback/__init__.py +2 -2
  540. mindspore/train/callback/_callback.py +2 -16
  541. mindspore/train/callback/_checkpoint.py +53 -55
  542. mindspore/train/callback/_cluster_monitor.py +14 -18
  543. mindspore/train/callback/_early_stop.py +1 -1
  544. mindspore/train/callback/_flops_collector.py +103 -68
  545. mindspore/train/callback/_history.py +8 -5
  546. mindspore/train/callback/_lambda_callback.py +2 -2
  547. mindspore/train/callback/_landscape.py +0 -3
  548. mindspore/train/callback/_loss_monitor.py +2 -1
  549. mindspore/train/callback/_on_request_exit.py +6 -5
  550. mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
  551. mindspore/train/callback/_summary_collector.py +52 -19
  552. mindspore/train/callback/_time_monitor.py +2 -1
  553. mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +204 -107
  554. mindspore/train/data_sink.py +25 -2
  555. mindspore/train/dataset_helper.py +15 -16
  556. mindspore/train/loss_scale_manager.py +8 -7
  557. mindspore/train/metrics/accuracy.py +3 -3
  558. mindspore/train/metrics/confusion_matrix.py +9 -9
  559. mindspore/train/metrics/error.py +3 -3
  560. mindspore/train/metrics/hausdorff_distance.py +4 -4
  561. mindspore/train/metrics/mean_surface_distance.py +3 -3
  562. mindspore/train/metrics/metric.py +0 -12
  563. mindspore/train/metrics/occlusion_sensitivity.py +4 -2
  564. mindspore/train/metrics/precision.py +11 -10
  565. mindspore/train/metrics/recall.py +9 -9
  566. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  567. mindspore/train/mind_ir_pb2.py +174 -46
  568. mindspore/train/model.py +184 -113
  569. mindspore/train/serialization.py +622 -978
  570. mindspore/train/summary/_summary_adapter.py +2 -2
  571. mindspore/train/summary/summary_record.py +2 -3
  572. mindspore/train/train_thor/model_thor.py +1 -1
  573. mindspore/turbojpeg.dll +0 -0
  574. mindspore/utils/__init__.py +6 -3
  575. mindspore/utils/dryrun.py +140 -0
  576. mindspore/utils/hooks.py +81 -0
  577. mindspore/utils/runtime_execution_order_check.py +550 -0
  578. mindspore/utils/utils.py +138 -4
  579. mindspore/vcmeta.dll +0 -0
  580. mindspore/vcruntime140.dll +0 -0
  581. mindspore/vcruntime140_1.dll +0 -0
  582. mindspore/version.py +1 -1
  583. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +3 -3
  584. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +587 -418
  585. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +1 -1
  586. mindspore/_install_custom.py +0 -43
  587. mindspore/common/_register_for_adapter.py +0 -74
  588. mindspore/common/_tensor_overload.py +0 -139
  589. mindspore/mindspore_np_dtype.dll +0 -0
  590. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
  591. mindspore/ops/auto_generate/gen_arg_handler.py +0 -197
  592. mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
  593. mindspore/ops_generate/gen_aclnn_implement.py +0 -263
  594. mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
  595. mindspore/ops_generate/gen_pyboost_func.py +0 -1052
  596. mindspore/ops_generate/gen_utils.py +0 -209
  597. mindspore/ops_generate/op_proto.py +0 -145
  598. mindspore/ops_generate/template.py +0 -261
  599. mindspore/profiler/envprofiling.py +0 -254
  600. mindspore/profiler/profiling.py +0 -1926
  601. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
  602. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0
mindspore/mint/nn/layer/conv.py (new file)
@@ -0,0 +1,921 @@
+ # Copyright 2024 Huawei Technologies Co., Ltd
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ============================================================================
+ """conv"""
+ from __future__ import absolute_import
+
+ import math
+
+ from mindspore.ops.auto_generate.gen_ops_prim import (conv1d_ext_op, conv1d_padding_op, conv2d_ext_op,
+                                                       conv2d_padding_op, conv3d_ext_op, conv3d_padding_op)
+ from mindspore.ops.function.nn_func import pad_ext, conv_transpose2d
+ from mindspore.ops.function.array_func import rank
+ import mindspore.common.dtype as mstype
+ from mindspore.common.parameter import Parameter
+ from mindspore.common.initializer import initializer, HeUniform, Uniform, _calculate_fan_in_and_fan_out
+ from mindspore import _checkparam as Validator
+ from mindspore._checkparam import once, twice, triple
+ from mindspore._extends import cell_attr_register
+ from mindspore.nn.cell import Cell
+ from mindspore.ops.functional import isconstant
+
+ __all__ = ['Conv2d', 'ConvTranspose2d', 'Conv3d', 'Conv1d']
+
+
+ class _Conv(Cell):
+     """
+     Applies an N-D convolution over an input signal composed of several input planes.
+     """
+     def __init__(self,
+                  in_channels,
+                  out_channels,
+                  kernel_size,
+                  stride,
+                  padding,
+                  dilation,
+                  transposed,
+                  output_padding,
+                  groups,
+                  bias,
+                  padding_mode,
+                  dtype=mstype.float32):
+         """Initialize _Conv."""
+         super(_Conv, self).__init__()
+         if groups <= 0:
+             raise ValueError('groups must be a positive integer.')
+         self.in_channels = in_channels
+         if self.in_channels % groups != 0:
+             raise ValueError('in_channels must be divisible by groups.')
+         self.out_channels = out_channels
+         if self.out_channels % groups != 0:
+             raise ValueError('out_channels must be divisible by groups.')
+         valid_padding_strings = {'same', 'valid'}
+         self.padding = padding
+         self.stride = stride
+         if isinstance(self.padding, str):
+             if self.padding not in valid_padding_strings:
+                 raise ValueError(f"The value of 'padding' must be one of '{valid_padding_strings}', "
+                                  f"but got {self.padding}.")
+             if self.padding == 'same' and any(s != 1 for s in self.stride):
+                 raise ValueError("padding='same' is not supported for strided convolutions")
+
+         valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
+         if padding_mode not in valid_padding_modes:
+             raise ValueError(f"The value of 'padding_mode' must be one of '{valid_padding_modes}', "
+                              f"but got {padding_mode}.")
+         self.transposed = transposed
+         self.groups = Validator.check_positive_int(groups)
+         self.output_padding = output_padding
+         self.padding_mode = padding_mode
+         self.kernel_size = kernel_size
+         for kernel_size_elem in kernel_size:
+             Validator.check_positive_int(kernel_size_elem, 'kernel_size item', self.cls_name)
+         for stride_elem in stride:
+             Validator.check_positive_int(stride_elem, 'stride item', self.cls_name)
+         self.dilation = dilation
+         for dilation_elem in dilation:
+             Validator.check_positive_int(dilation_elem, 'dilation item', self.cls_name)
+         if isinstance(self.padding, str):
+             self._reversed_padding = [0, 0] * len(kernel_size)
+             if padding == 'same':
+                 for d, k, i in zip(dilation, kernel_size,
+                                    range(len(kernel_size) - 1, -1, -1)):
+                     total_padding = d * (k - 1)
+                     left_pad = total_padding // 2
+                     self._reversed_padding[2 * i] = left_pad
+                     self._reversed_padding[2 * i + 1] = (
+                         total_padding - left_pad)
+         else:
+             self._reversed_padding = tuple(x for x in reversed(self.padding) for _ in range(2))
+         if transposed:
+             shape = [in_channels, out_channels // groups, *kernel_size]
+         else:
+             shape = [out_channels, in_channels // groups, *kernel_size]
+         weight_init = HeUniform(math.sqrt(5))
+         self.weight = Parameter(initializer(weight_init, shape, dtype=dtype), name='weight')
+
+         if Validator.check_bool(bias, "bias", self.cls_name):
+             fan_in, _ = _calculate_fan_in_and_fan_out(shape)
+             if fan_in != 0:
+                 bound = 1 / math.sqrt(fan_in)
+                 bias_init = Uniform(bound)
+             else:
+                 bias_init = 'zeros'
+             self.bias = Parameter(initializer(bias_init, [out_channels], dtype=dtype), name='bias')
+         else:
+             self.bias = None
+
+     def construct(self, *inputs):
+         """Must be overridden by all subclasses."""
+         raise NotImplementedError
+
+     def extend_repr(self):
+         bias = self.bias is not None
+         s = 'input_channels={}, output_channels={}, kernel_size={}, ' \
+             'stride={}, padding={}, dilation={}, ' \
+             'groups={}, bias={}'.format(
+                 self.in_channels,
+                 self.out_channels,
+                 self.kernel_size,
+                 self.stride,
+                 self.padding,
+                 self.dilation,
+                 self.groups,
+                 bias)
+         return s
+
+
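For reference, a minimal plain-Python sketch of how the constructor above lays out `_reversed_padding` (the kernel, dilation and padding values here are made up): the pairs are ordered last dimension first, which is the layout `pad_ext` consumes.

>>> padding = (1, 2)                         # explicit (pad_H, pad_W) for a 2D case
>>> tuple(x for x in reversed(padding) for _ in range(2))
(2, 2, 1, 1)
>>> dilation, kernel_size = (1, 1), (3, 5)   # 'same' padding case
>>> rev = [0, 0] * len(kernel_size)
>>> for d, k, i in zip(dilation, kernel_size, range(len(kernel_size) - 1, -1, -1)):
...     total = d * (k - 1)                  # total padding needed per dimension
...     rev[2 * i] = total // 2              # left/top side
...     rev[2 * i + 1] = total - total // 2  # right/bottom side gets the excess
...
>>> rev
[2, 2, 1, 1]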
+ class Conv1d(_Conv):
+     r"""
+     1D convolution layer.
+
+     Applies a 1D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, L_{in})`,
+     where :math:`N` is batch size, :math:`C` is channel number and :math:`L` is sequence length.
+
+     The output is calculated based on the formula:
+
+     .. math::
+
+         \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
+         \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
+
+     where :math:`bias` is the output channel bias, :math:`ccor` is
+     the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
+     :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
+
+     - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
+       where :math:`N` is the batch size of the input.
+
+     - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
+       where :math:`C_{out}` is the number of output channels, which is also equal to the number of kernels.
+
+     - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
+       where :math:`C_{in}` is the number of input channels, which is also equal to
+       the number of channels in the convolutional kernels.
+
+     Therefore, in the above formula, :math:`{bias}(C_{\text{out}_j})` represents the bias of the :math:`j`-th
+     output channel, :math:`{weight}(C_{\text{out}_j}, k)` represents the slice of the :math:`j`-th convolutional
+     kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+     channel in the :math:`i`-th batch of the input feature map.
+
+     The shape of the convolutional kernel is given by :math:`(\text{kernel_size})`,
+     where :math:`\text{kernel_size}` is the length of the kernel.
+     If we consider the input and output channels as well as the `groups` parameter, the complete kernel shape
+     will be :math:`(C_{out}, C_{in} / \text{groups}, \text{kernel_size})`,
+     where `groups` is the number of groups dividing `x`'s input channel when applying group convolution.
+
+     For more details about convolution layers, please refer to `Gradient Based Learning Applied to Document
+     Recognition <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
+
+     Args:
+         in_channels (int): The channel number of the input tensor of the Conv1d layer.
+         out_channels (int): The channel number of the output tensor of the Conv1d layer.
+         kernel_size (Union[int, tuple[int], list[int]]): Specifies the length of the 1D convolution kernel.
+             The data type is an integer or a tuple of one integer.
+         stride (Union[int, tuple[int], list[int]], optional): The movement stride of the 1D convolution kernel.
+             The data type is an integer or a tuple of one integer. Default: ``1`` .
+         padding (Union[int, tuple[int], list[int], str], optional): The amount of padding applied to the input.
+             The data type is an integer, a tuple of one integer or a string {``"valid"``, ``"same"``}.
+             The value should be greater than or equal to 0. Default: ``0`` .
+
+             - ``"same"``: Pad the input around its edges so that the input and output shapes
+               are the same when `stride` is set to ``1``.
+               The amount of padding is calculated internally by the operator. If the amount is even, it is
+               uniformly distributed around the input; if it is odd, the excess amount goes to the right side.
+               If this mode is set, `stride` must be 1.
+
+             - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+               possible length. Extra sequence elements that could not complete a full stride will
+               be discarded.
+
+         padding_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+             ``"zeros"`` , ``"reflect"`` , ``"circular"`` or ``"replicate"`` . Default: ``"zeros"`` .
+         dilation (Union[int, tuple[int], list[int]], optional): Specifies the dilation rate to use for dilated
+             convolution. It can be a single int or a tuple of one integer.
+             Assuming :math:`dilation=(d)`, the convolutional kernel samples the input with a
+             spacing of :math:`d-1` elements in the length direction.
+             Default: ``1`` .
+         groups (int, optional): Splits the filter into groups; `in_channels` and `out_channels` must be
+             divisible by `groups`. If `groups` is equal to both `in_channels` and `out_channels`,
+             this 1D convolution layer can also be called a 1D depthwise convolution layer. Default: ``1`` .
+             The following constraints must be met:
+
+             - :math:`(C_{in} \text{ % } \text{groups} == 0)`
+             - :math:`(C_{out} \text{ % } \text{groups} == 0)`
+             - :math:`(C_{out} >= \text{groups})`
+             - :math:`(\text{kernel_size[1]} = C_{in} / \text{groups})`
+
+         bias (bool, optional): Whether the Conv1d layer has a bias parameter. Default: ``True`` .
+         dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``None`` ,
+             using ``mstype.float32``.
+
+     Inputs:
+         - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`.
+
+     Outputs:
+         Tensor of shape :math:`(N, C_{out}, L_{out})` or :math:`(C_{out}, L_{out})`.
+
+         padding is ``'same'``:
+
+         .. math::
+             \begin{array}{ll} \\
+                 L_{out} = \left \lceil{\frac{L_{in}}{\text{stride}}} \right \rceil \\
+             \end{array}
+
+         padding is ``'valid'``:
+
+         .. math::
+             \begin{array}{ll} \\
+                 L_{out} = \left \lceil{\frac{L_{in} - \text{dilation} \times (\text{kernel_size} - 1) }
+                 {\text{stride}}} \right \rceil \\
+             \end{array}
+
+         padding is int or tuple/list:
+
+         .. math::
+             \begin{array}{ll} \\
+                 L_{out} = \left \lfloor{\frac{L_{in} + 2 \times {padding} - (\text{kernel_size} - 1) \times
+                 \text{dilation} - 1 }{\text{stride}} + 1} \right \rfloor \\
+             \end{array}
+
+     Raises:
+         ValueError: If the arguments and the size of the input feature map do not satisfy the output formula,
+             i.e. the computed size of the output feature map is not positive.
+         RuntimeError: On Ascend, if the input size or kernel size is too large, the limited L1 cache size
+             of the NPU chip may trigger an error.
+         TypeError: If `in_channels`, `out_channels` or `groups` is not an int.
+         TypeError: If `kernel_size`, `stride` or `dilation` is neither an int nor a tuple.
+         ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
+         ValueError: If `padding` is less than 0.
+         ValueError: If `padding` is ``'same'`` while `stride` is not equal to 1.
+         ValueError: If the input parameters do not satisfy the convolution output formula.
+         ValueError: If `kernel_size` exceeds the size of the input feature map.
+         ValueError: If the value of padding causes the calculation area to exceed the input size.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore
+         >>> from mindspore import Tensor, mint
+         >>> import numpy as np
+         >>> net = mint.nn.Conv1d(120, 240, 4, bias=False)
+         >>> x = Tensor(np.ones([1, 120, 1024]), mindspore.float32)
+         >>> output = net(x).shape
+         >>> print(output)
+         (1, 240, 1021)
+     """
+     @cell_attr_register
+     def __init__(self,
+                  in_channels,
+                  out_channels,
+                  kernel_size,
+                  stride=1,
+                  padding=0,
+                  dilation=1,
+                  groups=1,
+                  bias=True,
+                  padding_mode='zeros',
+                  dtype=None):
+         """Initialize Conv1d."""
+         kernel_size_ = once(kernel_size)
+         stride_ = once(stride)
+         padding_ = padding if isinstance(padding, str) else once(padding)
+         dilation_ = once(dilation)
+         if not dtype:
+             dtype = mstype.float32
+         super(Conv1d, self).__init__(in_channels, out_channels, kernel_size_, stride_, padding_, dilation_, False,
+                                      once(0), groups, bias, padding_mode, dtype)
+         if isinstance(padding, str) and padding_mode == "zeros":
+             self.conv1d = conv1d_padding_op
+         else:
+             self.conv1d = conv1d_ext_op
+
+     def construct(self, input):
+         if self.padding_mode != "zeros":
+             output = self.conv1d(pad_ext(input, self._reversed_padding, mode=self.padding_mode), self.weight,
+                                  self.bias, self.stride, (0,), self.dilation, self.groups)
+         else:
+             output = self.conv1d(input, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
+         return output
+
+
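Beyond the docstring example above, a minimal sketch of the non-``"zeros"`` branch of `construct`: with `padding='same'` the padding amounts are applied through `pad_ext` before the stride-1 convolution, so the sequence length is preserved. This assumes an Ascend backend as in the docstring; the layer sizes here are made up.

>>> import mindspore
>>> from mindspore import Tensor, mint
>>> import numpy as np
>>> net = mint.nn.Conv1d(4, 8, 3, padding='same', padding_mode='reflect', bias=False)
>>> x = Tensor(np.ones([2, 4, 16]), mindspore.float32)
>>> print(net(x).shape)
(2, 8, 16)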
318
+ class Conv2d(_Conv):
319
+ r"""
320
+ 2D convolution layer.
321
+
322
+ Applies a 2D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, H_{in}, W_{in})`,
323
+ where :math:`N` is batch size, :math:`C` is channel number, :math:`H` is feature height, :math:`W` is feature width.
324
+
325
+ The output is calculated based on formula:
326
+
327
+ .. math::
328
+
329
+ \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
330
+ \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
331
+
332
+ where :math:`bias` is the output channel bias, :math:`ccor` is
333
+ the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
334
+ :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
335
+
336
+ - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
337
+ where :math:`N` is the batch size of the input.
338
+
339
+ - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
340
+ where :math:`C_{out}` is the number of
341
+ output channels, which is also equal to the number of kernels.
342
+
343
+ - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
344
+ where :math:`C_{in}` is the number of
345
+ input channels, which is also equal to the number of channels in the convolutional kernels.
346
+
347
+ Therefore, in the above formula, :math:`{bias}(C_{\text{out}_j})` represents the bias of the :math:`j`-th
348
+ output channel, :math:`{weight}(C_{\text{out}_j}, k)` represents the slice of the :math:`j`-th convolutional
349
+ kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
350
+ channel in the :math:`i`-th batch of the input feature map.
351
+
352
+ The shape of the convolutional kernel is given by :math:`(\text{kernel_size[0]},\text{kernel_size[1]})`,
353
+ where :math:`\text{kernel_size[0]}`
354
+ and :math:`\text{kernel_size[1]}` are the height and width of the kernel, respectively.
355
+ If we consider the input and output channels as well as the `groups` parameter, the complete kernel shape
356
+ will be :math:`(C_{out}, C_{in} / \text{groups}, \text{kernel_size[0]}, \text{kernel_size[1]})`,
357
+ where `groups` is the number of groups dividing `x`'s input channel when applying groups convolution.
358
+
359
+ For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
360
+ <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
361
+
362
+ Args:
363
+ in_channels (int): The channel number of the input tensor of the Conv2d layer.
364
+ out_channels (int): The channel number of the output tensor of the Conv2d layer.
365
+ kernel_size (Union[int, tuple[int], list[int]]): Specifies the height and width of the 2D convolution kernel.
366
+ The data type is an integer or a tuple of two integers. An integer represents the height
367
+ and width of the convolution kernel. A tuple of two integers represents the height
368
+ and width of the convolution kernel respectively.
369
+ stride (Union[int, tuple[int], list[int]], optional): The movement stride of the 2D convolution kernel.
370
+ The data type is an integer or a tuple of two integers. An integer represents the movement step size
371
+ in both height and width directions. A tuple of two integers represents the movement step size in the height
372
+ and width directions respectively. Default: ``1`` .
373
+ padding (Union[int, tuple[int], list[int], str], optional): The number of padding
374
+ on the height and width directions of the input.
375
+ The data type is an integer or a tuple of two integers or string {``"valid"``, ``"same"``}.
376
+ If `padding` is an integer, then `padding_{H}` and `padding_{W}` are all equal to `padding`.
377
+ If `padding` is a tuple of 2 integers, then `padding_{H}` and `padding_{W}`
378
+ is equal to `padding[0]` and `padding[1]` respectively.
379
+ The value should be greater than or equal to 0. Default: ``0`` .
380
+
381
+ - ``"same"``: Pad the input around its edges so that the shape of input and output
382
+ are the same when `stride` is set to ``1``.
383
+ The amount of padding to is calculated by the operator internally, If the amount is even, it is
384
+ uniformly distributed around the input, if it is odd, the excess amount goes to the right/bottom side.
385
+ If this mode is set, `stride` must be 1.
386
+
387
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
388
+ possible height and width. Extra pixels that could not complete a full stride will
389
+ be discarded.
390
+
391
+ padding_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
392
+ ``"zeros"`` , ``"reflect"`` ``"circular"`` or ``"replicate"`` . Default: ``"zeros"`` .
393
+ dilation (Union[int, tuple[int], list[int]], optional): Specifies the dilation rate to use for dilated convolution.
394
+ It can be a single int or a tuple of 2 or 4 integers. A single int means the dilation size is the same
395
+ in both the height and width directions. A tuple of two ints represents the dilation size in
396
+ the height and width directions, respectively. For a tuple of four ints, the two ints correspond
397
+ to (N, C) dimension are treated as 1, and the two correspond to (H, W) dimensions is the
398
+ dilation size in the height and width directions respectively.
399
+ Assuming :math:`dilation=(d0, d1)`, the convolutional kernel samples the input with a
400
+ spacing of :math:`d0-1` elements in the height direction and :math:`d1-1` elements in the width direction.
401
+ The values in the height and width dimensions are in the ranges [1, H] and [1, W], respectively.
402
+ Default: ``1`` .
403
+ groups (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
404
+ divisible by `groups`. If the groups is equal to `in_channels` and `out_channels`,
405
+ this 2D convolution layer also can be called 2D depthwise convolution layer. Default: ``1`` .
406
+ The following restraints must be met:
407
+
408
+ - :math:`(C_{in} \text{ % } \text{groups} == 0)`
409
+ - :math:`(C_{out} \text{ % } \text{groups} == 0)`
410
+ - :math:`(C_{out} >= \text{groups})`
411
+ - :math:`(\text{kernel_size[1]} = C_{in} / \text{groups})`
412
+
413
+ bias (bool, optional): Whether the Conv2d layer has a bias parameter. Default: ``True`` .
414
+ dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``None``, using ``mstype.float32``.
415
+
416
+ Inputs:
417
+ - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` \
418
+ or :math:`(C_{in}, H_{in}, W_{in})`.
419
+
420
+ Outputs:
421
+ Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})` or :math:`(C_{out}, H_{out}, W_{out})`.
422
+
423
+ padding is ``'same'``:
424
+
425
+ .. math::
426
+ \begin{array}{ll} \\
427
+ H_{out} = \left \lceil{\frac{H_{in}}{\text{stride[0]}}} \right \rceil \\
428
+ W_{out} = \left \lceil{\frac{W_{in}}{\text{stride[1]}}} \right \rceil \\
429
+ \end{array}
430
+
431
+ padding is ``'valid'``:
432
+
433
+ .. math::
434
+ \begin{array}{ll} \\
435
+ H_{out} = \left \lceil{\frac{H_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) }
436
+ {\text{stride[0]}}} \right \rceil \\
437
+ W_{out} = \left \lceil{\frac{W_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) }
438
+ {\text{stride[1]}}} \right \rceil \\
439
+ \end{array}
440
+
441
+ padding is int or tuple/list:
442
+
443
+ .. math::
444
+ \begin{array}{ll} \\
445
+ H_{out} = \left \lfloor{\frac{H_{in} + padding[0] + padding[1] - (\text{kernel_size[0]} - 1) \times
446
+ \text{dilation[0]} - 1 }{\text{stride[0]}} + 1} \right \rfloor \\
447
+ W_{out} = \left \lfloor{\frac{W_{in} + padding[2] + padding[3] - (\text{kernel_size[1]} - 1) \times
448
+ \text{dilation[1]} - 1 }{\text{stride[1]}} + 1} \right \rfloor \\
449
+ \end{array}
450
+
451
+ Raises:
+ ValueError: If the arguments and the input feature map size do not satisfy the output formula above,
+ i.e. the computed output feature map size is not positive.
+ RuntimeError: On Ascend, due to the limited L1 cache size of different NPU chips, an error may be
+ triggered if the input size or kernel size is too large.
+ TypeError: If `in_channels`, `out_channels` or `groups` is not an int.
+ TypeError: If `kernel_size`, `stride` or `dilation` is neither an int nor a tuple.
+ ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
+ ValueError: If `padding` is less than 0.
+ ValueError: If `padding` is ``'same'`` and `stride` is not equal to 1.
+ ValueError: If the input parameters do not satisfy the convolution output formula.
+ ValueError: If `kernel_size` exceeds the size of the input feature map.
+ ValueError: If the value of `padding` causes the calculation area to exceed the input size.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, mint
+ >>> import numpy as np
+ >>> net = mint.nn.Conv2d(120, 240, 4, bias=False)
+ >>> x = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
+ >>> output = net(x).shape
+ >>> print(output)
+ (1, 240, 1021, 637)
+ """
+ @cell_attr_register
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=True,
+ padding_mode='zeros',
+ dtype=None):
+ """Initialize Conv2d."""
+ kernel_size_ = twice(kernel_size)
+ stride_ = twice(stride)
+ padding_ = padding if isinstance(padding, str) else twice(padding)
+ dilation_ = twice(dilation)
+ if not dtype:
+ dtype = mstype.float32
+ super(Conv2d, self).__init__(in_channels, out_channels, kernel_size_, stride_, padding_, dilation_, False,
+ twice(0), groups, bias, padding_mode, dtype)
+ if isinstance(padding, str) and padding_mode == "zeros":
+ self.conv2d = conv2d_padding_op
+ else:
+ self.conv2d = conv2d_ext_op
+
+
+ def construct(self, input):
+ if self.padding_mode != "zeros":
+ output = self.conv2d(pad_ext(input, self._reversed_padding, mode=self.padding_mode), self.weight,
+ self.bias, self.stride, (0, 0), self.dilation, self.groups)
+ else:
+ output = self.conv2d(input, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
+ return output
+
+
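Editor's note: a minimal usage sketch of the dispatch above (assumes an Ascend environment and that `padding_mode='reflect'` is accepted as documented for these layers). String padding with the default ``"zeros"`` mode routes to `conv2d_padding_op`; any other `padding_mode` makes `construct` pre-pad the input with `pad_ext` before calling the explicit-padding kernel.

    import mindspore
    import numpy as np
    from mindspore import Tensor, mint

    x = Tensor(np.ones([1, 3, 32, 32]), mindspore.float32)
    net_same = mint.nn.Conv2d(3, 8, 3, padding='same')                        # conv2d_padding_op branch
    net_reflect = mint.nn.Conv2d(3, 8, 3, padding=1, padding_mode='reflect')  # pad_ext + conv2d_ext_op branch
    print(net_same(x).shape, net_reflect(x).shape)  # expected (1, 8, 32, 32) for both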
+ class Conv3d(_Conv):
+ r"""
+ 3D convolution layer.
+
+ Applies a 3D convolution over an input tensor. The input tensor is typically of
+ shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`, where :math:`N` is batch size, :math:`C`
+ is channel number, :math:`D, H, W` are the depth, height and width of the feature map, respectively.
+
+ The output is calculated based on the formula:
+
+ .. math::
+
+ \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
+ \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
+
+ where :math:`bias` is the output channel bias, :math:`ccor` is
+ the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
+ :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
+
+ Here are the indices' meanings:
+
+ - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
+ where :math:`N` is the batch size of the input.
+
+ - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
+ where :math:`C_{out}` is the number of
+ output channels, which is also equal to the number of kernels.
+
+ - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
+ where :math:`C_{in}` is the number of
+ input channels, which is also equal to the number of channels in the convolutional kernels.
+
+ Therefore, in the above formula, :math:`{bias}(C_{\text{out}_j})` represents the bias of the :math:`j`-th
+ output channel, :math:`{weight}(C_{\text{out}_j}, k)` represents the slice of the :math:`j`-th convolutional
+ kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+ channel in the :math:`i`-th batch of the input feature map.
+
+ The shape of the convolutional kernel is given by
+ :math:`(\text{kernel_size[0]},\text{kernel_size[1]},\text{kernel_size[2]})`,
+ where :math:`\text{kernel_size[0]}`, :math:`\text{kernel_size[1]}`
+ and :math:`\text{kernel_size[2]}` are the depth, height and width of the kernel, respectively.
+ If we consider the input and output channels as well as the `groups` parameter, the complete kernel shape
+ will be
+ :math:`(C_{out}, C_{in} / \text{groups}, \text{kernel_size[0]}, \text{kernel_size[1]}, \text{kernel_size[2]})`,
+ where `groups` is the number of groups dividing `x`'s input channel when applying group convolution.
+
+ For more details about the convolution layer, please refer to `Gradient Based Learning Applied to Document
+ Recognition <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
+
+ For details of the limitations of the parameters, please refer to :func:`mindspore.mint.nn.functional.conv3d`.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ in_channels (int): The channel number of the input tensor of the Conv3d layer.
+ out_channels (int): The channel number of the output tensor of the Conv3d layer.
+ kernel_size (Union[int, tuple[int], list[int]]): Specifies the depth, height and width of the 3D
+ convolution kernel. The data type is an integer or a tuple of three integers. An integer represents
+ the depth, height and width of the convolution kernel. A tuple of three integers represents the
+ depth, height and width of the convolution kernel respectively.
+ stride (Union[int, tuple[int], list[int]], optional): The movement stride of the 3D convolution kernel.
+ The data type is an integer or a tuple of three integers. An integer represents the movement step size
+ in the depth, height and width directions. A tuple of three integers represents the movement step size
+ in the depth, height and width directions respectively. Default: ``1`` .
+ padding (Union[int, tuple[int], list[int], str], optional): The amount of padding applied to the depth,
+ height and width directions of the input.
+ The data type is an integer, a string {``"valid"``, ``"same"``}, or a tuple of three integers.
+ The value should be greater than or equal to 0. Default: ``0`` .
+
+ - ``"same"``: Pad the input around its edges so that the shape of input and output
+ are the same when `stride` is set to ``1``.
+ The amount of padding is calculated internally by the operator. If the amount is even, it is
+ uniformly distributed around the input; if it is odd, the excess amount goes to the right/bottom side.
+ If this mode is set, `stride` must be 1.
+
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+ possible height and width. Extra pixels that could not complete a full stride will
+ be discarded.
+
+ padding_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+ ``"zeros"`` , ``"reflect"`` , ``"circular"`` or ``"replicate"`` . Default: ``"zeros"`` .
+ dilation (Union[int, tuple[int], list[int]], optional): Controls the spacing between the kernel points.
+ Default: ``1`` .
+ groups (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
+ divisible by `groups`. If `groups` is equal to both `in_channels` and `out_channels`, this 3D
+ convolution layer can also be called a 3D depthwise convolution layer. Default: ``1`` .
+ bias (bool, optional): Whether the Conv3d layer has a bias parameter. Default: ``True`` .
+ dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``None``, using ``mstype.float32``.
+
+ Inputs:
+ - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` \
+ or :math:`(C_{in}, D_{in}, H_{in}, W_{in})`.
+
+ Outputs:
+ Tensor of shape :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
+ or :math:`(C_{out}, D_{out}, H_{out}, W_{out})`.
+
+ padding is ``"same"``:
+
+ .. math::
+ \begin{array}{ll} \\
+ D_{out} = \left \lceil{\frac{D_{in}}{\text{stride[0]}}} \right \rceil \\
+ H_{out} = \left \lceil{\frac{H_{in}}{\text{stride[1]}}} \right \rceil \\
+ W_{out} = \left \lceil{\frac{W_{in}}{\text{stride[2]}}} \right \rceil \\
+ \end{array}
+
+ padding is ``"valid"``:
+
+ .. math::
+ \begin{array}{ll} \\
+ D_{out} = \left \lceil{\frac{D_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) }
+ {\text{stride[0]}}} \right \rceil \\
+ H_{out} = \left \lceil{\frac{H_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) }
+ {\text{stride[1]}}} \right \rceil \\
+ W_{out} = \left \lceil{\frac{W_{in} - \text{dilation[2]} \times (\text{kernel_size[2]} - 1) }
+ {\text{stride[2]}}} \right \rceil \\
+ \end{array}
+
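Editor's note: to see the ``"valid"`` formulas in action, a one-line check against the Examples section below (input :math:`(D, H, W) = (10, 23, 34)`, kernel 4, stride 1, dilation 1):

    import math
    # ceil((size - dilation * (kernel_size - 1)) / stride) per spatial dim
    print([math.ceil((s - 1 * (4 - 1)) / 1) for s in (10, 23, 34)])  # [7, 20, 31]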
+ Raises:
+ TypeError: If `in_channels`, `out_channels` or `groups` is not an int.
+ TypeError: If `kernel_size`, `stride`, `padding` or `dilation` is neither an int nor a tuple.
+ ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
+ ValueError: If `padding` is less than 0.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, mint
+ >>> import numpy as np
+ >>> net = mint.nn.Conv3d(120, 10, 4)
+ >>> x = Tensor(np.ones([1, 120, 10, 23, 34]), mindspore.float32)
+ >>> output = net(x).shape
+ >>> print(output)
+ (1, 10, 7, 20, 31)
+ """
+ @cell_attr_register
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=True,
+ padding_mode='zeros',
+ dtype=None):
+ """Initialize Conv3d."""
+ kernel_size_ = triple(kernel_size)
+ stride_ = triple(stride)
+ padding_ = padding if isinstance(padding, str) else triple(padding)
+ dilation_ = triple(dilation)
+ if not dtype:
+ dtype = mstype.float32
+ super(Conv3d, self).__init__(in_channels, out_channels, kernel_size_, stride_, padding_, dilation_, False,
+ triple(0), groups, bias, padding_mode, dtype)
+ if isinstance(padding, str) and padding_mode == "zeros":
+ self.conv3d = conv3d_padding_op
+ else:
+ self.conv3d = conv3d_ext_op
+
+
+ def construct(self, input):
+ if self.padding_mode != "zeros":
+ output = self.conv3d(pad_ext(input, self._reversed_padding, mode=self.padding_mode), self.weight,
+ self.bias, self.stride, (0, 0, 0), self.dilation, self.groups)
+ else:
+ output = self.conv3d(input, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
+ return output
+
+
+ def batchify(input, num_spatial_dims, ops_name):
+ """Promote an unbatched conv input to batched form."""
+ dim_count_no_batch = num_spatial_dims + 1
+ dim_count_batch = dim_count_no_batch + 1
+ is_batched = (input.ndim == dim_count_batch)
+ if not (input.ndim == dim_count_no_batch or is_batched):
+ raise TypeError(f"For {ops_name}, expected {dim_count_no_batch}D (unbatched) or {dim_count_batch}D (batched) "
+ f"input, but got input of ndim: {input.ndim}D")
+ if is_batched:
+ return input, is_batched
+ return input.unsqueeze(0), is_batched
+
+
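Editor's note: a small illustration of what `batchify` returns for each input rank (assuming it is invoked from within this module):

    import mindspore
    import numpy as np
    from mindspore import Tensor

    x3d = Tensor(np.ones([3, 8, 8]), mindspore.float32)     # unbatched (C, H, W)
    x4d = Tensor(np.ones([2, 3, 8, 8]), mindspore.float32)  # batched (N, C, H, W)
    y, was_batched = batchify(x3d, num_spatial_dims=2, ops_name="conv2d")
    print(y.shape, was_batched)  # (1, 3, 8, 8) False -- batch dim prepended via unsqueeze(0)
    y, was_batched = batchify(x4d, num_spatial_dims=2, ops_name="conv2d")
    print(y.shape, was_batched)  # (2, 3, 8, 8) True  -- returned unchanged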
+ class _ConvTranspose(_Conv):
+ """
+ Applies an N-D transposed convolution over an input signal composed of several input planes.
+ """
+ def __init__(self, in_channels, out_channels, kernel_size, stride,
+ padding, dilation, transposed, output_padding, groups,
+ bias, padding_mode, dtype=None):
+ if padding_mode != "zeros":
+ raise ValueError(
+ f'Only "zeros" padding mode is supported for {self.__class__.__name__}'
+ )
+ super(_ConvTranspose, self).__init__(in_channels, out_channels, kernel_size,
+ stride, padding, dilation, transposed,
+ output_padding, groups, bias, padding_mode, dtype)
+
+ def _check_output_size(self, output_size, min_sizes, max_sizes, input_shape):
+ if isconstant(output_size) and isconstant(min_sizes)\
+ and isconstant(max_sizes) and isconstant(input_shape):
+ for i in range(len(output_size)):
+ size = output_size[i]
+ min_size = min_sizes[i]
+ max_size = max_sizes[i]
+ if size < min_size or size > max_size:
+ raise ValueError(
+ f"requested an output size of {output_size}, but valid sizes range "
+ f"from {min_sizes} to {max_sizes} (for an input of {input_shape})"
+ )
+
+ # dilation being an optional parameter is for backwards
+ # compatibility
+ def _output_padding(self, input, output_size, stride, padding, kernel_size,
+ num_spatial_dims, dilation):
+ """Compute the output padding from a requested output_size."""
+ if output_size is None:
+ ret = tuple(self.output_padding)  # convert to tuple if it was not already
+ else:
+ input_rank = rank(input)
+ has_batch_dim = input_rank == (num_spatial_dims + 2)
+ num_non_spatial_dims = 2 if has_batch_dim else 1
+ if isconstant(output_size) and isconstant(input_rank) and\
+ len(output_size) != num_spatial_dims and len(output_size) != (num_non_spatial_dims + num_spatial_dims):
+ raise ValueError(
+ f"ConvTranspose{num_spatial_dims}D: for {input_rank}D input, "
+ f"output_size must have {num_spatial_dims} "
+ f"or {num_non_spatial_dims + num_spatial_dims} elements (got {len(output_size)})"
+ )
+ output_size = output_size[-num_spatial_dims:]
+
+ min_sizes = []
+ max_sizes = []
+ for d in range(num_spatial_dims):
+ dim_size = (
+ (input.shape[d + num_non_spatial_dims] - 1) * stride[d]
+ - 2 * padding[d]
+ + (dilation[d] if dilation is not None else 1)
+ * (kernel_size[d] - 1)
+ + 1
+ )
+ min_sizes.append(dim_size)
+ max_sizes.append(min_sizes[d] + stride[d] - 1)
+ self._check_output_size(output_size, min_sizes, max_sizes, input.shape)
+
+ res = []
+ for d in range(num_spatial_dims):
+ res.append(output_size[d] - min_sizes[d])
+ ret = res
+ return ret
+
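Editor's note: a pure-Python walkthrough of the arithmetic above; the numbers match the ConvTranspose2d Examples further below (a 12x12 input downsampled to 6x6 and upsampled back):

    in_size, stride, padding, kernel_size, dilation = 6, 2, 1, 3, 1
    min_size = (in_size - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + 1
    max_size = min_size + stride - 1
    print(min_size, max_size)  # 11 12 -- the range of reachable output sizes
    print(12 - min_size)       # 1     -- the output_padding derived for output_size=12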
+ def construct(self, *inputs):
+ """Must be overridden by all subclasses."""
+ raise NotImplementedError
+
+
+ def _pair(x, arg_name, class_name):
+ if isinstance(x, int):
+ return (x, x)
+ if isinstance(x, (tuple, list)):
+ if len(x) == 1:
+ return (x[0], x[-1])
+ return x
+ raise ValueError(f"For '{class_name}', '{arg_name}'"
+ f" should be int, tuple or list, but got {x}")
+
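Editor's note: a quick sketch of the normalization `_pair` performs:

    print(_pair(3, "kernel_size", "ConvTranspose2d"))       # (3, 3)  -- int broadcast to both dims
    print(_pair([5], "kernel_size", "ConvTranspose2d"))     # (5, 5)  -- 1-element sequence broadcast
    print(_pair((2, 4), "kernel_size", "ConvTranspose2d"))  # (2, 4)  -- passed through unchanged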
+
+ class ConvTranspose2d(_ConvTranspose):
+ r"""
+ Applies a 2D transposed convolution operator over an input image
+ composed of several input planes.
+
+ This module can be seen as the gradient of Conv2d with respect to its input.
+ It is also known as a fractionally-strided convolution or
+ a deconvolution (although it is not an actual deconvolution operation as it does
+ not compute a true inverse of convolution).
+
+ The parameters `kernel_size`, `stride`, `padding`, `output_padding` can either be:
+
+ - a single ``int`` -- in which case the same value is used for the height and width dimensions
+ - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
+ and the second `int` for the width dimension
+
+ .. warning::
+ - This is an experimental API that is subject to change or deletion.
+ - In the scenario where inputs are non-contiguous, `output_padding` must be less than `stride` .
+ - For Atlas training products, when the dtype of input is float32, `groups` only supports 1.
+
+ Args:
+ in_channels (int): Number of channels in the input image.
+ out_channels (int): Number of channels produced by the convolution.
+ kernel_size (Union[int, tuple[int]]): Size of the convolving kernel.
+ stride (Union[int, tuple[int]], optional): Stride of the convolution. Default: ``1`` .
+ padding (Union[int, tuple[int]], optional): :math:`dilation * (kernel\_size - 1) - padding` zero-padding
+ will be added to both sides of each dimension in the input. Default: ``0`` .
+ output_padding (Union[int, tuple[int]], optional): Additional size added to one side of each dimension
+ in the output shape. The value of `output_padding` must be less than `stride` or `dilation` .
+ Default: ``0`` .
+ groups (int, optional): Number of blocked connections from input channels to output channels.
+ Default: ``1`` .
+ bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True`` .
+ dilation (Union[int, tuple[int]], optional): Spacing between kernel elements. Default: ``1`` .
+ padding_mode (str, optional): Specifies the padding mode with a padding value. For now, it can only be
+ set to: ``"zeros"``. Default: ``"zeros"`` .
+ dtype (mindspore.dtype, optional): Dtype of Parameters. Default: ``None`` , when it's ``None`` ,
+ the dtype of Parameters would be mstype.float32.
+
+ Variables:
+ - **weight** (Parameter) - the learnable weights of the module of shape
+ :math:`(\text{in_channels}, \frac{\text{out_channels}}{\text{groups}},
+ \text{kernel_size[0]}, \text{kernel_size[1]})` . The values of these weights are sampled from
+ :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
+ :math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{1}\text{kernel_size}[i]}`
+ - **bias** (Parameter) - the learnable bias of the module of shape :math:`(\text{out_channels},)` .
+ If :attr:`bias` is ``True``, then the values of these weights are sampled from
+ :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
+ :math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{1}\text{kernel_size}[i]}` .
+
+ Inputs:
+ - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})` .
+
+ Outputs:
+ Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})` or :math:`(C_{out}, H_{out}, W_{out})`, where
+
+ .. math::
+ H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{dilation}[0]
+ \times (\text{kernel_size}[0] - 1) + \text{output_padding}[0] + 1
+ .. math::
+ W_{out} = (W_{in} - 1) \times \text{stride}[1] - 2 \times \text{padding}[1] + \text{dilation}[1]
+ \times (\text{kernel_size}[1] - 1) + \text{output_padding}[1] + 1
+
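Editor's note: a direct check of these output formulas; the values match the upsample layer in the Examples below (stride 2, padding 1, kernel 3, with the output_padding of 1 that `_output_padding` derives for `output_size=(1, 16, 12, 12)`):

    def convtranspose2d_out(size, stride, padding, dilation, kernel_size, output_padding):
        # (H_in - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + output_padding + 1
        return (size - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + output_padding + 1

    print(convtranspose2d_out(6, 2, 1, 1, 3, 1))  # 12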
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import mint
+ >>> # With square kernels and equal stride
+ >>> m = mint.nn.ConvTranspose2d(16, 33, 3, stride=2)
+ >>> # non-square kernels and unequal stride and with padding
+ >>> m = mint.nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
+ >>> input = mint.randn(20, 16, 50, 100)
+ >>> output = m(input)
+ >>> # exact output size can also be specified as an argument
+ >>> input = mint.randn(1, 16, 12, 12)
+ >>> downsample = mint.nn.Conv2d(16, 16, 3, stride=2, padding=1)
+ >>> upsample = mint.nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
+ >>> h = downsample(input)
+ >>> h.shape
+ (1, 16, 6, 6)
+ >>> output = upsample(h, output_size=input.shape)
+ >>> output.shape
+ (1, 16, 12, 12)
+
+ .. _`here`:
+ https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
+
+ .. _`Deconvolutional Networks`:
+ https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf
+ """
+
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0,
+ groups=1, bias=True, dilation=1, padding_mode="zeros", dtype=None):
+ dtype = mstype.float32 if dtype is None else dtype
+ kernel_size = _pair(kernel_size, "kernel_size", "ConvTranspose2d")
+ stride = _pair(stride, "stride", "ConvTranspose2d")
+ padding = _pair(padding, "padding", "ConvTranspose2d")
+ dilation = _pair(dilation, "dilation", "ConvTranspose2d")
+ output_padding = _pair(output_padding, "output_padding", "ConvTranspose2d")
+ super(ConvTranspose2d, self).__init__(
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride,
+ padding,
+ dilation,
+ True,
+ output_padding,
+ groups,
+ bias,
+ padding_mode,
+ dtype
+ )
+
+ def construct(self, input, output_size=None):
+ num_spatial_dims = 2
+ output_padding = self._output_padding(
+ input,
+ output_size,
+ self.stride,  # type: ignore[arg-type]
+ self.padding,  # type: ignore[arg-type]
+ self.kernel_size,  # type: ignore[arg-type]
+ num_spatial_dims,
+ self.dilation,  # type: ignore[arg-type]
+ )
+
+ return conv_transpose2d(
+ input,
+ self.weight,
+ self.bias,
+ self.stride,
+ self.padding,
+ output_padding,
+ self.groups,
+ self.dilation,
+ )