mindspore-2.4.10-cp311-cp311-win_amd64.whl → mindspore-2.6.0-cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (602)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +13 -6
  5. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  8. mindspore/_check_jit_forbidden_api.py +3 -0
  9. mindspore/_checkparam.py +3 -38
  10. mindspore/_deprecated/__init__.py +17 -0
  11. mindspore/_deprecated/jit.py +198 -0
  12. mindspore/_extends/builtin_operations.py +1 -1
  13. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  14. mindspore/_extends/parse/__init__.py +6 -7
  15. mindspore/_extends/parse/compile_config.py +83 -0
  16. mindspore/_extends/parse/deprecated/__init__.py +0 -0
  17. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +394 -0
  18. mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
  19. mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
  20. mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
  21. mindspore/_extends/parse/parser.py +47 -198
  22. mindspore/_extends/parse/resources.py +1 -5
  23. mindspore/_extends/parse/standard_method.py +229 -99
  24. mindspore/_extends/pijit/__init__.py +2 -2
  25. mindspore/_extends/pijit/pijit_func_white_list.py +17 -12
  26. mindspore/_extends/pijit/tensor_func_list.py +27 -0
  27. mindspore/_extends/utils.py +1 -1
  28. mindspore/amp.py +11 -5
  29. mindspore/atlprov.dll +0 -0
  30. mindspore/avcodec-59.dll +0 -0
  31. mindspore/avdevice-59.dll +0 -0
  32. mindspore/avfilter-8.dll +0 -0
  33. mindspore/avformat-59.dll +0 -0
  34. mindspore/avutil-57.dll +0 -0
  35. mindspore/boost/__init__.py +2 -2
  36. mindspore/boost/base.py +3 -7
  37. mindspore/boost/boost_cell_wrapper.py +138 -43
  38. mindspore/c1.dll +0 -0
  39. mindspore/c1xx.dll +0 -0
  40. mindspore/c2.dll +0 -0
  41. mindspore/common/__init__.py +6 -3
  42. mindspore/common/_grad_function.py +56 -0
  43. mindspore/common/_pijit_context.py +14 -5
  44. mindspore/common/_register_for_tensor.py +1 -2
  45. mindspore/common/_stub_tensor.py +30 -14
  46. mindspore/common/_tensor_cpp_method.py +17 -0
  47. mindspore/common/_tensor_docs.py +4760 -0
  48. mindspore/common/api.py +480 -372
  49. mindspore/common/auto_dynamic_shape.py +41 -44
  50. mindspore/common/dtype.py +39 -36
  51. mindspore/common/dump.py +9 -6
  52. mindspore/common/file_system.py +9 -1
  53. mindspore/common/generator.py +5 -0
  54. mindspore/common/hook_handle.py +6 -2
  55. mindspore/common/initializer.py +13 -10
  56. mindspore/common/jit_begin_end.py +94 -0
  57. mindspore/common/jit_config.py +6 -1
  58. mindspore/common/jit_context.py +76 -0
  59. mindspore/common/jit_trace.py +378 -0
  60. mindspore/common/lazy_inline.py +9 -3
  61. mindspore/common/mindir_util.py +10 -2
  62. mindspore/common/mutable.py +5 -4
  63. mindspore/common/parameter.py +135 -52
  64. mindspore/common/seed.py +2 -2
  65. mindspore/common/sparse_tensor.py +23 -17
  66. mindspore/common/tensor.py +975 -1981
  67. mindspore/communication/__init__.py +7 -5
  68. mindspore/communication/_comm_helper.py +52 -2
  69. mindspore/communication/comm_func.py +240 -181
  70. mindspore/communication/management.py +95 -26
  71. mindspore/context.py +324 -573
  72. mindspore/dataset/__init__.py +65 -37
  73. mindspore/dataset/audio/__init__.py +2 -8
  74. mindspore/dataset/audio/transforms.py +3 -17
  75. mindspore/dataset/callback/ds_callback.py +2 -1
  76. mindspore/dataset/core/config.py +87 -6
  77. mindspore/dataset/engine/cache_admin.py +3 -3
  78. mindspore/dataset/engine/cache_client.py +6 -5
  79. mindspore/dataset/engine/datasets.py +292 -267
  80. mindspore/dataset/engine/datasets_audio.py +22 -8
  81. mindspore/dataset/engine/datasets_standard_format.py +46 -27
  82. mindspore/dataset/engine/datasets_text.py +78 -48
  83. mindspore/dataset/engine/datasets_user_defined.py +183 -117
  84. mindspore/dataset/engine/datasets_vision.py +120 -44
  85. mindspore/dataset/engine/iterators.py +283 -63
  86. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
  87. mindspore/dataset/engine/obs/util.py +8 -0
  88. mindspore/dataset/engine/queue.py +40 -0
  89. mindspore/dataset/engine/samplers.py +289 -43
  90. mindspore/dataset/engine/serializer_deserializer.py +3 -2
  91. mindspore/dataset/engine/validators.py +53 -11
  92. mindspore/dataset/text/__init__.py +7 -6
  93. mindspore/dataset/text/transforms.py +6 -5
  94. mindspore/dataset/text/utils.py +3 -3
  95. mindspore/dataset/transforms/__init__.py +0 -9
  96. mindspore/dataset/transforms/py_transforms_util.py +17 -0
  97. mindspore/dataset/transforms/transforms.py +31 -14
  98. mindspore/dataset/utils/browse_dataset.py +1 -1
  99. mindspore/dataset/vision/__init__.py +2 -9
  100. mindspore/dataset/vision/transforms.py +202 -158
  101. mindspore/dataset/vision/utils.py +7 -5
  102. mindspore/dataset/vision/validators.py +1 -2
  103. mindspore/device_context/__init__.py +21 -0
  104. mindspore/device_context/ascend/__init__.py +25 -0
  105. mindspore/device_context/ascend/device.py +72 -0
  106. mindspore/device_context/ascend/op_debug.py +153 -0
  107. mindspore/device_context/ascend/op_precision.py +193 -0
  108. mindspore/device_context/ascend/op_tuning.py +123 -0
  109. mindspore/{ops_generate/gen_constants.py → device_context/cpu/__init__.py} +6 -17
  110. mindspore/device_context/cpu/device.py +62 -0
  111. mindspore/device_context/cpu/op_tuning.py +43 -0
  112. mindspore/device_context/gpu/__init__.py +21 -0
  113. mindspore/device_context/gpu/device.py +70 -0
  114. mindspore/device_context/gpu/op_precision.py +67 -0
  115. mindspore/device_context/gpu/op_tuning.py +175 -0
  116. mindspore/device_manager.py +170 -0
  117. mindspore/dnnl.dll +0 -0
  118. mindspore/dpcmi.dll +0 -0
  119. mindspore/experimental/es/embedding_service.py +35 -27
  120. mindspore/experimental/llm_boost/__init__.py +1 -0
  121. mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
  122. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +209 -0
  123. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
  124. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  125. mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
  126. mindspore/experimental/llm_boost/register.py +1 -0
  127. mindspore/experimental/map_parameter.py +4 -4
  128. mindspore/experimental/optim/adadelta.py +6 -6
  129. mindspore/experimental/optim/adagrad.py +4 -4
  130. mindspore/experimental/optim/adam.py +7 -0
  131. mindspore/experimental/optim/adamax.py +4 -4
  132. mindspore/experimental/optim/adamw.py +4 -0
  133. mindspore/experimental/optim/asgd.py +1 -1
  134. mindspore/experimental/optim/lr_scheduler.py +73 -46
  135. mindspore/experimental/optim/radam.py +34 -31
  136. mindspore/experimental/optim/rprop.py +1 -1
  137. mindspore/experimental/optim/sgd.py +1 -1
  138. mindspore/hal/contiguous_tensors_handle.py +6 -10
  139. mindspore/hal/device.py +55 -53
  140. mindspore/hal/event.py +52 -52
  141. mindspore/hal/memory.py +179 -120
  142. mindspore/hal/stream.py +150 -109
  143. mindspore/include/api/context.h +0 -1
  144. mindspore/include/dataset/constants.h +7 -4
  145. mindspore/include/dataset/execute.h +2 -2
  146. mindspore/jpeg62.dll +0 -0
  147. mindspore/log.py +50 -0
  148. mindspore/mindrecord/__init__.py +21 -8
  149. mindspore/mindrecord/config.py +17 -316
  150. mindspore/mindrecord/filereader.py +1 -9
  151. mindspore/mindrecord/filewriter.py +5 -15
  152. mindspore/mindrecord/mindpage.py +1 -9
  153. mindspore/mindspore_backend_common.dll +0 -0
  154. mindspore/mindspore_backend_manager.dll +0 -0
  155. mindspore/mindspore_common.dll +0 -0
  156. mindspore/mindspore_core.dll +0 -0
  157. mindspore/mindspore_dump.dll +0 -0
  158. mindspore/mindspore_frontend.dll +0 -0
  159. mindspore/mindspore_glog.dll +0 -0
  160. mindspore/mindspore_memory_pool.dll +0 -0
  161. mindspore/mindspore_ms_backend.dll +0 -0
  162. mindspore/mindspore_ops.dll +0 -0
  163. mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
  164. mindspore/mindspore_ops_kernel_common.dll +0 -0
  165. mindspore/mindspore_profiler.dll +0 -0
  166. mindspore/mindspore_pyboost.dll +0 -0
  167. mindspore/mindspore_pynative.dll +0 -0
  168. mindspore/mindspore_res_manager.dll +0 -0
  169. mindspore/mindspore_runtime_pipeline.dll +0 -0
  170. mindspore/mint/__init__.py +798 -761
  171. mindspore/mint/distributed/__init__.py +70 -4
  172. mindspore/mint/distributed/distributed.py +2679 -44
  173. mindspore/mint/linalg/__init__.py +8 -0
  174. mindspore/mint/nn/__init__.py +743 -22
  175. mindspore/mint/nn/functional.py +716 -23
  176. mindspore/mint/nn/layer/__init__.py +21 -4
  177. mindspore/mint/nn/layer/_functions.py +334 -0
  178. mindspore/mint/nn/layer/activation.py +276 -1
  179. mindspore/mint/nn/layer/basic.py +123 -0
  180. mindspore/mint/nn/layer/conv.py +933 -0
  181. mindspore/mint/nn/layer/normalization.py +223 -28
  182. mindspore/mint/nn/layer/padding.py +797 -0
  183. mindspore/mint/nn/layer/pooling.py +235 -0
  184. mindspore/mint/optim/__init__.py +3 -1
  185. mindspore/mint/optim/adam.py +223 -0
  186. mindspore/mint/optim/adamw.py +26 -19
  187. mindspore/mint/optim/sgd.py +171 -0
  188. mindspore/mint/special/__init__.py +2 -1
  189. mindspore/msobj140.dll +0 -0
  190. mindspore/mspdb140.dll +0 -0
  191. mindspore/mspdbcore.dll +0 -0
  192. mindspore/mspdbst.dll +0 -0
  193. mindspore/mspft140.dll +0 -0
  194. mindspore/msvcdis140.dll +0 -0
  195. mindspore/msvcp140_1.dll +0 -0
  196. mindspore/msvcp140_2.dll +0 -0
  197. mindspore/msvcp140_atomic_wait.dll +0 -0
  198. mindspore/msvcp140_codecvt_ids.dll +0 -0
  199. mindspore/multiprocessing/__init__.py +5 -0
  200. mindspore/nn/__init__.py +4 -1
  201. mindspore/nn/cell.py +1373 -192
  202. mindspore/nn/dynamic_lr.py +2 -1
  203. mindspore/nn/layer/activation.py +29 -27
  204. mindspore/nn/layer/basic.py +51 -35
  205. mindspore/nn/layer/channel_shuffle.py +3 -3
  206. mindspore/nn/layer/container.py +1 -1
  207. mindspore/nn/layer/conv.py +53 -42
  208. mindspore/nn/layer/embedding.py +12 -11
  209. mindspore/nn/layer/normalization.py +56 -49
  210. mindspore/nn/layer/padding.py +4 -3
  211. mindspore/nn/layer/pooling.py +120 -42
  212. mindspore/nn/layer/rnn_cells.py +1 -1
  213. mindspore/nn/layer/rnns.py +2 -1
  214. mindspore/nn/layer/timedistributed.py +5 -5
  215. mindspore/nn/layer/transformer.py +59 -36
  216. mindspore/nn/learning_rate_schedule.py +8 -4
  217. mindspore/nn/loss/loss.py +58 -55
  218. mindspore/nn/optim/ada_grad.py +7 -5
  219. mindspore/nn/optim/adadelta.py +11 -9
  220. mindspore/nn/optim/adafactor.py +1 -1
  221. mindspore/nn/optim/adam.py +19 -15
  222. mindspore/nn/optim/adamax.py +8 -7
  223. mindspore/nn/optim/adasum.py +5 -5
  224. mindspore/nn/optim/asgd.py +3 -1
  225. mindspore/nn/optim/ftrl.py +11 -9
  226. mindspore/nn/optim/lamb.py +1 -1
  227. mindspore/nn/optim/lars.py +1 -4
  228. mindspore/nn/optim/lazyadam.py +12 -10
  229. mindspore/nn/optim/momentum.py +7 -6
  230. mindspore/nn/optim/optimizer.py +3 -3
  231. mindspore/nn/optim/proximal_ada_grad.py +12 -10
  232. mindspore/nn/optim/rmsprop.py +13 -12
  233. mindspore/nn/optim/rprop.py +11 -9
  234. mindspore/nn/optim/sgd.py +9 -6
  235. mindspore/nn/optim/tft_wrapper.py +5 -2
  236. mindspore/nn/optim/thor.py +2 -1
  237. mindspore/nn/probability/bijector/bijector.py +17 -11
  238. mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
  239. mindspore/nn/probability/bijector/invert.py +2 -2
  240. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  241. mindspore/nn/probability/bijector/softplus.py +3 -2
  242. mindspore/nn/probability/distribution/beta.py +3 -3
  243. mindspore/nn/probability/distribution/categorical.py +1 -1
  244. mindspore/nn/probability/distribution/cauchy.py +4 -2
  245. mindspore/nn/probability/distribution/exponential.py +6 -7
  246. mindspore/nn/probability/distribution/gamma.py +2 -2
  247. mindspore/nn/probability/distribution/gumbel.py +2 -2
  248. mindspore/nn/probability/distribution/half_normal.py +5 -3
  249. mindspore/nn/probability/distribution/logistic.py +5 -3
  250. mindspore/nn/probability/distribution/poisson.py +1 -1
  251. mindspore/nn/probability/distribution/uniform.py +5 -3
  252. mindspore/nn/reinforcement/_tensors_queue.py +1 -1
  253. mindspore/nn/reinforcement/tensor_array.py +1 -1
  254. mindspore/nn/utils/init.py +13 -11
  255. mindspore/nn/wrap/__init__.py +6 -6
  256. mindspore/nn/wrap/cell_wrapper.py +181 -122
  257. mindspore/nn/wrap/grad_reducer.py +45 -36
  258. mindspore/nn/wrap/loss_scale.py +6 -7
  259. mindspore/numpy/array_creations.py +63 -65
  260. mindspore/numpy/array_ops.py +149 -144
  261. mindspore/numpy/logic_ops.py +41 -42
  262. mindspore/numpy/math_ops.py +361 -359
  263. mindspore/numpy/utils.py +17 -18
  264. mindspore/numpy/utils_const.py +5 -6
  265. mindspore/opencv_core452.dll +0 -0
  266. mindspore/opencv_imgcodecs452.dll +0 -0
  267. mindspore/opencv_imgproc452.dll +0 -0
  268. mindspore/ops/__init__.py +5 -3
  269. mindspore/ops/_grad_experimental/grad_comm_ops.py +112 -16
  270. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -2
  271. mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
  272. mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
  273. mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
  274. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  275. mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
  276. mindspore/ops/_register_for_op.py +0 -11
  277. mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
  278. mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -65
  279. mindspore/ops/_vmap/vmap_array_ops.py +52 -25
  280. mindspore/ops/_vmap/vmap_base.py +0 -2
  281. mindspore/ops/_vmap/vmap_grad_nn_ops.py +21 -14
  282. mindspore/ops/_vmap/vmap_math_ops.py +15 -16
  283. mindspore/ops/_vmap/vmap_nn_ops.py +29 -42
  284. mindspore/ops/auto_generate/__init__.py +4 -3
  285. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +258 -46
  286. mindspore/ops/auto_generate/gen_extend_func.py +757 -185
  287. mindspore/ops/auto_generate/gen_ops_def.py +4197 -2243
  288. mindspore/ops/auto_generate/gen_ops_prim.py +16976 -6055
  289. mindspore/ops/auto_generate/pyboost_inner_prim.py +221 -87
  290. mindspore/ops/composite/__init__.py +2 -1
  291. mindspore/ops/composite/base.py +20 -25
  292. mindspore/ops/composite/math_ops.py +6 -16
  293. mindspore/ops/composite/multitype_ops/__init__.py +5 -2
  294. mindspore/ops/composite/multitype_ops/_compile_utils.py +228 -30
  295. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
  296. mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
  297. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  298. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  299. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
  300. mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
  301. mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
  302. mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
  303. mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
  304. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
  305. mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
  306. mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
  307. mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
  308. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
  309. mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
  310. mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
  311. mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
  312. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
  313. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  314. mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
  315. mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
  316. mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
  317. mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
  318. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
  319. mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
  320. mindspore/ops/composite/multitype_ops/pow_impl.py +2 -30
  321. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
  322. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  323. mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
  324. mindspore/ops/function/__init__.py +40 -2
  325. mindspore/ops/function/_add_attr_func.py +58 -0
  326. mindspore/ops/function/array_func.py +2089 -2403
  327. mindspore/ops/function/clip_func.py +80 -23
  328. mindspore/ops/function/debug_func.py +57 -57
  329. mindspore/ops/function/grad/__init__.py +1 -0
  330. mindspore/ops/function/grad/grad_func.py +104 -71
  331. mindspore/ops/function/image_func.py +2 -2
  332. mindspore/ops/function/linalg_func.py +47 -78
  333. mindspore/ops/function/math_func.py +4351 -3813
  334. mindspore/ops/function/nn_func.py +1712 -637
  335. mindspore/ops/function/other_func.py +159 -1
  336. mindspore/ops/function/parameter_func.py +18 -84
  337. mindspore/ops/function/random_func.py +452 -387
  338. mindspore/ops/function/reshard_func.py +4 -70
  339. mindspore/ops/function/sparse_func.py +3 -3
  340. mindspore/ops/function/sparse_unary_func.py +6 -6
  341. mindspore/ops/function/spectral_func.py +25 -58
  342. mindspore/ops/function/vmap_func.py +26 -18
  343. mindspore/ops/functional.py +23 -7
  344. mindspore/ops/functional_overload.py +1548 -0
  345. mindspore/ops/op_info_register.py +32 -244
  346. mindspore/ops/operations/__init__.py +23 -15
  347. mindspore/ops/operations/_custom_ops_utils.py +235 -0
  348. mindspore/ops/operations/_embedding_cache_ops.py +4 -4
  349. mindspore/ops/operations/_grad_ops.py +2 -43
  350. mindspore/ops/operations/_infer_ops.py +2 -1
  351. mindspore/ops/operations/_inner_ops.py +43 -84
  352. mindspore/ops/operations/_ms_kernel.py +4 -10
  353. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  354. mindspore/ops/operations/_scalar_ops.py +3 -2
  355. mindspore/ops/operations/_sequence_ops.py +1 -1
  356. mindspore/ops/operations/_tensor_array.py +1 -1
  357. mindspore/ops/operations/array_ops.py +81 -324
  358. mindspore/ops/operations/comm_ops.py +154 -108
  359. mindspore/ops/operations/custom_ops.py +298 -87
  360. mindspore/ops/operations/debug_ops.py +157 -59
  361. mindspore/ops/operations/inner_ops.py +7 -5
  362. mindspore/ops/operations/linalg_ops.py +1 -57
  363. mindspore/ops/operations/manually_defined/_inner.py +1 -1
  364. mindspore/ops/operations/manually_defined/ops_def.py +928 -180
  365. mindspore/ops/operations/math_ops.py +32 -234
  366. mindspore/ops/operations/nn_ops.py +212 -531
  367. mindspore/ops/operations/other_ops.py +62 -9
  368. mindspore/ops/operations/random_ops.py +13 -7
  369. mindspore/ops/operations/reshard_ops.py +1 -1
  370. mindspore/ops/operations/sparse_ops.py +2 -2
  371. mindspore/ops/primitive.py +66 -53
  372. mindspore/ops/tensor_method.py +1895 -0
  373. mindspore/ops_generate/__init__.py +0 -5
  374. mindspore/ops_generate/aclnn/__init__.py +0 -0
  375. mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +135 -0
  376. mindspore/ops_generate/aclnn/gen_aclnn_implement.py +257 -0
  377. mindspore/ops_generate/api/__init__.py +0 -0
  378. mindspore/ops_generate/api/add_tensor_docs_generator.py +56 -0
  379. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +105 -0
  380. mindspore/ops_generate/api/functional_map_cpp_generator.py +504 -0
  381. mindspore/ops_generate/api/functional_overload_py_generator.py +112 -0
  382. mindspore/ops_generate/api/functions_cc_generator.py +237 -0
  383. mindspore/ops_generate/api/gen_api.py +103 -0
  384. mindspore/ops_generate/api/op_api_proto.py +235 -0
  385. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +461 -0
  386. mindspore/ops_generate/common/__init__.py +0 -0
  387. mindspore/ops_generate/common/base_generator.py +11 -0
  388. mindspore/ops_generate/common/gen_constants.py +91 -0
  389. mindspore/ops_generate/common/gen_utils.py +348 -0
  390. mindspore/ops_generate/common/op_proto.py +473 -0
  391. mindspore/ops_generate/common/template.py +523 -0
  392. mindspore/ops_generate/gen_ops.py +22 -1069
  393. mindspore/ops_generate/op_def/__init__.py +0 -0
  394. mindspore/ops_generate/op_def/gen_op_def.py +90 -0
  395. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +191 -0
  396. mindspore/ops_generate/op_def/ops_def_cc_generator.py +296 -0
  397. mindspore/ops_generate/op_def/ops_def_h_generator.py +74 -0
  398. mindspore/ops_generate/op_def/ops_name_h_generator.py +83 -0
  399. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
  400. mindspore/ops_generate/op_def_py/__init__.py +0 -0
  401. mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
  402. mindspore/ops_generate/op_def_py/op_def_py_generator.py +132 -0
  403. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +489 -0
  404. mindspore/ops_generate/pyboost/__init__.py +0 -0
  405. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +139 -0
  406. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +93 -0
  407. mindspore/ops_generate/pyboost/gen_pyboost_func.py +175 -0
  408. mindspore/ops_generate/pyboost/op_template_parser.py +517 -0
  409. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +407 -0
  410. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +100 -0
  411. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +148 -0
  412. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +155 -0
  413. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +132 -0
  414. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +272 -0
  415. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +938 -0
  416. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +357 -0
  417. mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +179 -36
  418. mindspore/ops_generate/resources/__init__.py +0 -0
  419. mindspore/ops_generate/resources/resource_list.py +30 -0
  420. mindspore/ops_generate/resources/resource_loader.py +36 -0
  421. mindspore/ops_generate/resources/resource_manager.py +64 -0
  422. mindspore/ops_generate/resources/yaml_loader.py +88 -0
  423. mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
  424. mindspore/parallel/__init__.py +7 -3
  425. mindspore/parallel/_auto_parallel_context.py +159 -40
  426. mindspore/parallel/_cell_wrapper.py +132 -15
  427. mindspore/parallel/_parallel_serialization.py +107 -5
  428. mindspore/parallel/_ps_context.py +1 -1
  429. mindspore/parallel/_recovery_context.py +7 -2
  430. mindspore/parallel/_tensor.py +142 -18
  431. mindspore/parallel/_utils.py +199 -23
  432. mindspore/parallel/algo_parameter_config.py +4 -4
  433. mindspore/parallel/auto_parallel.py +732 -0
  434. mindspore/parallel/checkpoint_convert.py +159 -0
  435. mindspore/parallel/checkpoint_transform.py +700 -35
  436. mindspore/parallel/cluster/process_entity/_api.py +276 -50
  437. mindspore/parallel/cluster/process_entity/_utils.py +41 -6
  438. mindspore/parallel/cluster/run.py +21 -4
  439. mindspore/parallel/function/__init__.py +24 -0
  440. mindspore/parallel/function/reshard_func.py +258 -0
  441. mindspore/parallel/nn/__init__.py +25 -0
  442. mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
  443. mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
  444. mindspore/parallel/parameter_broadcast.py +25 -14
  445. mindspore/parallel/shard.py +137 -59
  446. mindspore/parallel/transform_safetensors.py +364 -305
  447. mindspore/pgodb140.dll +0 -0
  448. mindspore/pgort140.dll +0 -0
  449. mindspore/profiler/__init__.py +22 -5
  450. mindspore/profiler/analysis/__init__.py +0 -0
  451. mindspore/profiler/analysis/parser/__init__.py +0 -0
  452. mindspore/profiler/analysis/parser/ascend_cann_parser.py +170 -0
  453. mindspore/profiler/analysis/parser/base_parser.py +158 -0
  454. mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
  455. mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
  456. mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
  457. mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
  458. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +264 -0
  459. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
  460. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +109 -0
  461. mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
  462. mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
  463. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
  464. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
  465. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
  466. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
  467. mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
  468. mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
  469. mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
  470. mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
  471. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +415 -0
  472. mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
  473. mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
  474. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
  475. mindspore/profiler/analysis/task_manager.py +131 -0
  476. mindspore/profiler/analysis/time_converter.py +84 -0
  477. mindspore/profiler/analysis/viewer/__init__.py +0 -0
  478. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +372 -0
  479. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
  480. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +250 -0
  481. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +320 -0
  482. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +327 -0
  483. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +376 -0
  484. mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
  485. mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
  486. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +96 -0
  487. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
  488. mindspore/profiler/analysis/work_flow.py +73 -0
  489. mindspore/profiler/common/ascend_msprof_exporter.py +139 -0
  490. mindspore/profiler/common/command_executor.py +90 -0
  491. mindspore/profiler/common/constant.py +186 -3
  492. mindspore/profiler/common/file_manager.py +208 -0
  493. mindspore/profiler/common/log.py +130 -0
  494. mindspore/profiler/common/msprof_cmd_tool.py +221 -0
  495. mindspore/profiler/common/path_manager.py +395 -0
  496. mindspore/profiler/common/process_bar.py +168 -0
  497. mindspore/profiler/common/process_pool.py +9 -3
  498. mindspore/profiler/common/profiler_context.py +500 -0
  499. mindspore/profiler/common/profiler_info.py +304 -0
  500. mindspore/profiler/common/profiler_meta_data.py +74 -0
  501. mindspore/profiler/common/profiler_output_path.py +284 -0
  502. mindspore/profiler/common/profiler_parameters.py +251 -0
  503. mindspore/profiler/common/profiler_path_manager.py +179 -0
  504. mindspore/profiler/common/record_function.py +76 -0
  505. mindspore/profiler/common/tlv_decoder.py +76 -0
  506. mindspore/profiler/common/util.py +75 -2
  507. mindspore/profiler/dynamic_profiler.py +341 -75
  508. mindspore/profiler/envprofiler.py +163 -0
  509. mindspore/profiler/experimental_config.py +197 -0
  510. mindspore/profiler/mstx.py +242 -0
  511. mindspore/profiler/platform/__init__.py +21 -0
  512. mindspore/profiler/platform/base_profiler.py +40 -0
  513. mindspore/profiler/platform/cpu_profiler.py +124 -0
  514. mindspore/profiler/platform/gpu_profiler.py +74 -0
  515. mindspore/profiler/platform/npu_profiler.py +335 -0
  516. mindspore/profiler/profiler.py +1073 -90
  517. mindspore/profiler/profiler_action_controller.py +187 -0
  518. mindspore/profiler/profiler_interface.py +118 -0
  519. mindspore/profiler/schedule.py +243 -0
  520. mindspore/rewrite/api/node.py +15 -13
  521. mindspore/rewrite/api/symbol_tree.py +2 -3
  522. mindspore/run_check/_check_version.py +27 -20
  523. mindspore/run_check/run_check.py +1 -1
  524. mindspore/runtime/__init__.py +37 -0
  525. mindspore/runtime/device.py +27 -0
  526. mindspore/runtime/event.py +209 -0
  527. mindspore/runtime/executor.py +177 -0
  528. mindspore/runtime/memory.py +416 -0
  529. mindspore/runtime/stream.py +460 -0
  530. mindspore/runtime/thread_bind_core.py +401 -0
  531. mindspore/safeguard/rewrite_obfuscation.py +12 -9
  532. mindspore/swresample-4.dll +0 -0
  533. mindspore/swscale-6.dll +0 -0
  534. mindspore/tbbmalloc.dll +0 -0
  535. mindspore/tinyxml2.dll +0 -0
  536. mindspore/train/__init__.py +8 -8
  537. mindspore/train/_utils.py +96 -27
  538. mindspore/train/amp.py +9 -5
  539. mindspore/train/callback/__init__.py +2 -2
  540. mindspore/train/callback/_callback.py +2 -16
  541. mindspore/train/callback/_checkpoint.py +53 -55
  542. mindspore/train/callback/_cluster_monitor.py +14 -18
  543. mindspore/train/callback/_early_stop.py +1 -1
  544. mindspore/train/callback/_flops_collector.py +103 -68
  545. mindspore/train/callback/_history.py +8 -5
  546. mindspore/train/callback/_lambda_callback.py +2 -2
  547. mindspore/train/callback/_landscape.py +0 -3
  548. mindspore/train/callback/_loss_monitor.py +2 -1
  549. mindspore/train/callback/_on_request_exit.py +6 -5
  550. mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
  551. mindspore/train/callback/_summary_collector.py +52 -19
  552. mindspore/train/callback/_time_monitor.py +2 -1
  553. mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +228 -108
  554. mindspore/train/data_sink.py +25 -2
  555. mindspore/train/dataset_helper.py +15 -16
  556. mindspore/train/loss_scale_manager.py +8 -7
  557. mindspore/train/metrics/accuracy.py +3 -3
  558. mindspore/train/metrics/confusion_matrix.py +9 -9
  559. mindspore/train/metrics/error.py +3 -3
  560. mindspore/train/metrics/hausdorff_distance.py +4 -4
  561. mindspore/train/metrics/mean_surface_distance.py +3 -3
  562. mindspore/train/metrics/metric.py +0 -12
  563. mindspore/train/metrics/occlusion_sensitivity.py +4 -2
  564. mindspore/train/metrics/precision.py +11 -10
  565. mindspore/train/metrics/recall.py +9 -9
  566. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  567. mindspore/train/mind_ir_pb2.py +174 -46
  568. mindspore/train/model.py +269 -136
  569. mindspore/train/serialization.py +622 -978
  570. mindspore/train/summary/_summary_adapter.py +2 -2
  571. mindspore/train/summary/summary_record.py +2 -3
  572. mindspore/train/train_thor/model_thor.py +1 -1
  573. mindspore/turbojpeg.dll +0 -0
  574. mindspore/utils/__init__.py +6 -3
  575. mindspore/utils/dryrun.py +140 -0
  576. mindspore/utils/hooks.py +81 -0
  577. mindspore/utils/runtime_execution_order_check.py +552 -0
  578. mindspore/utils/utils.py +138 -4
  579. mindspore/vcmeta.dll +0 -0
  580. mindspore/vcruntime140.dll +0 -0
  581. mindspore/vcruntime140_1.dll +0 -0
  582. mindspore/version.py +1 -1
  583. {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/METADATA +3 -3
  584. {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/RECORD +587 -418
  585. {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/entry_points.txt +1 -1
  586. mindspore/_install_custom.py +0 -43
  587. mindspore/common/_register_for_adapter.py +0 -74
  588. mindspore/common/_tensor_overload.py +0 -139
  589. mindspore/mindspore_np_dtype.dll +0 -0
  590. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
  591. mindspore/ops/auto_generate/gen_arg_handler.py +0 -197
  592. mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
  593. mindspore/ops_generate/gen_aclnn_implement.py +0 -263
  594. mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
  595. mindspore/ops_generate/gen_pyboost_func.py +0 -1052
  596. mindspore/ops_generate/gen_utils.py +0 -209
  597. mindspore/ops_generate/op_proto.py +0 -145
  598. mindspore/ops_generate/template.py +0 -261
  599. mindspore/profiler/envprofiling.py +0 -254
  600. mindspore/profiler/profiling.py +0 -1926
  601. {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/WHEEL +0 -0
  602. {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/top_level.txt +0 -0
@@ -179,7 +179,7 @@ class EmbeddingExt(Cell):
  `[-num_embeddings, num_embeddings)` if it's not ``None``. Default ``None``.
  max_norm (float, optional): If the value is not None, firstly get the p-norm result of the embedding
  vector specified by `input` where p is specified by `norm_type`; if the result is larger then `max_norm`,
- update the embedding vector` with :math:`\frac{max\_norm}{result+1e^{-7}}`. Default ``None``.
+ update the embedding vector with :math:`\frac{max\_norm}{result+1e^{-7}}`. Default ``None``.
  norm_type (float, optional): Indicated the value of p in p-norm. Default ``2.0``.
  scale_grad_by_freq (bool, optional): If ``True`` the gradients will be scaled by the inverse of frequency
  of the index in `input`. Default ``False``.
@@ -193,8 +193,8 @@ class EmbeddingExt(Cell):
  not None. Default: ``None``.

  Variables:
- weight (Parameter): The learnable weights of this module of shape (num_embeddings, embedding_dim), which
- initialized from :math:`{N}(\text{sigma=1.0}, \text{mean=0.0})` or `_weight` .
+ - **weight** (Parameter) - The learnable weights of this module of shape (num_embeddings, embedding_dim), which
+ initialized from :math:`{N}(\text{sigma=1.0}, \text{mean=0.0})` or `_weight` .

  Inputs:
  - **input** (Tensor) - The indices used to lookup in the embedding vector. The data type must be
@@ -220,18 +220,19 @@ class EmbeddingExt(Cell):
  >>> import mindspore
  >>> import numpy as np
  >>> from mindspore import Tensor, nn
+ >>> mindspore.set_seed(0)
  >>> input = Tensor([[1, 0, 1, 1], [0, 0, 1, 0]])
  >>> embedding = nn.EmbeddingExt(num_embeddings=10, embedding_dim=3)
  >>> output = embedding(input)
  >>> print(output)
- [[[-0.0024154 -0.01203444 0.00811537]
- [ 0.00233847 -0.00596091 0.00536799]
- [-0.0024154 -0.01203444 0.00811537]
- [-0.0024154 -0.01203444 0.00811537]]
- [[ 0.00233847 -0.00596091 0.00536799]
- [ 0.00233847 -0.00596091 0.00536799]
- [-0.0024154 -0.01203444 0.00811537]
- [ 0.00233847 -0.00596091 0.00536799]]]
+ [[[ 0.6712398 0.5407775 1.0317237]
+ [-0.49091062 -0.42302188 -1.4807187]
+ [ 0.6712398 0.5407775 1.0317237]
+ [ 0.0024154 0.5407775 1.0317237]]
+ [[-0.49091062 -0.42302188 -1.4807187]
+ [-0.49091062 -0.42302188 -1.4807187]
+ [ 0.6712398 0.5407775 1.0317237]
+ [-0.49091062 -0.42302188 -1.4807187]]]
  """

  def __init__(self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0,
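Two notes on the EmbeddingExt hunks above. The added `mindspore.set_seed(0)` line simply pins the random weight initialization so the printed docstring output is reproducible. And the corrected `max_norm` wording describes a renormalization rule: any looked-up row whose p-norm exceeds `max_norm` is rescaled by max_norm/(result+1e-7). A minimal NumPy sketch of that rule as the docstring states it (illustrative only, not MindSpore source; `renorm_row` is a hypothetical helper name):

    import numpy as np

    def renorm_row(vec, max_norm, norm_type=2.0):
        # p-norm of the embedding row selected by `input`
        result = np.linalg.norm(vec, ord=norm_type)
        if result > max_norm:
            # scale by max_norm / (result + 1e-7), per the docstring formula
            return vec * (max_norm / (result + 1e-7))
        return vec

    row = np.array([3.0, 4.0])            # L2 norm = 5.0
    print(renorm_row(row, max_norm=1.0))  # ~[0.6, 0.8]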
@@ -36,7 +36,6 @@ from mindspore.common import dtype as mstype
  from mindspore.parallel._utils import _is_in_auto_parallel_mode
  from mindspore.nn.cell import Cell
  from mindspore import log as logger
- from mindspore.ops import group_norm

  __all__ = ['BatchNorm1d', 'BatchNorm2d', 'BatchNorm3d', 'LayerNorm', 'LayerNormExt', 'GroupNorm',
  'SyncBatchNorm', 'InstanceNorm1d', 'InstanceNorm2d', 'InstanceNorm3d']
@@ -287,37 +286,40 @@ class BatchNorm2d(_BatchNorm):
  Note that the formula for updating the :math:`moving\_mean` and :math:`moving\_var` is

  .. math::
- \text{moving_mean}=\text{moving_mean*momentum}+μ_β\text{*(1momentum)}\\
- \text{moving_var}=\text{moving_var*momentum}+σ^2_β\text{*(1momentum)}
+ \text{moving_mean}=\text{moving_mean*momentum}+μ_β\text{*(1-momentum)}\\
+ \text{moving_var}=\text{moving_var*momentum}+σ^2_β\text{*(1-momentum)}

  where :math:`moving\_mean` is the updated mean, :math:`moving\_var` is the updated variance,
- :math:`μ_β, σ^2_β` are the observed value (mean and variance) of each batch of data.
+ :math:`μ_β, σ^2_β` are the observed value (mean and variance respectively) of each batch of data.

  Args:
  num_features (int): The number of channels of the input tensor. Expected input size is :math:`(N, C, H, W)`,
  `C` represents the number of channels.
- eps (float): :math:`\epsilon` added to the denominator for numerical stability. Default: ``1e-5`` .
- momentum (float): A floating hyperparameter of the momentum for the
+ eps (float, optional): :math:`\epsilon` added to the denominator for numerical stability. Default: ``1e-5`` .
+ momentum (float, optional): A floating hyperparameter of the momentum for the
  running_mean and running_var computation. Default: ``0.9`` .
- affine (bool): A bool value. When set to ``True`` , :math:`\gamma` and :math:`\beta` can be learned.
+ affine (bool, optional): A bool value. When set to ``True`` , :math:`\gamma` and :math:`\beta` can be learned.
  Default: ``True`` .
- gamma_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the :math:`\gamma` weight.
+ gamma_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initializer for the
+ :math:`\gamma` weight.
  The values of str refer to the function `mindspore.common.initializer
  <https://www.mindspore.cn/docs/en/master/api_python/mindspore.common.initializer.html>`_
  including ``'zeros'`` , ``'ones'`` , etc. Default: ``'ones'`` .
- beta_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the :math:`\beta` weight.
+ beta_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initializer for the
+ :math:`\beta` weight.
  The values of str refer to the function `mindspore.common.initializer
  <https://www.mindspore.cn/docs/en/master/api_python/mindspore.common.initializer.html>`_
  including ``'zeros'`` , ``'ones'`` , etc. Default: ``'zeros'`` .
- moving_mean_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the moving mean.
+ moving_mean_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initializer for the moving mean.
  The values of str refer to the function `mindspore.common.initializer
  <https://www.mindspore.cn/docs/en/master/api_python/mindspore.common.initializer.html>`_
  including ``'zeros'`` , ``'ones'`` , etc. Default: ``'zeros'`` .
- moving_var_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the moving variance.
+ moving_var_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initializer for
+ the moving variance.
  The values of str refer to the function `mindspore.common.initializer
  <https://www.mindspore.cn/docs/en/master/api_python/mindspore.common.initializer.html>`_
  including ``'zeros'`` , ``'ones'`` , etc. Default: ``'ones'`` .
- use_batch_statistics (bool): Default: ``None`` .
+ use_batch_statistics (bool, optional): Default: ``None`` .

  - If ``true`` , use the mean value and variance value of current batch data and track running mean
  and running variance.
@@ -326,9 +328,9 @@ class BatchNorm2d(_BatchNorm):
  training and evaluation mode. During training, the parameter is set to true, and during evaluation, the
  parameter is set to false.

- data_format (str): The optional value for data format, is ``'NHWC'`` or ``'NCHW'`` .
+ data_format (str, optional): The optional value for data format, is ``'NHWC'`` or ``'NCHW'`` .
  Default: ``'NCHW'`` .
- dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
+ dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``mstype.float32`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C, H, W)`. Supported types: float16, float32.
@@ -341,7 +343,7 @@ class BatchNorm2d(_BatchNorm):
  TypeError: If `eps` is not a float.
  ValueError: If `num_features` is less than 1.
  ValueError: If `momentum` is not in range [0, 1].
- ValueError: If `data_format` is neither 'NHWC' not 'NCHW'.
+ ValueError: If `data_format` is neither ``'NHWC'`` not ``'NCHW'``.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
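The corrected `.. math::` lines in the BatchNorm2d hunk restore the missing minus sign in the running-statistics update. A NumPy sketch of one step of that update (illustrative only; the real layer reduces over the N, H, W axes per channel, which is omitted here):

    import numpy as np

    def update_running_stats(moving_mean, moving_var, batch, momentum=0.9):
        # moving_mean = moving_mean*momentum + mu_B*(1 - momentum)
        # moving_var  = moving_var*momentum  + sigma2_B*(1 - momentum)
        mu = batch.mean(axis=0)
        var = batch.var(axis=0)
        return (moving_mean * momentum + mu * (1 - momentum),
                moving_var * momentum + var * (1 - momentum))

    m, v = np.zeros(3), np.ones(3)
    batch = np.random.randn(8, 3)
    m, v = update_running_stats(m, v, batch)  # one training step's update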
@@ -509,32 +511,34 @@ class SyncBatchNorm(_BatchNorm):

  Args:
  num_features (int): `C` from an expected input of size :math:`(N, C, H, W)`.
- eps (float): :math:`\epsilon`, a value added to the denominator for numerical stability. Default: ``1e-5`` .
- momentum (float): A floating hyperparameter of the momentum for the
+ eps (float, optional): :math:`\epsilon`, a value added to the denominator for numerical stability.
+ Default: ``1e-5`` .
+ momentum (float, optional): A floating hyperparameter of the momentum for the
  running_mean and running_var computation. Default: ``0.9`` .
- affine (bool): A bool value. When set to ``True`` , :math:`\gamma` and :math:`\beta` can be learned.
+ affine (bool, optional): A bool value. When set to ``True`` , :math:`\gamma` and :math:`\beta` are learnable
+ parameters. When set to ``False`` , :math:`\gamma` and :math:`\beta` are unlearnable parameters.
  Default: ``True`` .
- gamma_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the :math:`\gamma` weight.
- The values of str refer to the function `initializer` including ``'zeros'`` , ``'ones'`` ,
+ gamma_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initializer for the :math:`\gamma`
+ weight. The values of str refer to the function `initializer` including ``'zeros'`` , ``'ones'`` ,
  ``'xavier_uniform'`` , ``'he_uniform'`` , etc. Default: ``'ones'`` .
- beta_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the :math:`\beta` weight.
+ beta_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initializer for the :math:`\beta` weight.
  The values of str refer to the function `initializer` including ``'zeros'`` , ``'ones'`` ,
  ``'xavier_uniform'`` , ``'he_uniform'`` , etc. Default: ``'zeros'`` .
- moving_mean_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the moving mean.
+ moving_mean_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initializer for the moving mean.
  The values of str refer to the function `initializer` including ``'zeros'`` , ``'ones'`` ,
  ``'xavier_uniform'`` , ``'he_uniform'`` , etc. Default: ``'zeros'`` .
- moving_var_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the moving variance.
- The values of str refer to the function `initializer` including ``'zeros'`` , ``'ones'`` ,
+ moving_var_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initializer for the moving
+ variance. The values of str refer to the function `initializer` including ``'zeros'`` , ``'ones'`` ,
  ``'xavier_uniform'`` , ``'he_uniform'`` , etc. Default: ``'ones'`` .
- use_batch_statistics (bool): If ``true`` , use the mean value and variance value of current batch data. If
- ``false`` , use the mean value and variance value of specified value. If ``None`` , training process will
- use the mean and variance of current batch data and track the running mean and variance, eval process will
- use the running mean and variance. Default: ``None`` .
- process_groups (list): A list to divide devices into different sync groups, containing N subtraction lists.
- Each subtraction list contains int numbers identifying rank ids which need to be synchronized in the same
- group. All int values must be in [0, rank_size) and different from each other. Default: ``None`` ,
+ use_batch_statistics (bool, optional): If ``true`` , use the mean value and variance value of current batch
+ data. If ``false`` , use the mean value and variance value of specified value. If ``None`` , training
+ process will use the mean and variance of current batch data and track the running mean and variance, eval
+ process will use the running mean and variance. Default: ``None`` .
+ process_groups (list, optional): A list to divide devices into different sync groups, containing N subtraction
+ lists. Each subtraction list contains int numbers identifying rank ids which need to be synchronized in the
+ same group. All int values must be in [0, rank_size) and different from each other. Default: ``None`` ,
  indicating synchronization across all devices.
- dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
+ dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``mstype.float32`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
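A hedged usage sketch of the `process_groups` argument documented above; it only runs under a multi-device launch (e.g. msrun/mpirun) after communication init, and the 4-rank grouping is an assumption for illustration:

    from mindspore import nn
    from mindspore.communication import init

    init()  # join the collective-communication domain started by the launcher
    # Assuming 4 devices: ranks 0 and 1 synchronize BN statistics with each
    # other, ranks 2 and 3 likewise; None would mean all devices sync together.
    sync_bn = nn.SyncBatchNorm(num_features=3, process_groups=[[0, 1], [2, 3]])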
@@ -559,14 +563,14 @@ class SyncBatchNorm(_BatchNorm):

  For the Ascend devices, users need to prepare the rank table, set rank_id and device_id.
  Please see the `Ascend tutorial
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/rank_table.html>`_
+ <https://www.mindspore.cn/tutorials/en/master/parallel/rank_table.html>`_
  for more details.

  For the GPU devices, users need to prepare the host file and mpi, please see the `mpirun Startup
- <https://www.mindspore.cn/docs/en/master/model_train/parallel/mpirun.html>`_ .
+ <https://www.mindspore.cn/tutorials/en/master/parallel/mpirun.html>`_ .

  For the CPU device, users need to write a dynamic cluster startup script, please see the `Dynamic Cluster
- Startup <https://www.mindspore.cn/docs/en/master/model_train/parallel/dynamic_cluster.html>`_ .
+ Startup <https://www.mindspore.cn/tutorials/en/master/parallel/dynamic_cluster.html>`_ .

  This example should be run with multiple devices.

@@ -795,13 +799,15 @@ class LayerNormExt(Cell):
  This is an experimental API that is subject to change or deletion.

  Args:
- normalized_shape (Union(tuple[int], list[int], int)): The normalized shape of `x` for LayerNorm
- eps (float): A value added to the denominator for numerical stability(:math:`\epsilon`). Default: ``1e-5`` .
- elementwise_affine (bool): Whether affine transformation is required. When this parameter is set to ``True``,
+ normalized_shape (Union(tuple[int], list[int], int)): The normalized shape of `x` for LayerNorm.
+ eps (float, optional): A value added to the denominator for numerical stability( :math:`\epsilon` ).
+ Default: ``1e-5`` .
+ elementwise_affine (bool, optional): Whether affine transformation is required.
+ When this parameter is set to ``True``,
  the weight parameter is initialized to 1 and the offset is initialized to 0. Default: ``True``.
- bias (bool): If set to ``False``, the layer will not learn an additive bias (only relevant if
+ bias (bool, optional): If set to ``False``, the layer will not learn an additive bias (only relevant if
  `elementwise_affine` is ``True``). Default: ``True``.
- dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``None`` .
+ dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``None`` .

  Inputs:
  - **x** (Tensor) - The shape is :math:`(N, *)`, where :math:`*` is equal to normalized_shape.
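For reference, the semantics the LayerNormExt docstring describes, as a NumPy sketch (illustrative, not MindSpore source): normalize over the trailing `normalized_shape` dimensions, with `eps` in the denominator and, under `elementwise_affine=True`, a weight initialized to ones and an offset initialized to zeros:

    import numpy as np

    def layer_norm(x, normalized_shape, weight, bias, eps=1e-5):
        # Normalize over the trailing `normalized_shape` dimensions.
        axes = tuple(range(x.ndim - len(normalized_shape), x.ndim))
        mu = x.mean(axis=axes, keepdims=True)
        var = x.var(axis=axes, keepdims=True)
        return (x - mu) / np.sqrt(var + eps) * weight + bias

    x = np.random.randn(2, 4).astype(np.float32)
    # elementwise_affine=True: weight starts at ones, offset at zeros
    print(layer_norm(x, (4,), np.ones(4), np.zeros(4)))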
@@ -986,8 +992,8 @@ class InstanceNorm1d(_InstanceNorm):
  ValueError: If `num_features` is less than 1.
  ValueError: If `momentum` is not in range [0, 1].
  ValueError: If the shape of `gamma_init` / `beta_init` is not :math:`(C)`.
- KeyError: If any of `gamma_init`/`beta_init` is str and the homonymous class inheriting from `Initializer` not
- exists.
+ KeyError: If any of `gamma_init`/`beta_init` is str and
+ there is no homonymous class inheriting from `Initializer`.

  Supported Platforms:
  ``GPU``
@@ -1064,8 +1070,8 @@ class InstanceNorm2d(_InstanceNorm):
  ValueError: If `num_features` is less than 1.
  ValueError: If `momentum` is not in range [0, 1].
  ValueError: If the shape of `gamma_init` / `beta_init` is not :math:`(C)`.
- KeyError: If any of `gamma_init`/`beta_init` is str and the homonymous class inheriting from `Initializer` not
- exists.
+ KeyError: If any of `gamma_init`/`beta_init` is str and
+ there is no homonymous class inheriting from `Initializer`.

  Supported Platforms:
  ``GPU``
@@ -1170,10 +1176,11 @@ class GroupNorm(Cell):

  Group Normalization is widely used in recurrent neural networks. It applies
  normalization on a mini-batch of inputs for each single training case as described
- in the paper `Group Normalization <https://arxiv.org/pdf/1803.08494.pdf>`_. Group Normalization
- divides the channels into groups and computes within each group the mean and variance for normalization,
- and it performs very stable over a wide range of batch size. :math:`\gamma` and :math:`\beta` are trainable scale
- and shift.
+ in the paper `Group Normalization <https://arxiv.org/pdf/1803.08494.pdf>`_.
+ Group Normalization
+ divides the channels into groups and computes within each group the mean and variance for normalization.
+ :math:`\gamma` and :math:`\beta` are scale
+ and shift values obtained by training learning.

  It can be described using the following formula:
  .. math::
@@ -1248,7 +1255,7 @@ class GroupNorm(Cell):

  def _cal_output(self, x):
  """calculate groupnorm output"""
- return group_norm(x, self.num_groups, self.gamma.to(x.dtype), self.beta.to(x.dtype), self.eps)
+ return ops.group_norm(x, self.num_groups, self.gamma.to(x.dtype), self.beta.to(x.dtype), self.eps)

  @staticmethod
  @_primexpr
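The `+` line above only reroutes the call through `ops.group_norm` after the module-level import was dropped; the computation is unchanged. As a reference for what group normalization computes per the formula section, a NumPy sketch with statistics taken per (sample, group), illustrative only:

    import numpy as np

    def group_norm(x, num_groups, gamma, beta, eps=1e-5):
        # x: (N, C, H, W); mean/variance are computed per (sample, group).
        n, c, h, w = x.shape
        g = x.reshape(n, num_groups, c // num_groups, h, w)
        mu = g.mean(axis=(2, 3, 4), keepdims=True)
        var = g.var(axis=(2, 3, 4), keepdims=True)
        g = (g - mu) / np.sqrt(var + eps)
        # gamma/beta are the per-channel scale and shift learned in training
        return (g.reshape(n, c, h, w) * gamma.reshape(1, c, 1, 1)
                + beta.reshape(1, c, 1, 1))

    x = np.random.randn(1, 4, 2, 2).astype(np.float32)
    out = group_norm(x, num_groups=2, gamma=np.ones(4), beta=np.zeros(4))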
@@ -442,7 +442,7 @@ class _ReflectionPadNd(Cell):

  class ReflectionPad1d(_ReflectionPadNd):
  r"""
- Using a given padding to do reflection pad on the given tensor.
+ Using a given padding to do reflection pad on the given tensor. 1d means the dimension of padding is 1-dimension.

  Args:
  padding (union[int, tuple]): The padding size to pad the last dimension of input tensor.
@@ -490,7 +490,7 @@ class ReflectionPad1d(_ReflectionPadNd):

  class ReflectionPad2d(_ReflectionPadNd):
  r"""
- Using a given padding to do reflection pad the given tensor.
+ Using a given padding to do reflection pad the given tensor. 2d means the dimension of padding is 2-dimension.

  Args:
  padding (union[int, tuple]): The padding size to pad the input tensor.
@@ -542,7 +542,8 @@ class ReflectionPad2d(_ReflectionPadNd):

  class ReflectionPad3d(_ReflectionPadNd):
  r"""
- Pad the given tensor in a reflecting way using the input boundaries as the axis of symmetry.
+ Pad the given tensor in a reflecting way using the input boundaries as the axis of symmetry. 3d means the dimension
+ of padding is 3-dimension.

  Note:
  ReflectionPad3d has not supported 5D tensor yet.
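The clarified ReflectionPad docstrings all hinge on one idea: the boundary element acts as the axis of symmetry and is not repeated. NumPy's "reflect" padding mode follows the same convention, so a one-liner illustrates it:

    import numpy as np

    # ReflectionPad1d-style semantics with padding=(2, 1): boundary values
    # are mirrored about, not duplicated.
    x = np.array([1, 2, 3, 4])
    print(np.pad(x, (2, 1), mode="reflect"))  # [3 2 1 2 3 4 3]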
@@ -18,23 +18,21 @@ from __future__ import absolute_import
  from mindspore.ops import operations as P
  from mindspore.ops import functional as F
  import mindspore.ops as ops
- from mindspore.ops.function.nn_func import avg_pool2d_ext
  from mindspore._checkparam import _check_3d_int_or_tuple
  from mindspore import _checkparam as validator
  from mindspore.ops.primitive import constexpr, _primexpr
  from mindspore.common.tensor import Tensor
  import mindspore.context as context
  from mindspore.common import dtype as mstype
- from mindspore.ops.operations.nn_ops import AdaptiveMaxPool2D
- from mindspore.ops.operations.nn_ops import AdaptiveMaxPool3D, AdaptiveAvgPool3D
- from mindspore.ops.auto_generate.gen_ops_prim import MaxPoolWithIndices, MaxPoolWithMask
  from mindspore.nn.cell import Cell
  from mindspore._c_expression import MSContext
+ from mindspore.ops.auto_generate import avg_pool1d_ext
+

  __all__ = ['AvgPool3d', 'MaxPool3d', 'AvgPool2d', 'MaxPool2d', 'AvgPool1d', 'MaxPool1d', 'FractionalMaxPool2d',
  'FractionalMaxPool3d', 'AdaptiveAvgPool1d', 'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d',
  'AdaptiveAvgPool2d', 'AdaptiveAvgPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d', 'LPPool1d',
- 'LPPool2d', 'AvgPool2dExt', 'MaxPool2dExt']
+ 'LPPool2d', 'AvgPool2dExt', 'AvgPool3dExt', 'MaxPool2dExt', 'AvgPool1dExt']


  class _PoolNd(Cell):
@@ -301,11 +299,12 @@ class MaxPool3d(_PoolNd):
  For Atlas training series products, this interface is not supported.

  Args:
- kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
+ kernel_size (Union[int, tuple[int]], optional): The size of kernel used to take the maximum value,
  is an int number or a single element tuple that represents depth, height and width of the kernel, or a tuple
  of three int numbers that represent depth, height and width respectively.
  The value must be a positive integer. Default: ``1`` .
- stride (Union[int, tuple[int]]): The moving stride of pooling operation, an int number or a single element tuple
+ stride (Union[int, tuple[int]], optional): The moving stride of pooling operation,
+ an int number or a single element tuple
  that represents the moving stride of pooling kernel in the directions of depth, height and the width,
  or a tuple of three int numbers that represent depth, height and width of movement respectively.
  The value must be a positive integer. If the value is None, the default value `kernel_size` is used.
@@ -326,18 +325,19 @@ class MaxPool3d(_PoolNd):
  in the depth, height and width dimension is determined by the `padding` parameter.
  If this mode is set, `padding` must be greater than or equal to 0.

- padding (Union(int, tuple[int], list[int])): Pooling padding value. Default: ``0`` .
+ padding (Union(int, tuple[int], list[int]), optional): Pooling padding value. Default: ``0`` .
  `padding` can only be an integer or a tuple/list containing one or three integers.
  If `padding` is an integer or a tuple/list containing one integer, it will be padded in six directions of
  front, back, top, bottom, left and right of the input. If `padding` is a tuple/list containing three
  integers, it will be padded in front and back of the input `padding[0]` times, up and down `padding[1]`
  times, and left and right of the input `padding[2]` times.
- dilation (Union(int, tuple[int])): The spacing between the elements of the kernel in convolution,
+ dilation (Union(int, tuple[int]), optional): The spacing between the elements of the kernel in convolution,
  used to increase the receptive field of the pooling operation. If it is a tuple, it must contain one or
  three integers. Default: ``1`` .
- return_indices (bool): If ``True`` , output is a Tuple of 2 Tensors, representing the maxpool result and where
+ return_indices (bool, optional): If ``True`` , output is a Tuple of 2 Tensors,
+ representing the maxpool result and where
  the max values are generated. Otherwise, only the maxpool result is returned. Default: ``False`` .
- ceil_mode (bool): If ``True``, use ceil to calculate output shape.
+ ceil_mode (bool, optional): If ``True``, use ceil to calculate output shape.
  If ``False``, use floor to calculate output shape. Default: ``False`` .

  Inputs:
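Putting these arguments together, a hedged sketch of MaxPool3d in ``"pad"`` mode with illustrative values; each output dimension follows floor((D + 2*padding - dilation*(kernel_size - 1) - 1) / stride) + 1:

    import mindspore as ms
    import mindspore.nn as nn

    x = ms.ops.randn(1, 2, 6, 6, 6)                 # (N, C, D, H, W)
    pool = nn.MaxPool3d(kernel_size=3, stride=1, pad_mode="pad", padding=1,
                        dilation=1, return_indices=True)
    out, indices = pool(x)
    print(out.shape, indices.shape)                 # both (1, 2, 6, 6, 6): floor((6 + 2 - 2 - 1) / 1) + 1 = 6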
@@ -689,9 +689,11 @@ class MaxPool2dExt(Cell):
  self.return_indices = return_indices
  strides = stride if (stride is not None) else kernel_size
  if return_indices:
- self.max_pool_func_ = MaxPoolWithIndices(kernel_size, strides, padding, dilation, ceil_mode)
+ self.max_pool_func_ = ops.auto_generate.gen_ops_prim.MaxPoolWithIndices(kernel_size, strides, padding,
+ dilation, ceil_mode)
  else:
- self.max_pool_func_ = MaxPoolWithMask(kernel_size, strides, padding, dilation, ceil_mode)
+ self.max_pool_func_ = ops.auto_generate.gen_ops_prim.MaxPoolWithMask(kernel_size, strides, padding,
+ dilation, ceil_mode)

  def construct(self, input):
  out, indices = self.max_pool_func_(input)
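The change above only swaps the bare primitive names for their fully qualified ops.auto_generate.gen_ops_prim paths, matching the import cleanup; behaviour is unchanged. A hedged usage sketch with illustrative shapes:

    import mindspore as ms
    import mindspore.nn as nn

    x = ms.ops.randn(1, 3, 8, 8)
    pool = nn.MaxPool2dExt(kernel_size=2, stride=2, return_indices=True)
    out, argmax = pool(x)                           # (values, indices) tuple when return_indices=True
    print(out.shape, argmax.shape)                  # (1, 3, 4, 4) each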
@@ -713,9 +715,9 @@ class MaxPool1d(_PoolNd):
  \text{input}(N_i, C_j, s_0 \times l + n)

  Args:
- kernel_size (int): The size of kernel used to take the max value, Default: ``1`` .
- stride (int): The distance of kernel moving, an int number that represents
- the width of movement is stride, Default: ``1`` .
+ kernel_size (int, optional): The size of kernel used to take the max value. Default: ``1`` .
+ stride (int, optional): The distance of kernel moving, an int number that represents
+ the width of movement is stride. Default: ``1`` .
  pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
  ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"valid"`` .

@@ -731,24 +733,25 @@ class MaxPool1d(_PoolNd):
  at the beginning and end is determined by the `padding` parameter.
  If this mode is set, `padding` must be greater than or equal to 0.

- padding (Union(int, tuple[int], list[int])): Padding value for the pooling. Default value is ``0``.
+ padding (Union(int, tuple[int], list[int]), optional): Padding value for the pooling. Default value is ``0``.
  padding can only be an integer or a tuple/list containing a single integer, in which case the input is
  padded by `padding` or `padding[0]` on both sides.
- dilation (Union(int, tuple[int])): The spacing between the elements of the kernel in convolution,
+ dilation (Union(int, tuple[int]), optional): The spacing between the elements of the kernel in convolution,
  used to increase the receptive field of the pooling operation. If it is a tuple, its length can only be 1.
  Default: ``1`` .
- return_indices (bool): If ``True`` , the function will return both the result of max pooling and the indices of
+ return_indices (bool, optional): If ``True`` , the function will return
+ both the result of max pooling and the indices of
  the max elements. Default: ``False`` .
- ceil_mode (bool): If True, use ceil to compute the output shape instead of floor. Default: ``False`` .
+ ceil_mode (bool, optional): If ``True``, use ceil to compute the output shape instead of floor. Default: ``False`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`.

  Outputs:
- If `return_indices` is False, output is a Tensor, with shape :math:`(N, C_{out}, L_{out})` or
+ If `return_indices` is ``False``, output is a Tensor, with shape :math:`(N, C_{out}, L_{out})` or
  :math:`(C_{out}, L_{out})`. It has the same data type as `x`.

- If `return_indices` is True, output is a Tuple of 2 Tensors, representing the maxpool result and where
+ If `return_indices` is ``True``, output is a Tuple of 2 Tensors, representing the maxpool result and where
  the max values are generated.

  - **output** (Tensor) - Maxpooling result, with shape :math:`(N, C_{out}, L_{out})` or
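A hedged sketch of both output forms; pairing return_indices with pad_mode="pad" here is an assumption for safety, not a documented requirement:

    import mindspore as ms
    import mindspore.nn as nn

    x = ms.ops.randn(2, 4, 10)                              # (N, C, L)
    print(nn.MaxPool1d(kernel_size=2, stride=2)(x).shape)   # (2, 4, 5)

    pool = nn.MaxPool1d(kernel_size=2, stride=2, pad_mode="pad", return_indices=True)
    out, idx = pool(x)                                      # Tuple of 2 Tensors
    print(out.shape, idx.shape)                             # (2, 4, 5) each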
@@ -1021,6 +1024,81 @@ class AvgPool3d(_PoolNd):
  return out


+ class AvgPool3dExt(Cell):
+ r"""
+ Applies a 3D average pooling over an input Tensor which can be regarded as
+ a composition of 3D input planes.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ For details, please refer to :func:`mindspore.mint.nn.functional.avg_pool3d`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore as ms
+ >>> pool = ms.nn.AvgPool3dExt(kernel_size=3, stride=1)
+ >>> x = ms.ops.randn(1, 2, 4, 4, 5).astype(ms.float32)
+ >>> output = pool(x)
+ >>> print(output.shape)
+ (1, 2, 2, 2, 3)
+ >>> x1 = ms.ops.randn(6, 5, 7, 7, 5).astype(ms.float32)
+ >>> pool2 = ms.nn.AvgPool3dExt(4, stride=2, padding=(2, 2, 1), divisor_override=10)
+ >>> output2 = pool2(x1)
+ >>> print(output2.shape)
+ (6, 5, 4, 4, 2)
+ """
+ def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
+ count_include_pad=True, divisor_override=None):
+ super(AvgPool3dExt, self).__init__()
+ self.kernel_size = kernel_size
+ self.stride = stride
+ self.padding = padding
+ self.ceil_mode = ceil_mode
+ self.count_include_pad = count_include_pad
+ self.divisor_override = divisor_override
+
+ def construct(self, input):
+ return ops.function.nn_func.avg_pool3d_ext(input, self.kernel_size, self.stride, self.padding,
+ self.ceil_mode, self.count_include_pad, self.divisor_override)
+
+
+ class AvgPool1dExt(Cell):
+ r"""
+ Applies a 1D average pooling over an input Tensor which can be regarded as
+ a composition of 2D input planes.
+
+ For details, please refer to :func:`mindspore.mint.nn.functional.avg_pool1d`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import numpy as np
+ >>> from mindspore import Tensor, nn
+ >>> from mindspore import dtype as mstype
+ >>> input = Tensor(np.arange(1 * 3 * 4).reshape(1, 3, 4), mstype.float32)
+ >>> net = nn.AvgPool1dExt(kernel_size=2, stride=1)
+ >>> output = net(input)
+ >>> print(output.shape)
+ (1, 3, 3)
+ """
+ def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
+ count_include_pad=True):
+ super().__init__()
+ self.kernel_size = kernel_size
+ self.stride = stride
+ self.padding = padding
+ self.ceil_mode = ceil_mode
+ self.count_include_pad = count_include_pad
+
+ def construct(self, input):
+ return avg_pool1d_ext(input, self.kernel_size, self.stride, self.padding,
+ self.ceil_mode, self.count_include_pad)
+
+
  class AvgPool2dExt(Cell):
  r"""
  Applies a 2D average pooling over an input Tensor which can be regarded as
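On divisor_override in the new cells: it replaces the averaging divisor, which is otherwise the window size (or the count of non-padded elements when count_include_pad=False). A hedged arithmetic sketch, again assuming an Ascend backend:

    import numpy as np
    import mindspore as ms
    import mindspore.nn as nn

    x = ms.Tensor(np.ones((1, 1, 2, 2, 2), np.float32))
    pool = nn.AvgPool3dExt(kernel_size=2, divisor_override=10)
    print(pool(x))    # each 2x2x2 window sums 8 ones; 8 / 10 = 0.8 instead of 8 / 8 = 1.0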
@@ -1052,8 +1130,8 @@ class AvgPool2dExt(Cell):
  self.divisor_override = divisor_override

  def construct(self, input):
- return avg_pool2d_ext(input, self.kernel_size, self.stride, self.padding,
- self.ceil_mode, self.count_include_pad, self.divisor_override)
+ return ops.function.nn_func.avg_pool2d_ext(input, self.kernel_size, self.stride, self.padding,
+ self.ceil_mode, self.count_include_pad, self.divisor_override)


  class AvgPool2d(_PoolNd):
@@ -1127,7 +1205,7 @@ class AvgPool2d(_PoolNd):
  TypeError: If `kernel_size` or `strides` is neither int nor tuple.
  ValueError: If `pad_mode` is not ``"valid"`` , ``"same"`` or ``"pad"`` (case insensitive).
  ValueError: If `data_format` is neither ``'NCHW'`` nor ``'NHWC'``.
- ValueError: If `padding`, `ceil_mode`, `count_include_pad`, or `divisor_override` is used
+ ValueError: If `padding`, `ceil_mode`, `count_include_pad`, or `divisor_override` is used,
  or `pad_mode` is ``"pad"`` when `data_format` is 'NHWC'.
  ValueError: If `kernel_size` or `strides` is less than 1.
  ValueError: If length of `padding` tuple/list is not 1 or 2.
@@ -1236,8 +1314,8 @@ class AvgPool1d(_PoolNd):
  This interface currently does not support Atlas A2 training series products.

  Args:
- kernel_size (int): The size of kernel window used to take the average value, Default: ``1`` .
- stride (int): The distance of kernel moving, an int number that represents
+ kernel_size (int, optional): The size of kernel window used to take the average value. Default: ``1`` .
+ stride (int, optional): The distance of kernel moving, an int number that represents
  the width of movement is strides. Default: ``1`` .
  pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
  ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"valid"`` .
@@ -1248,17 +1326,20 @@ class AvgPool1d(_PoolNd):
  uniformly distributed around the input; if it is odd, the excess padding goes to the right side.
  If this mode is set, `padding` must be 0.
  - ``"valid"``: No padding is applied to the input, and the output returns the maximum
- possible length. Extra pixels that could not complete a full stride will
- be discarded. If this mode is set, `padding` must be 0.
+ possible length. If a full stride cannot be formed, the extra pixels will be discarded.
+ If this mode is set, `padding` must be 0.
  - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
  at the beginning and end is determined by the `padding` parameter.
  If this mode is set, `padding` must be greater than or equal to 0.

- padding (Union(int, tuple[int], list[int])): Pooling padding value, only ``"pad"`` mode can be set to non-zero.
+ padding (Union(int, tuple[int], list[int]), optional): Pooling padding value,
+ which can be non-zero only in ``"pad"`` mode.
  Default: ``0`` . padding can only be an integer or a tuple/list containing a single integer, in which case
  the input is padded by `padding` or `padding[0]` on both sides.
- ceil_mode (bool): If ``True`` , use ceil to compute the output shape instead of floor. Default: ``False`` .
- count_include_pad (bool): If ``True`` , averaging calculation will include the zero-padding. Default: ``True`` .
+ ceil_mode (bool, optional): If ``True`` , use ceil to compute the output shape instead of floor.
+ Default: ``False`` .
+ count_include_pad (bool, optional): If ``True`` , the averaging calculation will include the zero-padding.
+ Default: ``True`` .

  Inputs:
  - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`.
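The three pad_mode settings lead to different output lengths. A sketch of the arithmetic with illustrative values (ceil_mode=False):

    import math

    L_in, kernel_size, stride, padding = 10, 3, 2, 1
    same_len = math.ceil(L_in / stride)                           # "same": input padded as needed
    valid_len = (L_in - kernel_size) // stride + 1                # "valid": leftover pixels discarded
    pad_len = (L_in + 2 * padding - kernel_size) // stride + 1    # "pad": explicit padding
    print(same_len, valid_len, pad_len)                           # 5 4 5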
@@ -1592,7 +1673,7 @@ class AdaptiveAvgPool3d(Cell):
  def __init__(self, output_size):
  """Initialize AdaptiveAvgPool3d."""
  super(AdaptiveAvgPool3d, self).__init__()
- self.adaptive_avg_pool3d = AdaptiveAvgPool3D(output_size)
+ self.adaptive_avg_pool3d = ops.AdaptiveAvgPool3D(output_size)

  def construct(self, input):
  return self.adaptive_avg_pool3d(input)
@@ -1694,13 +1775,14 @@ class AdaptiveMaxPool2d(Cell):
  \end{align}

  Note:
- Ascend platform only supports float16 type for input.
+ In KBK mode, `output_size` does not support a mutable value.

  Args:
  output_size (Union[int, tuple]): The target output size. `output_size` can be a tuple :math:`(H, W)`,
  or an int H for :math:`(H, H)`. :math:`H` and :math:`W` can be int or None.
  If it is None, it means the output size is the same as the input size.
- return_indices (bool): If `return_indices` is ``True`` , the indices of max value would be output.
+ return_indices (bool, optional): Whether to output the indices of the maximum values.
+ If `return_indices` is ``True`` , the indices of the max values will be output.
  Default: ``False`` .

  Inputs:
@@ -1763,15 +1845,11 @@ class AdaptiveMaxPool2d(Cell):
  def __init__(self, output_size, return_indices=False):
  """Initialize AdaptiveMaxPool2d."""
  super(AdaptiveMaxPool2d, self).__init__()
- validator.check_value_type('return_indices', return_indices, [bool], self.cls_name)
- self.adaptive_max_pool2d = AdaptiveMaxPool2D(output_size)
+ self.output_size = output_size
  self.return_indices = return_indices

  def construct(self, input):
- output = self.adaptive_max_pool2d(input)
- if self.return_indices:
- return output
- return output[0]
+ return ops.adaptive_max_pool2d(input, self.output_size, self.return_indices)


  class AdaptiveMaxPool3d(Cell):
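The rewrite above delegates to the functional ops.adaptive_max_pool2d, which now handles return_indices itself instead of the cell indexing into the primitive's tuple output. A hedged sketch with illustrative shapes:

    import mindspore as ms
    import mindspore.nn as nn

    x = ms.ops.randn(1, 3, 9, 9)
    pool = nn.AdaptiveMaxPool2d(output_size=(3, 3), return_indices=True)
    out, idx = pool(x)
    print(out.shape, idx.shape)   # (1, 3, 3, 3) each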
@@ -1823,7 +1901,7 @@ class AdaptiveMaxPool3d(Cell):
  output_size = (output_size, output_size, output_size)
  self.output_size = Tensor(output_size, dtype=mstype.int32)
  self.return_indices = return_indices
- self.adaptive_max_pool3d = AdaptiveMaxPool3D()
+ self.adaptive_max_pool3d = ops.AdaptiveMaxPool3D()

  def construct(self, input):
  output = self.adaptive_max_pool3d(input, self.output_size)
@@ -340,7 +340,7 @@ class GRUCell(RNNCellBase):
  :math:`r` is the reset gate. :math:`z` is the update gate. :math:`n` is the candidate (new) state. For instance,
  :math:`W_{ir}, b_{ir}` are the weight and bias used to transform from input :math:`x` to :math:`r`.
  Details can be found in paper
- `Learning Phrase Representations using RNN EncoderDecoder for Statistical Machine Translation
+ `Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation
  <https://aclanthology.org/D14-1179.pdf>`_.

  Args:
@@ -237,6 +237,7 @@ class _DynamicGRUCPUGPU(Cell):
  h_0.view(1, *h_0.shape),
  weights.astype(x.dtype)
  )
+
  if seq_length is not None:
  h_n = get_hidden(output, seq_length)
  mask = sequence_mask(seq_length, x.shape[0])
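Here sequence_mask turns per-sequence lengths into a boolean mask used to zero out states beyond each sequence's true length, while get_hidden picks the last valid hidden state. A sketch of the masking primitive alone, with illustrative lengths:

    import mindspore as ms

    lengths = ms.Tensor([2, 3, 1])
    mask = ms.ops.sequence_mask(lengths, maxlen=4)
    print(mask)
    # [[ True  True False False]
    #  [ True  True  True False]
    #  [ True False False False]]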
@@ -687,7 +688,7 @@ class GRU(_RNNBase):
  are learnable weights between the output and the input in the formula. For instance,
  :math:`W_{ir}, b_{ir}` are the weight and bias used to transform from input :math:`x` to :math:`r`.
  Details can be found in paper
- `Learning Phrase Representations using RNN EncoderDecoder for Statistical Machine Translation
+ `Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation
  <https://aclanthology.org/D14-1179.pdf>`_.

  Note:
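A hedged end-to-end sketch of the GRU layer whose gates the formula above describes; all shapes are illustrative assumptions:

    import mindspore as ms
    import mindspore.nn as nn

    gru = nn.GRU(input_size=8, hidden_size=16, num_layers=1, batch_first=True)
    x = ms.ops.randn(4, 5, 8)                   # (batch, seq_len, input_size)
    h0 = ms.ops.zeros((1, 4, 16), ms.float32)   # (num_layers * num_directions, batch, hidden_size)
    output, hn = gru(x, h0)
    print(output.shape, hn.shape)               # (4, 5, 16) (1, 4, 16)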