mindspore-2.5.0-cp311-cp311-win_amd64.whl → mindspore-2.6.0rc1-cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (491)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +6 -4
  5. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  8. mindspore/_check_jit_forbidden_api.py +3 -0
  9. mindspore/_checkparam.py +3 -33
  10. mindspore/_deprecated/__init__.py +17 -0
  11. mindspore/_deprecated/jit.py +198 -0
  12. mindspore/_extends/builtin_operations.py +1 -1
  13. mindspore/_extends/parse/__init__.py +6 -7
  14. mindspore/_extends/parse/compile_config.py +19 -0
  15. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
  16. mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
  17. mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
  18. mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
  19. mindspore/_extends/parse/parser.py +24 -193
  20. mindspore/_extends/parse/resources.py +1 -5
  21. mindspore/_extends/parse/standard_method.py +97 -74
  22. mindspore/_extends/pijit/__init__.py +2 -2
  23. mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
  24. mindspore/_extends/pijit/tensor_func_list.py +27 -0
  25. mindspore/_extends/utils.py +1 -1
  26. mindspore/amp.py +4 -4
  27. mindspore/atlprov.dll +0 -0
  28. mindspore/avcodec-59.dll +0 -0
  29. mindspore/avdevice-59.dll +0 -0
  30. mindspore/avfilter-8.dll +0 -0
  31. mindspore/avformat-59.dll +0 -0
  32. mindspore/avutil-57.dll +0 -0
  33. mindspore/boost/__init__.py +2 -2
  34. mindspore/boost/base.py +3 -7
  35. mindspore/boost/boost_cell_wrapper.py +2 -2
  36. mindspore/c1.dll +0 -0
  37. mindspore/c1xx.dll +0 -0
  38. mindspore/c2.dll +0 -0
  39. mindspore/common/__init__.py +4 -3
  40. mindspore/common/_grad_function.py +56 -0
  41. mindspore/common/_pijit_context.py +14 -5
  42. mindspore/common/_register_for_tensor.py +1 -1
  43. mindspore/common/_stub_tensor.py +5 -10
  44. mindspore/common/_tensor_cpp_method.py +1 -1
  45. mindspore/common/_tensor_docs.py +1915 -3287
  46. mindspore/common/api.py +341 -354
  47. mindspore/common/auto_dynamic_shape.py +41 -44
  48. mindspore/common/dtype.py +5 -2
  49. mindspore/common/dump.py +7 -5
  50. mindspore/common/file_system.py +3 -0
  51. mindspore/common/hook_handle.py +5 -3
  52. mindspore/common/initializer.py +10 -6
  53. mindspore/common/jit_begin_end.py +94 -0
  54. mindspore/common/jit_config.py +6 -1
  55. mindspore/common/jit_context.py +76 -0
  56. mindspore/common/jit_trace.py +378 -0
  57. mindspore/common/lazy_inline.py +2 -2
  58. mindspore/common/mutable.py +5 -4
  59. mindspore/common/parameter.py +106 -39
  60. mindspore/common/seed.py +2 -2
  61. mindspore/common/sparse_tensor.py +23 -17
  62. mindspore/common/tensor.py +297 -714
  63. mindspore/communication/__init__.py +7 -5
  64. mindspore/communication/_comm_helper.py +47 -2
  65. mindspore/communication/comm_func.py +70 -53
  66. mindspore/communication/management.py +83 -17
  67. mindspore/context.py +214 -560
  68. mindspore/dataset/__init__.py +44 -20
  69. mindspore/dataset/audio/__init__.py +2 -8
  70. mindspore/dataset/audio/transforms.py +3 -17
  71. mindspore/dataset/core/config.py +3 -3
  72. mindspore/dataset/engine/cache_client.py +1 -1
  73. mindspore/dataset/engine/datasets.py +102 -120
  74. mindspore/dataset/engine/datasets_audio.py +22 -22
  75. mindspore/dataset/engine/datasets_standard_format.py +43 -24
  76. mindspore/dataset/engine/datasets_text.py +78 -85
  77. mindspore/dataset/engine/datasets_user_defined.py +108 -76
  78. mindspore/dataset/engine/datasets_vision.py +111 -108
  79. mindspore/dataset/engine/iterators.py +5 -3
  80. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
  81. mindspore/dataset/engine/samplers.py +279 -57
  82. mindspore/dataset/engine/serializer_deserializer.py +2 -1
  83. mindspore/dataset/engine/validators.py +10 -0
  84. mindspore/dataset/text/__init__.py +7 -6
  85. mindspore/dataset/text/transforms.py +6 -5
  86. mindspore/dataset/text/utils.py +3 -3
  87. mindspore/dataset/transforms/__init__.py +0 -9
  88. mindspore/dataset/transforms/transforms.py +3 -3
  89. mindspore/dataset/utils/browse_dataset.py +1 -1
  90. mindspore/dataset/vision/__init__.py +2 -9
  91. mindspore/dataset/vision/transforms.py +202 -158
  92. mindspore/dataset/vision/utils.py +7 -5
  93. mindspore/device_context/ascend/op_debug.py +60 -1
  94. mindspore/device_context/ascend/op_tuning.py +0 -4
  95. mindspore/device_manager.py +39 -3
  96. mindspore/dnnl.dll +0 -0
  97. mindspore/dpcmi.dll +0 -0
  98. mindspore/experimental/es/embedding_service.py +35 -27
  99. mindspore/experimental/map_parameter.py +4 -4
  100. mindspore/experimental/optim/adadelta.py +22 -26
  101. mindspore/experimental/optim/adagrad.py +4 -4
  102. mindspore/experimental/optim/adam.py +4 -0
  103. mindspore/experimental/optim/adamax.py +4 -4
  104. mindspore/experimental/optim/adamw.py +4 -0
  105. mindspore/experimental/optim/asgd.py +1 -1
  106. mindspore/experimental/optim/lr_scheduler.py +40 -22
  107. mindspore/experimental/optim/radam.py +5 -5
  108. mindspore/experimental/optim/rprop.py +1 -1
  109. mindspore/experimental/optim/sgd.py +1 -1
  110. mindspore/hal/contiguous_tensors_handle.py +6 -10
  111. mindspore/hal/device.py +55 -81
  112. mindspore/hal/event.py +38 -55
  113. mindspore/hal/memory.py +93 -144
  114. mindspore/hal/stream.py +81 -125
  115. mindspore/include/dataset/constants.h +7 -4
  116. mindspore/include/dataset/execute.h +2 -2
  117. mindspore/jpeg62.dll +0 -0
  118. mindspore/log.py +40 -2
  119. mindspore/mindrecord/__init__.py +20 -7
  120. mindspore/mindspore_backend_common.dll +0 -0
  121. mindspore/mindspore_backend_manager.dll +0 -0
  122. mindspore/mindspore_common.dll +0 -0
  123. mindspore/mindspore_core.dll +0 -0
  124. mindspore/mindspore_dump.dll +0 -0
  125. mindspore/mindspore_frontend.dll +0 -0
  126. mindspore/mindspore_glog.dll +0 -0
  127. mindspore/mindspore_memory_pool.dll +0 -0
  128. mindspore/mindspore_ms_backend.dll +0 -0
  129. mindspore/mindspore_ops.dll +0 -0
  130. mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
  131. mindspore/mindspore_ops_kernel_common.dll +0 -0
  132. mindspore/mindspore_profiler.dll +0 -0
  133. mindspore/mindspore_pyboost.dll +0 -0
  134. mindspore/mindspore_pynative.dll +0 -0
  135. mindspore/mindspore_res_manager.dll +0 -0
  136. mindspore/mindspore_runtime_pipeline.dll +0 -0
  137. mindspore/mint/__init__.py +131 -700
  138. mindspore/mint/distributed/__init__.py +5 -1
  139. mindspore/mint/distributed/distributed.py +194 -109
  140. mindspore/mint/linalg/__init__.py +2 -0
  141. mindspore/mint/nn/__init__.py +280 -18
  142. mindspore/mint/nn/functional.py +282 -64
  143. mindspore/mint/nn/layer/__init__.py +4 -0
  144. mindspore/mint/nn/layer/_functions.py +7 -3
  145. mindspore/mint/nn/layer/activation.py +120 -13
  146. mindspore/mint/nn/layer/conv.py +218 -24
  147. mindspore/mint/nn/layer/normalization.py +15 -16
  148. mindspore/mint/nn/layer/padding.py +1 -1
  149. mindspore/mint/nn/layer/pooling.py +66 -1
  150. mindspore/mint/optim/__init__.py +2 -1
  151. mindspore/mint/optim/sgd.py +171 -0
  152. mindspore/msobj140.dll +0 -0
  153. mindspore/mspdb140.dll +0 -0
  154. mindspore/mspdbcore.dll +0 -0
  155. mindspore/mspdbst.dll +0 -0
  156. mindspore/mspft140.dll +0 -0
  157. mindspore/msvcdis140.dll +0 -0
  158. mindspore/msvcp140_1.dll +0 -0
  159. mindspore/msvcp140_2.dll +0 -0
  160. mindspore/msvcp140_atomic_wait.dll +0 -0
  161. mindspore/msvcp140_codecvt_ids.dll +0 -0
  162. mindspore/nn/__init__.py +4 -1
  163. mindspore/nn/cell.py +1250 -176
  164. mindspore/nn/layer/activation.py +23 -21
  165. mindspore/nn/layer/basic.py +22 -16
  166. mindspore/nn/layer/container.py +1 -1
  167. mindspore/nn/layer/conv.py +22 -17
  168. mindspore/nn/layer/embedding.py +9 -8
  169. mindspore/nn/layer/normalization.py +48 -42
  170. mindspore/nn/layer/pooling.py +75 -31
  171. mindspore/nn/layer/transformer.py +11 -10
  172. mindspore/nn/learning_rate_schedule.py +4 -2
  173. mindspore/nn/loss/loss.py +27 -19
  174. mindspore/nn/optim/ada_grad.py +6 -5
  175. mindspore/nn/optim/adadelta.py +9 -7
  176. mindspore/nn/optim/adafactor.py +1 -1
  177. mindspore/nn/optim/adam.py +16 -12
  178. mindspore/nn/optim/adamax.py +8 -7
  179. mindspore/nn/optim/adasum.py +5 -5
  180. mindspore/nn/optim/asgd.py +1 -1
  181. mindspore/nn/optim/ftrl.py +11 -9
  182. mindspore/nn/optim/lamb.py +1 -1
  183. mindspore/nn/optim/lazyadam.py +12 -10
  184. mindspore/nn/optim/momentum.py +7 -6
  185. mindspore/nn/optim/optimizer.py +2 -2
  186. mindspore/nn/optim/proximal_ada_grad.py +12 -10
  187. mindspore/nn/optim/rmsprop.py +13 -12
  188. mindspore/nn/optim/rprop.py +9 -7
  189. mindspore/nn/optim/sgd.py +9 -6
  190. mindspore/nn/optim/tft_wrapper.py +5 -2
  191. mindspore/nn/probability/bijector/bijector.py +17 -11
  192. mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
  193. mindspore/nn/probability/bijector/invert.py +2 -2
  194. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  195. mindspore/nn/probability/bijector/softplus.py +3 -2
  196. mindspore/nn/probability/distribution/beta.py +3 -3
  197. mindspore/nn/probability/distribution/categorical.py +1 -1
  198. mindspore/nn/probability/distribution/cauchy.py +4 -2
  199. mindspore/nn/probability/distribution/exponential.py +6 -7
  200. mindspore/nn/probability/distribution/gamma.py +2 -2
  201. mindspore/nn/probability/distribution/gumbel.py +2 -2
  202. mindspore/nn/probability/distribution/half_normal.py +5 -3
  203. mindspore/nn/probability/distribution/logistic.py +5 -3
  204. mindspore/nn/probability/distribution/poisson.py +1 -1
  205. mindspore/nn/probability/distribution/uniform.py +5 -3
  206. mindspore/nn/reinforcement/_tensors_queue.py +1 -1
  207. mindspore/nn/reinforcement/tensor_array.py +1 -1
  208. mindspore/nn/wrap/__init__.py +6 -6
  209. mindspore/nn/wrap/cell_wrapper.py +178 -117
  210. mindspore/nn/wrap/grad_reducer.py +45 -36
  211. mindspore/nn/wrap/loss_scale.py +3 -3
  212. mindspore/numpy/array_creations.py +3 -3
  213. mindspore/numpy/array_ops.py +1 -1
  214. mindspore/numpy/math_ops.py +4 -4
  215. mindspore/numpy/utils.py +1 -2
  216. mindspore/numpy/utils_const.py +1 -2
  217. mindspore/opencv_core452.dll +0 -0
  218. mindspore/opencv_imgcodecs452.dll +0 -0
  219. mindspore/opencv_imgproc452.dll +0 -0
  220. mindspore/ops/__init__.py +3 -2
  221. mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
  222. mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
  223. mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
  224. mindspore/ops/_register_for_op.py +0 -11
  225. mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
  226. mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
  227. mindspore/ops/_vmap/vmap_array_ops.py +7 -6
  228. mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
  229. mindspore/ops/_vmap/vmap_math_ops.py +4 -7
  230. mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
  231. mindspore/ops/auto_generate/__init__.py +4 -3
  232. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +102 -49
  233. mindspore/ops/auto_generate/gen_extend_func.py +281 -135
  234. mindspore/ops/auto_generate/gen_ops_def.py +2574 -2326
  235. mindspore/ops/auto_generate/gen_ops_prim.py +8566 -2755
  236. mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
  237. mindspore/ops/composite/__init__.py +2 -1
  238. mindspore/ops/composite/base.py +19 -24
  239. mindspore/ops/composite/math_ops.py +6 -16
  240. mindspore/ops/composite/multitype_ops/__init__.py +5 -2
  241. mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -3
  242. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
  243. mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
  244. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  245. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  246. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
  247. mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
  248. mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
  249. mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
  250. mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
  251. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
  252. mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
  253. mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
  254. mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
  255. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
  256. mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
  257. mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
  258. mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
  259. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
  260. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  261. mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
  262. mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
  263. mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
  264. mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
  265. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
  266. mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
  267. mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
  268. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
  269. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  270. mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
  271. mindspore/ops/function/__init__.py +28 -2
  272. mindspore/ops/function/_add_attr_func.py +58 -0
  273. mindspore/ops/function/array_func.py +1629 -2345
  274. mindspore/ops/function/clip_func.py +38 -45
  275. mindspore/ops/function/debug_func.py +36 -44
  276. mindspore/ops/function/grad/__init__.py +1 -0
  277. mindspore/ops/function/grad/grad_func.py +104 -71
  278. mindspore/ops/function/image_func.py +1 -1
  279. mindspore/ops/function/linalg_func.py +46 -78
  280. mindspore/ops/function/math_func.py +3035 -3705
  281. mindspore/ops/function/nn_func.py +676 -241
  282. mindspore/ops/function/other_func.py +159 -1
  283. mindspore/ops/function/parameter_func.py +17 -30
  284. mindspore/ops/function/random_func.py +204 -361
  285. mindspore/ops/function/reshard_func.py +4 -70
  286. mindspore/ops/function/sparse_func.py +3 -3
  287. mindspore/ops/function/sparse_unary_func.py +5 -5
  288. mindspore/ops/function/spectral_func.py +25 -58
  289. mindspore/ops/function/vmap_func.py +24 -17
  290. mindspore/ops/functional.py +6 -4
  291. mindspore/ops/functional_overload.py +547 -4
  292. mindspore/ops/op_info_register.py +32 -244
  293. mindspore/ops/operations/__init__.py +10 -5
  294. mindspore/ops/operations/_custom_ops_utils.py +247 -0
  295. mindspore/ops/operations/_grad_ops.py +1 -10
  296. mindspore/ops/operations/_inner_ops.py +5 -76
  297. mindspore/ops/operations/_ms_kernel.py +4 -10
  298. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  299. mindspore/ops/operations/_scalar_ops.py +3 -2
  300. mindspore/ops/operations/_sequence_ops.py +1 -1
  301. mindspore/ops/operations/_tensor_array.py +1 -1
  302. mindspore/ops/operations/array_ops.py +37 -22
  303. mindspore/ops/operations/comm_ops.py +150 -107
  304. mindspore/ops/operations/custom_ops.py +221 -23
  305. mindspore/ops/operations/debug_ops.py +115 -16
  306. mindspore/ops/operations/inner_ops.py +1 -1
  307. mindspore/ops/operations/linalg_ops.py +1 -58
  308. mindspore/ops/operations/manually_defined/_inner.py +1 -1
  309. mindspore/ops/operations/manually_defined/ops_def.py +746 -79
  310. mindspore/ops/operations/math_ops.py +21 -18
  311. mindspore/ops/operations/nn_ops.py +65 -191
  312. mindspore/ops/operations/other_ops.py +62 -9
  313. mindspore/ops/operations/random_ops.py +13 -7
  314. mindspore/ops/operations/reshard_ops.py +1 -1
  315. mindspore/ops/operations/sparse_ops.py +2 -2
  316. mindspore/ops/primitive.py +43 -32
  317. mindspore/ops/tensor_method.py +232 -13
  318. mindspore/ops_generate/__init__.py +0 -5
  319. mindspore/ops_generate/aclnn/__init__.py +0 -0
  320. mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
  321. mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
  322. mindspore/ops_generate/api/__init__.py +0 -0
  323. mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
  324. mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
  325. mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
  326. mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
  327. mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
  328. mindspore/ops_generate/api/gen_api.py +103 -0
  329. mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
  330. mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
  331. mindspore/ops_generate/common/__init__.py +0 -0
  332. mindspore/ops_generate/common/gen_constants.py +91 -0
  333. mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
  334. mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
  335. mindspore/ops_generate/{template.py → common/template.py} +96 -84
  336. mindspore/ops_generate/gen_ops.py +23 -325
  337. mindspore/ops_generate/op_def/__init__.py +0 -0
  338. mindspore/ops_generate/op_def/gen_op_def.py +90 -0
  339. mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
  340. mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -7
  341. mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
  342. mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
  343. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
  344. mindspore/ops_generate/op_def_py/__init__.py +0 -0
  345. mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
  346. mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
  347. mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
  348. mindspore/ops_generate/pyboost/__init__.py +0 -0
  349. mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
  350. mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
  351. mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
  352. mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
  353. mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
  354. mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
  355. mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
  356. mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
  357. mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
  358. mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
  359. mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
  360. mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
  361. mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
  362. mindspore/ops_generate/resources/__init__.py +0 -0
  363. mindspore/ops_generate/resources/resource_list.py +30 -0
  364. mindspore/ops_generate/resources/resource_loader.py +36 -0
  365. mindspore/ops_generate/resources/resource_manager.py +64 -0
  366. mindspore/ops_generate/resources/yaml_loader.py +88 -0
  367. mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
  368. mindspore/parallel/__init__.py +6 -2
  369. mindspore/parallel/_auto_parallel_context.py +133 -6
  370. mindspore/parallel/_cell_wrapper.py +130 -15
  371. mindspore/parallel/_parallel_serialization.py +95 -4
  372. mindspore/parallel/_ps_context.py +1 -1
  373. mindspore/parallel/_recovery_context.py +7 -2
  374. mindspore/parallel/_tensor.py +142 -18
  375. mindspore/parallel/_utils.py +198 -25
  376. mindspore/parallel/algo_parameter_config.py +3 -3
  377. mindspore/parallel/auto_parallel.py +732 -0
  378. mindspore/parallel/checkpoint_convert.py +159 -0
  379. mindspore/parallel/checkpoint_transform.py +656 -37
  380. mindspore/parallel/cluster/process_entity/_api.py +151 -19
  381. mindspore/parallel/cluster/run.py +1 -1
  382. mindspore/parallel/function/__init__.py +24 -0
  383. mindspore/parallel/function/reshard_func.py +259 -0
  384. mindspore/parallel/nn/__init__.py +25 -0
  385. mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
  386. mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
  387. mindspore/parallel/parameter_broadcast.py +24 -13
  388. mindspore/parallel/shard.py +137 -61
  389. mindspore/parallel/transform_safetensors.py +287 -95
  390. mindspore/pgodb140.dll +0 -0
  391. mindspore/pgort140.dll +0 -0
  392. mindspore/profiler/__init__.py +9 -5
  393. mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
  394. mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
  395. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
  396. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +22 -0
  397. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  398. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
  399. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
  400. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
  401. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
  402. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
  403. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
  404. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
  405. mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
  406. mindspore/profiler/common/constant.py +12 -0
  407. mindspore/profiler/common/msprof_cmd_tool.py +42 -23
  408. mindspore/profiler/common/path_manager.py +24 -0
  409. mindspore/profiler/common/profiler_context.py +26 -2
  410. mindspore/profiler/common/profiler_meta_data.py +74 -0
  411. mindspore/profiler/common/profiler_parameters.py +59 -18
  412. mindspore/profiler/common/profiler_path_manager.py +66 -7
  413. mindspore/profiler/dynamic_profiler.py +112 -79
  414. mindspore/profiler/envprofiler.py +26 -1
  415. mindspore/profiler/experimental_config.py +197 -0
  416. mindspore/profiler/mstx.py +57 -14
  417. mindspore/profiler/platform/npu_profiler.py +33 -7
  418. mindspore/profiler/profiler.py +541 -45
  419. mindspore/profiler/profiler_action_controller.py +1 -1
  420. mindspore/profiler/profiler_interface.py +4 -0
  421. mindspore/profiler/schedule.py +57 -22
  422. mindspore/rewrite/api/node.py +15 -13
  423. mindspore/rewrite/api/symbol_tree.py +1 -1
  424. mindspore/run_check/_check_version.py +25 -14
  425. mindspore/run_check/run_check.py +1 -1
  426. mindspore/runtime/__init__.py +2 -2
  427. mindspore/runtime/executor.py +40 -11
  428. mindspore/runtime/memory.py +25 -8
  429. mindspore/safeguard/rewrite_obfuscation.py +12 -9
  430. mindspore/swresample-4.dll +0 -0
  431. mindspore/swscale-6.dll +0 -0
  432. mindspore/tbbmalloc.dll +0 -0
  433. mindspore/tinyxml2.dll +0 -0
  434. mindspore/train/__init__.py +8 -8
  435. mindspore/train/_utils.py +35 -7
  436. mindspore/train/amp.py +1 -1
  437. mindspore/train/callback/__init__.py +2 -2
  438. mindspore/train/callback/_callback.py +2 -16
  439. mindspore/train/callback/_checkpoint.py +24 -40
  440. mindspore/train/callback/_cluster_monitor.py +14 -18
  441. mindspore/train/callback/_flops_collector.py +2 -3
  442. mindspore/train/callback/_history.py +7 -4
  443. mindspore/train/callback/_lambda_callback.py +2 -2
  444. mindspore/train/callback/_landscape.py +0 -3
  445. mindspore/train/callback/_loss_monitor.py +2 -1
  446. mindspore/train/callback/_on_request_exit.py +6 -5
  447. mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
  448. mindspore/train/callback/_summary_collector.py +8 -13
  449. mindspore/train/callback/_time_monitor.py +2 -1
  450. mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +179 -103
  451. mindspore/train/data_sink.py +25 -2
  452. mindspore/train/dataset_helper.py +4 -5
  453. mindspore/train/loss_scale_manager.py +8 -7
  454. mindspore/train/metrics/accuracy.py +3 -3
  455. mindspore/train/metrics/confusion_matrix.py +9 -9
  456. mindspore/train/metrics/error.py +3 -3
  457. mindspore/train/metrics/hausdorff_distance.py +4 -4
  458. mindspore/train/metrics/mean_surface_distance.py +3 -3
  459. mindspore/train/metrics/metric.py +0 -12
  460. mindspore/train/metrics/occlusion_sensitivity.py +4 -2
  461. mindspore/train/metrics/precision.py +8 -6
  462. mindspore/train/metrics/recall.py +9 -9
  463. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  464. mindspore/train/mind_ir_pb2.py +19 -12
  465. mindspore/train/model.py +176 -103
  466. mindspore/train/serialization.py +246 -988
  467. mindspore/train/summary/_summary_adapter.py +2 -2
  468. mindspore/train/summary/summary_record.py +1 -1
  469. mindspore/turbojpeg.dll +0 -0
  470. mindspore/utils/__init__.py +3 -2
  471. mindspore/utils/dryrun.py +4 -2
  472. mindspore/utils/hooks.py +81 -0
  473. mindspore/utils/utils.py +138 -4
  474. mindspore/vcmeta.dll +0 -0
  475. mindspore/vcruntime140.dll +0 -0
  476. mindspore/vcruntime140_1.dll +0 -0
  477. mindspore/version.py +1 -1
  478. {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +2 -1
  479. {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +483 -438
  480. mindspore/_install_custom.py +0 -43
  481. mindspore/common/_register_for_adapter.py +0 -74
  482. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
  483. mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
  484. mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
  485. mindspore/ops_generate/gen_constants.py +0 -190
  486. mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
  487. mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
  488. /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
  489. {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
  490. {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +0 -0
  491. {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0
@@ -77,6 +77,55 @@ class SiLU(Cell):
         return mint.nn.functional.silu(x)


+class Sigmoid(Cell):
+    r"""
+    Applies sigmoid activation function element-wise.
+
+    Sigmoid function is defined as:
+
+    .. math::
+
+        \text{sigmoid}(x_i) = \frac{1}{1 + \exp(-x_i)},
+
+    where :math:`x_i` is the element of `x`.
+
+    Sigmoid Activation Function Graph:
+
+    .. image:: ../images/Sigmoid.png
+        :align: center
+
+    Inputs:
+        - **input** (Tensor) - `input` is :math:`x` in the preceding formula. Tensor of any dimension,
+          the data type is float16, float32, float64, complex64 or complex128.
+
+    Outputs:
+        Tensor, with the same type and shape as the `input`.
+
+    Raises:
+        TypeError: If dtype of `input` is not float16, float32, float64, complex64 or complex128.
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> import numpy as np
+        >>> input = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
+        >>> sigmoid = mint.nn.Sigmoid()
+        >>> output = sigmoid(input)
+        >>> print(output)
+        [0.2688 0.11914 0.5 0.881 0.7305 ]
+    """
+    def __init__(self):
+        """Initialize Sigmoid."""
+        super(Sigmoid, self).__init__()
+
+    def construct(self, input):
+        return mint.nn.functional.sigmoid(input)
+
+
 class LogSigmoid(Cell):
     r"""
     Applies logsigmoid activation element-wise. The input is a Tensor with any valid shape.
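
Note on the new `mint.nn.Sigmoid` cell above: per the diff it is a thin wrapper over `mint.nn.functional.sigmoid`, so the cell and the functional call should agree element-wise. A minimal cross-check sketch, assuming a working 2.6.0rc1 install with a supported backend (not part of the diff):

    >>> import numpy as np
    >>> import mindspore
    >>> from mindspore import Tensor, mint
    >>> x = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
    >>> cell_out = mint.nn.Sigmoid()(x)               # new Cell API
    >>> func_out = mint.nn.functional.sigmoid(x)      # functional API it wraps
    >>> print(np.allclose(cell_out.asnumpy(), func_out.asnumpy()))
    True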
@@ -140,7 +189,8 @@ class ELU(Cell):
             \alpha * (\exp(x_i) - 1), &\text{otherwise.}
         \end{cases}

-    where :math:`x_i` represents the element of the input and :math:`\alpha` represents the `alpha` parameter.
+    where :math:`x_i` represents the element of the input and :math:`\alpha` represents the `alpha`
+    parameter, which controls the smoothness of the ELU.

     ELU Activation Function Graph:

@@ -151,16 +201,18 @@ class ELU(Cell):
         This is an experimental API that is subject to change or deletion.

     Args:
-        alpha (float, optional): The alpha value of ELU, the data type is float. Default: ``1.0`` .
+        alpha (float, optional): The alpha value of ELU, the data type is float. Default: ``1.0``.
+        inplace (bool, optional): Whether to use inplace mode, the data type is bool. Default: ``False``.

     Inputs:
         - **input** (Tensor) - The input of ELU is a Tensor of any dimension.

     Outputs:
-        Tensor, with the same type and shape as the `input`.
+        Tensor, with the same shape and type as the `input`.

     Raises:
-        TypeError: If `alpha` is not a float.
+        RuntimeError: If the dtype of `input` is not float16, float32 or bfloat16.
+        TypeError: If the dtype of `alpha` is not float.

     Supported Platforms:
         ``Ascend``
@@ -176,13 +228,14 @@ class ELU(Cell):
         [-0.63212055 -0.86466473 0. 2. 1.]
     """

-    def __init__(self, alpha=1.0):
+    def __init__(self, alpha=1.0, inplace=False):
         """Initialize ELU."""
         super(ELU, self).__init__()
         self.alpha = alpha
+        self.inplace = inplace

     def construct(self, input):
-        return mint.nn.functional.elu(input, self.alpha)
+        return mint.nn.functional.elu(input, self.alpha, self.inplace)


 class GLU(Cell):
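
The new `inplace` flag threads straight through to `mint.nn.functional.elu`; when ``True`` the input tensor is expected to be updated in place rather than a fresh output being allocated (PyTorch-style semantics, inferred from the parameter name). A minimal sketch, assuming a 2.6.0rc1 install with an Ascend backend (not part of the diff):

    >>> import numpy as np
    >>> import mindspore
    >>> from mindspore import Tensor, mint
    >>> x = Tensor([-1.0, 0.0, 1.0], mindspore.float32)
    >>> y = mint.nn.ELU(alpha=1.0, inplace=False)(x)
    >>> # ELU(-1) = exp(-1) - 1 by the formula above
    >>> print(np.allclose(y.asnumpy(), [np.exp(-1.0) - 1.0, 0.0, 1.0]))
    True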
@@ -197,9 +250,6 @@ class GLU(Cell):
     Here :math:`\sigma` is the sigmoid function, and :math:`\otimes` is the Hadamard product.
     See `Language Modeling with Gated Convolutional Networks <https://arxiv.org/abs/1612.08083>`_ .

-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
     Args:
         dim (int, optional): The dimension to split the input `input`. The value range is `[-r, r)` where `r`
             is the number of dimensions of `input`. Default: ``-1`` , the last dimension in `input`.
@@ -224,10 +274,10 @@
     ``Ascend`` ``CPU``

     Examples:
-        >>> import mindspore as ms
-        >>> m = ms.mint.nn.GLU()
-        >>> input = ms.Tensor([[0.1,0.2,0.3,0.4],[0.5,0.6,0.7,0.8]])
-        >>> output = m(input)
+        >>> from mindspore import mint, Tensor
+        >>> glu = mint.nn.GLU()
+        >>> input = Tensor([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]])
+        >>> output = glu(input)
         >>> print(output)
         [[0.05744425 0.11973753]
          [0.33409387 0.41398472]]
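
The printed values match the definition :math:`\text{GLU}(a, b) = a \otimes \sigma(b)`, with the input split in half along `dim`. A NumPy cross-check of the example output (not part of the diff):

    >>> import numpy as np
    >>> x = np.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]])
    >>> a, b = np.split(x, 2, axis=-1)          # split along the default dim=-1
    >>> glu = a * (1.0 / (1.0 + np.exp(-b)))    # a * sigmoid(b)
    >>> ref = np.array([[0.05744425, 0.11973753], [0.33409387, 0.41398472]])
    >>> print(np.allclose(glu, ref, atol=1e-6))
    True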
@@ -292,10 +342,67 @@ class Tanh(Cell):
         return mint.nn.functional.tanh(input)


+class Threshold(Cell):
+    r"""
+    Compute the Threshold activation function element-wise.
+
+    The Threshold is defined as:
+
+    .. math::
+        y =
+        \begin{cases}
+        x, &\text{ if } x > \text{threshold} \\
+        \text{value}, &\text{ otherwise }
+        \end{cases}
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        threshold (Union[int, float]): The value of the threshold.
+        value (Union[int, float]): The value to replace with when the element is less than or equal to `threshold`.
+        inplace (bool, optional): Whether to perform the operation in-place. Default: ``False``.
+
+    Inputs:
+        - **input** (Tensor) - The input Tensor.
+
+    Outputs:
+        Tensor, the same shape and data type as the input.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `threshold` is not a float or an int.
+        TypeError: If `value` is not a float or an int.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> inputs = mindspore.Tensor([0.0, 2, 3], mindspore.float32)
+        >>> net = mint.nn.Threshold(1, 100)
+        >>> outputs = net(inputs)
+        >>> print(outputs)
+        [100. 2. 3.]
+    """
+
+    def __init__(self, threshold, value, inplace=False):
+        """Initialize Threshold."""
+        super(Threshold, self).__init__()
+        self.threshold = threshold
+        self.value = value
+        self.inplace = inplace
+
+    def construct(self, input):
+        return mint.nn.functional.threshold(input, self.threshold, self.value,
+                                            self.inplace)
+
 __all__ = [
     'LogSigmoid',
     'SiLU',
     'ELU',
     'GLU',
     'Tanh',
+    'Threshold',
 ]
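
The `Threshold` example keeps elements strictly greater than `threshold` and substitutes `value` elsewhere, so its output is reproducible with plain NumPy (not part of the diff):

    >>> import numpy as np
    >>> x = np.array([0.0, 2.0, 3.0], dtype=np.float32)
    >>> # keep x where x > threshold(=1), else use value(=100)
    >>> print(np.where(x > 1, x, np.float32(100)))
    [100.   2.   3.]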
@@ -17,19 +17,20 @@ from __future__ import absolute_import

 import math

-from mindspore.ops.auto_generate.gen_ops_prim import conv2d_ext_op, conv2d_padding_op, conv3d_ext_op, conv3d_padding_op
+from mindspore.ops.auto_generate.gen_ops_prim import (conv1d_ext_op, conv1d_padding_op, conv2d_ext_op,
+                                                      conv2d_padding_op, conv3d_ext_op, conv3d_padding_op)
 from mindspore.ops.function.nn_func import pad_ext, conv_transpose2d
 from mindspore.ops.function.array_func import rank
 import mindspore.common.dtype as mstype
 from mindspore.common.parameter import Parameter
 from mindspore.common.initializer import initializer, HeUniform, Uniform, _calculate_fan_in_and_fan_out
 from mindspore import _checkparam as Validator
-from mindspore._checkparam import twice, triple
+from mindspore._checkparam import once, twice, triple
 from mindspore._extends import cell_attr_register
 from mindspore.nn.cell import Cell
 from mindspore.ops.functional import isconstant

-__all__ = ['Conv2d', 'ConvTranspose2d', 'Conv3d']
+__all__ = ['Conv2d', 'ConvTranspose2d', 'Conv3d', 'Conv1d']


 class _Conv(Cell):
@@ -135,6 +136,185 @@ class _Conv(Cell):
         return s


+class Conv1d(_Conv):
+    r"""
+    1D convolution layer.
+
+    Applies a 1D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, L_{in})`,
+    where :math:`N` is batch size, :math:`C` is channel number, :math:`L` is sequence length.
+
+    The output is calculated based on formula:
+
+    .. math::
+
+        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
+        \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
+
+    where :math:`bias` is the output channel bias, :math:`ccor` is
+    the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
+    :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
+
+    - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
+      where :math:`N` is the batch size of the input.
+
+    - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
+      where :math:`C_{out}` is the number of
+      output channels, which is also equal to the number of kernels.
+
+    - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
+      where :math:`C_{in}` is the number of
+      input channels, which is also equal to the number of channels in the convolutional kernels.
+
+    Therefore, in the above formula, :math:`{bias}(C_{\text{out}_j})` represents the bias of the :math:`j`-th
+    output channel, :math:`{weight}(C_{\text{out}_j}, k)` represents the slice of the :math:`j`-th convolutional
+    kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+    channel in the :math:`i`-th batch of the input feature map.
+
+    The shape of the convolutional kernel is given by :math:`(\text{kernel_size})`,
+    where :math:`\text{kernel_size}` is the length of the kernel.
+    If we consider the input and output channels as well as the `groups` parameter, the complete kernel shape
+    will be :math:`(C_{out}, C_{in} / \text{groups}, \text{kernel_size})`,
+    where `groups` is the number of groups dividing `x`'s input channel when applying groups convolution.
+
+    For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
+    <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
+
+    Args:
+        in_channels (int): The channel number of the input tensor of the Conv1d layer.
+        out_channels (int): The channel number of the output tensor of the Conv1d layer.
+        kernel_size (Union[int, tuple[int], list[int]]): Specifies the length of the 1D convolution kernel.
+            The data type is an integer or a tuple of one integer.
+        stride (Union[int, tuple[int], list[int]], optional): The movement stride of the 1D convolution kernel.
+            The data type is an integer or a tuple of one integer. Default: ``1`` .
+        padding (Union[int, tuple[int], list[int], str], optional): The number of padding
+            on the input.
+            The data type is an integer or a tuple of one integer or string {``"valid"``, ``"same"``}.
+            The value should be greater than or equal to 0. Default: ``0`` .
+
+            - ``"same"``: Pad the input around its edges so that the shape of input and output
+              are the same when `stride` is set to ``1``.
+              The amount of padding is calculated internally by the operator. If the amount is even, it is
+              uniformly distributed around the input; if it is odd, the excess amount goes to the right side.
+              If this mode is set, `stride` must be 1.
+
+            - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+              possible length. Extra sequence that could not complete a full stride will
+              be discarded.
+
+        padding_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+            ``"zeros"`` , ``"reflect"`` , ``"circular"`` or ``"replicate"`` . Default: ``"zeros"`` .
+        dilation (Union[int, tuple[int], list[int]], optional): Specifies the dilation rate to use for dilated
+            convolution. It can be a single int or a tuple of 1 integer.
+            Assuming :math:`dilation=(d)`, the convolutional kernel samples the input with a
+            spacing of :math:`d-1` elements in the length direction.
+            Default: ``1`` .
+        groups (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
+            divisible by `groups`. If the groups is equal to `in_channels` and `out_channels`,
+            this 1D convolution layer also can be called 1D depthwise convolution layer. Default: ``1`` .
+            The following restraints must be met:
+
+            - :math:`(C_{in} \text{ % } \text{groups} == 0)`
+            - :math:`(C_{out} \text{ % } \text{groups} == 0)`
+            - :math:`(C_{out} >= \text{groups})`
+            - :math:`(\text{kernel_size[1]} = C_{in} / \text{groups})`
+
+        bias (bool, optional): Whether the Conv1d layer has a bias parameter. Default: ``True`` .
+        dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``None``, using ``mstype.float32``.
+
+    Inputs:
+        - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})` \
+          or :math:`(C_{in}, L_{in})`.
+
+    Outputs:
+        Tensor of shape :math:`(N, C_{out}, L_{out})` or :math:`(C_{out}, L_{out})`.
+
+        padding is ``'same'``:
+
+        .. math::
+            \begin{array}{ll} \\
+                L_{out} = \left \lceil{\frac{L_{in}}{\text{stride}}} \right \rceil \\
+            \end{array}
+
+        padding is ``'valid'``:
+
+        .. math::
+            \begin{array}{ll} \\
+                L_{out} = \left \lceil{\frac{L_{in} - \text{dilation} \times (\text{kernel_size} - 1) }
+                {\text{stride}}} \right \rceil \\
+            \end{array}
+
+        padding is int or tuple/list:
+
+        .. math::
+            \begin{array}{ll} \\
+                L_{out} = \left \lfloor{\frac{L_{in} + 2 \times {padding} - (\text{kernel_size} - 1) \times
+                \text{dilation} - 1 }{\text{stride}} + 1} \right \rfloor \\
+            \end{array}
+
+    Raises:
+        ValueError: Args and size of the input feature map should satisfy the output formula to ensure that the size
+            of the output feature map is positive; otherwise, an error will be reported.
+        RuntimeError: On Ascend, due to the limitation of the L1 cache size of different NPU chip, if input size or
+            kernel size is too large, it may trigger an error.
+        TypeError: If `in_channels`, `out_channels` or `groups` is not an int.
+        TypeError: If `kernel_size`, `stride` or `dilation` is neither an int nor a tuple.
+        ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
+        ValueError: If `padding` is less than 0.
+        ValueError: If `padding` is ``'same'`` and `stride` is not equal to 1.
+        ValueError: The input parameters do not satisfy the convolution output formula.
+        ValueError: The `kernel_size` cannot exceed the size of the input feature map.
+        ValueError: The value of padding cannot cause the calculation area to exceed the input size.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> import numpy as np
+        >>> net = mint.nn.Conv1d(120, 240, 4, bias=False)
+        >>> x = Tensor(np.ones([1, 120, 1024]), mindspore.float32)
+        >>> output = net(x).shape
+        >>> print(output)
+        (1, 240, 1021)
+    """
+    @cell_attr_register
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 stride=1,
+                 padding=0,
+                 dilation=1,
+                 groups=1,
+                 bias=True,
+                 padding_mode='zeros',
+                 dtype=None):
+        """Initialize Conv1d."""
+        kernel_size_ = once(kernel_size)
+        stride_ = once(stride)
+        padding_ = padding if isinstance(padding, str) else once(padding)
+        dilation_ = once(dilation)
+        if not dtype:
+            dtype = mstype.float32
+        super(Conv1d, self).__init__(in_channels, out_channels, kernel_size_, stride_, padding_, dilation_, False,
+                                     once(0), groups, bias, padding_mode, dtype)
+        if isinstance(padding, str) and padding_mode == "zeros":
+            self.conv1d = conv1d_padding_op
+        else:
+            self.conv1d = conv1d_ext_op
+
+    def construct(self, input):
+        if self.padding_mode != "zeros":
+            output = self.conv1d(pad_ext(input, self._reversed_padding, mode=self.padding_mode), self.weight,
+                                 self.bias, self.stride, (0,), self.dilation, self.groups)
+        else:
+            output = self.conv1d(input, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
+        return output
+
+
 class Conv2d(_Conv):
     r"""
     2D convolution layer.
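
The Conv1d docstring example's output length `(1, 240, 1021)` follows from the int/default-padding formula above. A quick arithmetic sketch in plain Python (not part of the diff):

    >>> L_in, kernel_size, stride, dilation, padding = 1024, 4, 1, 1, 0
    >>> # floor((L_in + 2*padding - (kernel_size - 1)*dilation - 1) / stride + 1)
    >>> L_out = (L_in + 2 * padding - (kernel_size - 1) * dilation - 1) // stride + 1
    >>> print(L_out)
    1021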
@@ -182,18 +362,18 @@ class Conv2d(_Conv):
     Args:
         in_channels (int): The channel number of the input tensor of the Conv2d layer.
         out_channels (int): The channel number of the output tensor of the Conv2d layer.
-        kernel_size (Union[int, tuple[int]]): Specifies the height and width of the 2D convolution kernel.
+        kernel_size (Union[int, tuple[int], list[int]]): Specifies the height and width of the 2D convolution kernel.
            The data type is an integer or a tuple of two integers. An integer represents the height
            and width of the convolution kernel. A tuple of two integers represents the height
            and width of the convolution kernel respectively.
-        stride (Union[int, tuple[int]], optional): The movement stride of the 2D convolution kernel.
+        stride (Union[int, tuple[int], list[int]], optional): The movement stride of the 2D convolution kernel.
            The data type is an integer or a tuple of two integers. An integer represents the movement step size
            in both height and width directions. A tuple of two integers represents the movement step size in the height
            and width directions respectively. Default: ``1`` .
-        padding (Union[int, tuple[int], str], optional): The number of padding
+        padding (Union[int, tuple[int], list[int], str], optional): The number of padding
            on the height and width directions of the input.
-            The data type is an integer or a tuple of two integers or string {`valid`, `same`}. If `padding` is an
-            integer, then `padding_{H}` and `padding_{W}` are all equal to `padding`.
+            The data type is an integer or a tuple of two integers or string {``"valid"``, ``"same"``}.
+            If `padding` is an integer, then `padding_{H}` and `padding_{W}` are all equal to `padding`.
            If `padding` is a tuple of 2 integers, then `padding_{H}` and `padding_{W}`
            is equal to `padding[0]` and `padding[1]` respectively.
            The value should be greater than or equal to 0. Default: ``0`` .
@@ -210,7 +390,7 @@

        padding_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
            ``"zeros"`` , ``"reflect"`` , ``"circular"`` or ``"replicate"`` . Default: ``"zeros"`` .
-        dilation (Union[int, tuple[int]], optional): Specifies the dilation rate to use for dilated convolution.
+        dilation (Union[int, tuple[int], list[int]], optional): Specifies the dilation rate to use for dilated convolution.
            It can be a single int or a tuple of 2 or 4 integers. A single int means the dilation size is the same
            in both the height and width directions. A tuple of two ints represents the dilation size in
            the height and width directions, respectively. For a tuple of four ints, the two ints correspond
@@ -223,12 +403,15 @@
        groups (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
            divisible by `groups`. If the groups is equal to `in_channels` and `out_channels`,
            this 2D convolution layer also can be called 2D depthwise convolution layer. Default: ``1`` .
+            The following restraints must be met:

-            - :math:`(C_{in} \text{ % } \text{groups} == 0)` , :math:`(C_{out} \text{ % } \text{groups} == 0)` ,
-              :math:`(C_{out} >= \text{groups})` , :math:`(\text{kernel_size[1]} = C_{in} / \text{groups})`
+            - :math:`(C_{in} \text{ % } \text{groups} == 0)`
+            - :math:`(C_{out} \text{ % } \text{groups} == 0)`
+            - :math:`(C_{out} >= \text{groups})`
+            - :math:`(\text{kernel_size[1]} = C_{in} / \text{groups})`

        bias (bool, optional): Whether the Conv2d layer has a bias parameter. Default: ``True`` .
-        dtype (mindspore.dtype, optional): Dtype of Parameters. Default: mstype.float32 .
+        dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``None``, using ``mstype.float32``.

    Inputs:
        - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` \
@@ -255,18 +438,28 @@
                {\text{stride[1]}}} \right \rceil \\
            \end{array}

+        padding is int or tuple/list:
+
+        .. math::
+            \begin{array}{ll} \\
+                H_{out} = \left \lfloor{\frac{H_{in} + padding[0] + padding[1] - (\text{kernel_size[0]} - 1) \times
+                \text{dilation[0]} - 1 }{\text{stride[0]}} + 1} \right \rfloor \\
+                W_{out} = \left \lfloor{\frac{W_{in} + padding[2] + padding[3] - (\text{kernel_size[1]} - 1) \times
+                \text{dilation[1]} - 1 }{\text{stride[1]}} + 1} \right \rfloor \\
+            \end{array}
+
    Raises:
        ValueError: Args and size of the input feature map should satisfy the output formula to ensure that the size of
            the output feature map is positive; otherwise, an error will be reported.
        RuntimeError: On Ascend, due to the limitation of the L1 cache size of different NPU chip, if input size or
            kernel size is too large, it may trigger an error.
        TypeError: If `in_channels`, `out_channels` or `groups` is not an int.
-        TypeError: If `kernel_size`, `stride` or `dilation` is neither an int not a tuple.
+        TypeError: If `kernel_size`, `stride` or `dilation` is neither an int nor a tuple.
        ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
        ValueError: If `padding` is less than 0.
        ValueError: If `padding` is ``'same'`` and `stride` is not equal to 1.
        ValueError: The input parameters do not satisfy the convolution output formula.
-        ValueError: The KernelSize cannot exceed the size of the input feature map.
+        ValueError: The `kernel_size` cannot exceed the size of the input feature map.
        ValueError: The value of padding cannot cause the calculation area to exceed the input size.

    Supported Platforms:
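
The newly documented int/tuple formula also explains the corrected example shape in the next hunk: with `kernel_size=4`, `stride=1`, `dilation=1` and no padding, each spatial dimension shrinks by `kernel_size - 1`. A quick check in plain Python (not part of the diff):

    >>> H_in, W_in, k, d, s = 1024, 640, 4, 1, 1
    >>> H_out = (H_in + 0 + 0 - (k - 1) * d - 1) // s + 1   # padding[0] + padding[1] = 0
    >>> W_out = (W_in + 0 + 0 - (k - 1) * d - 1) // s + 1   # padding[2] + padding[3] = 0
    >>> print(H_out, W_out)
    1021 637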
@@ -280,7 +473,7 @@
        >>> x = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
        >>> output = net(x).shape
        >>> print(output)
-        (1, 240, 1024, 640)
+        (1, 240, 1021, 637)
    """
    @cell_attr_register
    def __init__(self,
@@ -375,17 +568,17 @@ class Conv3d(_Conv):
    Args:
        in_channels (int): The channel number of the input tensor of the Conv3d layer.
        out_channels (int): The channel number of the output tensor of the Conv3d layer.
-        kernel_size (Union[int, tuple[int]]): Specifies the height and width of the 3D convolution kernel.
+        kernel_size (Union[int, tuple[int], list[int]]): Specifies the height and width of the 3D convolution kernel.
            The data type is an integer or a tuple of two integers. An integer represents the height
            and width of the convolution kernel. A tuple of two integers represents the height
            and width of the convolution kernel respectively.
-        stride (Union[int, tuple[int]], optional): The movement stride of the 3D convolution kernel.
+        stride (Union[int, tuple[int], list[int]], optional): The movement stride of the 3D convolution kernel.
            The data type is an integer or a tuple of three integers. An integer represents the movement step size
-            in both height and width directions. A tuple of three integers represents the movement step size in the depth, height
-            and width directions respectively. Default: ``1`` .
-        padding (Union[int, tuple[int], str], optional): The number of padding
+            in both height and width directions. A tuple of three integers represents the movement step size in the
+            depth, height and width directions respectively. Default: ``1`` .
+        padding (Union[int, tuple[int], list[int], str], optional): The number of padding
            on the depth, height and width directions of the input.
-            The data type is an integer or string {`valid`, `same`} or a tuple of three integers.
+            The data type is an integer or string {``"valid"``, ``"same"``} or a tuple of three integers.
            The value should be greater than or equal to 0. Default: ``0`` .

        - ``"same"``: Pad the input around its edges so that the shape of input and output
@@ -400,11 +593,12 @@

        padding_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
            ``"zeros"`` , ``"reflect"`` , ``"circular"`` or ``"replicate"`` . Default: ``"zeros"`` .
-        dilation (Union[int, tuple[int]], optional): Controlling the space between the kernel points. Default: ``1`` .
+        dilation (Union[int, tuple[int], list[int]], optional): Controlling the space between the kernel points.
+            Default: ``1`` .
        groups (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
            divisible by `groups`. If the groups is equal to `in_channels` and `out_channels`. Default: ``1`` .
        bias (bool, optional): Whether the Conv3d layer has a bias parameter. Default: ``True`` .
-        dtype (mindspore.dtype, optional): Dtype of Parameters. Default: ``mstype.float32`` .
+        dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``None``, using ``mstype.float32``.

    Inputs:
        - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` \
@@ -437,7 +631,7 @@

    Raises:
        TypeError: If `in_channels`, `out_channels` or `groups` is not an int.
-        TypeError: If `kernel_size`, `stride`, `padding` or `dilation` is neither an int not a tuple.
+        TypeError: If `kernel_size`, `stride`, `padding` or `dilation` is neither an int nor a tuple.
        ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
        ValueError: If `padding` is less than 0.

@@ -68,7 +68,7 @@ class _NormBase(Cell):
                                              requires_grad=False, name="running_mean")
            self.running_var = Parameter(Tensor(np.ones(num_features), dtype=self.dtype),
                                             requires_grad=False, name="running_var")
-            self.num_batches_tracked = Parameter(Tensor(0, dtype=ms.float32),
+            self.num_batches_tracked = Parameter(Tensor(0, dtype=ms.int64),
                                                     requires_grad=False, name="num_batches_tracked")
        else:
            self.running_mean = None
@@ -84,7 +84,7 @@
                np.zeros(self.num_features), dtype=self.dtype)
            one_running_var = Tensor(
                np.ones(self.num_features), dtype=self.dtype)
-            zero_num_batches_tracked = Tensor(0, dtype=ms.float32)
+            zero_num_batches_tracked = Tensor(0, dtype=ms.int64)

            ops.assign(self.running_mean, zero_running_mean)
            ops.assign(self.running_var, one_running_var)
@@ -136,11 +136,9 @@ class _BatchNorm(_NormBase):

        if self.training and self.track_running_stats:
            if self.num_batches_tracked is not None:
-                num_batches_tracked_one = Tensor(1, dtype=ms.float32)
-                ops.assign_add(self.num_batches_tracked,
-                               num_batches_tracked_one)
+                self.num_batches_tracked += 1
                if self.momentum is None:
-                    exponential_average_factor = float(1.0 / self.num_batches_tracked)
+                    exponential_average_factor = 1.0 / float(self.num_batches_tracked)
                else:
                    exponential_average_factor = self.momentum

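Besides the dtype change (`num_batches_tracked` is now an int64 counter), the rewrite above preserves the cumulative-moving-average behaviour when `momentum` is ``None``: with `factor = 1 / n` after `n` batches, every batch's statistics end up weighted equally. An illustrative plain-Python sketch of that update rule (not MindSpore API):

    >>> running_mean, n = 0.0, 0
    >>> for batch_mean in [2.0, 4.0, 6.0]:
    ...     n += 1
    ...     factor = 1.0 / float(n)  # momentum is None: cumulative average
    ...     running_mean = (1 - factor) * running_mean + factor * batch_mean
    >>> print(abs(running_mean - 4.0) < 1e-9)  # equal-weight mean of [2, 4, 6]
    True
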
@@ -250,8 +248,8 @@ class BatchNorm2d(_BatchNorm):
    elements of :math:`\gamma` are set to 1 and the elements of :math:`\beta` are set to 0.

    .. warning::
-        This API does not support Dynamic Rank.
-        This is an experimental API that is subject to change or deletion.
+        - This API does not support Dynamic Rank.
+        - This is an experimental API that is subject to change or deletion.

    Args:
        num_features (int): `C` from an expected input of shape :math:`(N, C, H, W)`.
@@ -264,7 +262,7 @@
        track_running_stats (bool, optional): a boolean value that when set to ``True``, this
            cell tracks the running mean and variance, and when set to ``False``,
            this cell does not track such statistics. And this cell always uses batch statistics
-            in both training and eval modes. Default: ``True`` .
+            in both train and eval modes. Default: ``True`` .
        dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``None`` .

    Inputs:
@@ -487,10 +485,12 @@ class SyncBatchNorm(_BatchNorm):

    Args:
        num_features (int): `C` from an expected input of size :math:`(N, C, +)`.
-        eps (float): :math:`\epsilon`, a value added to the denominator for numerical stability. Default: ``1e-5`` .
-        momentum (float): A floating hyperparameter of the momentum for the
+        eps (float, optional): :math:`\epsilon`, a value added to the denominator for numerical stability.
+            Default: ``1e-5`` .
+        momentum (float, optional): A floating hyperparameter of the momentum for the
            running_mean and running_var computation. Default: ``0.1`` .
-        affine (bool): A bool value. When set to ``True`` , :math:`\gamma` and :math:`\beta` can be learned.
+        affine (bool, optional): A bool value. When set to ``True`` , :math:`\gamma` and :math:`\beta` are learnable
+            parameters. When set to ``False`` , :math:`\gamma` and :math:`\beta` are fixed, non-learnable parameters.
            Default: ``True`` .
        track_running_stats (bool, optional): a boolean value that when set to ``True``, this
            cell tracks the running mean and variance, and when set to ``False``,
@@ -524,7 +524,7 @@
    Here, examples use msrun to pull multi-process distributed tasks across nodes with a single command
    line instruction.
    Please see the `Ascend tutorial
-    <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
+    <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
    for more details.

    This example should be run with multiple devices.
@@ -594,10 +594,9 @@
            exponential_average_factor = self.momentum

        if self.training and self.track_running_stats:
-            one_tensor = Tensor(1, dtype=ms.float32)
-            ops.assign_add(self.num_batches_tracked, one_tensor)
+            self.num_batches_tracked += 1
            if self.momentum is None:  # use cumulative moving average
-                exponential_average_factor = 1.0 / self.num_batches_tracked.value()
+                exponential_average_factor = 1.0 / float(self.num_batches_tracked.value())
            else:  # use exponential moving average
                exponential_average_factor = self.momentum

@@ -634,7 +634,7 @@ class ReplicationPad1d(ReplicationPadNd_):
    - **input** (Tensor) - 2D or 3D input Tensor with shape: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.

    Outputs:
-        Tensor, the tensor after padding.
+        The tensor after padding.

    Raises:
        TypeError: If `padding` is not an integer or a list or tuple of 2 integers.