mindspore-2.7.0rc1-cp311-cp311-win_amd64.whl → mindspore-2.7.1-cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of mindspore might be problematic.

Files changed (370)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +5 -2
  3. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  6. mindspore/_checkparam.py +2 -2
  7. mindspore/_extends/builtin_operations.py +3 -3
  8. mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
  9. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  10. mindspore/_extends/parse/__init__.py +3 -3
  11. mindspore/_extends/parse/compile_config.py +24 -1
  12. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +6 -3
  13. mindspore/_extends/parse/parser.py +28 -22
  14. mindspore/_extends/parse/resources.py +1 -1
  15. mindspore/_extends/parse/standard_method.py +23 -2
  16. mindspore/_extends/parse/trope.py +2 -1
  17. mindspore/_extends/pijit/pijit_func_white_list.py +9 -27
  18. mindspore/amp.py +0 -18
  19. mindspore/avcodec-59.dll +0 -0
  20. mindspore/avdevice-59.dll +0 -0
  21. mindspore/avfilter-8.dll +0 -0
  22. mindspore/avformat-59.dll +0 -0
  23. mindspore/avutil-57.dll +0 -0
  24. mindspore/boost/base.py +29 -2
  25. mindspore/common/__init__.py +18 -12
  26. mindspore/common/_decorator.py +3 -2
  27. mindspore/common/_grad_function.py +3 -1
  28. mindspore/common/_tensor_cpp_method.py +1 -1
  29. mindspore/common/_tensor_docs.py +371 -96
  30. mindspore/common/_utils.py +7 -43
  31. mindspore/common/api.py +434 -135
  32. mindspore/common/dtype.py +98 -57
  33. mindspore/common/dump.py +7 -108
  34. mindspore/common/dynamic_shape/__init__.py +0 -0
  35. mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +15 -23
  36. mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
  37. mindspore/common/file_system.py +59 -9
  38. mindspore/common/hook_handle.py +82 -3
  39. mindspore/common/jit_config.py +5 -1
  40. mindspore/common/jit_trace.py +27 -12
  41. mindspore/common/lazy_inline.py +5 -3
  42. mindspore/common/np_dtype.py +3 -3
  43. mindspore/common/parameter.py +17 -127
  44. mindspore/common/recompute.py +4 -13
  45. mindspore/common/tensor.py +50 -217
  46. mindspore/communication/_comm_helper.py +11 -1
  47. mindspore/communication/comm_func.py +138 -4
  48. mindspore/communication/management.py +85 -1
  49. mindspore/config/op_info.config +0 -15
  50. mindspore/context.py +20 -106
  51. mindspore/dataset/__init__.py +1 -1
  52. mindspore/dataset/audio/transforms.py +1 -1
  53. mindspore/dataset/core/config.py +35 -1
  54. mindspore/dataset/engine/datasets.py +338 -319
  55. mindspore/dataset/engine/datasets_user_defined.py +38 -22
  56. mindspore/dataset/engine/datasets_vision.py +1 -1
  57. mindspore/dataset/engine/validators.py +1 -15
  58. mindspore/dataset/transforms/c_transforms.py +2 -2
  59. mindspore/dataset/transforms/transforms.py +3 -3
  60. mindspore/dataset/vision/__init__.py +1 -1
  61. mindspore/dataset/vision/py_transforms.py +8 -8
  62. mindspore/dataset/vision/transforms.py +17 -5
  63. mindspore/dataset/vision/utils.py +632 -21
  64. mindspore/device_context/ascend/op_tuning.py +35 -1
  65. mindspore/dnnl.dll +0 -0
  66. mindspore/{profiler/common/validator → graph}/__init__.py +9 -1
  67. mindspore/graph/custom_pass.py +55 -0
  68. mindspore/include/api/cell.h +28 -4
  69. mindspore/include/api/cfg.h +24 -7
  70. mindspore/include/api/context.h +1 -0
  71. mindspore/include/api/delegate.h +0 -2
  72. mindspore/include/api/dual_abi_helper.h +100 -19
  73. mindspore/include/api/graph.h +14 -1
  74. mindspore/include/api/kernel.h +16 -3
  75. mindspore/include/api/kernel_api.h +9 -1
  76. mindspore/include/api/metrics/accuracy.h +9 -0
  77. mindspore/include/api/model.h +5 -1
  78. mindspore/include/api/model_group.h +4 -0
  79. mindspore/include/api/model_parallel_runner.h +2 -0
  80. mindspore/include/api/status.h +48 -10
  81. mindspore/include/api/types.h +6 -1
  82. mindspore/include/dataset/constants.h +9 -0
  83. mindspore/include/dataset/execute.h +2 -2
  84. mindspore/jpeg62.dll +0 -0
  85. mindspore/mindrecord/__init__.py +3 -3
  86. mindspore/mindrecord/common/exceptions.py +1 -0
  87. mindspore/mindrecord/config.py +1 -1
  88. mindspore/{parallel/mpi → mindrecord/core}/__init__.py +4 -1
  89. mindspore/mindrecord/{shardheader.py → core/shardheader.py} +2 -1
  90. mindspore/mindrecord/{shardindexgenerator.py → core/shardindexgenerator.py} +1 -1
  91. mindspore/mindrecord/{shardreader.py → core/shardreader.py} +2 -1
  92. mindspore/mindrecord/{shardsegment.py → core/shardsegment.py} +2 -2
  93. mindspore/mindrecord/{shardutils.py → core/shardutils.py} +1 -1
  94. mindspore/mindrecord/{shardwriter.py → core/shardwriter.py} +1 -1
  95. mindspore/mindrecord/filereader.py +4 -4
  96. mindspore/mindrecord/filewriter.py +5 -5
  97. mindspore/mindrecord/mindpage.py +2 -2
  98. mindspore/mindrecord/tools/cifar10.py +4 -3
  99. mindspore/mindrecord/tools/cifar100.py +1 -1
  100. mindspore/mindrecord/tools/cifar100_to_mr.py +1 -1
  101. mindspore/mindrecord/tools/cifar10_to_mr.py +6 -6
  102. mindspore/mindrecord/tools/csv_to_mr.py +1 -1
  103. mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
  104. mindspore/mindrecord/tools/mnist_to_mr.py +1 -1
  105. mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -1
  106. mindspore/mindspore_backend_common.dll +0 -0
  107. mindspore/mindspore_backend_manager.dll +0 -0
  108. mindspore/mindspore_cluster.dll +0 -0
  109. mindspore/mindspore_common.dll +0 -0
  110. mindspore/mindspore_core.dll +0 -0
  111. mindspore/mindspore_cpu.dll +0 -0
  112. mindspore/mindspore_dump.dll +0 -0
  113. mindspore/mindspore_frontend.dll +0 -0
  114. mindspore/mindspore_glog.dll +0 -0
  115. mindspore/mindspore_hardware_abstract.dll +0 -0
  116. mindspore/mindspore_memory_pool.dll +0 -0
  117. mindspore/mindspore_ms_backend.dll +0 -0
  118. mindspore/mindspore_ops.dll +0 -0
  119. mindspore/{mindspore_ops_host.dll → mindspore_ops_cpu.dll} +0 -0
  120. mindspore/mindspore_profiler.dll +0 -0
  121. mindspore/mindspore_pyboost.dll +0 -0
  122. mindspore/mindspore_pynative.dll +0 -0
  123. mindspore/mindspore_runtime_pipeline.dll +0 -0
  124. mindspore/mindspore_runtime_utils.dll +0 -0
  125. mindspore/mindspore_tools.dll +0 -0
  126. mindspore/mint/__init__.py +15 -10
  127. mindspore/mint/distributed/__init__.py +4 -0
  128. mindspore/mint/distributed/distributed.py +392 -69
  129. mindspore/mint/nn/__init__.py +2 -16
  130. mindspore/mint/nn/functional.py +4 -110
  131. mindspore/mint/nn/layer/__init__.py +0 -2
  132. mindspore/mint/nn/layer/_functions.py +1 -2
  133. mindspore/mint/nn/layer/activation.py +0 -6
  134. mindspore/mint/nn/layer/basic.py +0 -47
  135. mindspore/mint/nn/layer/conv.py +10 -10
  136. mindspore/mint/nn/layer/normalization.py +11 -16
  137. mindspore/mint/nn/layer/pooling.py +0 -4
  138. mindspore/nn/__init__.py +1 -3
  139. mindspore/nn/cell.py +231 -239
  140. mindspore/nn/layer/activation.py +4 -2
  141. mindspore/nn/layer/basic.py +56 -14
  142. mindspore/nn/layer/container.py +16 -0
  143. mindspore/nn/layer/embedding.py +4 -169
  144. mindspore/nn/layer/image.py +1 -1
  145. mindspore/nn/layer/normalization.py +2 -1
  146. mindspore/nn/layer/thor_layer.py +4 -85
  147. mindspore/nn/optim/ada_grad.py +0 -1
  148. mindspore/nn/optim/adafactor.py +0 -1
  149. mindspore/nn/optim/adam.py +32 -127
  150. mindspore/nn/optim/adamax.py +0 -1
  151. mindspore/nn/optim/asgd.py +0 -1
  152. mindspore/nn/optim/ftrl.py +8 -102
  153. mindspore/nn/optim/lamb.py +1 -4
  154. mindspore/nn/optim/lars.py +0 -3
  155. mindspore/nn/optim/lazyadam.py +25 -218
  156. mindspore/nn/optim/momentum.py +5 -43
  157. mindspore/nn/optim/optimizer.py +6 -55
  158. mindspore/nn/optim/proximal_ada_grad.py +0 -1
  159. mindspore/nn/optim/rmsprop.py +0 -1
  160. mindspore/nn/optim/rprop.py +0 -1
  161. mindspore/nn/optim/sgd.py +0 -1
  162. mindspore/nn/optim/tft_wrapper.py +2 -4
  163. mindspore/nn/optim/thor.py +0 -2
  164. mindspore/nn/probability/bijector/bijector.py +7 -8
  165. mindspore/nn/probability/bijector/gumbel_cdf.py +2 -2
  166. mindspore/nn/probability/bijector/power_transform.py +20 -21
  167. mindspore/nn/probability/bijector/scalar_affine.py +5 -5
  168. mindspore/nn/probability/bijector/softplus.py +13 -14
  169. mindspore/nn/probability/distribution/_utils/utils.py +2 -2
  170. mindspore/nn/wrap/cell_wrapper.py +39 -5
  171. mindspore/nn/wrap/grad_reducer.py +4 -89
  172. mindspore/numpy/array_creations.py +4 -4
  173. mindspore/numpy/fft.py +9 -9
  174. mindspore/numpy/utils_const.py +1 -1
  175. mindspore/{nn/reinforcement → onnx}/__init__.py +5 -8
  176. mindspore/onnx/onnx_export.py +137 -0
  177. mindspore/opencv_core4110.dll +0 -0
  178. mindspore/opencv_imgcodecs4110.dll +0 -0
  179. mindspore/{opencv_imgproc452.dll → opencv_imgproc4110.dll} +0 -0
  180. mindspore/ops/__init__.py +2 -0
  181. mindspore/ops/_grad_experimental/grad_comm_ops.py +38 -2
  182. mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
  183. mindspore/ops/_op_impl/aicpu/__init__.py +0 -10
  184. mindspore/ops/_op_impl/cpu/__init__.py +1 -5
  185. mindspore/ops/_op_impl/cpu/{buffer_append.py → joinedstr_op.py} +8 -8
  186. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +28 -24
  187. mindspore/ops/auto_generate/gen_extend_func.py +6 -11
  188. mindspore/ops/auto_generate/gen_ops_def.py +385 -154
  189. mindspore/ops/auto_generate/gen_ops_prim.py +5676 -5167
  190. mindspore/ops/communication.py +97 -0
  191. mindspore/ops/composite/__init__.py +5 -2
  192. mindspore/ops/composite/base.py +16 -2
  193. mindspore/ops/composite/multitype_ops/__init__.py +3 -1
  194. mindspore/ops/composite/multitype_ops/_compile_utils.py +150 -8
  195. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
  196. mindspore/ops/composite/multitype_ops/add_impl.py +7 -0
  197. mindspore/ops/composite/multitype_ops/mod_impl.py +27 -0
  198. mindspore/ops/function/__init__.py +2 -0
  199. mindspore/ops/function/array_func.py +24 -18
  200. mindspore/ops/function/comm_func.py +3883 -0
  201. mindspore/ops/function/debug_func.py +7 -6
  202. mindspore/ops/function/grad/grad_func.py +4 -12
  203. mindspore/ops/function/math_func.py +89 -86
  204. mindspore/ops/function/nn_func.py +92 -313
  205. mindspore/ops/function/random_func.py +9 -18
  206. mindspore/ops/functional.py +4 -1
  207. mindspore/ops/functional_overload.py +377 -30
  208. mindspore/ops/operations/__init__.py +2 -5
  209. mindspore/ops/operations/_custom_ops_utils.py +7 -9
  210. mindspore/ops/operations/_inner_ops.py +12 -50
  211. mindspore/ops/operations/_rl_inner_ops.py +0 -933
  212. mindspore/ops/operations/array_ops.py +5 -50
  213. mindspore/ops/operations/comm_ops.py +95 -17
  214. mindspore/ops/operations/custom_ops.py +237 -22
  215. mindspore/ops/operations/debug_ops.py +33 -35
  216. mindspore/ops/operations/manually_defined/ops_def.py +39 -318
  217. mindspore/ops/operations/math_ops.py +5 -5
  218. mindspore/ops/operations/nn_ops.py +3 -3
  219. mindspore/ops/operations/sparse_ops.py +0 -83
  220. mindspore/ops/primitive.py +4 -27
  221. mindspore/ops/tensor_method.py +88 -10
  222. mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +5 -5
  223. mindspore/ops_generate/aclnn/gen_aclnn_implement.py +8 -8
  224. mindspore/ops_generate/api/functions_cc_generator.py +53 -4
  225. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +25 -11
  226. mindspore/ops_generate/common/gen_constants.py +11 -10
  227. mindspore/ops_generate/common/op_proto.py +18 -1
  228. mindspore/ops_generate/common/template.py +102 -245
  229. mindspore/ops_generate/common/template_utils.py +212 -0
  230. mindspore/ops_generate/gen_custom_ops.py +69 -0
  231. mindspore/ops_generate/op_def/ops_def_cc_generator.py +78 -7
  232. mindspore/ops_generate/op_def_py/base_op_prim_py_generator.py +360 -0
  233. mindspore/ops_generate/op_def_py/custom_op_prim_py_generator.py +140 -0
  234. mindspore/ops_generate/op_def_py/op_def_py_generator.py +54 -7
  235. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -312
  236. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +74 -17
  237. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +22 -5
  238. mindspore/ops_generate/pyboost/gen_pyboost_func.py +0 -16
  239. mindspore/ops_generate/pyboost/op_template_parser.py +3 -2
  240. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +21 -5
  241. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +2 -2
  242. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +30 -10
  243. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +10 -3
  244. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +1 -1
  245. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +19 -9
  246. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +71 -28
  247. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +10 -9
  248. mindspore/ops_generate/pyboost/pyboost_utils.py +27 -16
  249. mindspore/ops_generate/resources/yaml_loader.py +13 -0
  250. mindspore/ops_generate/tensor_py_cc_generator.py +2 -2
  251. mindspore/parallel/_auto_parallel_context.py +5 -15
  252. mindspore/parallel/_cell_wrapper.py +1 -1
  253. mindspore/parallel/_parallel_serialization.py +4 -6
  254. mindspore/parallel/_ps_context.py +2 -2
  255. mindspore/parallel/_utils.py +34 -17
  256. mindspore/parallel/auto_parallel.py +23 -9
  257. mindspore/parallel/checkpoint_transform.py +20 -2
  258. mindspore/parallel/cluster/process_entity/_api.py +28 -33
  259. mindspore/parallel/cluster/process_entity/_utils.py +9 -5
  260. mindspore/parallel/cluster/run.py +5 -3
  261. mindspore/{experimental/llm_boost/ascend_native → parallel/distributed}/__init__.py +21 -22
  262. mindspore/parallel/distributed/distributed_data_parallel.py +393 -0
  263. mindspore/parallel/distributed/flatten_grad_buffer.py +295 -0
  264. mindspore/parallel/function/reshard_func.py +6 -5
  265. mindspore/parallel/nn/parallel_cell_wrapper.py +40 -3
  266. mindspore/parallel/nn/parallel_grad_reducer.py +0 -8
  267. mindspore/parallel/shard.py +7 -21
  268. mindspore/parallel/strategy.py +336 -0
  269. mindspore/parallel/transform_safetensors.py +127 -20
  270. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +13 -9
  271. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +1 -1
  272. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +1 -1
  273. mindspore/profiler/common/constant.py +5 -0
  274. mindspore/profiler/common/file_manager.py +9 -0
  275. mindspore/profiler/common/msprof_cmd_tool.py +40 -4
  276. mindspore/profiler/common/path_manager.py +65 -24
  277. mindspore/profiler/common/profiler_context.py +27 -14
  278. mindspore/profiler/common/profiler_info.py +3 -3
  279. mindspore/profiler/common/profiler_meta_data.py +1 -0
  280. mindspore/profiler/common/profiler_op_analyse.py +10 -6
  281. mindspore/profiler/common/profiler_path_manager.py +13 -0
  282. mindspore/profiler/common/util.py +30 -3
  283. mindspore/profiler/dynamic_profiler.py +91 -46
  284. mindspore/profiler/envprofiler.py +30 -5
  285. mindspore/profiler/experimental_config.py +18 -2
  286. mindspore/profiler/platform/cpu_profiler.py +10 -4
  287. mindspore/profiler/platform/npu_profiler.py +34 -7
  288. mindspore/profiler/profiler.py +193 -145
  289. mindspore/profiler/profiler_action_controller.py +1 -1
  290. mindspore/profiler/profiler_interface.py +2 -2
  291. mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
  292. mindspore/run_check/_check_version.py +108 -24
  293. mindspore/runtime/__init__.py +9 -6
  294. mindspore/runtime/executor.py +35 -0
  295. mindspore/runtime/memory.py +113 -0
  296. mindspore/runtime/thread_bind_core.py +1 -1
  297. mindspore/swresample-4.dll +0 -0
  298. mindspore/swscale-6.dll +0 -0
  299. mindspore/tinyxml2.dll +0 -0
  300. mindspore/{experimental/llm_boost → tools}/__init__.py +5 -5
  301. mindspore/tools/data_dump.py +130 -0
  302. mindspore/tools/sdc_detect.py +91 -0
  303. mindspore/tools/stress_detect.py +63 -0
  304. mindspore/train/__init__.py +6 -6
  305. mindspore/train/_utils.py +8 -21
  306. mindspore/train/amp.py +6 -7
  307. mindspore/train/callback/_callback.py +2 -1
  308. mindspore/train/callback/_checkpoint.py +1 -17
  309. mindspore/train/callback/_flops_collector.py +10 -6
  310. mindspore/train/callback/_train_fault_tolerance.py +72 -25
  311. mindspore/train/data_sink.py +5 -9
  312. mindspore/train/dataset_helper.py +5 -5
  313. mindspore/train/model.py +41 -230
  314. mindspore/train/serialization.py +160 -401
  315. mindspore/train/train_thor/model_thor.py +2 -2
  316. mindspore/turbojpeg.dll +0 -0
  317. mindspore/utils/__init__.py +6 -3
  318. mindspore/utils/dlpack.py +92 -0
  319. mindspore/utils/dryrun.py +1 -1
  320. mindspore/utils/runtime_execution_order_check.py +10 -0
  321. mindspore/utils/sdc_detect.py +14 -12
  322. mindspore/utils/stress_detect.py +43 -0
  323. mindspore/utils/utils.py +152 -16
  324. mindspore/version.py +1 -1
  325. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/METADATA +3 -2
  326. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/RECORD +330 -344
  327. mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
  328. mindspore/communication/_hccl_management.py +0 -297
  329. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -207
  330. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +0 -52
  331. mindspore/experimental/llm_boost/atb/__init__.py +0 -23
  332. mindspore/experimental/llm_boost/atb/boost_base.py +0 -385
  333. mindspore/experimental/llm_boost/atb/llama_boost.py +0 -137
  334. mindspore/experimental/llm_boost/atb/qwen_boost.py +0 -124
  335. mindspore/experimental/llm_boost/register.py +0 -130
  336. mindspore/experimental/llm_boost/utils.py +0 -31
  337. mindspore/include/OWNERS +0 -7
  338. mindspore/mindspore_cpu_res_manager.dll +0 -0
  339. mindspore/mindspore_ops_kernel_common.dll +0 -0
  340. mindspore/mindspore_res_manager.dll +0 -0
  341. mindspore/nn/optim/_dist_optimizer_registry.py +0 -111
  342. mindspore/nn/reinforcement/_batch_read_write.py +0 -142
  343. mindspore/nn/reinforcement/_tensors_queue.py +0 -152
  344. mindspore/nn/reinforcement/tensor_array.py +0 -145
  345. mindspore/opencv_core452.dll +0 -0
  346. mindspore/opencv_imgcodecs452.dll +0 -0
  347. mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +0 -113
  348. mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +0 -96
  349. mindspore/ops/_op_impl/aicpu/sparse_cross.py +0 -42
  350. mindspore/ops/_op_impl/cpu/buffer_get.py +0 -28
  351. mindspore/ops/_op_impl/cpu/buffer_sample.py +0 -28
  352. mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +0 -42
  353. mindspore/ops/operations/_tensor_array.py +0 -359
  354. mindspore/ops/operations/rl_ops.py +0 -288
  355. mindspore/parallel/_offload_context.py +0 -275
  356. mindspore/parallel/_recovery_context.py +0 -115
  357. mindspore/parallel/_transformer/__init__.py +0 -35
  358. mindspore/parallel/_transformer/layers.py +0 -765
  359. mindspore/parallel/_transformer/loss.py +0 -251
  360. mindspore/parallel/_transformer/moe.py +0 -693
  361. mindspore/parallel/_transformer/op_parallel_config.py +0 -222
  362. mindspore/parallel/_transformer/transformer.py +0 -3124
  363. mindspore/parallel/mpi/_mpi_config.py +0 -116
  364. mindspore/profiler/common/validator/validate_path.py +0 -84
  365. mindspore/train/memory_profiling_pb2.py +0 -298
  366. mindspore/utils/hooks.py +0 -81
  367. /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
  368. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/WHEEL +0 -0
  369. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/entry_points.txt +0 -0
  370. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/top_level.txt +0 -0
mindspore/mint/nn/__init__.py CHANGED
@@ -61,6 +61,7 @@ from mindspore.nn.layer import ReLU
 
 # 14
 from mindspore.nn.layer.basic import DropoutExt as Dropout
+from mindspore.nn.layer.basic import Dropout2dExt as Dropout2d
 # 15
 from mindspore.mint.nn.layer.conv import Conv1d, Conv2d, Conv3d, ConvTranspose2d
 # 16
@@ -260,9 +261,6 @@ from mindspore.mint.nn.layer.activation import Threshold
 # 258
 from mindspore.ops.function.nn_func import mse_loss_ext
 
-# 393
-from mindspore.mint.nn.layer.basic import Dropout2d
-
 # 406
 from mindspore.mint.nn.layer.activation import ELU
 
@@ -325,9 +323,6 @@ class NLLLoss(Cell):
         \sum_{n=1}^{N} l_{n}, & \text { if reduction }=\text { 'sum' }
         \end{array}\right.
 
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
     Args:
         weight (Tensor, optional): A rescaling weight applied to the loss of each batch element.
             If not None, the shape is :math:`(C,)`, data type must be float16 or float32 or bfloat16(only supported by
@@ -696,9 +691,6 @@ class ReLU6(Cell):
     r"""
     Activation function ReLU6.
 
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
     Refer to :func:`mindspore.mint.nn.functional.relu6` for more details.
 
     ReLU6 Activation Function Graph:
@@ -847,9 +839,6 @@ class SmoothL1Loss(Cell):
 
     Refer to :func:`mindspore.mint.nn.functional.smooth_l1_loss` for more details.
 
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
     Supported Platforms:
         ``Ascend``
 
@@ -1190,7 +1179,7 @@ class PixelShuffle(Cell):
         >>> input = mint.randn(1, 9, 4, 4)
         >>> output = pixel_shuffle(input)
         >>> print(output.shape)
-        [1, 1, 12, 12]
+        (1, 1, 12, 12)
     """
 
     def __init__(self, upscale_factor):
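
Note on the doctest fix above: `Tensor.shape` in MindSpore is a Python tuple, so the expected output needed parentheses rather than brackets. A minimal re-run of the docstring example (hedged sketch: assumes `upscale_factor=3`, as the 9-to-1 channel reduction implies, and an Ascend-capable install):

    import mindspore
    from mindspore import mint

    pixel_shuffle = mint.nn.PixelShuffle(3)   # 9 channels / 3**2 -> 1; H and W scale by 3
    input = mint.randn(1, 9, 4, 4)
    output = pixel_shuffle(input)
    print(output.shape)                       # (1, 1, 12, 12), a tuple, not a list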
@@ -1448,9 +1437,6 @@ __all__ = [
 
     # 388
     'AdaptiveMaxPool2d',
-
-    # 393
-    'Dropout2d',
     # 406
     'ELU',
     # 407
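
Taken together, the hunks in this file stop importing `Dropout2d` from `mindspore.mint.nn.layer.basic` (the class is deleted further down) and instead bind `mindspore.nn.layer.basic.Dropout2dExt` to the same public name. Call sites should keep working; a hedged sketch mirroring the removed docstring example (assumes Ascend):

    import mindspore
    import numpy as np
    from mindspore import Tensor, mint

    dropout = mint.nn.Dropout2d(p=0.5)   # same entry point, now backed by Dropout2dExt
    x = Tensor(np.ones([2, 1, 2, 3]), mindspore.float32)
    print(dropout(x).shape)              # (2, 1, 2, 3)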
mindspore/mint/nn/functional.py CHANGED
@@ -15,10 +15,7 @@
 """mint nn functional."""
 from __future__ import absolute_import
 import mindspore.ops as ops
-import mindspore.mint as mint
-from mindspore import log as logger
 from mindspore import _checkparam as validator
-from mindspore.ops.primitive import constexpr
 from mindspore.ops.function.nn_func import max_pool2d_ext as max_pool2d
 from mindspore.ops.functional import (
     conv_transpose2d,
@@ -55,8 +52,9 @@ from mindspore.ops.function.nn_func import relu_
 
 # 14
 from mindspore.ops.function.nn_func import dropout_ext as dropout
+from mindspore.ops.function.nn_func import dropout2d_ext as dropout2d
 # 15
-from mindspore.ops.function.nn_func import conv1d_ext as conv1d
+from mindspore.ops.functional_overload import conv1d
 from mindspore.ops.function.nn_func import conv2d_ext as conv2d
 # 16
 from mindspore.ops.function.nn_func import log_softmax_ext as log_softmax
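
Two rewires here: `mint.nn.functional.dropout2d` becomes a re-export of the `dropout2d_ext` kernel (replacing the pure-Python implementation deleted further down), and `conv1d` is now served by the generated `functional_overload` dispatcher instead of `conv1d_ext`. Call-site usage appears unchanged; a hedged sketch (assumes Ascend):

    import mindspore
    import numpy as np
    from mindspore import Tensor, mint

    x = Tensor(np.ones([2, 1, 2, 3]), mindspore.float32)
    print(mint.nn.functional.dropout2d(x, p=0.5).shape)   # (2, 1, 2, 3)

    inp = Tensor(np.ones([2, 1, 6]), mindspore.float32)   # (N, C_in, L)
    w = Tensor(np.ones([4, 1, 2]), mindspore.float32)     # (C_out, C_in, kernel)
    print(mint.nn.functional.conv1d(inp, w).shape)        # (2, 4, 5)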
@@ -126,6 +124,7 @@ from mindspore.ops.auto_generate import inplace_silu
 
 # 49
 from mindspore.ops.functional import sigmoid
+from mindspore.ops.functional import inplace_sigmoid as sigmoid_
 # 50
 
 # 51
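
The new `sigmoid_` export follows the trailing-underscore convention for in-place variants (compare `relu_` and `inplace_silu` above). A hedged sketch, assuming `inplace_sigmoid` overwrites its argument like the other `inplace_*` ops:

    import mindspore
    import numpy as np
    from mindspore import Tensor, mint

    x = Tensor(np.array([0.0, 2.0]), mindspore.float32)
    mint.nn.functional.sigmoid_(x)   # in-place: x itself is overwritten
    print(x)                         # [0.5       0.8807971]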
@@ -502,9 +501,6 @@ def relu6(input, inplace=False):
     .. image:: ../images/ReLU6.png
         :align: center
 
-    .. warning::
-        This is an experimental optimizer API that is subject to change.
-
     Args:
         input (Tensor): input Tensor. Dtype is in int8, int16, int32, int64, uint8, float16, float32, bfloat16.
         inplace (bool, optional): Whether to apply erasing inplace. Default: ``False``.
@@ -770,9 +766,6 @@ def smooth_l1_loss(input, target, reduction='mean', beta=1.0):
     Here :math:`\text{beta}` controls the point where the loss function changes from quadratic to linear.
     :math:`\text{beta} \geq 0` , its default value is ``1.0`` . :math:`N` is the batch size.
 
-    .. warning::
-        This is an experimental optimizer API that is subject to change.
-
     Note:
         - Arg `input` and `target` comply with the implicit type conversion rules to make the data types consistent.
          If they have different data types, the lower precision data type will be converted to relatively the
@@ -837,104 +830,6 @@ def smooth_l1_loss(input, target, reduction='mean', beta=1.0):
     return ops.function.smooth_l1_loss(input, target, beta, reduction)
 
 
-@constexpr
-def log_warning(msg):
-    """Adds warning to logger."""
-    logger.warning(msg)
-
-
-def dropout2d(input, p=0.5, training=True):
-    r"""
-    During training, randomly zeroes some channels of the input tensor with probability `p`
-    from a Bernoulli distribution (For a 4-dimensional tensor with a shape of :math:`(N, C, H, W)`,
-    the channel feature map refers to a 2-dimensional feature map with the shape of :math:`(H, W)`).
-
-    For example, the :math:`j\_th` channel of the :math:`i\_th` sample in the batched input is a to-be-processed
-    `2D` tensor input[i,j].
-    Each channel will be zeroed out independently on every forward call which based on Bernoulli distribution
-    probability `p`.
-    The parper `Dropout: A Simple Way to Prevent Neural Networks from Overfitting
-    <http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf>`_ mentioned this technology, and it is proved that
-    it can effectively reduce over fitting and prevent neuronal coadaptation.
-    For more details, refer to `Improving neural networks by preventing co-adaptation of feature detectors
-    <https://arxiv.org/pdf/1207.0580.pdf>`_ .
-
-    `dropout2d` can improve the independence between channel feature maps.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Args:
-        input (Tensor): A `4D` tensor with shape :math:`(N, C, H, W)`, where `N` is the batch size, `C` is the number
-            of channels, `H` is the feature height, and `W` is the feature width.
-        p (float, optional): The dropping probability of a channel, between 0 and 1, e.g. `p` = 0.8,
-            which means dropping out 80% of channels. Default: ``0.5`` .
-        training(bool, optional): If `training` is True, applying dropout, otherwise, not applying. Default: ``True`` .
-
-    Returns:
-        Tensor, output, with the same shape and data type as `input`.
-
-    Raises:
-        TypeError: If `input` is not a Tensor.
-        TypeError: If the data type of `p` is not float.
-        ValueError: If `p` is out of the range `[0.0, 1.0]`.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, mint
-        >>> input = Tensor(np.ones([2, 1, 2, 3]), mindspore.float32)
-        >>> output = mint.nn.functional.dropout2d(input, 0.5)
-        >>> print(output.shape)
-        (2, 1, 2, 3)
-    """
-    def dropout2d_impl_(input, p, training):
-        if p == 0 or not training or input.numel() == 0:
-            return input
-
-        if p == 1:
-            return mint.mul(input, mint.zeros((), dtype=input.dtype))
-
-        if input.ndim < 2:
-            raise ValueError(f'For dropout2d, input size after unsqueeze must be greater or equal to 2')
-
-        if ops.is_sequence_shape_unknown(input.shape):
-            input_tensor_shape = ops.TensorShape()(input)
-            nosie_tensor_shape = mint.ones_like(input_tensor_shape)
-            nosie_tensor_shape[0] = input_tensor_shape[0]
-            nosie_tensor_shape[1] = input_tensor_shape[1]
-            nosie_shape = ops.TensorToTuple()(nosie_tensor_shape)
-        else:
-            nosie_shape = input.shape[:2] + tuple(1 for _ in range(len(input.shape) - 2))
-        nosie = mint.full(nosie_shape, 1 - p, dtype=input.dtype)
-        nosie = mint.bernoulli(nosie)
-        nosie = mint.div(nosie, 1 - p)
-
-        return mint.mul(input, nosie)
-
-    validator.check_float_range(p, 0.0, 1.0, validator.INC_BOTH, "p", "dropout2d")
-    validator.check_bool(training, "training", "dropout2d")
-
-    if input.ndim not in (3, 4):
-        log_warning(f"dropout2d receviced a {input.ndim}-D input which is not recommended. Please use dropout instead.")
-
-    is_batched = input.ndim == 4
-    if not is_batched:
-        input_shape = input.shape
-        if ops.is_sequence_shape_unknown(input.shape):
-            input_shape = ops.TensorToTuple()(ops.TensorShape()(input))
-        input = input.reshape((1, *input_shape))
-        result = dropout2d_impl_(input, p, training)
-        result = result.reshape(input_shape)
-    else:
-        result = dropout2d_impl_(input, p, training)
-
-    return result
-
-
 def normalize(input, p=2.0, dim=1, eps=1e-12):
     r"""
     Perform normalization of inputs over specified dimension
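
The deleted implementation draws one Bernoulli sample per `(N, C)` channel, broadcasts it over the spatial dimensions, and rescales by `1/(1 - p)` so the expected activation is unchanged; that behavior now lives in the `dropout2d_ext` kernel imported above. A NumPy sketch of the same semantics, for illustration only (not MindSpore API):

    import numpy as np

    def dropout2d_ref(x, p=0.5, training=True, rng=None):
        # NumPy reference of the channel dropout removed above (illustration only).
        rng = rng or np.random.default_rng()
        if not training or p == 0 or x.size == 0:
            return x
        if p == 1:
            return np.zeros_like(x)
        # One Bernoulli draw per (N, C) pair, broadcast over H and W,
        # scaled by 1/(1 - p) so E[output] == E[input].
        noise_shape = x.shape[:2] + (1,) * (x.ndim - 2)
        mask = rng.binomial(1, 1 - p, size=noise_shape) / (1 - p)
        return x * mask

    print(dropout2d_ref(np.ones((2, 1, 2, 3), dtype=np.float32)).shape)  # (2, 1, 2, 3)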
@@ -1060,7 +955,6 @@ def adaptive_avg_pool3d(input, output_size):
 
     .. warning::
         For Ascend, it is only supported on Atlas A2 Training Series Products.
-        This is an experimental optimizer API that is subject to change or deletion.
 
     Args:
         input (Tensor): The input of adaptive_avg_pool3d, which is a 4D or 5D Tensor.
@@ -1265,6 +1159,7 @@ __all__ = [
 
     # 49
     'sigmoid',
+    'sigmoid_',
     # 50
 
     # 51
@@ -1402,7 +1297,6 @@ __all__ = [
     'adaptive_avg_pool2d',
 
     # 350
-    'conv1d',
 
     # 393
     'dropout2d',
mindspore/mint/nn/layer/__init__.py CHANGED
@@ -31,7 +31,6 @@ from mindspore.mint.nn.layer.normalization import SyncBatchNorm
 from mindspore.mint.nn.layer.activation import LogSigmoid
 from mindspore.mint.nn.layer.activation import SiLU
 from mindspore.mint.nn.layer.activation import Threshold
-from mindspore.mint.nn.layer.basic import Dropout2d
 from mindspore.mint.nn.layer.pooling import AdaptiveMaxPool1d
 from mindspore.mint.nn.layer.pooling import AdaptiveAvgPool1d
 from mindspore.mint.nn.layer.pooling import AdaptiveAvgPool2d
@@ -46,7 +45,6 @@ __all__ = [
     'LayerNorm',
     'LogSigmoid',
     'SiLU',
-    'Dropout2d',
     'AdaptiveMaxPool1d',
     'AdaptiveAvgPool1d',
     'AdaptiveAvgPool2d',
mindspore/mint/nn/layer/_functions.py CHANGED
@@ -23,7 +23,7 @@ from mindspore.communication.management import get_rank, get_group_size, GlobalC
 from mindspore.ops.auto_generate.gen_ops_prim import BatchNormReduceGrad
 from mindspore.ops.auto_generate.gen_ops_prim import BatchNormElemtGrad
 from mindspore.ops.primitive import Primitive, prim_arg_register, PrimitiveWithInfer, prim_attr_register
-from mindspore.ops.operations.comm_ops import ReduceOp, check_hcom_group_valid, check_collective_target_dtype
+from mindspore.ops.operations.comm_ops import ReduceOp, check_collective_target_dtype
 
 batch_norm_reduce_grad = BatchNormReduceGrad()
 batch_norm_elemt_grad = BatchNormElemtGrad()
@@ -71,7 +71,6 @@ class AllReduce(Primitive):
         if not isinstance(self.group, str):
             raise TypeError(f"For '{self.name}', the 'group' must be str, "
                             f"but got {type(self.group).__name__}.")
-        check_hcom_group_valid(self.group, prim_name=self.name)
         self.op = op
         self.add_prim_attr('group', self.group)
         self.add_prim_attr('fusion', 0)
mindspore/mint/nn/layer/activation.py CHANGED
@@ -147,9 +147,6 @@ class LogSigmoid(Cell):
     .. image:: ../images/LogSigmoid.png
         :align: center
 
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
     Inputs:
         - **input** (Tensor) - The input of LogSigmoid with data type of bfloat16, float16 or float32.
           The shape is :math:`(*)` where :math:`*` means, any number of additional dimensions.
@@ -313,9 +310,6 @@ class Tanh(Cell):
     .. image:: ../images/Tanh.png
         :align: center
 
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
     Inputs:
         - **input** (Tensor) - Tensor of any dimension, input with data type of float16 or float32.
 
mindspore/mint/nn/layer/basic.py CHANGED
@@ -18,52 +18,6 @@ from __future__ import division
 
 from mindspore import mint
 from mindspore.nn.cell import Cell
-from mindspore import _checkparam as validator
-
-
-class Dropout2d(Cell):
-    r"""
-    During training, randomly zeroes some channels of the input tensor with probability `p`
-    from a Bernoulli distribution (For a 4-dimensional tensor with a shape of :math:`NCHW`,
-    the channel feature map refers to a 2-dimensional feature map with the shape of :math:`HW`).
-
-    For example, the :math:`j\_th` channel of the :math:`i\_th` sample in the batched input is a to-be-processed
-    `2D` tensor input[i,j].
-    Each channel will be zeroed out independently on every forward call with probability `p` using samples
-    from a Bernoulli distribution.
-
-    `Dropout2d` can improve the independence between channel feature maps.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Refer to :func:`mindspore.mint.nn.functional.dropout2d` for more details.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, mint
-        >>> import numpy as np
-        >>> dropout = mint.nn.Dropout2d(p=0.5)
-        >>> x = Tensor(np.ones([2, 1, 2, 3]), mindspore.float32)
-        >>> output = dropout(x)
-        >>> print(output.shape)
-        (2, 1, 2, 3)
-    """
-
-    def __init__(self, p=0.5):
-        """Initialize Dropout2d."""
-        super(Dropout2d, self).__init__()
-        validator.check_float_range(p, 0.0, 1.0, validator.INC_BOTH, "p", self.cls_name)
-        self.p = p
-
-    def construct(self, x):
-        if not self.training or self.p == 0:
-            return x
-
-        return mint.nn.functional.dropout2d(x, self.p)
 
 
 class Flatten(Cell):
@@ -118,6 +72,5 @@ class Flatten(Cell):
 
 
 __all__ = [
-    'Dropout2d',
     'Flatten',
 ]
mindspore/mint/nn/layer/conv.py CHANGED
@@ -200,8 +200,6 @@ class Conv1d(_Conv):
             possible length. Extra sequence that could not complete a full stride will
             be discarded.
 
-        padding_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
-            ``"zeros"`` , ``"reflect"`` or ``"replicate"`` . Default: ``"zeros"`` .
         dilation (Union[int, tuple[int], list[int]], optional): Specifies the dilation
             rate to use for dilated convolution.
             It can be a single int or a tuple/list of 1 integer.
@@ -219,13 +217,15 @@
         - :math:`(\text{weight[1]} = C_{in} / \text{groups})`
 
         bias (bool, optional): Whether the Conv1d layer has a bias parameter. Default: ``True`` .
+        padding_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+            ``"zeros"`` , ``"reflect"`` or ``"replicate"`` . Default: ``"zeros"`` .
         dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``None``, using ``mstype.float32``.
 
     Variables:
-        - **weight** (Tensor) - The weight of the convolution layer, with shape :math:
-          `(C_{out}, C_{in} / \text{groups}, \text{kernel_size[0]})`.
-        - **bias** (Tensor) - The bias of the convolution layer, with shape :math:
-          `(C_{out})`. If bias is False, this will be None.
+        - **weight** (Tensor) - The weight of the convolution layer, with shape
+          :math:`(C_{out}, C_{in} / \text{groups}, \text{kernel_size[0]})`.
+        - **bias** (Tensor) - The bias of the convolution layer, with shape
+          :math:`(C_{out})`. If bias is False, this will be None.
 
     Inputs:
         - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})` \
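
The Conv1d/Conv2d/Conv3d hunks in this file only move the documented `padding_mode` entry so it follows `bias`, matching the constructors' actual parameter order, and fix the broken `:math:` line wrapping; no behavior changes. Keyword arguments sidestep the ordering question entirely, as in this hedged sketch:

    from mindspore import mint

    conv = mint.nn.Conv1d(4, 8, kernel_size=3, bias=True, padding_mode="zeros")
    print(conv.weight.shape)   # (8, 4, 3): (C_out, C_in / groups, kernel_size[0])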
@@ -393,8 +393,6 @@ class Conv2d(_Conv):
             possible height and width. Extra pixels that could not complete a full stride will
             be discarded.
 
-        padding_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
-            ``"zeros"`` , ``"reflect"`` or ``"replicate"`` . Default: ``"zeros"`` .
         dilation (Union[int, tuple[int], list[int]], optional): Specifies the dilation rate to use
             for dilated convolution.
             It can be a single int or a tuple/list of 2 integers. A single int means the dilation size is the same
@@ -415,6 +413,8 @@ class Conv2d(_Conv):
         - :math:`(\text{weight[1]} = C_{in} / \text{groups})`
 
         bias (bool, optional): Whether the Conv2d layer has a bias parameter. Default: ``True`` .
+        padding_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+            ``"zeros"`` , ``"reflect"`` or ``"replicate"`` . Default: ``"zeros"`` .
         dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``None``, using ``mstype.float32``.
 
     Variables:
@@ -600,8 +600,6 @@ class Conv3d(_Conv):
             possible height and width. Extra pixels that could not complete a full stride will
             be discarded.
 
-        padding_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
-            ``"zeros"`` , ``"reflect"`` or ``"replicate"`` . Default: ``"zeros"`` .
         dilation (Union[int, tuple[int], list[int]], optional): Controlling the space between the kernel points.
             Default: ``1`` .
         groups (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
@@ -615,6 +613,8 @@ class Conv3d(_Conv):
         - :math:`(\text{weight[1]} = C_{in} / \text{groups})`
 
         bias (bool, optional): Whether the Conv3d layer has a bias parameter. Default: ``True`` .
+        padding_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+            ``"zeros"`` , ``"reflect"`` or ``"replicate"`` . Default: ``"zeros"`` .
         dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``None``, using ``mstype.float32``.
 
     Variables:
mindspore/mint/nn/layer/normalization.py CHANGED
@@ -37,7 +37,6 @@ class _NormBase(Cell):
                  dtype=None
                  ) -> None:
         super(_NormBase, self).__init__()
-        self.set_train()
         self.shape = ops.Shape()
         self.num_features = num_features
         self.eps = eps
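
With `set_train()` gone from `_NormBase.__init__` (and the forced `self.training = True` removed from `_BatchNorm` below), a freshly built norm layer presumably defaults to inference mode and normalizes with running statistics, which would explain the revised docstring outputs later in this file. A hedged sketch of the resulting default:

    import mindspore
    import numpy as np
    from mindspore import Tensor, mint

    net = mint.nn.BatchNorm1d(4)             # no longer forced into training mode
    x = Tensor(np.array([[0.7, 0.5, 0.5, 0.6],
                         [0.5, 0.4, 0.6, 0.9]]), mindspore.float32)
    print(net(x))    # ~x / sqrt(1 + eps) with fresh running stats (mean 0, var 1)
    net.set_train()  # opt back in to batch statistics explicitly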
@@ -110,7 +109,6 @@ class _BatchNorm(_NormBase):
                  dtype=None) -> None:
         super(_BatchNorm, self).__init__(num_features, eps, momentum, affine, track_running_stats,
                                          dtype)
-        self.training = True
 
 
     def _check_input_dim(self, input):
@@ -170,7 +168,6 @@ class BatchNorm1d(_BatchNorm):
 
     .. warning::
         This API does not support Dynamic Rank.
-        This is an experimental API that is subject to change or deletion.
 
     Args:
         num_features (int): `C` from an expected input of shape :math:`(N, C, L)`.
@@ -209,8 +206,8 @@ class BatchNorm1d(_BatchNorm):
         >>> net = mint.nn.BatchNorm1d(4)
         >>> output = net(input_x)
         >>> print(output)
-        [[ 0.99950075 0.9980011 -0.9980068 -0.9997783]
-         [-0.9995012 -0.99799967 0.9980068 0.9997778]]
+        [[0.6999965 0.4999975 0.4999975 0.59999704]
+         [0.4999975 0.399998 0.59999704 0.89999545]]
     """
 
     def _check_input_dim(self, input):
@@ -218,7 +215,7 @@ class BatchNorm1d(_BatchNorm):
         dim = len(shape)
         if dim != 2 and dim != 3:
             raise ValueError(
-                "expected 2D or 3D input (got {}D input)".format(dim)
+                "expected 2D or 3D input, but got " + str(dim) + "D input"
             )
 
 
@@ -239,7 +236,6 @@ class BatchNorm2d(_BatchNorm):
 
     .. warning::
         - This API does not support Dynamic Rank.
-        - This is an experimental API that is subject to change or deletion.
 
     Args:
         num_features (int): `C` from an expected input of shape :math:`(N, C, H, W)`.
@@ -277,10 +273,10 @@ class BatchNorm2d(_BatchNorm):
         >>> net = mint.nn.BatchNorm2d(2)
         >>> output = net(input_x)
         >>> print(output)
-        [[[[-0.99950075]]
-          [[0.9980087]]]
-         [[[0.999501]]
-          [[-0.9980097]]]]
+        [[[[0.29999852]]
+          [[0.399998  ]]]
+         [[[0.4999975 ]]
+          [[0.29999852]]]]
     """
 
     def _check_input_dim(self, input):
@@ -288,7 +284,7 @@ class BatchNorm2d(_BatchNorm):
         dim = len(shape)
         if dim != 4:
             raise ValueError(
-                "expected 4D input (got {}D input)".format(dim)
+                "expected 4D input, but got " + str(dim) + "D input"
            )
 
 
@@ -309,7 +305,6 @@ class BatchNorm3d(_BatchNorm):
 
     .. warning::
         This API does not support Dynamic Rank.
-        This is an experimental API that is subject to change or deletion.
 
     Args:
         num_features (int): `C` from an expected input of shape :math:`(N, C, D, H, W)`.
@@ -347,8 +342,8 @@ class BatchNorm3d(_BatchNorm):
         >>> net = mint.nn.BatchNorm3d(2)
         >>> output = net(input_x)
         >>> print(output)
-        [[[[[-0.9999688 0.99996865]]]
-          [[[-0.9999833 06.9999831]]]]]
+        [[[[[0.0999995 0.89999545]]]
+          [[[1.1999941 2.2999885 ]]]]]
     """
 
     def _check_input_dim(self, input):
@@ -356,7 +351,7 @@ class BatchNorm3d(_BatchNorm):
         dim = len(shape)
         if dim != 5:
             raise ValueError(
-                "expected 5D input (got {}D input)".format(dim)
+                "expected 5D input, but got " + str(dim) + "D input"
             )
 
 
mindspore/mint/nn/layer/pooling.py CHANGED
@@ -73,9 +73,6 @@ class AdaptiveAvgPool2d(_AdaptiveAvgPoolNd):
     The output is of size :math:`H x W` , for any input size.
     The number of output features is equal to the number of input planes.
 
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
     Args:
         output_size (Union(int, tuple[int])): the target output size of the image of the form :math:`H x W` .
             Can be a tuple :math:`(H, W)` or a single :math:`H` for square image :math:`H x H` .
@@ -129,7 +126,6 @@ class AdaptiveAvgPool3d(Cell):
 
     .. warning::
         For Ascend, it is only supported on Atlas A2 Training Series Products.
-        This is an experimental optimizer API that is subject to change or deletion.
 
     Args:
         output_size (Union[int, tuple]): The target output size. `output_size` can be a tuple :math:`(D, H, W)`,
mindspore/nn/__init__.py CHANGED
@@ -21,7 +21,7 @@ from __future__ import absolute_import
 
 __all__ = ["Cell", "GraphCell", "PipelineGradReducer", "PipelineCell", "MicroBatchInterleaved"]
 
-from mindspore.nn import layer, loss, optim, wrap, grad, metrics, probability, sparse, dynamic_lr, reinforcement
+from mindspore.nn import layer, loss, optim, wrap, grad, metrics, probability, sparse, dynamic_lr
 from mindspore.parallel.nn.parallel_grad_reducer import PipelineGradReducer
 from mindspore.parallel.nn.parallel_cell_wrapper import PipelineCell, MicroBatchInterleaved
 from mindspore.nn.learning_rate_schedule import *
@@ -34,7 +34,6 @@ from mindspore.nn.metrics import *
 from mindspore.nn.wrap import *
 from mindspore.nn.grad import Jvp, Vjp
 from mindspore.nn.sparse import *
-from mindspore.nn.reinforcement import *
 from mindspore.nn.utils import *
 
 __all__.extend(layer.__all__)
@@ -46,7 +45,6 @@ __all__.extend(grad.__all__)
 __all__.extend(sparse.__all__)
 __all__.extend(learning_rate_schedule.__all__)
 __all__.extend(dynamic_lr.__all__)
-__all__.extend(reinforcement.__all__)
 __all__.extend(utils.__all__)
 
 __all__.sort()
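
These final hunks drop `mindspore.nn.reinforcement` from the public `nn` namespace; the file list above shows its modules (`_batch_read_write.py`, `_tensors_queue.py`, `tensor_array.py`) deleted outright. Code still importing the RL helpers needs a guard or a migration; a hedged shim:

    # Compatibility shim for code that imported the removed RL APIs.
    try:
        from mindspore.nn.reinforcement import TensorArray  # available <= 2.7.0rc1
    except ImportError:                                     # removed in 2.7.1
        TensorArray = None  # callers must migrate off nn.reinforcement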