mindspore-2.4.0-cp310-cp310-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
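
The wheel filename itself encodes the build targets under the PEP 427 naming scheme: CPython 3.10 (the cp310 interpreter and ABI tags) on macOS 11.0 or later for Apple Silicon (macosx_11_0_arm64). As a minimal sketch, assuming only that standard scheme with no optional build tag, the tags can be split out of the name directly:

    # Minimal sketch: split a PEP 427 wheel filename into its components.
    # Assumes the standard {dist}-{version}-{python}-{abi}-{platform}.whl
    # layout with no optional build tag.
    name = "mindspore-2.4.0-cp310-cp310-macosx_11_0_arm64.whl"
    dist, version, python_tag, abi_tag, platform_tag = name[:-len(".whl")].split("-")
    print(dist)          # mindspore
    print(version)       # 2.4.0
    print(python_tag)    # cp310 -> CPython 3.10
    print(abi_tag)       # cp310 -> the CPython 3.10 ABI
    print(platform_tag)  # macosx_11_0_arm64 -> macOS 11.0+, arm64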

Potentially problematic release.

This version of mindspore might be problematic.
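
The listing below enumerates every file shipped in this release, with lines added and removed relative to the previously published version; binary artifacts such as the .so and .dylib libraries are shown as +0 -0. Since a wheel is an ordinary zip archive, a rough local cross-check of the listing is possible; the sketch below assumes a downloaded copy of the wheel in the working directory:

    # Minimal sketch: list the wheel's members and count lines in the
    # Python sources, approximating the "+N -0" figures below. The local
    # filename is an assumption.
    import zipfile

    WHEEL = "mindspore-2.4.0-cp310-cp310-macosx_11_0_arm64.whl"

    with zipfile.ZipFile(WHEEL) as whl:
        for info in whl.infolist():
            if info.filename.endswith(".py"):
                with whl.open(info) as member:
                    added = sum(1 for _ in member)  # every line is new in this diff
                print(f"{info.filename} +{added} -0")
            elif info.filename.endswith((".so", ".dylib")):
                print(f"{info.filename} +0 -0")  # binaries carry no line counts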

Files changed (1387)
  1. mindspore/.commit_id +1 -0
  2. mindspore/__init__.py +53 -0
  3. mindspore/_c_dataengine.cpython-310-darwin.so +0 -0
  4. mindspore/_c_expression.cpython-310-darwin.so +0 -0
  5. mindspore/_c_mindrecord.cpython-310-darwin.so +0 -0
  6. mindspore/_check_jit_forbidden_api.py +106 -0
  7. mindspore/_checkparam.py +1419 -0
  8. mindspore/_extends/__init__.py +23 -0
  9. mindspore/_extends/builtin_operations.py +224 -0
  10. mindspore/_extends/graph_kernel/__init__.py +17 -0
  11. mindspore/_extends/graph_kernel/model/__init__.py +19 -0
  12. mindspore/_extends/graph_kernel/model/graph_parallel.py +311 -0
  13. mindspore/_extends/graph_kernel/model/graph_split.py +1348 -0
  14. mindspore/_extends/graph_kernel/model/model.py +553 -0
  15. mindspore/_extends/graph_kernel/model/model_builder.py +216 -0
  16. mindspore/_extends/graph_kernel/parallel_estimate.py +60 -0
  17. mindspore/_extends/graph_kernel/splitter.py +140 -0
  18. mindspore/_extends/graph_kernel/utils.py +28 -0
  19. mindspore/_extends/parallel_compile/__init__.py +19 -0
  20. mindspore/_extends/parallel_compile/akg_compiler/__init__.py +19 -0
  21. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +269 -0
  22. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +529 -0
  23. mindspore/_extends/parallel_compile/akg_compiler/compiler.py +56 -0
  24. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
  25. mindspore/_extends/parallel_compile/akg_compiler/get_file_path.py +36 -0
  26. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +556 -0
  27. mindspore/_extends/parallel_compile/akg_compiler/util.py +159 -0
  28. mindspore/_extends/parse/__init__.py +49 -0
  29. mindspore/_extends/parse/compile_config.py +299 -0
  30. mindspore/_extends/parse/namespace.py +136 -0
  31. mindspore/_extends/parse/parser.py +1448 -0
  32. mindspore/_extends/parse/resources.py +213 -0
  33. mindspore/_extends/parse/standard_method.py +4475 -0
  34. mindspore/_extends/parse/trope.py +97 -0
  35. mindspore/_extends/pijit/__init__.py +23 -0
  36. mindspore/_extends/pijit/pijit_func_white_list.py +669 -0
  37. mindspore/_extends/remote/__init__.py +19 -0
  38. mindspore/_extends/remote/kernel_build_server.py +199 -0
  39. mindspore/_extends/remote/kernel_build_server_akg.py +55 -0
  40. mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
  41. mindspore/_extends/remote/kernel_build_server_ascend.py +75 -0
  42. mindspore/_extends/utils.py +68 -0
  43. mindspore/_install_custom.py +43 -0
  44. mindspore/_profiler.py +30 -0
  45. mindspore/amp.py +433 -0
  46. mindspore/boost/__init__.py +42 -0
  47. mindspore/boost/adasum.py +319 -0
  48. mindspore/boost/base.py +535 -0
  49. mindspore/boost/boost.py +400 -0
  50. mindspore/boost/boost_cell_wrapper.py +790 -0
  51. mindspore/boost/dim_reduce.py +323 -0
  52. mindspore/boost/grad_accumulation.py +79 -0
  53. mindspore/boost/grad_freeze.py +382 -0
  54. mindspore/boost/group_loss_scale_manager.py +166 -0
  55. mindspore/boost/less_batch_normalization.py +174 -0
  56. mindspore/common/__init__.py +86 -0
  57. mindspore/common/_auto_dynamic.py +68 -0
  58. mindspore/common/_decorator.py +50 -0
  59. mindspore/common/_jit_fallback_utils.py +110 -0
  60. mindspore/common/_monad.py +25 -0
  61. mindspore/common/_pijit_context.py +190 -0
  62. mindspore/common/_register_for_adapter.py +74 -0
  63. mindspore/common/_register_for_recompute.py +48 -0
  64. mindspore/common/_register_for_tensor.py +46 -0
  65. mindspore/common/_stub_tensor.py +210 -0
  66. mindspore/common/_tensor_overload.py +139 -0
  67. mindspore/common/_utils.py +122 -0
  68. mindspore/common/api.py +2064 -0
  69. mindspore/common/auto_dynamic_shape.py +507 -0
  70. mindspore/common/dtype.py +422 -0
  71. mindspore/common/dump.py +130 -0
  72. mindspore/common/file_system.py +48 -0
  73. mindspore/common/generator.py +254 -0
  74. mindspore/common/hook_handle.py +143 -0
  75. mindspore/common/initializer.py +880 -0
  76. mindspore/common/jit_config.py +98 -0
  77. mindspore/common/lazy_inline.py +240 -0
  78. mindspore/common/mindir_util.py +111 -0
  79. mindspore/common/mutable.py +234 -0
  80. mindspore/common/no_inline.py +54 -0
  81. mindspore/common/np_dtype.py +25 -0
  82. mindspore/common/parameter.py +1081 -0
  83. mindspore/common/recompute.py +292 -0
  84. mindspore/common/seed.py +260 -0
  85. mindspore/common/sparse_tensor.py +1175 -0
  86. mindspore/common/symbol.py +122 -0
  87. mindspore/common/tensor.py +5039 -0
  88. mindspore/communication/__init__.py +37 -0
  89. mindspore/communication/_comm_helper.py +501 -0
  90. mindspore/communication/_hccl_management.py +297 -0
  91. mindspore/communication/comm_func.py +1395 -0
  92. mindspore/communication/management.py +673 -0
  93. mindspore/config/op_info.config +533 -0
  94. mindspore/context.py +2077 -0
  95. mindspore/dataset/__init__.py +90 -0
  96. mindspore/dataset/audio/__init__.py +61 -0
  97. mindspore/dataset/audio/transforms.py +3690 -0
  98. mindspore/dataset/audio/utils.py +386 -0
  99. mindspore/dataset/audio/validators.py +1172 -0
  100. mindspore/dataset/callback/__init__.py +20 -0
  101. mindspore/dataset/callback/ds_callback.py +368 -0
  102. mindspore/dataset/callback/validators.py +32 -0
  103. mindspore/dataset/core/__init__.py +13 -0
  104. mindspore/dataset/core/config.py +1095 -0
  105. mindspore/dataset/core/datatypes.py +101 -0
  106. mindspore/dataset/core/py_util_helpers.py +65 -0
  107. mindspore/dataset/core/validator_helpers.py +781 -0
  108. mindspore/dataset/debug/__init__.py +21 -0
  109. mindspore/dataset/debug/debug_hook.py +97 -0
  110. mindspore/dataset/debug/pre_defined_hook.py +67 -0
  111. mindspore/dataset/engine/__init__.py +124 -0
  112. mindspore/dataset/engine/cache_admin.py +47 -0
  113. mindspore/dataset/engine/cache_client.py +129 -0
  114. mindspore/dataset/engine/datasets.py +4582 -0
  115. mindspore/dataset/engine/datasets_audio.py +911 -0
  116. mindspore/dataset/engine/datasets_standard_format.py +543 -0
  117. mindspore/dataset/engine/datasets_text.py +2161 -0
  118. mindspore/dataset/engine/datasets_user_defined.py +1184 -0
  119. mindspore/dataset/engine/datasets_vision.py +4816 -0
  120. mindspore/dataset/engine/iterators.py +371 -0
  121. mindspore/dataset/engine/obs/__init__.py +23 -0
  122. mindspore/dataset/engine/obs/config_loader.py +68 -0
  123. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +508 -0
  124. mindspore/dataset/engine/obs/util.py +482 -0
  125. mindspore/dataset/engine/offload.py +596 -0
  126. mindspore/dataset/engine/queue.py +304 -0
  127. mindspore/dataset/engine/samplers.py +895 -0
  128. mindspore/dataset/engine/serializer_deserializer.py +159 -0
  129. mindspore/dataset/engine/validators.py +2895 -0
  130. mindspore/dataset/text/__init__.py +51 -0
  131. mindspore/dataset/text/transforms.py +1703 -0
  132. mindspore/dataset/text/utils.py +715 -0
  133. mindspore/dataset/text/validators.py +642 -0
  134. mindspore/dataset/transforms/__init__.py +45 -0
  135. mindspore/dataset/transforms/c_transforms.py +638 -0
  136. mindspore/dataset/transforms/py_transforms.py +393 -0
  137. mindspore/dataset/transforms/py_transforms_util.py +255 -0
  138. mindspore/dataset/transforms/transforms.py +1260 -0
  139. mindspore/dataset/transforms/validators.py +410 -0
  140. mindspore/dataset/utils/__init__.py +19 -0
  141. mindspore/dataset/utils/browse_dataset.py +190 -0
  142. mindspore/dataset/utils/line_reader.py +126 -0
  143. mindspore/dataset/vision/__init__.py +65 -0
  144. mindspore/dataset/vision/c_transforms.py +2641 -0
  145. mindspore/dataset/vision/py_transforms.py +2120 -0
  146. mindspore/dataset/vision/py_transforms_util.py +1660 -0
  147. mindspore/dataset/vision/transforms.py +7295 -0
  148. mindspore/dataset/vision/utils.py +863 -0
  149. mindspore/dataset/vision/validators.py +1483 -0
  150. mindspore/default_config.py +2 -0
  151. mindspore/experimental/__init__.py +20 -0
  152. mindspore/experimental/es/__init__.py +22 -0
  153. mindspore/experimental/es/embedding_service.py +883 -0
  154. mindspore/experimental/es/embedding_service_layer.py +581 -0
  155. mindspore/experimental/llm_boost/__init__.py +21 -0
  156. mindspore/experimental/llm_boost/atb/__init__.py +23 -0
  157. mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
  158. mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
  159. mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
  160. mindspore/experimental/llm_boost/register.py +129 -0
  161. mindspore/experimental/llm_boost/utils.py +31 -0
  162. mindspore/experimental/map_parameter.py +309 -0
  163. mindspore/experimental/optim/__init__.py +40 -0
  164. mindspore/experimental/optim/adadelta.py +161 -0
  165. mindspore/experimental/optim/adagrad.py +168 -0
  166. mindspore/experimental/optim/adam.py +193 -0
  167. mindspore/experimental/optim/adamax.py +170 -0
  168. mindspore/experimental/optim/adamw.py +290 -0
  169. mindspore/experimental/optim/asgd.py +153 -0
  170. mindspore/experimental/optim/lr_scheduler.py +1371 -0
  171. mindspore/experimental/optim/nadam.py +157 -0
  172. mindspore/experimental/optim/optimizer.py +262 -0
  173. mindspore/experimental/optim/radam.py +194 -0
  174. mindspore/experimental/optim/rmsprop.py +154 -0
  175. mindspore/experimental/optim/rprop.py +164 -0
  176. mindspore/experimental/optim/sgd.py +156 -0
  177. mindspore/hal/__init__.py +40 -0
  178. mindspore/hal/_ascend.py +57 -0
  179. mindspore/hal/_base.py +57 -0
  180. mindspore/hal/_cpu.py +56 -0
  181. mindspore/hal/_gpu.py +57 -0
  182. mindspore/hal/contiguous_tensors_handle.py +175 -0
  183. mindspore/hal/device.py +356 -0
  184. mindspore/hal/event.py +179 -0
  185. mindspore/hal/memory.py +326 -0
  186. mindspore/hal/stream.py +357 -0
  187. mindspore/include/OWNERS +7 -0
  188. mindspore/include/api/allocator.h +97 -0
  189. mindspore/include/api/callback/callback.h +93 -0
  190. mindspore/include/api/callback/ckpt_saver.h +41 -0
  191. mindspore/include/api/callback/loss_monitor.h +33 -0
  192. mindspore/include/api/callback/lr_scheduler.h +51 -0
  193. mindspore/include/api/callback/time_monitor.h +34 -0
  194. mindspore/include/api/callback/train_accuracy.h +37 -0
  195. mindspore/include/api/cell.h +90 -0
  196. mindspore/include/api/cfg.h +82 -0
  197. mindspore/include/api/context.h +602 -0
  198. mindspore/include/api/data_type.h +47 -0
  199. mindspore/include/api/delegate.h +178 -0
  200. mindspore/include/api/delegate_api.h +75 -0
  201. mindspore/include/api/dual_abi_helper.h +208 -0
  202. mindspore/include/api/format.h +28 -0
  203. mindspore/include/api/graph.h +46 -0
  204. mindspore/include/api/kernel.h +58 -0
  205. mindspore/include/api/kernel_api.h +168 -0
  206. mindspore/include/api/metrics/accuracy.h +36 -0
  207. mindspore/include/api/metrics/metrics.h +41 -0
  208. mindspore/include/api/model.h +438 -0
  209. mindspore/include/api/model_group.h +91 -0
  210. mindspore/include/api/model_parallel_runner.h +168 -0
  211. mindspore/include/api/serialization.h +185 -0
  212. mindspore/include/api/status.h +192 -0
  213. mindspore/include/api/types.h +431 -0
  214. mindspore/include/api/visible.h +41 -0
  215. mindspore/include/c_api/context_c.h +179 -0
  216. mindspore/include/c_api/data_type_c.h +52 -0
  217. mindspore/include/c_api/format_c.h +46 -0
  218. mindspore/include/c_api/model_c.h +347 -0
  219. mindspore/include/c_api/status_c.h +79 -0
  220. mindspore/include/c_api/tensor_c.h +146 -0
  221. mindspore/include/c_api/types_c.h +67 -0
  222. mindspore/include/dataset/config.h +163 -0
  223. mindspore/include/dataset/constants.h +363 -0
  224. mindspore/include/dataset/execute.h +196 -0
  225. mindspore/include/dataset/text.h +1092 -0
  226. mindspore/include/dataset/transforms.h +638 -0
  227. mindspore/include/dataset/vision.h +2129 -0
  228. mindspore/include/dataset/vision_ascend.h +206 -0
  229. mindspore/include/dataset/vision_lite.h +625 -0
  230. mindspore/lib/libavcodec.59.dylib +0 -0
  231. mindspore/lib/libavdevice.59.dylib +0 -0
  232. mindspore/lib/libavfilter.8.dylib +0 -0
  233. mindspore/lib/libavformat.59.dylib +0 -0
  234. mindspore/lib/libavutil.57.dylib +0 -0
  235. mindspore/lib/libdnnl.2.dylib +0 -0
  236. mindspore/lib/libicudata.69.dylib +0 -0
  237. mindspore/lib/libicui18n.69.dylib +0 -0
  238. mindspore/lib/libicuuc.69.dylib +0 -0
  239. mindspore/lib/libmindspore_address_sorting.15.dylib +0 -0
  240. mindspore/lib/libmindspore_backend.dylib +0 -0
  241. mindspore/lib/libmindspore_common.dylib +0 -0
  242. mindspore/lib/libmindspore_core.dylib +0 -0
  243. mindspore/lib/libmindspore_glog.0.dylib +0 -0
  244. mindspore/lib/libmindspore_gpr.15.dylib +0 -0
  245. mindspore/lib/libmindspore_grpc++.1.dylib +0 -0
  246. mindspore/lib/libmindspore_grpc.15.dylib +0 -0
  247. mindspore/lib/libmindspore_np_dtype.dylib +0 -0
  248. mindspore/lib/libmindspore_ops.dylib +0 -0
  249. mindspore/lib/libmindspore_upb.15.dylib +0 -0
  250. mindspore/lib/libnnacl.dylib +0 -0
  251. mindspore/lib/libopencv_core.4.5.dylib +0 -0
  252. mindspore/lib/libopencv_imgcodecs.4.5.dylib +0 -0
  253. mindspore/lib/libopencv_imgproc.4.5.dylib +0 -0
  254. mindspore/lib/libps_cache.dylib +0 -0
  255. mindspore/lib/libswresample.4.dylib +0 -0
  256. mindspore/lib/libswscale.6.dylib +0 -0
  257. mindspore/lib/libtinyxml2.8.dylib +0 -0
  258. mindspore/log.py +633 -0
  259. mindspore/mindrecord/__init__.py +43 -0
  260. mindspore/mindrecord/common/__init__.py +17 -0
  261. mindspore/mindrecord/common/constant.py +20 -0
  262. mindspore/mindrecord/common/enums.py +44 -0
  263. mindspore/mindrecord/common/exceptions.py +311 -0
  264. mindspore/mindrecord/config.py +809 -0
  265. mindspore/mindrecord/filereader.py +174 -0
  266. mindspore/mindrecord/filewriter.py +722 -0
  267. mindspore/mindrecord/mindpage.py +210 -0
  268. mindspore/mindrecord/shardheader.py +141 -0
  269. mindspore/mindrecord/shardindexgenerator.py +74 -0
  270. mindspore/mindrecord/shardreader.py +117 -0
  271. mindspore/mindrecord/shardsegment.py +128 -0
  272. mindspore/mindrecord/shardutils.py +185 -0
  273. mindspore/mindrecord/shardwriter.py +237 -0
  274. mindspore/mindrecord/tools/__init__.py +17 -0
  275. mindspore/mindrecord/tools/cifar10.py +140 -0
  276. mindspore/mindrecord/tools/cifar100.py +153 -0
  277. mindspore/mindrecord/tools/cifar100_to_mr.py +185 -0
  278. mindspore/mindrecord/tools/cifar10_to_mr.py +177 -0
  279. mindspore/mindrecord/tools/csv_to_mr.py +200 -0
  280. mindspore/mindrecord/tools/imagenet_to_mr.py +206 -0
  281. mindspore/mindrecord/tools/mnist_to_mr.py +259 -0
  282. mindspore/mindrecord/tools/tfrecord_to_mr.py +360 -0
  283. mindspore/mint/__init__.py +1586 -0
  284. mindspore/mint/distributed/__init__.py +31 -0
  285. mindspore/mint/distributed/distributed.py +254 -0
  286. mindspore/mint/linalg/__init__.py +22 -0
  287. mindspore/mint/nn/__init__.py +757 -0
  288. mindspore/mint/nn/functional.py +679 -0
  289. mindspore/mint/nn/layer/__init__.py +39 -0
  290. mindspore/mint/nn/layer/activation.py +133 -0
  291. mindspore/mint/nn/layer/normalization.py +477 -0
  292. mindspore/mint/nn/layer/pooling.py +110 -0
  293. mindspore/mint/optim/__init__.py +24 -0
  294. mindspore/mint/optim/adamw.py +206 -0
  295. mindspore/mint/special/__init__.py +63 -0
  296. mindspore/multiprocessing/__init__.py +73 -0
  297. mindspore/nn/__init__.py +47 -0
  298. mindspore/nn/cell.py +2787 -0
  299. mindspore/nn/dynamic_lr.py +482 -0
  300. mindspore/nn/grad/__init__.py +21 -0
  301. mindspore/nn/grad/cell_grad.py +196 -0
  302. mindspore/nn/layer/__init__.py +63 -0
  303. mindspore/nn/layer/activation.py +1822 -0
  304. mindspore/nn/layer/basic.py +1629 -0
  305. mindspore/nn/layer/channel_shuffle.py +90 -0
  306. mindspore/nn/layer/combined.py +248 -0
  307. mindspore/nn/layer/container.py +734 -0
  308. mindspore/nn/layer/conv.py +1505 -0
  309. mindspore/nn/layer/dense.py +204 -0
  310. mindspore/nn/layer/embedding.py +869 -0
  311. mindspore/nn/layer/image.py +661 -0
  312. mindspore/nn/layer/math.py +1069 -0
  313. mindspore/nn/layer/normalization.py +1273 -0
  314. mindspore/nn/layer/padding.py +880 -0
  315. mindspore/nn/layer/pooling.py +2302 -0
  316. mindspore/nn/layer/rnn_cells.py +388 -0
  317. mindspore/nn/layer/rnns.py +849 -0
  318. mindspore/nn/layer/thor_layer.py +963 -0
  319. mindspore/nn/layer/timedistributed.py +155 -0
  320. mindspore/nn/layer/transformer.py +823 -0
  321. mindspore/nn/learning_rate_schedule.py +512 -0
  322. mindspore/nn/loss/__init__.py +36 -0
  323. mindspore/nn/loss/loss.py +2924 -0
  324. mindspore/nn/metrics.py +53 -0
  325. mindspore/nn/optim/__init__.py +45 -0
  326. mindspore/nn/optim/_dist_optimizer_registry.py +111 -0
  327. mindspore/nn/optim/ada_grad.py +217 -0
  328. mindspore/nn/optim/adadelta.py +206 -0
  329. mindspore/nn/optim/adafactor.py +448 -0
  330. mindspore/nn/optim/adam.py +1297 -0
  331. mindspore/nn/optim/adamax.py +220 -0
  332. mindspore/nn/optim/adasum.py +548 -0
  333. mindspore/nn/optim/asgd.py +216 -0
  334. mindspore/nn/optim/ftrl.py +401 -0
  335. mindspore/nn/optim/lamb.py +296 -0
  336. mindspore/nn/optim/lars.py +202 -0
  337. mindspore/nn/optim/lazyadam.py +533 -0
  338. mindspore/nn/optim/momentum.py +239 -0
  339. mindspore/nn/optim/optimizer.py +1034 -0
  340. mindspore/nn/optim/proximal_ada_grad.py +242 -0
  341. mindspore/nn/optim/rmsprop.py +264 -0
  342. mindspore/nn/optim/rprop.py +251 -0
  343. mindspore/nn/optim/sgd.py +237 -0
  344. mindspore/nn/optim/tft_wrapper.py +127 -0
  345. mindspore/nn/optim/thor.py +1310 -0
  346. mindspore/nn/probability/__init__.py +22 -0
  347. mindspore/nn/probability/bijector/__init__.py +35 -0
  348. mindspore/nn/probability/bijector/bijector.py +337 -0
  349. mindspore/nn/probability/bijector/exp.py +65 -0
  350. mindspore/nn/probability/bijector/gumbel_cdf.py +144 -0
  351. mindspore/nn/probability/bijector/invert.py +126 -0
  352. mindspore/nn/probability/bijector/power_transform.py +196 -0
  353. mindspore/nn/probability/bijector/scalar_affine.py +167 -0
  354. mindspore/nn/probability/bijector/softplus.py +189 -0
  355. mindspore/nn/probability/bnn_layers/__init__.py +29 -0
  356. mindspore/nn/probability/bnn_layers/_util.py +46 -0
  357. mindspore/nn/probability/bnn_layers/bnn_cell_wrapper.py +112 -0
  358. mindspore/nn/probability/bnn_layers/conv_variational.py +267 -0
  359. mindspore/nn/probability/bnn_layers/dense_variational.py +302 -0
  360. mindspore/nn/probability/bnn_layers/layer_distribution.py +123 -0
  361. mindspore/nn/probability/distribution/__init__.py +56 -0
  362. mindspore/nn/probability/distribution/_utils/__init__.py +34 -0
  363. mindspore/nn/probability/distribution/_utils/custom_ops.py +96 -0
  364. mindspore/nn/probability/distribution/_utils/utils.py +362 -0
  365. mindspore/nn/probability/distribution/bernoulli.py +334 -0
  366. mindspore/nn/probability/distribution/beta.py +391 -0
  367. mindspore/nn/probability/distribution/categorical.py +435 -0
  368. mindspore/nn/probability/distribution/cauchy.py +383 -0
  369. mindspore/nn/probability/distribution/distribution.py +827 -0
  370. mindspore/nn/probability/distribution/exponential.py +350 -0
  371. mindspore/nn/probability/distribution/gamma.py +391 -0
  372. mindspore/nn/probability/distribution/geometric.py +335 -0
  373. mindspore/nn/probability/distribution/gumbel.py +257 -0
  374. mindspore/nn/probability/distribution/half_normal.py +133 -0
  375. mindspore/nn/probability/distribution/laplace.py +128 -0
  376. mindspore/nn/probability/distribution/log_normal.py +272 -0
  377. mindspore/nn/probability/distribution/logistic.py +379 -0
  378. mindspore/nn/probability/distribution/normal.py +336 -0
  379. mindspore/nn/probability/distribution/poisson.py +288 -0
  380. mindspore/nn/probability/distribution/student_t.py +149 -0
  381. mindspore/nn/probability/distribution/transformed_distribution.py +235 -0
  382. mindspore/nn/probability/distribution/uniform.py +375 -0
  383. mindspore/nn/reinforcement/__init__.py +24 -0
  384. mindspore/nn/reinforcement/_batch_read_write.py +142 -0
  385. mindspore/nn/reinforcement/_tensors_queue.py +152 -0
  386. mindspore/nn/reinforcement/tensor_array.py +145 -0
  387. mindspore/nn/sparse/__init__.py +23 -0
  388. mindspore/nn/sparse/sparse.py +147 -0
  389. mindspore/nn/wrap/__init__.py +49 -0
  390. mindspore/nn/wrap/cell_wrapper.py +968 -0
  391. mindspore/nn/wrap/grad_reducer.py +608 -0
  392. mindspore/nn/wrap/loss_scale.py +694 -0
  393. mindspore/numpy/__init__.py +121 -0
  394. mindspore/numpy/array_creations.py +2731 -0
  395. mindspore/numpy/array_ops.py +2629 -0
  396. mindspore/numpy/dtypes.py +185 -0
  397. mindspore/numpy/fft.py +966 -0
  398. mindspore/numpy/logic_ops.py +936 -0
  399. mindspore/numpy/math_ops.py +5911 -0
  400. mindspore/numpy/utils.py +214 -0
  401. mindspore/numpy/utils_const.py +565 -0
  402. mindspore/ops/__init__.py +56 -0
  403. mindspore/ops/_constants.py +30 -0
  404. mindspore/ops/_grad_experimental/__init__.py +31 -0
  405. mindspore/ops/_grad_experimental/grad_array_ops.py +830 -0
  406. mindspore/ops/_grad_experimental/grad_base.py +143 -0
  407. mindspore/ops/_grad_experimental/grad_comm_ops.py +714 -0
  408. mindspore/ops/_grad_experimental/grad_debug_ops.py +31 -0
  409. mindspore/ops/_grad_experimental/grad_implementations.py +203 -0
  410. mindspore/ops/_grad_experimental/grad_inner_ops.py +79 -0
  411. mindspore/ops/_grad_experimental/grad_math_ops.py +802 -0
  412. mindspore/ops/_grad_experimental/grad_nn_ops.py +231 -0
  413. mindspore/ops/_grad_experimental/grad_quant_ops.py +238 -0
  414. mindspore/ops/_grad_experimental/grad_sparse.py +342 -0
  415. mindspore/ops/_grad_experimental/grad_sparse_ops.py +399 -0
  416. mindspore/ops/_grad_experimental/taylor_rule.py +220 -0
  417. mindspore/ops/_op_impl/__init__.py +23 -0
  418. mindspore/ops/_op_impl/_custom_op/__init__.py +39 -0
  419. mindspore/ops/_op_impl/_custom_op/_basic.py +158 -0
  420. mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +279 -0
  421. mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +156 -0
  422. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +109 -0
  423. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +125 -0
  424. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +105 -0
  425. mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +124 -0
  426. mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +116 -0
  427. mindspore/ops/_op_impl/_custom_op/correction_mul.py +89 -0
  428. mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +196 -0
  429. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +366 -0
  430. mindspore/ops/_op_impl/_custom_op/dsd_impl.py +162 -0
  431. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +136 -0
  432. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +206 -0
  433. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +88 -0
  434. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +128 -0
  435. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +199 -0
  436. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +88 -0
  437. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +156 -0
  438. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +184 -0
  439. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +143 -0
  440. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +169 -0
  441. mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +548 -0
  442. mindspore/ops/_op_impl/_custom_op/img2col_impl.py +881 -0
  443. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +278 -0
  444. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +200 -0
  445. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +334 -0
  446. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +255 -0
  447. mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +222 -0
  448. mindspore/ops/_op_impl/_custom_op/matmul_dds_grad_impl.py +644 -0
  449. mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +488 -0
  450. mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +87 -0
  451. mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +129 -0
  452. mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +121 -0
  453. mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +352 -0
  454. mindspore/ops/_op_impl/aicpu/__init__.py +441 -0
  455. mindspore/ops/_op_impl/aicpu/abs.py +36 -0
  456. mindspore/ops/_op_impl/aicpu/acos.py +32 -0
  457. mindspore/ops/_op_impl/aicpu/acos_grad.py +33 -0
  458. mindspore/ops/_op_impl/aicpu/acosh.py +34 -0
  459. mindspore/ops/_op_impl/aicpu/acosh_grad.py +35 -0
  460. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d.py +34 -0
  461. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
  462. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d.py +39 -0
  463. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d_grad.py +39 -0
  464. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d.py +37 -0
  465. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d_grad.py +37 -0
  466. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d.py +42 -0
  467. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d_grad.py +152 -0
  468. mindspore/ops/_op_impl/aicpu/add.py +43 -0
  469. mindspore/ops/_op_impl/aicpu/add_n.py +41 -0
  470. mindspore/ops/_op_impl/aicpu/add_v2.py +40 -0
  471. mindspore/ops/_op_impl/aicpu/addcdiv.py +41 -0
  472. mindspore/ops/_op_impl/aicpu/addcmul.py +47 -0
  473. mindspore/ops/_op_impl/aicpu/adjust_contrastv2.py +32 -0
  474. mindspore/ops/_op_impl/aicpu/adjust_hue.py +31 -0
  475. mindspore/ops/_op_impl/aicpu/adjust_saturation.py +32 -0
  476. mindspore/ops/_op_impl/aicpu/affine_grid.py +33 -0
  477. mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
  478. mindspore/ops/_op_impl/aicpu/angle.py +31 -0
  479. mindspore/ops/_op_impl/aicpu/arg_max.py +75 -0
  480. mindspore/ops/_op_impl/aicpu/arg_min.py +75 -0
  481. mindspore/ops/_op_impl/aicpu/argmax_with_value.py +43 -0
  482. mindspore/ops/_op_impl/aicpu/argmin_with_value.py +43 -0
  483. mindspore/ops/_op_impl/aicpu/asin.py +32 -0
  484. mindspore/ops/_op_impl/aicpu/asin_grad.py +33 -0
  485. mindspore/ops/_op_impl/aicpu/asinh.py +34 -0
  486. mindspore/ops/_op_impl/aicpu/asinh_grad.py +35 -0
  487. mindspore/ops/_op_impl/aicpu/atanh.py +34 -0
  488. mindspore/ops/_op_impl/aicpu/avgpool_grad_v1.py +37 -0
  489. mindspore/ops/_op_impl/aicpu/avgpool_v1.py +36 -0
  490. mindspore/ops/_op_impl/aicpu/bartlett_window.py +36 -0
  491. mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -0
  492. mindspore/ops/_op_impl/aicpu/batch_norm_grad_grad.py +49 -0
  493. mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
  494. mindspore/ops/_op_impl/aicpu/bessel_i0.py +31 -0
  495. mindspore/ops/_op_impl/aicpu/betainc.py +31 -0
  496. mindspore/ops/_op_impl/aicpu/bias_add.py +44 -0
  497. mindspore/ops/_op_impl/aicpu/bias_add_grad.py +42 -0
  498. mindspore/ops/_op_impl/aicpu/bincount.py +33 -0
  499. mindspore/ops/_op_impl/aicpu/blackman_window.py +36 -0
  500. mindspore/ops/_op_impl/aicpu/broadcast_to.py +58 -0
  501. mindspore/ops/_op_impl/aicpu/bucketize.py +34 -0
  502. mindspore/ops/_op_impl/aicpu/cache_swap_table.py +102 -0
  503. mindspore/ops/_op_impl/aicpu/cast.py +225 -0
  504. mindspore/ops/_op_impl/aicpu/cauchy.py +33 -0
  505. mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
  506. mindspore/ops/_op_impl/aicpu/check_numerics.py +33 -0
  507. mindspore/ops/_op_impl/aicpu/cholesky.py +32 -0
  508. mindspore/ops/_op_impl/aicpu/cholesky_inverse.py +31 -0
  509. mindspore/ops/_op_impl/aicpu/cholesky_solve.py +33 -0
  510. mindspore/ops/_op_impl/aicpu/choleskygrad.py +32 -0
  511. mindspore/ops/_op_impl/aicpu/coalesce.py +37 -0
  512. mindspore/ops/_op_impl/aicpu/col2im.py +38 -0
  513. mindspore/ops/_op_impl/aicpu/combined_non_max_suppression.py +42 -0
  514. mindspore/ops/_op_impl/aicpu/compare_and_bitpack.py +37 -0
  515. mindspore/ops/_op_impl/aicpu/complex.py +32 -0
  516. mindspore/ops/_op_impl/aicpu/complex_abs.py +31 -0
  517. mindspore/ops/_op_impl/aicpu/compute_accidental_hits.py +44 -0
  518. mindspore/ops/_op_impl/aicpu/concat.py +57 -0
  519. mindspore/ops/_op_impl/aicpu/concat_offset.py +42 -0
  520. mindspore/ops/_op_impl/aicpu/concat_offset_v1.py +31 -0
  521. mindspore/ops/_op_impl/aicpu/conj.py +42 -0
  522. mindspore/ops/_op_impl/aicpu/conjugate_transpose.py +58 -0
  523. mindspore/ops/_op_impl/aicpu/cos.py +34 -0
  524. mindspore/ops/_op_impl/aicpu/cosh.py +34 -0
  525. mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
  526. mindspore/ops/_op_impl/aicpu/crop_and_resize.py +69 -0
  527. mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_boxes.py +68 -0
  528. mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_image.py +38 -0
  529. mindspore/ops/_op_impl/aicpu/cross.py +42 -0
  530. mindspore/ops/_op_impl/aicpu/csr_sparse_matrix_to_dense.py +48 -0
  531. mindspore/ops/_op_impl/aicpu/csr_sparse_matrix_to_sparse_tensor.py +51 -0
  532. mindspore/ops/_op_impl/aicpu/ctc_greedy_decoder.py +35 -0
  533. mindspore/ops/_op_impl/aicpu/ctc_loss_v2.py +43 -0
  534. mindspore/ops/_op_impl/aicpu/ctc_loss_v2_grad.py +45 -0
  535. mindspore/ops/_op_impl/aicpu/ctcloss.py +38 -0
  536. mindspore/ops/_op_impl/aicpu/cummax.py +41 -0
  537. mindspore/ops/_op_impl/aicpu/cumprod.py +58 -0
  538. mindspore/ops/_op_impl/aicpu/cumsum.py +58 -0
  539. mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +36 -0
  540. mindspore/ops/_op_impl/aicpu/data_format_vec_permute.py +32 -0
  541. mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
  542. mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
  543. mindspore/ops/_op_impl/aicpu/dense_to_csr_sparse_matrix.py +49 -0
  544. mindspore/ops/_op_impl/aicpu/dense_to_dense_set_operation.py +45 -0
  545. mindspore/ops/_op_impl/aicpu/dense_to_sparse_set_operation.py +48 -0
  546. mindspore/ops/_op_impl/aicpu/depth_to_space.py +44 -0
  547. mindspore/ops/_op_impl/aicpu/diag.py +36 -0
  548. mindspore/ops/_op_impl/aicpu/diag_part.py +36 -0
  549. mindspore/ops/_op_impl/aicpu/diagonal.py +35 -0
  550. mindspore/ops/_op_impl/aicpu/digamma.py +31 -0
  551. mindspore/ops/_op_impl/aicpu/div.py +41 -0
  552. mindspore/ops/_op_impl/aicpu/div_no_nan.py +35 -0
  553. mindspore/ops/_op_impl/aicpu/dropout2d.py +42 -0
  554. mindspore/ops/_op_impl/aicpu/dropout3d.py +42 -0
  555. mindspore/ops/_op_impl/aicpu/dropout_genmask.py +41 -0
  556. mindspore/ops/_op_impl/aicpu/dropout_genmask_v3.py +32 -0
  557. mindspore/ops/_op_impl/aicpu/dynamic_stitch.py +42 -0
  558. mindspore/ops/_op_impl/aicpu/edit_distance.py +56 -0
  559. mindspore/ops/_op_impl/aicpu/eig.py +35 -0
  560. mindspore/ops/_op_impl/aicpu/embedding_lookup.py +102 -0
  561. mindspore/ops/_op_impl/aicpu/end_of_sequence.py +30 -0
  562. mindspore/ops/_op_impl/aicpu/environ_create.py +28 -0
  563. mindspore/ops/_op_impl/aicpu/environ_destroy_all.py +28 -0
  564. mindspore/ops/_op_impl/aicpu/environ_get.py +41 -0
  565. mindspore/ops/_op_impl/aicpu/environ_set.py +40 -0
  566. mindspore/ops/_op_impl/aicpu/eps.py +32 -0
  567. mindspore/ops/_op_impl/aicpu/equal.py +41 -0
  568. mindspore/ops/_op_impl/aicpu/exp.py +37 -0
  569. mindspore/ops/_op_impl/aicpu/expand.py +45 -0
  570. mindspore/ops/_op_impl/aicpu/expand_dims.py +42 -0
  571. mindspore/ops/_op_impl/aicpu/expm1.py +34 -0
  572. mindspore/ops/_op_impl/aicpu/extract_glimpse.py +35 -0
  573. mindspore/ops/_op_impl/aicpu/eye.py +44 -0
  574. mindspore/ops/_op_impl/aicpu/fft_with_size.py +47 -0
  575. mindspore/ops/_op_impl/aicpu/fill_diagonal.py +39 -0
  576. mindspore/ops/_op_impl/aicpu/fill_v2.py +58 -0
  577. mindspore/ops/_op_impl/aicpu/flatten.py +43 -0
  578. mindspore/ops/_op_impl/aicpu/floor_div.py +38 -0
  579. mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
  580. mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
  581. mindspore/ops/_op_impl/aicpu/fractional_avg_pool.py +41 -0
  582. mindspore/ops/_op_impl/aicpu/fractional_avg_pool_grad.py +41 -0
  583. mindspore/ops/_op_impl/aicpu/fractional_max_pool.py +41 -0
  584. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_grad_with_fixed_ksize.py +43 -0
  585. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +65 -0
  586. mindspore/ops/_op_impl/aicpu/fractional_max_pool_grad.py +42 -0
  587. mindspore/ops/_op_impl/aicpu/fractional_max_pool_grad_with_fixed_ksize.py +42 -0
  588. mindspore/ops/_op_impl/aicpu/fractional_max_pool_with_fixed_ksize.py +49 -0
  589. mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
  590. mindspore/ops/_op_impl/aicpu/fused_sparse_adam.py +46 -0
  591. mindspore/ops/_op_impl/aicpu/fused_sparse_ftrl.py +41 -0
  592. mindspore/ops/_op_impl/aicpu/fused_sparse_lazy_adam.py +46 -0
  593. mindspore/ops/_op_impl/aicpu/fused_sparse_proximal_adagrad.py +39 -0
  594. mindspore/ops/_op_impl/aicpu/gamma.py +38 -0
  595. mindspore/ops/_op_impl/aicpu/gather.py +46 -0
  596. mindspore/ops/_op_impl/aicpu/gather_d.py +79 -0
  597. mindspore/ops/_op_impl/aicpu/gather_d_grad_v2.py +79 -0
  598. mindspore/ops/_op_impl/aicpu/gather_grad.py +54 -0
  599. mindspore/ops/_op_impl/aicpu/gather_nd.py +56 -0
  600. mindspore/ops/_op_impl/aicpu/gcd.py +32 -0
  601. mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +38 -0
  602. mindspore/ops/_op_impl/aicpu/geqrf.py +32 -0
  603. mindspore/ops/_op_impl/aicpu/get_next.py +39 -0
  604. mindspore/ops/_op_impl/aicpu/glu.py +33 -0
  605. mindspore/ops/_op_impl/aicpu/glu_grad.py +34 -0
  606. mindspore/ops/_op_impl/aicpu/greater.py +41 -0
  607. mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
  608. mindspore/ops/_op_impl/aicpu/grid_sampler_2d.py +35 -0
  609. mindspore/ops/_op_impl/aicpu/grid_sampler_2d_grad.py +38 -0
  610. mindspore/ops/_op_impl/aicpu/grid_sampler_3d.py +34 -0
  611. mindspore/ops/_op_impl/aicpu/grid_sampler_3d_grad.py +38 -0
  612. mindspore/ops/_op_impl/aicpu/hamming_window.py +57 -0
  613. mindspore/ops/_op_impl/aicpu/hard_sigmoid.py +32 -0
  614. mindspore/ops/_op_impl/aicpu/hard_sigmoid_grad.py +33 -0
  615. mindspore/ops/_op_impl/aicpu/heaviside.py +40 -0
  616. mindspore/ops/_op_impl/aicpu/histogram.py +35 -0
  617. mindspore/ops/_op_impl/aicpu/hsv_to_rgb.py +32 -0
  618. mindspore/ops/_op_impl/aicpu/hypot.py +32 -0
  619. mindspore/ops/_op_impl/aicpu/identity.py +42 -0
  620. mindspore/ops/_op_impl/aicpu/identity_n.py +41 -0
  621. mindspore/ops/_op_impl/aicpu/igamma.py +30 -0
  622. mindspore/ops/_op_impl/aicpu/igammac.py +30 -0
  623. mindspore/ops/_op_impl/aicpu/igammagrada.py +30 -0
  624. mindspore/ops/_op_impl/aicpu/im2col.py +43 -0
  625. mindspore/ops/_op_impl/aicpu/imag.py +31 -0
  626. mindspore/ops/_op_impl/aicpu/index_fill.py +54 -0
  627. mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
  628. mindspore/ops/_op_impl/aicpu/init_data_set_queue.py +27 -0
  629. mindspore/ops/_op_impl/aicpu/inplace_index_add.py +39 -0
  630. mindspore/ops/_op_impl/aicpu/instance_norm_v2.py +41 -0
  631. mindspore/ops/_op_impl/aicpu/instance_norm_v2_grad.py +44 -0
  632. mindspore/ops/_op_impl/aicpu/is_finite.py +40 -0
  633. mindspore/ops/_op_impl/aicpu/is_inf.py +31 -0
  634. mindspore/ops/_op_impl/aicpu/is_nan.py +31 -0
  635. mindspore/ops/_op_impl/aicpu/kldivloss.py +34 -0
  636. mindspore/ops/_op_impl/aicpu/kldivlossgrad.py +35 -0
  637. mindspore/ops/_op_impl/aicpu/layer_norm_grad_grad.py +47 -0
  638. mindspore/ops/_op_impl/aicpu/lcm.py +32 -0
  639. mindspore/ops/_op_impl/aicpu/left_shift.py +38 -0
  640. mindspore/ops/_op_impl/aicpu/less.py +41 -0
  641. mindspore/ops/_op_impl/aicpu/less_equal.py +41 -0
  642. mindspore/ops/_op_impl/aicpu/lgamma.py +33 -0
  643. mindspore/ops/_op_impl/aicpu/linear_sum_assignment.py +57 -0
  644. mindspore/ops/_op_impl/aicpu/linspace.py +33 -0
  645. mindspore/ops/_op_impl/aicpu/list_diff.py +50 -0
  646. mindspore/ops/_op_impl/aicpu/log.py +37 -0
  647. mindspore/ops/_op_impl/aicpu/log1p.py +34 -0
  648. mindspore/ops/_op_impl/aicpu/log_matrix_determinant.py +31 -0
  649. mindspore/ops/_op_impl/aicpu/log_normal_reverse.py +33 -0
  650. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +37 -0
  651. mindspore/ops/_op_impl/aicpu/logical_xor.py +30 -0
  652. mindspore/ops/_op_impl/aicpu/logit.py +33 -0
  653. mindspore/ops/_op_impl/aicpu/logit_grad.py +34 -0
  654. mindspore/ops/_op_impl/aicpu/logspace.py +36 -0
  655. mindspore/ops/_op_impl/aicpu/lower_bound.py +47 -0
  656. mindspore/ops/_op_impl/aicpu/lstsq.py +34 -0
  657. mindspore/ops/_op_impl/aicpu/lu.py +39 -0
  658. mindspore/ops/_op_impl/aicpu/lu_solve.py +32 -0
  659. mindspore/ops/_op_impl/aicpu/lu_unpack.py +114 -0
  660. mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +49 -0
  661. mindspore/ops/_op_impl/aicpu/masked_fill.py +42 -0
  662. mindspore/ops/_op_impl/aicpu/masked_scatter.py +40 -0
  663. mindspore/ops/_op_impl/aicpu/masked_select.py +31 -0
  664. mindspore/ops/_op_impl/aicpu/masked_select_grad.py +35 -0
  665. mindspore/ops/_op_impl/aicpu/matmul.py +39 -0
  666. mindspore/ops/_op_impl/aicpu/matrix_band_part.py +59 -0
  667. mindspore/ops/_op_impl/aicpu/matrix_determinant.py +30 -0
  668. mindspore/ops/_op_impl/aicpu/matrix_diag_part_v3.py +54 -0
  669. mindspore/ops/_op_impl/aicpu/matrix_diag_v3.py +56 -0
  670. mindspore/ops/_op_impl/aicpu/matrix_exp.py +34 -0
  671. mindspore/ops/_op_impl/aicpu/matrix_inverse.py +31 -0
  672. mindspore/ops/_op_impl/aicpu/matrix_logarithm.py +31 -0
  673. mindspore/ops/_op_impl/aicpu/matrix_power.py +37 -0
  674. mindspore/ops/_op_impl/aicpu/matrix_set_diag_v3.py +54 -0
  675. mindspore/ops/_op_impl/aicpu/matrix_solve.py +35 -0
  676. mindspore/ops/_op_impl/aicpu/matrix_solve_ls.py +36 -0
  677. mindspore/ops/_op_impl/aicpu/matrix_triangular_solve.py +36 -0
  678. mindspore/ops/_op_impl/aicpu/max_pool3d_grad_with_argmax.py +60 -0
  679. mindspore/ops/_op_impl/aicpu/max_pool3d_with_argmax.py +59 -0
  680. mindspore/ops/_op_impl/aicpu/max_unpool2d.py +57 -0
  681. mindspore/ops/_op_impl/aicpu/max_unpool2d_grad.py +58 -0
  682. mindspore/ops/_op_impl/aicpu/max_unpool3d.py +57 -0
  683. mindspore/ops/_op_impl/aicpu/max_unpool3d_grad.py +58 -0
  684. mindspore/ops/_op_impl/aicpu/maximum_grad_grad.py +40 -0
  685. mindspore/ops/_op_impl/aicpu/maxpool_grad_v1.py +46 -0
  686. mindspore/ops/_op_impl/aicpu/maxpool_v1.py +42 -0
  687. mindspore/ops/_op_impl/aicpu/median.py +39 -0
  688. mindspore/ops/_op_impl/aicpu/median_grad.py +45 -0
  689. mindspore/ops/_op_impl/aicpu/meshgrid.py +41 -0
  690. mindspore/ops/_op_impl/aicpu/minimum_grad_grad.py +40 -0
  691. mindspore/ops/_op_impl/aicpu/mirror_pad.py +50 -0
  692. mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +48 -0
  693. mindspore/ops/_op_impl/aicpu/mul.py +43 -0
  694. mindspore/ops/_op_impl/aicpu/mul_no_nan.py +42 -0
  695. mindspore/ops/_op_impl/aicpu/multi_margin_loss.py +37 -0
  696. mindspore/ops/_op_impl/aicpu/multi_margin_loss_grad.py +41 -0
  697. mindspore/ops/_op_impl/aicpu/multilabel_margin_loss_grad.py +37 -0
  698. mindspore/ops/_op_impl/aicpu/multinomial.py +47 -0
  699. mindspore/ops/_op_impl/aicpu/multinomial_with_replacement.py +35 -0
  700. mindspore/ops/_op_impl/aicpu/mvlgamma.py +32 -0
  701. mindspore/ops/_op_impl/aicpu/mvlgamma_grad.py +33 -0
  702. mindspore/ops/_op_impl/aicpu/nan_to_num.py +34 -0
  703. mindspore/ops/_op_impl/aicpu/neg.py +36 -0
  704. mindspore/ops/_op_impl/aicpu/nextafter.py +32 -0
  705. mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
  706. mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
  707. mindspore/ops/_op_impl/aicpu/no_repeat_ngram.py +34 -0
  708. mindspore/ops/_op_impl/aicpu/non_deterministic_ints.py +33 -0
  709. mindspore/ops/_op_impl/aicpu/non_max_suppression.py +36 -0
  710. mindspore/ops/_op_impl/aicpu/non_max_suppression_with_overlaps.py +35 -0
  711. mindspore/ops/_op_impl/aicpu/non_zero.py +43 -0
  712. mindspore/ops/_op_impl/aicpu/not_equal.py +39 -0
  713. mindspore/ops/_op_impl/aicpu/nth_element.py +39 -0
  714. mindspore/ops/_op_impl/aicpu/nuclear_norm.py +33 -0
  715. mindspore/ops/_op_impl/aicpu/one_hot.py +116 -0
  716. mindspore/ops/_op_impl/aicpu/ones_like.py +39 -0
  717. mindspore/ops/_op_impl/aicpu/orgqr.py +34 -0
  718. mindspore/ops/_op_impl/aicpu/pad_and_shift.py +33 -0
  719. mindspore/ops/_op_impl/aicpu/pad_v3.py +61 -0
  720. mindspore/ops/_op_impl/aicpu/pad_v3_grad.py +59 -0
  721. mindspore/ops/_op_impl/aicpu/padding.py +41 -0
  722. mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +54 -0
  723. mindspore/ops/_op_impl/aicpu/pdist_grad.py +33 -0
  724. mindspore/ops/_op_impl/aicpu/poisson.py +37 -0
  725. mindspore/ops/_op_impl/aicpu/polar.py +32 -0
  726. mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
  727. mindspore/ops/_op_impl/aicpu/pow.py +39 -0
  728. mindspore/ops/_op_impl/aicpu/print_tensor.py +39 -0
  729. mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +113 -0
  730. mindspore/ops/_op_impl/aicpu/qr.py +36 -0
  731. mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
  732. mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
  733. mindspore/ops/_op_impl/aicpu/ragged_range.py +49 -0
  734. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
  735. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_tensor.py +74 -0
  736. mindspore/ops/_op_impl/aicpu/random_categorical.py +68 -0
  737. mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +36 -0
  738. mindspore/ops/_op_impl/aicpu/random_gamma.py +38 -0
  739. mindspore/ops/_op_impl/aicpu/random_poisson.py +134 -0
  740. mindspore/ops/_op_impl/aicpu/random_shuffle.py +47 -0
  741. mindspore/ops/_op_impl/aicpu/randperm.py +38 -0
  742. mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
  743. mindspore/ops/_op_impl/aicpu/range.py +36 -0
  744. mindspore/ops/_op_impl/aicpu/range_v2.py +35 -0
  745. mindspore/ops/_op_impl/aicpu/real.py +31 -0
  746. mindspore/ops/_op_impl/aicpu/real_div.py +40 -0
  747. mindspore/ops/_op_impl/aicpu/reciprocal.py +34 -0
  748. mindspore/ops/_op_impl/aicpu/reciprocal_grad.py +35 -0
  749. mindspore/ops/_op_impl/aicpu/reduce_mean.py +57 -0
  750. mindspore/ops/_op_impl/aicpu/reduce_prod.py +57 -0
  751. mindspore/ops/_op_impl/aicpu/reduce_sum.py +57 -0
  752. mindspore/ops/_op_impl/aicpu/relu_grad_v3.py +41 -0
  753. mindspore/ops/_op_impl/aicpu/relu_v3.py +38 -0
  754. mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +96 -0
  755. mindspore/ops/_op_impl/aicpu/reshape.py +42 -0
  756. mindspore/ops/_op_impl/aicpu/resize_area.py +40 -0
  757. mindspore/ops/_op_impl/aicpu/resize_bicubic.py +20 -0
  758. mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +19 -0
  759. mindspore/ops/_op_impl/aicpu/resize_bilinear.py +32 -0
  760. mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py +32 -0
  761. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2.py +36 -0
  762. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2_grad.py +35 -0
  763. mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
  764. mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
  765. mindspore/ops/_op_impl/aicpu/reverse_sequence.py +55 -0
  766. mindspore/ops/_op_impl/aicpu/reversev2.py +54 -0
  767. mindspore/ops/_op_impl/aicpu/rgb_to_hsv.py +32 -0
  768. mindspore/ops/_op_impl/aicpu/right_shift.py +38 -0
  769. mindspore/ops/_op_impl/aicpu/rnnt_loss.py +35 -0
  770. mindspore/ops/_op_impl/aicpu/round.py +34 -0
  771. mindspore/ops/_op_impl/aicpu/rsqrt.py +33 -0
  772. mindspore/ops/_op_impl/aicpu/rsqrt_grad.py +36 -0
  773. mindspore/ops/_op_impl/aicpu/sample_distorted_bounding_box_v2.py +49 -0
  774. mindspore/ops/_op_impl/aicpu/scale_and_translate.py +52 -0
  775. mindspore/ops/_op_impl/aicpu/scale_and_translate_grad.py +36 -0
  776. mindspore/ops/_op_impl/aicpu/scatter.py +79 -0
  777. mindspore/ops/_op_impl/aicpu/scatter_add_with_axis.py +53 -0
  778. mindspore/ops/_op_impl/aicpu/scatter_elements.py +39 -0
  779. mindspore/ops/_op_impl/aicpu/scatter_nd.py +59 -0
  780. mindspore/ops/_op_impl/aicpu/scatter_nd_max.py +54 -0
  781. mindspore/ops/_op_impl/aicpu/scatter_nd_min.py +54 -0
  782. mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +59 -0
  783. mindspore/ops/_op_impl/aicpu/search_sorted.py +44 -0
  784. mindspore/ops/_op_impl/aicpu/segment_max.py +52 -0
  785. mindspore/ops/_op_impl/aicpu/segment_mean.py +56 -0
  786. mindspore/ops/_op_impl/aicpu/segment_min.py +52 -0
  787. mindspore/ops/_op_impl/aicpu/segment_prod.py +56 -0
  788. mindspore/ops/_op_impl/aicpu/segment_sum.py +56 -0
  789. mindspore/ops/_op_impl/aicpu/select.py +45 -0
  790. mindspore/ops/_op_impl/aicpu/self_adjoint_eig.py +34 -0
  791. mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
  792. mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
  793. mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
  794. mindspore/ops/_op_impl/aicpu/sequence_concat.py +40 -0
  795. mindspore/ops/_op_impl/aicpu/sequence_stack.py +40 -0
  796. mindspore/ops/_op_impl/aicpu/set_size.py +38 -0
  797. mindspore/ops/_op_impl/aicpu/sign.py +36 -0
  798. mindspore/ops/_op_impl/aicpu/sin.py +34 -0
  799. mindspore/ops/_op_impl/aicpu/sinc.py +43 -0
  800. mindspore/ops/_op_impl/aicpu/sinh.py +34 -0
  801. mindspore/ops/_op_impl/aicpu/slice.py +59 -0
  802. mindspore/ops/_op_impl/aicpu/slice_grad.py +76 -0
  803. mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
  804. mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
  805. mindspore/ops/_op_impl/aicpu/sort.py +39 -0
  806. mindspore/ops/_op_impl/aicpu/space_to_depth.py +44 -0
  807. mindspore/ops/_op_impl/aicpu/sparse_addmm.py +87 -0
  808. mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +80 -0
  809. mindspore/ops/_op_impl/aicpu/sparse_apply_centered_rms_prop.py +105 -0
  810. mindspore/ops/_op_impl/aicpu/sparse_apply_momentum.py +80 -0
  811. mindspore/ops/_op_impl/aicpu/sparse_apply_proximal_gradient_descent.py +79 -0
  812. mindspore/ops/_op_impl/aicpu/sparse_concat.py +59 -0
  813. mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
  814. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_add.py +58 -0
  815. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_div.py +58 -0
  816. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_mul.py +58 -0
  817. mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows.py +63 -0
  818. mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows_grad.py +45 -0
  819. mindspore/ops/_op_impl/aicpu/sparse_matrix_mat_mul.py +56 -0
  820. mindspore/ops/_op_impl/aicpu/sparse_matrix_nnz.py +81 -0
  821. mindspore/ops/_op_impl/aicpu/sparse_matrix_transpose.py +116 -0
  822. mindspore/ops/_op_impl/aicpu/sparse_reorder.py +56 -0
  823. mindspore/ops/_op_impl/aicpu/sparse_reshape.py +34 -0
  824. mindspore/ops/_op_impl/aicpu/sparse_segment_mean_grad.py +36 -0
  825. mindspore/ops/_op_impl/aicpu/sparse_segment_mean_with_num_segments.py +44 -0
  826. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n.py +43 -0
  827. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n_grad.py +38 -0
  828. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n_with_num_segments.py +44 -0
  829. mindspore/ops/_op_impl/aicpu/sparse_segment_sum.py +49 -0
  830. mindspore/ops/_op_impl/aicpu/sparse_segment_sum_with_num_segments.py +68 -0
  831. mindspore/ops/_op_impl/aicpu/sparse_slice.py +63 -0
  832. mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +61 -0
  833. mindspore/ops/_op_impl/aicpu/sparse_softmax.py +33 -0
  834. mindspore/ops/_op_impl/aicpu/sparse_softmax_cross_entropy_with_logits_v2.py +35 -0
  835. mindspore/ops/_op_impl/aicpu/sparse_sparse_maximum.py +53 -0
  836. mindspore/ops/_op_impl/aicpu/sparse_sparse_minimum.py +53 -0
  837. mindspore/ops/_op_impl/aicpu/sparse_tensor_dense_add.py +84 -0
  838. mindspore/ops/_op_impl/aicpu/sparse_tensor_dense_mat_mul.py +190 -0
  839. mindspore/ops/_op_impl/aicpu/sparse_tensor_to_csr_sparse_matrix.py +51 -0
  840. mindspore/ops/_op_impl/aicpu/sparse_to_dense_v2.py +73 -0
  841. mindspore/ops/_op_impl/aicpu/split.py +45 -0
  842. mindspore/ops/_op_impl/aicpu/sqrt.py +34 -0
  843. mindspore/ops/_op_impl/aicpu/sqrt_grad.py +35 -0
  844. mindspore/ops/_op_impl/aicpu/square.py +35 -0
  845. mindspore/ops/_op_impl/aicpu/squared_difference.py +37 -0
  846. mindspore/ops/_op_impl/aicpu/squeeze.py +42 -0
  847. mindspore/ops/_op_impl/aicpu/sspaddmm.py +97 -0
  848. mindspore/ops/_op_impl/aicpu/stack.py +45 -0
  849. mindspore/ops/_op_impl/aicpu/stack_push_pop.py +87 -0
  850. mindspore/ops/_op_impl/aicpu/standard_laplace.py +34 -0
  851. mindspore/ops/_op_impl/aicpu/standard_normal.py +34 -0
  852. mindspore/ops/_op_impl/aicpu/stateless_dropout_genmask.py +37 -0
  853. mindspore/ops/_op_impl/aicpu/stft.py +70 -0
  854. mindspore/ops/_op_impl/aicpu/strided_slice.py +43 -0
  855. mindspore/ops/_op_impl/aicpu/strided_slice_grad.py +50 -0
  856. mindspore/ops/_op_impl/aicpu/sub.py +41 -0
  857. mindspore/ops/_op_impl/aicpu/sub_and_filter.py +36 -0
  858. mindspore/ops/_op_impl/aicpu/tan.py +34 -0
  859. mindspore/ops/_op_impl/aicpu/tanh.py +34 -0
  860. mindspore/ops/_op_impl/aicpu/tanh_grad.py +35 -0
  861. mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
  862. mindspore/ops/_op_impl/aicpu/tile.py +56 -0
  863. mindspore/ops/_op_impl/aicpu/topk.py +34 -0
  864. mindspore/ops/_op_impl/aicpu/trace.py +40 -0
  865. mindspore/ops/_op_impl/aicpu/tracegrad.py +41 -0
  866. mindspore/ops/_op_impl/aicpu/trans_data.py +35 -0
  867. mindspore/ops/_op_impl/aicpu/transpose.py +58 -0
  868. mindspore/ops/_op_impl/aicpu/tridiagonal_matmul.py +42 -0
  869. mindspore/ops/_op_impl/aicpu/tridiagonal_solve.py +35 -0
  870. mindspore/ops/_op_impl/aicpu/tril.py +42 -0
  871. mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
  872. mindspore/ops/_op_impl/aicpu/triplet_margin_loss.py +62 -0
  873. mindspore/ops/_op_impl/aicpu/triu.py +43 -0
  874. mindspore/ops/_op_impl/aicpu/triu_indices.py +34 -0
  875. mindspore/ops/_op_impl/aicpu/truncated_normal.py +39 -0
  876. mindspore/ops/_op_impl/aicpu/uniform.py +36 -0
  877. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +41 -0
  878. mindspore/ops/_op_impl/aicpu/uniform_int.py +36 -0
  879. mindspore/ops/_op_impl/aicpu/uniform_real.py +33 -0
  880. mindspore/ops/_op_impl/aicpu/unique.py +31 -0
  881. mindspore/ops/_op_impl/aicpu/unique_consecutive.py +47 -0
  882. mindspore/ops/_op_impl/aicpu/unique_with_pad.py +32 -0
  883. mindspore/ops/_op_impl/aicpu/unravel_index.py +32 -0
  884. mindspore/ops/_op_impl/aicpu/unsorted_segment_prod.py +53 -0
  885. mindspore/ops/_op_impl/aicpu/unsorted_segment_sum.py +57 -0
  886. mindspore/ops/_op_impl/aicpu/unstack.py +45 -0
  887. mindspore/ops/_op_impl/aicpu/update_cache.py +44 -0
  888. mindspore/ops/_op_impl/aicpu/upper_bound.py +47 -0
  889. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d.py +42 -0
  890. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d_grad.py +49 -0
  891. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d.py +40 -0
  892. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d_grad.py +50 -0
  893. mindspore/ops/_op_impl/aicpu/xdivy.py +35 -0
  894. mindspore/ops/_op_impl/aicpu/xlogy.py +33 -0
  895. mindspore/ops/_op_impl/aicpu/zeros_like.py +42 -0
  896. mindspore/ops/_op_impl/aicpu/zeta.py +31 -0
  897. mindspore/ops/_op_impl/akg/__init__.py +19 -0
  898. mindspore/ops/_op_impl/akg/ascend/__init__.py +48 -0
  899. mindspore/ops/_op_impl/akg/ascend/abs.py +35 -0
  900. mindspore/ops/_op_impl/akg/ascend/add.py +42 -0
  901. mindspore/ops/_op_impl/akg/ascend/add_n.py +37 -0
  902. mindspore/ops/_op_impl/akg/ascend/batchmatmul.py +33 -0
  903. mindspore/ops/_op_impl/akg/ascend/cast.py +46 -0
  904. mindspore/ops/_op_impl/akg/ascend/equal.py +35 -0
  905. mindspore/ops/_op_impl/akg/ascend/exp.py +35 -0
  906. mindspore/ops/_op_impl/akg/ascend/expand_dims.py +33 -0
  907. mindspore/ops/_op_impl/akg/ascend/greater.py +34 -0
  908. mindspore/ops/_op_impl/akg/ascend/greater_equal.py +35 -0
  909. mindspore/ops/_op_impl/akg/ascend/less.py +31 -0
  910. mindspore/ops/_op_impl/akg/ascend/less_equal.py +35 -0
  911. mindspore/ops/_op_impl/akg/ascend/load_im2col.py +33 -0
  912. mindspore/ops/_op_impl/akg/ascend/log.py +34 -0
  913. mindspore/ops/_op_impl/akg/ascend/maximum.py +36 -0
  914. mindspore/ops/_op_impl/akg/ascend/minimum.py +39 -0
  915. mindspore/ops/_op_impl/akg/ascend/mul.py +41 -0
  916. mindspore/ops/_op_impl/akg/ascend/neg.py +37 -0
  917. mindspore/ops/_op_impl/akg/ascend/pow.py +35 -0
  918. mindspore/ops/_op_impl/akg/ascend/prod_force_se_a.py +33 -0
  919. mindspore/ops/_op_impl/akg/ascend/real_div.py +36 -0
  920. mindspore/ops/_op_impl/akg/ascend/reciprocal.py +32 -0
  921. mindspore/ops/_op_impl/akg/ascend/reduce_max.py +32 -0
  922. mindspore/ops/_op_impl/akg/ascend/reduce_min.py +32 -0
  923. mindspore/ops/_op_impl/akg/ascend/reduce_sum.py +37 -0
  924. mindspore/ops/_op_impl/akg/ascend/rsqrt.py +35 -0
  925. mindspore/ops/_op_impl/akg/ascend/select.py +37 -0
  926. mindspore/ops/_op_impl/akg/ascend/sqrt.py +35 -0
  927. mindspore/ops/_op_impl/akg/ascend/square.py +35 -0
  928. mindspore/ops/_op_impl/akg/ascend/sub.py +42 -0
  929. mindspore/ops/_op_impl/akg/cpu/__init__.py +23 -0
  930. mindspore/ops/_op_impl/akg/cpu/coo2csr.py +29 -0
  931. mindspore/ops/_op_impl/akg/cpu/csr2coo.py +29 -0
  932. mindspore/ops/_op_impl/akg/cpu/csr_gather.py +33 -0
  933. mindspore/ops/_op_impl/akg/cpu/csr_mm.py +34 -0
  934. mindspore/ops/_op_impl/akg/cpu/csr_mul.py +33 -0
  935. mindspore/ops/_op_impl/akg/cpu/csr_mv.py +33 -0
  936. mindspore/ops/_op_impl/akg/cpu/csr_reduce_sum.py +31 -0
  937. mindspore/ops/_op_impl/akg/gpu/__init__.py +24 -0
  938. mindspore/ops/_op_impl/akg/gpu/coo2csr.py +29 -0
  939. mindspore/ops/_op_impl/akg/gpu/csr2coo.py +29 -0
  940. mindspore/ops/_op_impl/akg/gpu/csr_div.py +36 -0
  941. mindspore/ops/_op_impl/akg/gpu/csr_gather.py +33 -0
  942. mindspore/ops/_op_impl/akg/gpu/csr_mm.py +37 -0
  943. mindspore/ops/_op_impl/akg/gpu/csr_mul.py +36 -0
  944. mindspore/ops/_op_impl/akg/gpu/csr_mv.py +36 -0
  945. mindspore/ops/_op_impl/akg/gpu/csr_reduce_sum.py +33 -0
  946. mindspore/ops/_op_impl/cpu/__init__.py +78 -0
  947. mindspore/ops/_op_impl/cpu/adam.py +49 -0
  948. mindspore/ops/_op_impl/cpu/adam_weight_decay.py +47 -0
  949. mindspore/ops/_op_impl/cpu/arg_max.py +30 -0
  950. mindspore/ops/_op_impl/cpu/arg_max_with_value.py +31 -0
  951. mindspore/ops/_op_impl/cpu/arg_min_with_value.py +31 -0
  952. mindspore/ops/_op_impl/cpu/buffer_append.py +28 -0
  953. mindspore/ops/_op_impl/cpu/buffer_get.py +28 -0
  954. mindspore/ops/_op_impl/cpu/buffer_sample.py +28 -0
  955. mindspore/ops/_op_impl/cpu/cast.py +171 -0
  956. mindspore/ops/_op_impl/cpu/concat_offset.py +38 -0
  957. mindspore/ops/_op_impl/cpu/conv2d.py +30 -0
  958. mindspore/ops/_op_impl/cpu/conv3d.py +30 -0
  959. mindspore/ops/_op_impl/cpu/div.py +32 -0
  960. mindspore/ops/_op_impl/cpu/dropout.py +31 -0
  961. mindspore/ops/_op_impl/cpu/dropout_grad.py +30 -0
  962. mindspore/ops/_op_impl/cpu/dynamic_shape.py +42 -0
  963. mindspore/ops/_op_impl/cpu/dynamic_stitch.py +41 -0
  964. mindspore/ops/_op_impl/cpu/equal_count.py +30 -0
  965. mindspore/ops/_op_impl/cpu/gather_d.py +49 -0
  966. mindspore/ops/_op_impl/cpu/gather_d_grad.py +38 -0
  967. mindspore/ops/_op_impl/cpu/gather_d_grad_v2.py +40 -0
  968. mindspore/ops/_op_impl/cpu/gather_v2.py +40 -0
  969. mindspore/ops/_op_impl/cpu/hsigmoid.py +33 -0
  970. mindspore/ops/_op_impl/cpu/hsigmoid_grad.py +34 -0
  971. mindspore/ops/_op_impl/cpu/hswish.py +32 -0
  972. mindspore/ops/_op_impl/cpu/hswish_grad.py +33 -0
  973. mindspore/ops/_op_impl/cpu/identity_n.py +40 -0
  974. mindspore/ops/_op_impl/cpu/is_finite.py +39 -0
  975. mindspore/ops/_op_impl/cpu/l2loss.py +30 -0
  976. mindspore/ops/_op_impl/cpu/layer_norm.py +36 -0
  977. mindspore/ops/_op_impl/cpu/layer_norm_grad.py +38 -0
  978. mindspore/ops/_op_impl/cpu/maximum.py +35 -0
  979. mindspore/ops/_op_impl/cpu/maximum_grad.py +47 -0
  980. mindspore/ops/_op_impl/cpu/minimum.py +40 -0
  981. mindspore/ops/_op_impl/cpu/minimum_grad.py +51 -0
  982. mindspore/ops/_op_impl/cpu/mirror_pad.py +36 -0
  983. mindspore/ops/_op_impl/cpu/mirror_pad_grad.py +36 -0
  984. mindspore/ops/_op_impl/cpu/mul.py +32 -0
  985. mindspore/ops/_op_impl/cpu/one_hot.py +31 -0
  986. mindspore/ops/_op_impl/cpu/pad.py +32 -0
  987. mindspore/ops/_op_impl/cpu/pow.py +32 -0
  988. mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +42 -0
  989. mindspore/ops/_op_impl/cpu/pyexecute.py +29 -0
  990. mindspore/ops/_op_impl/cpu/pyfunc.py +29 -0
  991. mindspore/ops/_op_impl/cpu/range.py +34 -0
  992. mindspore/ops/_op_impl/cpu/real_div.py +33 -0
  993. mindspore/ops/_op_impl/cpu/reduce_all.py +29 -0
  994. mindspore/ops/_op_impl/cpu/reduce_any.py +29 -0
  995. mindspore/ops/_op_impl/cpu/reduce_max.py +32 -0
  996. mindspore/ops/_op_impl/cpu/reduce_mean.py +40 -0
  997. mindspore/ops/_op_impl/cpu/reduce_min.py +32 -0
  998. mindspore/ops/_op_impl/cpu/reduce_prod.py +40 -0
  999. mindspore/ops/_op_impl/cpu/reduce_std.py +31 -0
  1000. mindspore/ops/_op_impl/cpu/reduce_sum.py +41 -0
  1001. mindspore/ops/_op_impl/cpu/space_to_batch_nd.py +38 -0
  1002. mindspore/ops/_op_impl/cpu/sparse_slice.py +62 -0
  1003. mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +60 -0
  1004. mindspore/ops/_op_impl/cpu/split.py +34 -0
  1005. mindspore/ops/_op_impl/cpu/sspaddmm.py +95 -0
  1006. mindspore/ops/_op_impl/cpu/stack.py +38 -0
  1007. mindspore/ops/_op_impl/cpu/sub.py +32 -0
  1008. mindspore/ops/_op_impl/cpu/tensor_copy_slices.py +41 -0
  1009. mindspore/ops/_op_impl/cpu/tile.py +37 -0
  1010. mindspore/ops/_op_impl/cpu/top_k.py +31 -0
  1011. mindspore/ops/_op_impl/cpu/transpose.py +39 -0
  1012. mindspore/ops/_primitive_cache.py +90 -0
  1013. mindspore/ops/_register_for_op.py +73 -0
  1014. mindspore/ops/_utils/__init__.py +20 -0
  1015. mindspore/ops/_utils/utils.py +147 -0
  1016. mindspore/ops/_vmap/__init__.py +25 -0
  1017. mindspore/ops/_vmap/vmap_array_ops.py +2149 -0
  1018. mindspore/ops/_vmap/vmap_base.py +533 -0
  1019. mindspore/ops/_vmap/vmap_convolution_ops.py +441 -0
  1020. mindspore/ops/_vmap/vmap_debug_ops.py +50 -0
  1021. mindspore/ops/_vmap/vmap_grad_math_ops.py +274 -0
  1022. mindspore/ops/_vmap/vmap_grad_nn_ops.py +806 -0
  1023. mindspore/ops/_vmap/vmap_image_ops.py +194 -0
  1024. mindspore/ops/_vmap/vmap_math_ops.py +993 -0
  1025. mindspore/ops/_vmap/vmap_nn_ops.py +2250 -0
  1026. mindspore/ops/_vmap/vmap_other_ops.py +105 -0
  1027. mindspore/ops/_vmap/vmap_random_ops.py +122 -0
  1028. mindspore/ops/_vmap/vmap_sparse_ops.py +89 -0
  1029. mindspore/ops/auto_generate/__init__.py +31 -0
  1030. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +309 -0
  1031. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +252 -0
  1032. mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
  1033. mindspore/ops/auto_generate/gen_extend_func.py +1701 -0
  1034. mindspore/ops/auto_generate/gen_ops_def.py +8482 -0
  1035. mindspore/ops/auto_generate/gen_ops_prim.py +16704 -0
  1036. mindspore/ops/auto_generate/pyboost_inner_prim.py +549 -0
  1037. mindspore/ops/composite/__init__.py +71 -0
  1038. mindspore/ops/composite/base.py +1318 -0
  1039. mindspore/ops/composite/env_ops.py +41 -0
  1040. mindspore/ops/composite/math_ops.py +125 -0
  1041. mindspore/ops/composite/multitype_ops/__init__.py +77 -0
  1042. mindspore/ops/composite/multitype_ops/_compile_utils.py +1459 -0
  1043. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +897 -0
  1044. mindspore/ops/composite/multitype_ops/add_impl.py +606 -0
  1045. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +56 -0
  1046. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +56 -0
  1047. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +56 -0
  1048. mindspore/ops/composite/multitype_ops/div_impl.py +189 -0
  1049. mindspore/ops/composite/multitype_ops/equal_impl.py +335 -0
  1050. mindspore/ops/composite/multitype_ops/floordiv_impl.py +88 -0
  1051. mindspore/ops/composite/multitype_ops/getitem_impl.py +400 -0
  1052. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +109 -0
  1053. mindspore/ops/composite/multitype_ops/greater_impl.py +110 -0
  1054. mindspore/ops/composite/multitype_ops/in_impl.py +196 -0
  1055. mindspore/ops/composite/multitype_ops/left_shift_impl.py +37 -0
  1056. mindspore/ops/composite/multitype_ops/less_equal_impl.py +111 -0
  1057. mindspore/ops/composite/multitype_ops/less_impl.py +112 -0
  1058. mindspore/ops/composite/multitype_ops/logic_not_impl.py +113 -0
  1059. mindspore/ops/composite/multitype_ops/logical_and_impl.py +60 -0
  1060. mindspore/ops/composite/multitype_ops/logical_or_impl.py +61 -0
  1061. mindspore/ops/composite/multitype_ops/mod_impl.py +86 -0
  1062. mindspore/ops/composite/multitype_ops/mul_impl.py +294 -0
  1063. mindspore/ops/composite/multitype_ops/negative_impl.py +79 -0
  1064. mindspore/ops/composite/multitype_ops/not_equal_impl.py +290 -0
  1065. mindspore/ops/composite/multitype_ops/not_in_impl.py +196 -0
  1066. mindspore/ops/composite/multitype_ops/ones_like_impl.py +96 -0
  1067. mindspore/ops/composite/multitype_ops/pow_impl.py +87 -0
  1068. mindspore/ops/composite/multitype_ops/right_shift_impl.py +37 -0
  1069. mindspore/ops/composite/multitype_ops/setitem_impl.py +884 -0
  1070. mindspore/ops/composite/multitype_ops/sub_impl.py +116 -0
  1071. mindspore/ops/composite/multitype_ops/uadd_impl.py +29 -0
  1072. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +228 -0
  1073. mindspore/ops/deprecated.py +315 -0
  1074. mindspore/ops/function/__init__.py +782 -0
  1075. mindspore/ops/function/array_func.py +7226 -0
  1076. mindspore/ops/function/clip_func.py +384 -0
  1077. mindspore/ops/function/debug_func.py +181 -0
  1078. mindspore/ops/function/fft_func.py +44 -0
  1079. mindspore/ops/function/grad/__init__.py +34 -0
  1080. mindspore/ops/function/grad/grad_func.py +1425 -0
  1081. mindspore/ops/function/image_func.py +292 -0
  1082. mindspore/ops/function/linalg_func.py +416 -0
  1083. mindspore/ops/function/math_func.py +12228 -0
  1084. mindspore/ops/function/nn_func.py +8609 -0
  1085. mindspore/ops/function/other_func.py +115 -0
  1086. mindspore/ops/function/parameter_func.py +134 -0
  1087. mindspore/ops/function/random_func.py +1715 -0
  1088. mindspore/ops/function/reshard_func.py +104 -0
  1089. mindspore/ops/function/sparse_func.py +884 -0
  1090. mindspore/ops/function/sparse_unary_func.py +2422 -0
  1091. mindspore/ops/function/spectral_func.py +150 -0
  1092. mindspore/ops/function/vmap_func.py +117 -0
  1093. mindspore/ops/functional.py +464 -0
  1094. mindspore/ops/op_info_register.py +1572 -0
  1095. mindspore/ops/operations/__init__.py +722 -0
  1096. mindspore/ops/operations/_csr_ops.py +403 -0
  1097. mindspore/ops/operations/_custom_grad.py +181 -0
  1098. mindspore/ops/operations/_embedding_cache_ops.py +307 -0
  1099. mindspore/ops/operations/_grad_ops.py +2978 -0
  1100. mindspore/ops/operations/_infer_ops.py +19 -0
  1101. mindspore/ops/operations/_inner_ops.py +2544 -0
  1102. mindspore/ops/operations/_map_tensor_ops.py +112 -0
  1103. mindspore/ops/operations/_ms_kernel.py +601 -0
  1104. mindspore/ops/operations/_ocr_ops.py +379 -0
  1105. mindspore/ops/operations/_opaque_predicate_registry.py +41 -0
  1106. mindspore/ops/operations/_pyfunc_registry.py +58 -0
  1107. mindspore/ops/operations/_quant_ops.py +1844 -0
  1108. mindspore/ops/operations/_rl_inner_ops.py +1231 -0
  1109. mindspore/ops/operations/_scalar_ops.py +106 -0
  1110. mindspore/ops/operations/_sequence_ops.py +1155 -0
  1111. mindspore/ops/operations/_sparse_grad_ops.py +56 -0
  1112. mindspore/ops/operations/_tensor_array.py +359 -0
  1113. mindspore/ops/operations/_thor_ops.py +807 -0
  1114. mindspore/ops/operations/array_ops.py +6124 -0
  1115. mindspore/ops/operations/comm_ops.py +1985 -0
  1116. mindspore/ops/operations/control_ops.py +127 -0
  1117. mindspore/ops/operations/custom_ops.py +1129 -0
  1118. mindspore/ops/operations/debug_ops.py +678 -0
  1119. mindspore/ops/operations/image_ops.py +1041 -0
  1120. mindspore/ops/operations/inner_ops.py +697 -0
  1121. mindspore/ops/operations/linalg_ops.py +95 -0
  1122. mindspore/ops/operations/manually_defined/__init__.py +24 -0
  1123. mindspore/ops/operations/manually_defined/_inner.py +73 -0
  1124. mindspore/ops/operations/manually_defined/ops_def.py +2271 -0
  1125. mindspore/ops/operations/math_ops.py +5095 -0
  1126. mindspore/ops/operations/nn_ops.py +9575 -0
  1127. mindspore/ops/operations/other_ops.py +874 -0
  1128. mindspore/ops/operations/random_ops.py +1288 -0
  1129. mindspore/ops/operations/reshard_ops.py +53 -0
  1130. mindspore/ops/operations/rl_ops.py +288 -0
  1131. mindspore/ops/operations/sparse_ops.py +2753 -0
  1132. mindspore/ops/operations/spectral_ops.py +111 -0
  1133. mindspore/ops/primitive.py +1046 -0
  1134. mindspore/ops/signature.py +54 -0
  1135. mindspore/ops/vm_impl_registry.py +91 -0
  1136. mindspore/ops_generate/__init__.py +27 -0
  1137. mindspore/ops_generate/arg_dtype_cast.py +252 -0
  1138. mindspore/ops_generate/arg_handler.py +197 -0
  1139. mindspore/ops_generate/gen_aclnn_implement.py +263 -0
  1140. mindspore/ops_generate/gen_constants.py +36 -0
  1141. mindspore/ops_generate/gen_ops.py +1099 -0
  1142. mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
  1143. mindspore/ops_generate/gen_pyboost_func.py +1052 -0
  1144. mindspore/ops_generate/gen_utils.py +209 -0
  1145. mindspore/ops_generate/op_proto.py +145 -0
  1146. mindspore/ops_generate/pyboost_utils.py +367 -0
  1147. mindspore/ops_generate/template.py +261 -0
  1148. mindspore/parallel/__init__.py +30 -0
  1149. mindspore/parallel/_auto_parallel_context.py +1486 -0
  1150. mindspore/parallel/_cell_wrapper.py +174 -0
  1151. mindspore/parallel/_cost_model_context.py +700 -0
  1152. mindspore/parallel/_dp_allreduce_fusion.py +159 -0
  1153. mindspore/parallel/_offload_context.py +275 -0
  1154. mindspore/parallel/_parallel_serialization.py +561 -0
  1155. mindspore/parallel/_ps_context.py +242 -0
  1156. mindspore/parallel/_recovery_context.py +110 -0
  1157. mindspore/parallel/_tensor.py +730 -0
  1158. mindspore/parallel/_transformer/__init__.py +35 -0
  1159. mindspore/parallel/_transformer/layers.py +765 -0
  1160. mindspore/parallel/_transformer/loss.py +251 -0
  1161. mindspore/parallel/_transformer/moe.py +693 -0
  1162. mindspore/parallel/_transformer/op_parallel_config.py +222 -0
  1163. mindspore/parallel/_transformer/transformer.py +3119 -0
  1164. mindspore/parallel/_utils.py +612 -0
  1165. mindspore/parallel/algo_parameter_config.py +400 -0
  1166. mindspore/parallel/checkpoint_transform.py +650 -0
  1167. mindspore/parallel/cluster/__init__.py +15 -0
  1168. mindspore/parallel/cluster/process_entity/__init__.py +18 -0
  1169. mindspore/parallel/cluster/process_entity/_api.py +352 -0
  1170. mindspore/parallel/cluster/process_entity/_utils.py +101 -0
  1171. mindspore/parallel/cluster/run.py +136 -0
  1172. mindspore/parallel/mpi/__init__.py +14 -0
  1173. mindspore/parallel/mpi/_mpi_config.py +116 -0
  1174. mindspore/parallel/parameter_broadcast.py +151 -0
  1175. mindspore/parallel/shard.py +481 -0
  1176. mindspore/parallel/transform_safetensors.py +993 -0
  1177. mindspore/profiler/__init__.py +28 -0
  1178. mindspore/profiler/common/__init__.py +14 -0
  1179. mindspore/profiler/common/constant.py +29 -0
  1180. mindspore/profiler/common/exceptions/__init__.py +14 -0
  1181. mindspore/profiler/common/exceptions/error_code.py +83 -0
  1182. mindspore/profiler/common/exceptions/exceptions.py +286 -0
  1183. mindspore/profiler/common/process_pool.py +41 -0
  1184. mindspore/profiler/common/registry.py +47 -0
  1185. mindspore/profiler/common/singleton.py +28 -0
  1186. mindspore/profiler/common/struct_type.py +118 -0
  1187. mindspore/profiler/common/util.py +472 -0
  1188. mindspore/profiler/common/validator/__init__.py +14 -0
  1189. mindspore/profiler/common/validator/validate_path.py +84 -0
  1190. mindspore/profiler/dynamic_profiler.py +694 -0
  1191. mindspore/profiler/envprofiling.py +254 -0
  1192. mindspore/profiler/parser/__init__.py +14 -0
  1193. mindspore/profiler/parser/aicpu_data_parser.py +272 -0
  1194. mindspore/profiler/parser/ascend_analysis/__init__.py +14 -0
  1195. mindspore/profiler/parser/ascend_analysis/constant.py +71 -0
  1196. mindspore/profiler/parser/ascend_analysis/file_manager.py +180 -0
  1197. mindspore/profiler/parser/ascend_analysis/function_event.py +185 -0
  1198. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +136 -0
  1199. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +131 -0
  1200. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +104 -0
  1201. mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
  1202. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +123 -0
  1203. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
  1204. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +75 -0
  1205. mindspore/profiler/parser/ascend_cluster_generator.py +116 -0
  1206. mindspore/profiler/parser/ascend_communicate_generator.py +314 -0
  1207. mindspore/profiler/parser/ascend_flops_generator.py +116 -0
  1208. mindspore/profiler/parser/ascend_fpbp_generator.py +82 -0
  1209. mindspore/profiler/parser/ascend_hccl_generator.py +271 -0
  1210. mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
  1211. mindspore/profiler/parser/ascend_memory_generator.py +185 -0
  1212. mindspore/profiler/parser/ascend_msprof_exporter.py +282 -0
  1213. mindspore/profiler/parser/ascend_msprof_generator.py +187 -0
  1214. mindspore/profiler/parser/ascend_op_generator.py +334 -0
  1215. mindspore/profiler/parser/ascend_steptrace_generator.py +94 -0
  1216. mindspore/profiler/parser/ascend_timeline_generator.py +545 -0
  1217. mindspore/profiler/parser/base_timeline_generator.py +483 -0
  1218. mindspore/profiler/parser/container.py +229 -0
  1219. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +697 -0
  1220. mindspore/profiler/parser/flops_parser.py +531 -0
  1221. mindspore/profiler/parser/framework_enum.py +111 -0
  1222. mindspore/profiler/parser/framework_parser.py +464 -0
  1223. mindspore/profiler/parser/framework_struct.py +61 -0
  1224. mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
  1225. mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
  1226. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
  1227. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
  1228. mindspore/profiler/parser/hccl_parser.py +573 -0
  1229. mindspore/profiler/parser/hwts_log_parser.py +122 -0
  1230. mindspore/profiler/parser/integrator.py +526 -0
  1231. mindspore/profiler/parser/memory_usage_parser.py +277 -0
  1232. mindspore/profiler/parser/minddata_analyzer.py +800 -0
  1233. mindspore/profiler/parser/minddata_parser.py +186 -0
  1234. mindspore/profiler/parser/minddata_pipeline_parser.py +299 -0
  1235. mindspore/profiler/parser/op_intermediate_parser.py +149 -0
  1236. mindspore/profiler/parser/optime_parser.py +250 -0
  1237. mindspore/profiler/parser/profiler_info.py +213 -0
  1238. mindspore/profiler/parser/step_trace_parser.py +666 -0
  1239. mindspore/profiler/profiler.py +153 -0
  1240. mindspore/profiler/profiling.py +1922 -0
  1241. mindspore/rewrite/__init__.py +28 -0
  1242. mindspore/rewrite/api/__init__.py +17 -0
  1243. mindspore/rewrite/api/node.py +519 -0
  1244. mindspore/rewrite/api/node_type.py +53 -0
  1245. mindspore/rewrite/api/pattern_engine.py +490 -0
  1246. mindspore/rewrite/api/scoped_value.py +181 -0
  1247. mindspore/rewrite/api/symbol_tree.py +497 -0
  1248. mindspore/rewrite/ast_helpers/__init__.py +25 -0
  1249. mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
  1250. mindspore/rewrite/ast_helpers/ast_finder.py +404 -0
  1251. mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
  1252. mindspore/rewrite/ast_helpers/ast_modifier.py +605 -0
  1253. mindspore/rewrite/ast_helpers/ast_replacer.py +79 -0
  1254. mindspore/rewrite/common/__init__.py +19 -0
  1255. mindspore/rewrite/common/config.py +24 -0
  1256. mindspore/rewrite/common/error_log.py +39 -0
  1257. mindspore/rewrite/common/event.py +28 -0
  1258. mindspore/rewrite/common/namer.py +271 -0
  1259. mindspore/rewrite/common/namespace.py +118 -0
  1260. mindspore/rewrite/common/observable.py +44 -0
  1261. mindspore/rewrite/common/observer.py +54 -0
  1262. mindspore/rewrite/node/__init__.py +22 -0
  1263. mindspore/rewrite/node/call_function.py +95 -0
  1264. mindspore/rewrite/node/cell_container.py +139 -0
  1265. mindspore/rewrite/node/control_flow.py +113 -0
  1266. mindspore/rewrite/node/node.py +1428 -0
  1267. mindspore/rewrite/node/node_manager.py +283 -0
  1268. mindspore/rewrite/node/node_topological_manager.py +223 -0
  1269. mindspore/rewrite/parsers/__init__.py +29 -0
  1270. mindspore/rewrite/parsers/arguments_parser.py +63 -0
  1271. mindspore/rewrite/parsers/assign_parser.py +852 -0
  1272. mindspore/rewrite/parsers/attribute_parser.py +57 -0
  1273. mindspore/rewrite/parsers/class_def_parser.py +289 -0
  1274. mindspore/rewrite/parsers/constant_parser.py +104 -0
  1275. mindspore/rewrite/parsers/container_parser.py +88 -0
  1276. mindspore/rewrite/parsers/expr_parser.py +55 -0
  1277. mindspore/rewrite/parsers/for_parser.py +61 -0
  1278. mindspore/rewrite/parsers/function_def_parser.py +84 -0
  1279. mindspore/rewrite/parsers/if_parser.py +85 -0
  1280. mindspore/rewrite/parsers/module_parser.py +117 -0
  1281. mindspore/rewrite/parsers/parser.py +43 -0
  1282. mindspore/rewrite/parsers/parser_register.py +86 -0
  1283. mindspore/rewrite/parsers/return_parser.py +37 -0
  1284. mindspore/rewrite/parsers/while_parser.py +59 -0
  1285. mindspore/rewrite/sparsify/__init__.py +0 -0
  1286. mindspore/rewrite/sparsify/sparse_transformer.py +457 -0
  1287. mindspore/rewrite/sparsify/sparsify.py +112 -0
  1288. mindspore/rewrite/sparsify/utils.py +179 -0
  1289. mindspore/rewrite/symbol_tree/__init__.py +20 -0
  1290. mindspore/rewrite/symbol_tree/symbol_tree.py +1819 -0
  1291. mindspore/rewrite/symbol_tree/symbol_tree_builder.py +76 -0
  1292. mindspore/rewrite/symbol_tree/symbol_tree_dumper.py +142 -0
  1293. mindspore/run_check/__init__.py +20 -0
  1294. mindspore/run_check/_check_version.py +507 -0
  1295. mindspore/run_check/run_check.py +66 -0
  1296. mindspore/safeguard/__init__.py +18 -0
  1297. mindspore/safeguard/rewrite_obfuscation.py +875 -0
  1298. mindspore/scipy/__init__.py +18 -0
  1299. mindspore/scipy/fft.py +264 -0
  1300. mindspore/scipy/linalg.py +919 -0
  1301. mindspore/scipy/ops.py +165 -0
  1302. mindspore/scipy/ops_grad.py +115 -0
  1303. mindspore/scipy/ops_wrapper.py +74 -0
  1304. mindspore/scipy/optimize/__init__.py +20 -0
  1305. mindspore/scipy/optimize/_bfgs.py +230 -0
  1306. mindspore/scipy/optimize/_lagrange.py +201 -0
  1307. mindspore/scipy/optimize/_lbfgs.py +146 -0
  1308. mindspore/scipy/optimize/gradient_optimization_algorithm.py +168 -0
  1309. mindspore/scipy/optimize/line_search.py +370 -0
  1310. mindspore/scipy/optimize/linear_sum_assignment.py +78 -0
  1311. mindspore/scipy/optimize/minimize.py +200 -0
  1312. mindspore/scipy/utils.py +156 -0
  1313. mindspore/scipy/utils_const.py +246 -0
  1314. mindspore/train/__init__.py +48 -0
  1315. mindspore/train/_utils.py +465 -0
  1316. mindspore/train/amp.py +935 -0
  1317. mindspore/train/anf_ir_pb2.py +1517 -0
  1318. mindspore/train/callback/__init__.py +44 -0
  1319. mindspore/train/callback/_backup_and_restore.py +117 -0
  1320. mindspore/train/callback/_callback.py +613 -0
  1321. mindspore/train/callback/_checkpoint.py +814 -0
  1322. mindspore/train/callback/_cluster_monitor.py +201 -0
  1323. mindspore/train/callback/_dataset_graph.py +150 -0
  1324. mindspore/train/callback/_early_stop.py +239 -0
  1325. mindspore/train/callback/_flops_collector.py +239 -0
  1326. mindspore/train/callback/_history.py +92 -0
  1327. mindspore/train/callback/_lambda_callback.py +80 -0
  1328. mindspore/train/callback/_landscape.py +1049 -0
  1329. mindspore/train/callback/_loss_monitor.py +107 -0
  1330. mindspore/train/callback/_lr_scheduler_callback.py +76 -0
  1331. mindspore/train/callback/_on_request_exit.py +298 -0
  1332. mindspore/train/callback/_reduce_lr_on_plateau.py +226 -0
  1333. mindspore/train/callback/_summary_collector.py +1184 -0
  1334. mindspore/train/callback/_tft_register.py +352 -0
  1335. mindspore/train/callback/_time_monitor.py +141 -0
  1336. mindspore/train/checkpoint_pb2.py +233 -0
  1337. mindspore/train/data_sink.py +219 -0
  1338. mindspore/train/dataset_helper.py +692 -0
  1339. mindspore/train/lineage_pb2.py +1260 -0
  1340. mindspore/train/loss_scale_manager.py +213 -0
  1341. mindspore/train/memory_profiling_pb2.py +298 -0
  1342. mindspore/train/metrics/__init__.py +175 -0
  1343. mindspore/train/metrics/accuracy.py +133 -0
  1344. mindspore/train/metrics/auc.py +129 -0
  1345. mindspore/train/metrics/bleu_score.py +170 -0
  1346. mindspore/train/metrics/confusion_matrix.py +700 -0
  1347. mindspore/train/metrics/cosine_similarity.py +109 -0
  1348. mindspore/train/metrics/dice.py +116 -0
  1349. mindspore/train/metrics/error.py +175 -0
  1350. mindspore/train/metrics/fbeta.py +167 -0
  1351. mindspore/train/metrics/hausdorff_distance.py +333 -0
  1352. mindspore/train/metrics/loss.py +97 -0
  1353. mindspore/train/metrics/mean_surface_distance.py +189 -0
  1354. mindspore/train/metrics/metric.py +373 -0
  1355. mindspore/train/metrics/occlusion_sensitivity.py +225 -0
  1356. mindspore/train/metrics/perplexity.py +133 -0
  1357. mindspore/train/metrics/precision.py +160 -0
  1358. mindspore/train/metrics/recall.py +159 -0
  1359. mindspore/train/metrics/roc.py +223 -0
  1360. mindspore/train/metrics/root_mean_square_surface_distance.py +191 -0
  1361. mindspore/train/metrics/topk.py +167 -0
  1362. mindspore/train/mind_ir_pb2.py +1908 -0
  1363. mindspore/train/model.py +2252 -0
  1364. mindspore/train/node_strategy_pb2.py +653 -0
  1365. mindspore/train/print_pb2.py +184 -0
  1366. mindspore/train/profiling_parallel_pb2.py +151 -0
  1367. mindspore/train/serialization.py +3325 -0
  1368. mindspore/train/summary/__init__.py +23 -0
  1369. mindspore/train/summary/_lineage_adapter.py +41 -0
  1370. mindspore/train/summary/_summary_adapter.py +496 -0
  1371. mindspore/train/summary/_writer_pool.py +207 -0
  1372. mindspore/train/summary/enums.py +56 -0
  1373. mindspore/train/summary/summary_record.py +581 -0
  1374. mindspore/train/summary/writer.py +167 -0
  1375. mindspore/train/summary_pb2.py +1165 -0
  1376. mindspore/train/train_thor/__init__.py +20 -0
  1377. mindspore/train/train_thor/convert_utils.py +268 -0
  1378. mindspore/train/train_thor/dataset_helper.py +192 -0
  1379. mindspore/train/train_thor/model_thor.py +257 -0
  1380. mindspore/utils/__init__.py +21 -0
  1381. mindspore/utils/utils.py +60 -0
  1382. mindspore/version.py +1 -0
  1383. mindspore-2.4.0.dist-info/METADATA +352 -0
  1384. mindspore-2.4.0.dist-info/RECORD +1387 -0
  1385. mindspore-2.4.0.dist-info/WHEEL +5 -0
  1386. mindspore-2.4.0.dist-info/entry_points.txt +3 -0
  1387. mindspore-2.4.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2302 @@
+ # Copyright 2020-2022 Huawei Technologies Co., Ltd
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ============================================================================
+ """pooling"""
+ from __future__ import absolute_import
+
+ from mindspore.ops import operations as P
+ from mindspore.ops import functional as F
+ import mindspore.ops as ops
+ from mindspore.ops.function.nn_func import avg_pool2d_ext
+ from mindspore._checkparam import _check_3d_int_or_tuple
+ from mindspore import _checkparam as validator
+ from mindspore.ops.primitive import constexpr, _primexpr
+ from mindspore.common.tensor import Tensor
+ import mindspore.context as context
+ from mindspore.common import dtype as mstype
+ from mindspore.ops.operations.nn_ops import AdaptiveMaxPool2D
+ from mindspore.ops.operations.nn_ops import AdaptiveMaxPool3D, AdaptiveAvgPool3D
+ from mindspore.ops.auto_generate.gen_ops_prim import MaxPoolWithIndices, MaxPoolWithMask
+ from mindspore.nn.cell import Cell
+ from mindspore._c_expression import MSContext
+
+ __all__ = ['AvgPool3d', 'MaxPool3d', 'AvgPool2d', 'MaxPool2d', 'AvgPool1d', 'MaxPool1d', 'FractionalMaxPool2d',
+            'FractionalMaxPool3d', 'AdaptiveAvgPool1d', 'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d',
+            'AdaptiveAvgPool2d', 'AdaptiveAvgPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d', 'LPPool1d',
+            'LPPool2d', 'AvgPool2dExt', 'MaxPool2dExt']
+
+
+ class _PoolNd(Cell):
+     """N-D pooling base class."""
+
+     def __init__(self, kernel_size, stride, pad_mode, data_format="NCHW"):
+         """Initialize _PoolNd."""
+         super(_PoolNd, self).__init__()
+         validator.check_value_type('pad_mode', pad_mode, [str], self.cls_name)
+         self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME', 'PAD'], 'pad_mode', self.cls_name)
+         self.format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.cls_name)
+         if context.get_context("device_target") != "GPU" and self.format == "NHWC":
+             raise ValueError(f"For '{self.cls_name}', the 'NHWC' format is only supported on the GPU target, "
+                              f"but got device target {context.get_context('device_target')}.")
+
+         def _check_int_or_tuple(arg_name, arg_value):
+             validator.check_value_type(arg_name, arg_value, [int, tuple], self.cls_name)
+             error_msg = f"For '{self.cls_name}', the '{arg_name}' must be a positive int number or " \
+                         f"a tuple, but got {arg_value}"
+             if isinstance(arg_value, int):
+                 if arg_value <= 0:
+                     raise ValueError(error_msg)
+             else:
+                 for item in arg_value:
+                     if isinstance(item, int) and item > 0:
+                         continue
+                     raise ValueError(error_msg)
+                 if len(arg_value) == 1:
+                     return arg_value[0]
+             return arg_value
+
+         self.kernel_size = _check_int_or_tuple('kernel_size', kernel_size)
+         self.stride = _check_int_or_tuple('stride', stride)
+
+     def construct(self, *inputs):
+         pass
+
+     def extend_repr(self):
+         return 'kernel_size={kernel_size}, stride={stride}, pad_mode={pad_mode}'.format(**self.__dict__)
+
+
+ @_primexpr
+ def _shape_check(in_shape, prim_name=None):
+     msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
+
+     def _check():
+         if len(in_shape) != 3:
+             raise ValueError(f"{msg_prefix} input must have 3 dimensions, but got {len(in_shape)}")
+
+     _check()
+
+
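The closure above collapses single-element tuples to scalars; a standalone sketch of that normalization (hypothetical helper, not part of the package):

    def _normalize(arg_value):
        # (3,) -> 3; multi-element tuples pass through unchanged
        if isinstance(arg_value, tuple) and len(arg_value) == 1:
            return arg_value[0]
        return arg_value

    assert _normalize((3,)) == 3
    assert _normalize((2, 2)) == (2, 2)
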
+ class LPPool1d(Cell):
+     r"""
+     Applies a 1D LP pooling operation over an input Tensor which can be regarded as a composition of
+     1D input planes.
+
+     Typically the input is of shape :math:`(N_{in}, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`, and the output
+     is of shape :math:`(N_{out}, C_{out}, L_{out})` or :math:`(C_{out}, L_{out})`, with the same number of
+     dimensions as the input; the operation is as follows.
+
+     .. math::
+         f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
+
+     Note:
+         This interface currently does not support Atlas A2 training series products.
+
+     Args:
+         norm_type (Union[int, float]): Type of normalization, represents :math:`p` in the formula, can not be 0.
+
+             - if p = 1, the result is the sum of the elements within the pooling kernel (proportional to average
+               pooling).
+             - if p = :math:`\infty`, the result is the result of maximum pooling.
+
+         kernel_size (int): The size of the kernel window.
+         stride (int): The distance of kernel moving, an int number that represents the width of movement.
+             If the value is None, the default value `kernel_size` is used. Default: ``None`` .
+         ceil_mode (bool): If ``True``, use ceil to calculate output shape.
+             If ``False``, use floor to calculate output shape. Default: ``False`` .
+
+     Inputs:
+         - **x** (Tensor) - Tensor of shape :math:`(N_{in}, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`.
+
+     Outputs:
+         - **output** (Tensor) - LPPool1d result, with shape :math:`(N_{out}, C_{out}, L_{out})` or
+           :math:`(C_{out}, L_{out})`, it has the same data type as `x`, where
+
+           .. math::
+               L_{out} = \left\lfloor\frac{L_{in} - \text{kernel_size}}{\text{stride}} + 1\right\rfloor
+
+     Raises:
+         TypeError: If `x` is not a Tensor.
+         TypeError: If `kernel_size` or `stride` is not an int.
+         TypeError: If `ceil_mode` is not a bool.
+         TypeError: If `norm_type` is neither float nor int.
+         ValueError: If `norm_type` is equal to 0.
+         ValueError: If `kernel_size` or `stride` is less than 1.
+         ValueError: If length of shape of `x` is not equal to 2 or 3.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore as ms
+         >>> import numpy as np
+         >>> a = ms.Tensor(np.arange(2 * 3 * 4).reshape((2, 3, 4)), dtype=ms.float32)
+         >>> net = ms.nn.LPPool1d(norm_type=1, kernel_size=3, stride=1)
+         >>> out = net(a)
+         >>> print(out)
+         [[[ 3.  6.]
+           [15. 18.]
+           [27. 30.]]
+          [[39. 42.]
+           [51. 54.]
+           [63. 66.]]]
+     """
+
+     def __init__(self, norm_type, kernel_size, stride=None, ceil_mode=False):
+         super(LPPool1d, self).__init__()
+         self.norm_type = norm_type
+         self.kernel_size = kernel_size
+         self.stride = stride
+         self.ceil_mode = ceil_mode
+
+     def construct(self, x):
+         return ops.lp_pool1d(x, self.norm_type, self.kernel_size,
+                              self.stride, self.ceil_mode)
+
+
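A numpy-only cross-check of the LPPool1d semantics above (a minimal sketch, independent of MindSpore; the window sums for p = 1 match the docstring example):

    import numpy as np

    def lp_pool1d_ref(x, p, k, s):
        # (sum over each window of x**p) ** (1/p), per the formula above
        n, c, l = x.shape
        l_out = (l - k) // s + 1          # floor((L_in - kernel_size) / stride) + 1
        out = np.empty((n, c, l_out), x.dtype)
        for i in range(l_out):
            win = x[:, :, i * s: i * s + k]
            out[:, :, i] = (win ** p).sum(axis=-1) ** (1.0 / p)
        return out

    x = np.arange(2 * 3 * 4, dtype=np.float32).reshape(2, 3, 4)
    print(lp_pool1d_ref(x, p=1, k=3, s=1)[0, 0])  # [3. 6.], as in the example output
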
+ class LPPool2d(Cell):
+     r"""
+     Applies a 2D LP pooling operation over an input Tensor which can be regarded as a composition of
+     2D input planes.
+
+     Typically the input is of shape :math:`(N, C, H_{in}, W_{in})`, and the output is of shape
+     :math:`(N, C, H_{out}, W_{out})`; the operation is as follows.
+
+     .. math::
+         f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
+
+     Note:
+         This interface currently does not support Atlas A2 training series products.
+
+     Args:
+         norm_type (Union[int, float]): Type of normalization, represents :math:`p` in the formula, can not be 0.
+
+             - if p = 1, the result is the sum of the elements within the pooling kernel (proportional to average
+               pooling).
+             - if p = :math:`\infty`, the result is the result of maximum pooling.
+
+         kernel_size (Union[int, tuple[int]]): The size of the kernel window.
+             The data type of kernel_size must be int and the value represents the height and width,
+             or a tuple of two int numbers that represent height and width respectively.
+         stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
+             the height and width of movement are both stride, or a tuple of two int numbers that
+             represent height and width of movement respectively. If the value is ``None``,
+             the default value `kernel_size` is used. Default: ``None`` .
+         ceil_mode (bool): Whether to use ceil or floor to calculate output shape. Default: ``False`` .
+
+     Inputs:
+         - **x** (Tensor) - Tensor of shape :math:`(N, C, H_{in}, W_{in})`.
+
+     Outputs:
+         - **output** (Tensor) - LPPool2d result, with shape :math:`(N, C, H_{out}, W_{out})`,
+           it has the same data type as `x`, where
+
+           .. math::
+               H_{out} = \left\lfloor\frac{H_{in} - \text{kernel_size}[0]}{\text{stride}[0]} + 1\right\rfloor
+
+           .. math::
+               W_{out} = \left\lfloor\frac{W_{in} - \text{kernel_size}[1]}{\text{stride}[1]} + 1\right\rfloor
+
+     Raises:
+         TypeError: If `x` is not a Tensor.
+         TypeError: If `kernel_size` or `stride` is neither int nor tuple.
+         TypeError: If `ceil_mode` is not a bool.
+         TypeError: If `norm_type` is neither float nor int.
+         ValueError: If `norm_type` is equal to 0.
+         ValueError: If `kernel_size` or `stride` is less than 1.
+         ValueError: If `kernel_size` or `stride` is a tuple whose length is not equal to `2`.
+         ValueError: If length of shape of `x` is not equal to 4.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore as ms
+         >>> import numpy as np
+         >>> a = ms.Tensor(np.arange(2 * 3 * 4 * 5).reshape((2, 3, 4, 5)), dtype=ms.float32)
+         >>> net = ms.nn.LPPool2d(norm_type=1, kernel_size=3, stride=1)
+         >>> out = net(a)
+         >>> print(out)
+         [[[[  54.   63.   72.]
+            [  99.  108.  117.]]
+           [[ 234.  243.  252.]
+            [ 279.  288.  297.]]
+           [[ 414.  423.  432.]
+            [ 459.  468.  477.]]]
+          [[[ 594.  603.  612.]
+            [ 639.  648.  657.]]
+           [[ 774.  783.  792.]
+            [ 819.  828.  837.]]
+           [[ 954.  963.  972.]
+            [ 999. 1008. 1017.]]]]
+     """
+
+     def __init__(self, norm_type, kernel_size, stride=None, ceil_mode=False):
+         super(LPPool2d, self).__init__()
+         self.norm_type = norm_type
+         self.kernel_size = kernel_size
+         self.stride = stride
+         self.ceil_mode = ceil_mode
+
+     def construct(self, x):
+         return ops.lp_pool2d(x, self.norm_type, self.kernel_size,
+                              self.stride, self.ceil_mode)
+
+
+ def _check_maxpool_padding(padding, nd, cls_name):
+     """Calculate maxpool padding before calling the primitive."""
+     validator.check_value_type('padding', padding, (int, tuple, list), cls_name)
+     if isinstance(padding, int):
+         return (0,) * (3 - nd) + (padding,) * nd
+     if isinstance(padding, (tuple, list)):
+         validator.check_non_negative_int_sequence(padding, "padding", cls_name)
+         if len(padding) == 1:
+             return (0,) * (3 - nd) + tuple(padding * nd)
+         if len(padding) != nd:
+             raise ValueError(f"For {cls_name}, the length of padding must be equal to {nd}, "
+                              f"but got {len(padding)}.")
+         return (0,) * (3 - nd) + tuple(padding)
+     return padding
+
+
+ def _cal_dilation(dilation, nd, cls_name):
+     """Check and normalize the dilation."""
+     if isinstance(dilation, int):
+         return dilation
+     if isinstance(dilation, tuple):
+         if len(dilation) == 1:
+             return dilation[0]
+         if len(dilation) == nd:
+             return (3 - nd) * (1,) + dilation
+         if nd == 1:
+             raise ValueError(f"For {cls_name}, the length of 'dilation' must be 1, but got {len(dilation)}.")
+         raise ValueError(f"For {cls_name}, the length of 'dilation' must be 1 or {nd}, but got {len(dilation)}.")
+     raise ValueError(f"For {cls_name}, the 'dilation' must be int or tuple, but got {type(dilation)}.")
+
+
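A sketch of what the two helpers above return, worked through by hand from the code (`nd` is the number of pooled spatial dimensions; leading slots are zero- or one-filled so the 3D primitives can be reused):

    # _check_maxpool_padding(1, 2, "MaxPool2d")      -> (0, 1, 1)
    # _check_maxpool_padding((1, 2), 2, "MaxPool2d") -> (0, 1, 2)
    # _check_maxpool_padding([3], 3, "MaxPool3d")    -> (3, 3, 3)
    # _cal_dilation((2, 2), 2, "MaxPool2d")          -> (1, 2, 2)
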
+ class MaxPool3d(_PoolNd):
+     r"""
+     3D max pooling operation.
+
+     Applies a 3D max pooling over an input Tensor which can be regarded as a composition of 3D planes.
+
+     Typically the input is of shape :math:`(N_{in}, C_{in}, D_{in}, H_{in}, W_{in})`, MaxPool outputs
+     regional maximum in the :math:`(D_{in}, H_{in}, W_{in})`-dimension. Given kernel size
+     :math:`ks = (d_{ker}, h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1, s_2)`, the operation is as follows.
+
+     .. math::
+         \text{output}(N_i, C_j, d, h, w) =
+         \max_{l=0, \ldots, d_{ker}-1} \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
+         \text{input}(N_i, C_j, s_0 \times d + l, s_1 \times h + m, s_2 \times w + n)
+
+     Args:
+         kernel_size (Union[int, tuple[int]]): The size of the kernel used to take the maximum value,
+             an int number or a single-element tuple that represents depth, height and width of the kernel,
+             or a tuple of three int numbers that represent depth, height and width respectively.
+             The value must be a positive integer. Default: ``1`` .
+         stride (Union[int, tuple[int]]): The moving stride of the pooling operation, an int number or a
+             single-element tuple that represents the moving stride of the pooling kernel in the directions of
+             depth, height and width, or a tuple of three int numbers that represent depth, height and width of
+             movement respectively. The value must be a positive integer. If the value is None, the default value
+             `kernel_size` is used. Default: ``1`` .
+         pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+             ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"valid"`` .
+
+             - ``"same"``: Pad the input around its depth/height/width dimension so that the shape of input and
+               output are the same when `stride` is set to ``1``.
+               The amount of padding is calculated by the operator internally. If the amount is even,
+               it is uniformly distributed around the input; if it is odd, the excess amount goes
+               to the front/right/bottom side.
+               If this mode is set, `padding` must be 0.
+             - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+               possible depth, height and width. Extra pixels that could not complete a full stride will
+               be discarded. If this mode is set, `padding` must be 0.
+             - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+               in the depth, height and width dimension is determined by the `padding` parameter.
+               If this mode is set, `padding` must be greater than or equal to 0.
+
+         padding (Union(int, tuple[int], list[int])): Pooling padding value. Default: ``0`` .
+             `padding` can only be an integer or a tuple/list containing one or three integers.
+             If `padding` is an integer or a tuple/list containing one integer, it will be padded in six directions
+             of front, back, top, bottom, left and right of the input. If `padding` is a tuple/list containing
+             three integers, it will be padded in front and back of the input `padding[0]` times, up and down
+             `padding[1]` times, and left and right of the input `padding[2]` times.
+         dilation (Union(int, tuple[int])): The spacing between the elements of the kernel in convolution,
+             used to increase the receptive field of the pooling operation. If it is a tuple, it must contain
+             one or three integers. Default: ``1`` .
+         return_indices (bool): If ``True`` , output is a Tuple of 2 Tensors, representing the maxpool result and
+             where the max values are generated. Otherwise, only the maxpool result is returned.
+             Default: ``False`` .
+         ceil_mode (bool): If ``True``, use ceil to calculate output shape.
+             If ``False``, use floor to calculate output shape. Default: ``False`` .
+
+     Inputs:
+         - **x** (Tensor) - Tensor of shape :math:`(N_{in}, C_{in}, D_{in}, H_{in}, W_{in})` or
+           :math:`(C_{in}, D_{in}, H_{in}, W_{in})`.
+
+     Outputs:
+         If `return_indices` is False, output is a Tensor, with shape
+         :math:`(N_{out}, C_{out}, D_{out}, H_{out}, W_{out})` or :math:`(C_{out}, D_{out}, H_{out}, W_{out})`.
+         It has the same data type as `x`.
+
+         If `return_indices` is True, output is a Tuple of 2 Tensors, representing the maxpool result and where
+         the max values are generated.
+
+         - **output** (Tensor) - Maxpooling result, with shape
+           :math:`(N_{out}, C_{out}, D_{out}, H_{out}, W_{out})` or :math:`(C_{out}, D_{out}, H_{out}, W_{out})`.
+           It has the same data type as `x`.
+         - **argmax** (Tensor) - Index corresponding to the maximum value. Data type is int64.
+
+         If `pad_mode` is set to ``"pad"`` mode, the output shape calculation formula is as follows:
+
+         .. math::
+             D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0] \times
+             (\text{kernel_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor
+
+         .. math::
+             H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1] \times
+             (\text{kernel_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor
+
+         .. math::
+             W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2] \times
+             (\text{kernel_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor
+
+     Raises:
+         ValueError: If length of shape of `x` is not equal to 4 or 5.
+         TypeError: If `kernel_size` , `stride` , `padding` or `dilation` is neither an int nor a tuple.
+         ValueError: If `kernel_size` or `stride` is less than 1.
+         ValueError: If the `padding` parameter is neither an integer nor a tuple of length 3.
+         ValueError: If `pad_mode` is not set to ``"pad"`` while `return_indices` is set to True or `dilation`
+             is set to a value other than 1.
+         ValueError: If `padding` is non-zero when `pad_mode` is not ``"pad"``.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore as ms
+         >>> import mindspore.nn as nn
+         >>> from mindspore import Tensor
+         >>> import numpy as np
+         >>> np_x = np.random.randint(0, 10, [5, 3, 4, 6, 7])
+         >>> x = Tensor(np_x, ms.float32)
+         >>> pool1 = nn.MaxPool3d(kernel_size=2, stride=1, pad_mode="pad", padding=1, dilation=3, return_indices=True)
+         >>> output = pool1(x)
+         >>> print(output[0].shape)
+         (5, 3, 3, 5, 6)
+         >>> print(output[1].shape)
+         (5, 3, 3, 5, 6)
+         >>> pool2 = nn.MaxPool3d(kernel_size=2, stride=1, pad_mode="pad", padding=1, dilation=3, return_indices=False)
+         >>> output2 = pool2(x)
+         >>> print(output2.shape)
+         (5, 3, 3, 5, 6)
+     """
+
+     def __init__(self, kernel_size=1, stride=1, pad_mode="valid", padding=0, dilation=1, return_indices=False,
+                  ceil_mode=False):
+         """Initialize MaxPool3d."""
+         super(MaxPool3d, self).__init__(kernel_size, stride, pad_mode)
+         self.return_indices = return_indices
+         padding = _check_maxpool_padding(padding, 3, self.cls_name)
+         _check_3d_int_or_tuple("padding", padding, self.cls_name, greater_zero=False, ret_five=False)
+         if dilation != 1 or return_indices:
+             self.only_pad = True
+             if pad_mode.upper() != "PAD":
+                 raise ValueError(f"For {self.cls_name}, the pad_mode must be 'pad' when dilation is not 1 "
+                                  f"or return_indices is True, but got pad_mode:{pad_mode}.")
+             self.max_pool = P.MaxPool3DWithArgmax(ksize=kernel_size, strides=stride, pads=padding,
+                                                   dilation=dilation, ceil_mode=ceil_mode)
+         else:
+             self.only_pad = False
+             ceil_mode = None if not ceil_mode else True
+             self.max_pool = P.MaxPool3D(kernel_size=kernel_size, strides=stride, pad_mode=pad_mode,
+                                         pad_list=padding, ceil_mode=ceil_mode)
+
+     def construct(self, x):
+         expand_batch = False
+         if x.ndim == 4:
+             x = x.unsqueeze(0)
+             expand_batch = True
+         out = self.max_pool(x)
+         if expand_batch:
+             if isinstance(out, tuple):
+                 out = (out[0].squeeze(0), out[1].squeeze(0))
+             else:
+                 out = out.squeeze(0)
+         if self.only_pad and not self.return_indices:
+             return out[0]
+         return out
+
+
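A minimal sketch applying the "pad"-mode output formulas above to the MaxPool3d docstring example (kernel 2, stride 1, padding 1, dilation 3; input D, H, W = 4, 6, 7):

    import math

    def pool_out_dim(size, pad, dil, k, s, ceil_mode=False):
        # floor/ceil((size + 2*pad - dil*(k - 1) - 1) / s) + 1
        rnd = math.ceil if ceil_mode else math.floor
        return rnd((size + 2 * pad - dil * (k - 1) - 1) / s) + 1

    print([pool_out_dim(d, 1, 3, 2, 1) for d in (4, 6, 7)])  # [3, 5, 6], matching (5, 3, 3, 5, 6)
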
+ class MaxPool2d(_PoolNd):
+     r"""
+     Applies a 2D max pooling over an input Tensor which can be regarded as a composition of 2D planes.
+
+     Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool2d outputs
+     regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
+     :math:`(h_{ker}, w_{ker})` and stride :math:`(s_0, s_1)`, the operation is as follows.
+
+     .. math::
+         \text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
+         \text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
+
+     Args:
+         kernel_size (Union[int, tuple[int]]): The size of the kernel used to take the max value,
+             an int number or a single-element tuple that represents height and width are both kernel_size,
+             or a tuple of two int numbers that represent height and width respectively.
+             Default: ``1`` .
+         stride (Union[int, tuple[int]]): The distance of kernel moving, an int number or a single-element tuple
+             that represents the height and width of movement are both stride, or a tuple of two int numbers that
+             represent height and width of movement respectively. Default: ``1`` .
+         pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+             ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"valid"`` .
+
+             - ``"same"``: Pad the input around its edges so that the shape of input and output
+               are the same when `stride` is set to ``1``.
+               The amount of padding is calculated by the operator internally. If the amount is even, it is
+               uniformly distributed around the input; if it is odd, the excess amount goes to the
+               right/bottom side. If this mode is set, `padding` must be 0.
+             - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+               possible height and width. Extra pixels that could not complete a full stride will
+               be discarded. If this mode is set, `padding` must be 0.
+             - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+               in the height and width directions is determined by the `padding` parameter.
+               If this mode is set, `padding` must be greater than or equal to 0.
+
+         padding (Union(int, tuple[int], list[int])): Specifies the padding value of the pooling operation.
+             Default: ``0`` . `padding` can only be an integer or a tuple/list containing one or two integers. If
+             `padding` is an integer or a tuple/list containing one integer, it will be padded `padding` times in
+             the four directions of the input. If `padding` is a tuple/list containing two integers, it will be
+             padded `padding[0]` times in the up-down direction of the input and `padding[1]` times in the
+             left-right direction of the input.
+         dilation (Union(int, tuple[int])): The spacing between the elements of the kernel in convolution,
+             used to increase the receptive field of the pooling operation. If it is a tuple, it must contain
+             one or two integers. Default: ``1`` .
+         return_indices (bool): If ``True`` , the function will return both the result of max pooling and the
+             indices of the max elements. Default: ``False`` .
+         ceil_mode (bool): If ``True`` , use ceil to compute the output shape instead of floor.
+             Default: ``False`` .
+         data_format (str): The optional value for data format, is ``'NHWC'`` or ``'NCHW'`` .
+             Default: ``'NCHW'`` .
+
+     Inputs:
+         - **x** (Tensor) - Tensor of shape :math:`(N,C_{in},H_{in},W_{in})` or :math:`(C_{in},H_{in},W_{in})`.
+
+     Outputs:
+         If `return_indices` is False, output is a Tensor, with shape :math:`(N, C, H_{out}, W_{out})` or
+         :math:`(C_{out}, H_{out}, W_{out})`. It has the same data type as `x`.
+
+         If `return_indices` is True, output is a Tuple of 2 Tensors, representing the maxpool result and where
+         the max values are generated.
+
+         - **output** (Tensor) - Maxpooling result, with shape :math:`(N_{out}, C_{out}, H_{out}, W_{out})` or
+           :math:`(C_{out}, H_{out}, W_{out})`. It has the same data type as `x`.
+         - **argmax** (Tensor) - Index corresponding to the maximum value. Data type is int64.
+
+         If `pad_mode` is set to ``"pad"`` mode, the output shape calculation formula is as follows:
+
+         .. math::
+             H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{padding[0]} - \text{dilation[0]}
+             \times (\text{kernel_size[0]} - 1) - 1}{\text{stride[0]}} + 1\right\rfloor
+
+         .. math::
+             W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{padding[1]} - \text{dilation[1]}
+             \times (\text{kernel_size[1]} - 1) - 1}{\text{stride[1]}} + 1\right\rfloor
+
+     Raises:
+         TypeError: If `kernel_size` or `stride` is neither int nor tuple.
+         ValueError: If `pad_mode` is not ``"valid"``, ``"same"`` or ``"pad"``, case-insensitive.
+         ValueError: If `data_format` is neither ``'NCHW'`` nor ``'NHWC'`` .
+         ValueError: If `kernel_size` or `stride` is less than 1.
+         ValueError: If length of shape of `x` is not equal to 3 or 4.
+         ValueError: If `pad_mode` is not ``"pad"`` and any of the `padding`, `dilation`, `return_indices` or
+             `ceil_mode` parameters is not set to its default value.
+         ValueError: If the length of the tuple/list `padding` parameter is not 2.
+         ValueError: If the length of the tuple `dilation` parameter is not 2.
+         ValueError: If the `dilation` parameter is neither an integer nor a tuple.
+         ValueError: If `pad_mode` is ``"pad"`` and `data_format` is ``'NHWC'``.
+         ValueError: If `padding` is non-zero when `pad_mode` is not ``"pad"``.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore as ms
+         >>> import numpy as np
+         >>> pool = ms.nn.MaxPool2d(kernel_size=3, stride=1)
+         >>> x = ms.Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), ms.float32)
+         >>> output = pool(x)
+         >>> print(output.shape)
+         (1, 2, 2, 2)
+         >>> np_x = np.random.randint(0, 10, [5, 3, 4, 5])
+         >>> x = ms.Tensor(np_x, ms.float32)
+         >>> pool2 = ms.nn.MaxPool2d(kernel_size=2, stride=1, pad_mode="pad", padding=1, dilation=1, return_indices=True)
+         >>> output = pool2(x)
+         >>> print(output[0].shape)
+         (5, 3, 5, 6)
+         >>> print(output[1].shape)
+         (5, 3, 5, 6)
+     """
+
+     def __init__(self, kernel_size=1, stride=1, pad_mode="valid", padding=0, dilation=1, return_indices=False,
+                  ceil_mode=False, data_format="NCHW"):
+         """Initialize MaxPool2d."""
+         super(MaxPool2d, self).__init__(kernel_size, stride, pad_mode, data_format)
+         self.return_indices = return_indices
+         if pad_mode.upper() == 'PAD':
+             if self.format == "NHWC":
+                 raise ValueError(f"For '{self.cls_name}', the 'NHWC' format is not supported "
+                                  f"when 'pad_mode' is 'pad'.")
+             self.use_pad = True
+             if isinstance(self.kernel_size, tuple):
+                 _check_tuple_length(self.kernel_size, 'kernel_size', 2, self.cls_name)
+                 kernel_size = (1,) + self.kernel_size
+             elif isinstance(self.kernel_size, int):
+                 kernel_size = (1, self.kernel_size, self.kernel_size)
+             if isinstance(self.stride, tuple):
+                 _check_tuple_length(self.stride, 'stride', 2, self.cls_name)
+                 stride = (1,) + self.stride
+             elif isinstance(self.stride, int):
+                 stride = (1, self.stride, self.stride)
+             self.padding = _check_maxpool_padding(padding, 2, self.cls_name)
+             dilation = _cal_dilation(dilation, 2, self.cls_name)
+             self.max_pool = P.MaxPool3DWithArgmax(ksize=kernel_size, strides=stride, pads=self.padding,
+                                                   dilation=dilation, ceil_mode=ceil_mode)
+         else:
+             self.use_pad = False
+             if padding != 0 or dilation != 1 or return_indices or ceil_mode:
+                 raise ValueError(f"For MaxPool2d, the parameters 'padding', 'dilation', 'return_indices' and "
+                                  f"'ceil_mode' can not be set to non-default values when pad_mode is not 'pad', "
+                                  f"but got pad_mode:{pad_mode}.")
+             self.max_pool = P.MaxPool(kernel_size=self.kernel_size,
+                                       strides=self.stride,
+                                       pad_mode=self.pad_mode,
+                                       data_format=self.format)
+
+     def construct(self, x):
+         expand_batch = False
+         if x.ndim == 3:
+             x = x.unsqueeze(0)
+             expand_batch = True
+         if self.use_pad:
+             x = x.unsqueeze(2)
+             out = self.max_pool(x)
+             if isinstance(out, tuple):
+                 out = out[0].squeeze(2), out[1].squeeze(2)
+             else:
+                 out = out.squeeze(2)
+         else:
+             out = self.max_pool(x)
+         if expand_batch:
+             if isinstance(out, tuple):
+                 out = (out[0].squeeze(0), out[1].squeeze(0))
+             else:
+                 out = out.squeeze(0)
+         if self.use_pad and not self.return_indices:
+             return out[0]
+         return out
+
+
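A shape walk-through of the 'pad' branch above (input shape taken from the docstring example): the 2D input is lifted to 3D with a dummy depth axis so that P.MaxPool3DWithArgmax can serve both MaxPool2d and MaxPool3d, then the depth axis is squeezed away.

    # x: (5, 3, 4, 5)  --unsqueeze(2)-->  (5, 3, 1, 4, 5)
    #    ksize (1, 2, 2), strides (1, 1, 1), pads (0, 1, 1), dilation 1
    # out: (5, 3, 1, 5, 6)  --squeeze(2)-->  (5, 3, 5, 6)
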
+ class MaxPool2dExt(Cell):
+     r"""
+     Applies a 2D max pooling over an input Tensor which can be regarded as a composition of 2D planes.
+
+     Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool2d outputs
+     regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
+     :math:`(h_{ker}, w_{ker})` and stride :math:`(s_0, s_1)`, the operation is as follows.
+
+     .. math::
+         \text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
+         \text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
+
+     .. warning::
+         Only supported on Atlas training series products.
+
+     Args:
+         kernel_size (Union[int, tuple[int]]): The size of the kernel used to take the max value,
+             an int number or a single-element tuple that represents height and width are both kernel_size,
+             or a tuple of two int numbers that represent height and width respectively.
+             Default: ``1`` .
+         stride (Union[int, tuple[int], None]): The distance of kernel moving, an int number or a single-element
+             tuple that represents the height and width of movement are both stride, or a tuple of two int
+             numbers that represent height and width of movement respectively.
+             Default: ``None`` , which indicates the moving step is `kernel_size` .
+         padding (Union(int, tuple[int], list[int])): Specifies the padding value of the pooling operation.
+             Default: ``0`` . `padding` can only be an integer or a tuple/list containing one or two integers. If
+             `padding` is an integer or a tuple/list containing one integer, it will be padded `padding` times in
+             the four directions of the input. If `padding` is a tuple/list containing two integers, it will be
+             padded `padding[0]` times in the up-down direction of the input and `padding[1]` times in the
+             left-right direction of the input.
+         dilation (Union(int, tuple[int])): The spacing between the elements of the kernel in convolution,
+             used to increase the receptive field of the pooling operation. If it is a tuple, it must contain
+             one or two integers. Default: ``1`` .
+         return_indices (bool): If ``True`` , the function will return both the result of max pooling and the
+             indices of the max elements. Default: ``False`` .
+         ceil_mode (bool): If ``True`` , use ceil to compute the output shape instead of floor.
+             Default: ``False`` .
+
+     Inputs:
+         - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
+
+     Outputs:
+         If `return_indices` is ``False`` , return a Tensor `output`, else return a tuple (`output`, `argmax`).
+
+         - **output** (Tensor) - Maxpooling result, with shape :math:`(N_{out}, C_{out}, H_{out}, W_{out})`.
+           It has the same data type as `input`.
+         - **argmax** (Tensor) - Index corresponding to the maximum value. Data type is int32.
+
+         .. math::
+             H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{padding[0]} - \text{dilation[0]}
+             \times (\text{kernel_size[0]} - 1) - 1}{\text{stride[0]}} + 1\right\rfloor
+
+         .. math::
+             W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{padding[1]} - \text{dilation[1]}
+             \times (\text{kernel_size[1]} - 1) - 1}{\text{stride[1]}} + 1\right\rfloor
+
+     Raises:
+         TypeError: If `input` is not a Tensor.
+         ValueError: If length of shape of `input` is not equal to 4.
+         TypeError: If `kernel_size` , `stride` , `padding` or `dilation` is not int or tuple.
+         ValueError: If `kernel_size`, `stride` or `dilation` is less than 1.
+         ValueError: If `dilation` is not all 1.
+         ValueError: If `padding` is less than 0.
+         ValueError: If `padding` is more than half of `kernel_size`.
+         TypeError: If `ceil_mode` is not bool.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore as ms
+         >>> import numpy as np
+         >>> pool = ms.mint.nn.MaxPool2d(kernel_size=3, stride=1)
+         >>> input = ms.Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), ms.float32)
+         >>> output = pool(input)
+         >>> print(output.shape)
+         (1, 2, 2, 2)
+     """
+
+     def __init__(self, kernel_size=1, stride=None, padding=0, dilation=1, return_indices=False,
+                  ceil_mode=False):
+         """Initialize MaxPool2dExt."""
+         super(MaxPool2dExt, self).__init__()
+         self.return_indices = return_indices
+         strides = stride if (stride is not None) else kernel_size
+         if return_indices:
+             self.max_pool_func_ = MaxPoolWithIndices(kernel_size, strides, padding, dilation, ceil_mode)
+         else:
+             self.max_pool_func_ = MaxPoolWithMask(kernel_size, strides, padding, dilation, ceil_mode)
+
+     def construct(self, input):
+         out, indices = self.max_pool_func_(input)
+         if self.return_indices:
+             return out, indices
+         return out
+
+
+ class MaxPool1d(_PoolNd):
701
+ r"""
702
+ Applies a 1D max pooling over an input Tensor which can be regarded as a composition of 1D planes.
703
+
704
+ Typically the input is of shape :math:`(N_{in}, C_{in}, L_{in})`, MaxPool1d outputs
705
+ regional maximum in the :math:`(L_{in})`-dimension. Given `kernel size`
706
+ :math:`ks = (l_{ker})` and `stride` :math:`s = (s_0)`, the operation is as follows:
707
+
708
+ .. math::
709
+ \text{output}(N_i, C_j, l) = \max_{n=0, \ldots, l_{ker}-1}
710
+ \text{input}(N_i, C_j, s_0 \times l + n)
711
+
712
+ Args:
713
+ kernel_size (int): The size of kernel used to take the max value, Default: ``1`` .
714
+ stride (int): The distance of kernel moving, an int number that represents
715
+ the width of movement is stride, Default: ``1`` .
716
+ pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
717
+ ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"valid"`` .
718
+
719
+ - ``"same"``: Pad the input at the begin and end so that the shape of input and output
720
+ are the same when `stride` is set to ``1``.
721
+ The amount of padding to is calculated by the operator internally. If the amount is even, it is
722
+ uniformly distributed around the input, if it is odd, the excess padding is goes to the right side.
723
+ If this mode is set, `padding` must be 0.
724
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
725
+ possible length. Extra pixels that could not complete a full stride will
726
+ be discarded. If this mode is set, `padding` must be 0.
727
+ - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
728
+ at the begin and end is determined by the `padding` parameter.
729
+ If this mode is set, `padding` must be greater than or equal to 0.
730
+
731
+ padding (Union(int, tuple[int], list[int])): Padding value for the pooling. Default value is ``0``.
732
+ padding can only be an integer or a tuple/list containing a single integer, in which case padding times or
733
+ padding[0] times are padded on both sides of the input.
734
+ dilation (Union(int, tuple[int])): The spacing between the elements of the kernel in convolution,
735
+ used to increase the receptive field of the pooling operation. If it is a tuple, its length can only be 1.
736
+ Default: ``1`` .
737
+ return_indices (bool): If ``True`` , the function will return both the result of max pooling and the indices of
738
+ the max elements. Default: ``False`` .
739
+ ceil_mode (bool): If True, use ceil to compute the output shape instead of floor. Default: ``False`` .
740
+
741
+ Inputs:
742
+ - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`.
743
+
744
+ Outputs:
745
+ If `return_indices` is False, output is a Tensor, with shape :math:`(N, C_{out}, L_{out})` or
746
+ :math:`(C_{out}, L_{out})`. It has the same data type as `x`.
747
+
748
+ If `return_indices` is True, output is a Tuple of 2 Tensors, representing the maxpool result and where
749
+ the max values are generated.
750
+
751
+ - **output** (Tensor) - Maxpooling result, with shape :math:`(N, C_{out}, L_{out})` or
752
+ :math:`(C_{out}, L_{out})`. It has the same data type as `x`.
753
+ - **argmax** (Tensor) - Index corresponding to the maximum value. Data type is int64.
754
+
755
+ If `pad_mode` is in `pad` mode, the output shape calculation formula is as follows:
756
+
757
+ .. math::
758
+ L_{out} = \left\lfloor \frac{L_{in} + 2 \times \text{padding} - \text{dilation}
759
+ \times (\text{kernel_size} - 1) - 1}{\text{stride}} + 1\right\rfloor
760
+
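+     For example (an illustrative calculation): with :math:`L_{in} = 4`, `kernel_size=2`, `stride=1`,
+     `padding=1` and `dilation=1`, the formula gives
+     :math:`L_{out} = \left\lfloor (4 + 2 - 1 \times (2 - 1) - 1)/1 + 1 \right\rfloor = 5`,
+     which matches the second example below.
+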
+     Raises:
+         TypeError: If `kernel_size` or `stride` is not an int.
+         ValueError: If `pad_mode` is not ``"valid"``, ``"same"`` or ``"pad"``, case-insensitive.
+         ValueError: If `kernel_size` or `stride` is less than 1.
+         ValueError: If length of shape of `x` is not equal to 2 or 3.
+         ValueError: If `pad_mode` is not ``"pad"`` and the `padding`, `dilation`, `return_indices` or
+             `ceil_mode` parameter is not set to its default value.
+         ValueError: If the length of the tuple/list `padding` parameter is not 1.
+         ValueError: If the length of the tuple `dilation` parameter is not 1.
+         ValueError: If the `dilation` parameter is neither an integer nor a tuple.
+         ValueError: If `padding` is non-zero when `pad_mode` is not ``"pad"``.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore as ms
+         >>> import mindspore.nn as nn
+         >>> import numpy as np
+         >>> mpool1 = nn.MaxPool1d(kernel_size=3, stride=1)
+         >>> x = ms.Tensor(np.random.randint(0, 10, [1, 2, 4]), ms.float32)
+         >>> output = mpool1(x)
+         >>> result = output.shape
+         >>> print(result)
+         (1, 2, 2)
+         >>> np_x = np.random.randint(0, 10, [5, 3, 4])
+         >>> x = ms.Tensor(np_x, ms.float32)
+         >>> mpool2 = nn.MaxPool1d(kernel_size=2, stride=1, pad_mode="pad", padding=1, dilation=1, return_indices=True)
+         >>> output = mpool2(x)
+         >>> print(output[0].shape)
+         (5, 3, 5)
+         >>> print(output[1].shape)
+         (5, 3, 5)
+     """
+
+     def __init__(self, kernel_size=1, stride=1, pad_mode="valid", padding=0, dilation=1, return_indices=False,
+                  ceil_mode=False):
+         """Initialize MaxPool1d."""
+         super(MaxPool1d, self).__init__(kernel_size, stride, pad_mode)
+         validator.check_int(kernel_size, 1, validator.GE, "kernel_size", self.cls_name)
+         validator.check_int(stride, 1, validator.GE, "stride", self.cls_name)
+         self.kernel_size = (1, kernel_size)
+         self.stride = (1, stride)
+         self.return_indices = return_indices
+         if pad_mode.upper() == "PAD":
+             self.use_pad = True
+             self.kernel_size = (1, 1, kernel_size)
+             self.stride = (1, 1, stride)
+             self.padding = _check_maxpool_padding(padding, 1, self.cls_name)
+             dilation = _cal_dilation(dilation, 1, self.cls_name)
+             self.max_pool = P.MaxPool3DWithArgmax(ksize=self.kernel_size, strides=self.stride, pads=self.padding,
+                                                   dilation=dilation, ceil_mode=ceil_mode)
+         else:
+             self.use_pad = False
+             if padding != 0 or dilation != 1 or return_indices or ceil_mode:
+                 raise ValueError(f"For MaxPool1d, the parameters 'padding', 'dilation', 'return_indices' and "
+                                  f"'ceil_mode' cannot be set to non-default values when pad_mode is not 'pad', "
+                                  f"but got pad_mode:{pad_mode}.")
+             self.max_pool = P.MaxPool(kernel_size=self.kernel_size,
+                                       strides=self.stride,
+                                       pad_mode=self.pad_mode)
+             self.shape = F.shape
+             self.reduce_mean = P.ReduceMean(keep_dims=True)
+             self.expand = P.ExpandDims()
+             self.squeeze = P.Squeeze(2)
+
+     def construct(self, x):
+         expand_batch = False
+         if x.ndim == 2:
+             x = x.unsqueeze(0)
+             expand_batch = True
+         if self.use_pad:
+             x = x.unsqueeze(2).unsqueeze(3)
+             output = self.max_pool(x)
+             if isinstance(output, tuple):
+                 output = output[0].squeeze(3).squeeze(2), output[1].squeeze(3).squeeze(2)
+             else:
+                 output = output.squeeze(3).squeeze(2)
+         else:
+             _shape_check(self.shape(x), self.cls_name)
+             x = self.expand(x, 2)
+             output = self.max_pool(x)
+             output = self.squeeze(output)
+         if expand_batch:
+             if isinstance(output, tuple):
+                 output = (output[0].squeeze(0), output[1].squeeze(0))
+             else:
+                 output = output.squeeze(0)
+         if self.use_pad and not self.return_indices:
+             return output[0]
+         return output
+
+
+ def _cal_padding(padding, cls_name, nd):
+     """Calculate padding before calling the primitive."""
+     validator.check_value_type('padding', padding, (int, tuple, list), cls_name)
+     if isinstance(padding, int):
+         padding = (0, 0) * (3 - nd) + (padding,) * nd * 2
+     elif isinstance(padding, (tuple, list)):
+         validator.check_non_negative_int_sequence(padding, "padding", cls_name)
+         if len(padding) == nd:
+             padding_start = (0, 0) * (3 - nd)
+             padding_end = tuple(padding[i // 2] for i in range(nd * 2))
+             padding = padding_start + padding_end
+         elif len(padding) == 1:
+             padding = (0, 0) * (3 - nd) + tuple(padding * nd * 2)
+         else:
+             if nd == 1:
+                 raise ValueError(f"For {cls_name}, the padding must be an int or a tuple/list containing one int, "
+                                  f"but got a tuple/list with length:{len(padding)}.")
+             raise ValueError(f"For {cls_name}, the padding must be an int or a tuple/list containing 1 or {nd} ints, "
+                              f"but got a tuple/list with length:{len(padding)}.")
+     return padding
+
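+ # Example values (illustrative): with nd=2, _cal_padding(1, 'AvgPool2d', 2)
+ # returns (0, 0, 1, 1, 1, 1) and _cal_padding((1, 2), 'AvgPool2d', 2) returns
+ # (0, 0, 1, 1, 2, 2); the leading (0, 0) pairs cover the extra leading spatial
+ # dimensions that the underlying 3D primitive adds in front of the pooled ones.
+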
+
+ def _check_tuple_length(arg_name, prim_name, length, cls_name):
+     """Check the tuple length."""
+     if len(arg_name) != length:
+         raise ValueError(f"For {cls_name}, the length of {prim_name} must be equal to {length}, "
+                          f"but got {len(arg_name)}.")
+     return arg_name
+
+
+ class AvgPool3d(_PoolNd):
+     r"""
+     Applies a 3D average pooling over an input Tensor which can be regarded as a composition of 3D input planes.
+     Typically, the input is of shape :math:`(N_{in}, C_{in}, D_{in}, H_{in}, W_{in})`, and AvgPool3d outputs
+     regional average in the :math:`(D_{in}, H_{in}, W_{in})`-dimension. Given kernel size
+     :math:`ks = (d_{ker}, h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1, s_2)`, the operation is as follows.
+
+     .. warning::
+         `kernel_size` is in the range [1, 255]. `stride` is in the range [1, 63].
+
+     .. math::
+         \text{output}(N_i, C_j, d, h, w) =
+         \frac{1}{d_{ker} * h_{ker} * w_{ker}} \sum_{l=0}^{d_{ker}-1} \sum_{m=0}^{h_{ker}-1} \sum_{n=0}^{w_{ker}-1}
+         \text{input}(N_i, C_j, s_0 \times d + l, s_1 \times h + m, s_2 \times w + n)
+
+     Note:
+         This interface currently does not support Atlas A2 training series products.
+
+     Args:
+         kernel_size (Union[int, tuple[int]], optional): The size of kernel used to take the average value,
+             can be an int number or a single element tuple that represents depth, height and width, or a tuple of
+             three positive integers that represent depth, height and width respectively. Default: ``1``.
+         stride (Union[int, tuple[int]], optional): The distance of kernel moving, can be a positive int or a
+             single element tuple that represents the depth, height and width of movement, or a tuple of three
+             positive integers that represent depth, height and width of movement respectively. If the value is
+             None, the default value `kernel_size` is used. Default: ``1``.
+         pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+             ``"same"``, ``"valid"`` or ``"pad"``. Default: ``"valid"``.
+
+             - ``"same"``: Pad the input around its depth/height/width dimension so that the shape of input and
+               output are the same when `stride` is set to ``1``.
+               The amount of padding is calculated by the operator internally. If the amount is even,
+               it is uniformly distributed around the input; if it is odd, the excess amount goes
+               to the front/right/bottom side.
+               If this mode is set, `padding` must be 0.
+             - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+               possible depth, height and width. Extra pixels that could not complete a full stride will
+               be discarded. If this mode is set, `padding` must be 0.
+             - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+               in the depth, height and width dimension is determined by the `padding` parameter.
+               If this mode is set, `padding` must be greater than or equal to 0.
+
+         padding (Union(int, tuple[int], list[int]), optional): Pooling padding value; only ``"pad"`` mode can be
+             set to non-zero. Default: ``0``. Only the following paddings are supported:
+
+             - If `padding` is an integer or a tuple/list containing one integer, it will be padded in the six
+               directions of front, back, top, bottom, left and right of the input.
+
+             - If `padding` is a tuple/list containing three integers, it will be padded `padding[0]` times in the
+               front and back of the input, `padding[1]` times up and down, and `padding[2]` times in the left
+               and right of the input.
+
+         ceil_mode (bool, optional): If ``True``, use ceil to compute the output shape instead of floor.
+             Default: ``False``.
+         count_include_pad (bool, optional): If ``True``, averaging calculation will include the zero-padding.
+             Default: ``True``.
+         divisor_override (int, optional): If it is specified as a non-zero parameter, this parameter will be used
+             as the divisor in the average calculation. Otherwise, `kernel_size` will be used as the divisor.
+             Default: ``None``.
+
+     Inputs:
+         - **x** (Tensor) - Tensor of shape :math:`(N, C, D_{in}, H_{in}, W_{in})` or
+           :math:`(C, D_{in}, H_{in}, W_{in})`.
+           Currently supports float16, float32 and float64 data types.
+
+     Outputs:
+         Tensor, with shape :math:`(N, C, D_{out}, H_{out}, W_{out})` or
+         :math:`(C, D_{out}, H_{out}, W_{out})`, with the same data type as `x`.
+
+         If `pad_mode` is set to ``"pad"``, the output shape calculation formula is as follows:
+
+     .. math::
+         D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] -
+             \text{kernel_size}[0]}{\text{stride}[0]} + 1\right\rfloor
+
+     .. math::
+         H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] -
+             \text{kernel_size}[1]}{\text{stride}[1]} + 1\right\rfloor
+
+     .. math::
+         W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] -
+             \text{kernel_size}[2]}{\text{stride}[2]} + 1\right\rfloor
+
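+     For example (an illustrative calculation): with :math:`D_{in} = 7`, `kernel_size=4`, `stride=2` and
+     `padding[0]=2`, the formula gives
+     :math:`D_{out} = \left\lfloor (7 + 2 \times 2 - 4)/2 + 1 \right\rfloor = 4`,
+     which matches the second example below.
+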
+     Raises:
+         TypeError: If `kernel_size` is neither an int nor a tuple.
+         TypeError: If `stride` is neither an int nor a tuple.
+         TypeError: If `padding` is neither an int nor a tuple/list.
+         TypeError: If `ceil_mode` or `count_include_pad` is not a bool.
+         TypeError: If `divisor_override` is not an int.
+         ValueError: If numbers in `kernel_size` or `stride` are not positive.
+         ValueError: If `kernel_size` or `stride` is a tuple whose length is not equal to 3.
+         ValueError: If `padding` is a tuple/list whose length is neither 1 nor 3.
+         ValueError: If element of `padding` is less than 0.
+         ValueError: If length of shape of `x` is neither 4 nor 5.
+         ValueError: If `divisor_override` is less than or equal to 0.
+         ValueError: If `padding` is non-zero when `pad_mode` is not ``"pad"``.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore as ms
+         >>> pool = ms.nn.AvgPool3d(kernel_size=3, stride=1)
+         >>> x = ms.ops.randn(1, 2, 4, 4, 5).astype(ms.float32)
+         >>> output = pool(x)
+         >>> print(output.shape)
+         (1, 2, 2, 2, 3)
+         >>> x1 = ms.ops.randn(6, 5, 7, 7, 5).astype(ms.float32)
+         >>> pool2 = ms.nn.AvgPool3d(4, stride=2, pad_mode="pad", padding=(2, 2, 1), divisor_override=10)
+         >>> output2 = pool2(x1)
+         >>> print(output2.shape)
+         (6, 5, 4, 4, 2)
+     """
+
+     def __init__(self, kernel_size=1, stride=1, pad_mode="valid", padding=0, ceil_mode=False, count_include_pad=True,
+                  divisor_override=None):
+         """Initialize AvgPool3d."""
+         super(AvgPool3d, self).__init__(kernel_size, stride, pad_mode)
+         padding = _cal_padding(padding, self.cls_name, 3)
+         if divisor_override is not None and divisor_override <= 0:
+             raise ValueError(f"For '{self.cls_name}', the 'divisor_override' must be > 0, but got {divisor_override}.")
+         divisor_override = 0 if divisor_override is None else divisor_override
+         self.avg_pool = P.AvgPool3D(self.kernel_size, self.stride, pad_mode, padding, ceil_mode, count_include_pad,
+                                     divisor_override)
+
+     def construct(self, x):
+         expand_batch = False
+         if len(x.shape) == 4:
+             x = x.unsqueeze(0)
+             expand_batch = True
+         out = self.avg_pool(x)
+         if expand_batch:
+             out = out.squeeze(0)
+         return out
+
+
+ class AvgPool2dExt(Cell):
+     r"""
+     Applies a 2D average pooling over an input Tensor which can be regarded as
+     a composition of 2D input planes.
+
+     For details, please refer to :func:`mindspore.mint.nn.functional.avg_pool2d`.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import numpy as np
+         >>> from mindspore import Tensor, nn
+         >>> from mindspore import dtype as mstype
+         >>> x = Tensor(np.arange(1 * 3 * 3 * 4).reshape(1, 3, 3, 4), mstype.float32)
+         >>> m = nn.AvgPool2dExt(kernel_size=2, stride=1)
+         >>> output = m(x)
+         >>> print(output)
+         [[[[ 2.5  3.5  4.5]
+            [ 6.5  7.5  8.5]]
+           [[14.5 15.5 16.5]
+            [18.5 19.5 20.5]]
+           [[26.5 27.5 28.5]
+            [30.5 31.5 32.5]]]]
+     """
+
+     def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
+                  count_include_pad=True, divisor_override=None):
+         super(AvgPool2dExt, self).__init__()
+         self.kernel_size = kernel_size
+         self.stride = stride
+         self.padding = padding
+         self.ceil_mode = ceil_mode
+         self.count_include_pad = count_include_pad
+         self.divisor_override = divisor_override
+
+     def construct(self, input):
+         return avg_pool2d_ext(input, self.kernel_size, self.stride, self.padding,
+                               self.ceil_mode, self.count_include_pad, self.divisor_override)
+
+
+ class AvgPool2d(_PoolNd):
+     r"""
+     Applies a 2D average pooling over an input Tensor which can be regarded as a composition of 2D input planes.
+
+     Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, AvgPool2d outputs
+     regional average in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
+     :math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`, the operation is as follows:
+
+     .. math::
+         \text{output}(N_i, C_j, h, w) = \frac{1}{h_{ker} * w_{ker}} \sum_{m=0}^{h_{ker}-1} \sum_{n=0}^{w_{ker}-1}
+         \text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
+
+     Note:
+         This interface currently does not support Atlas A2 training series products.
+
+     Args:
+         kernel_size (Union[int, tuple[int]]): The size of kernel used to take the average value.
+             `kernel_size` must be an int or a single element tuple, in which case the value represents both
+             height and width, or a tuple of two int numbers that represent height and width respectively.
+             Default: ``1``.
+         stride (Union[int, tuple[int]]): The distance of kernel moving, an int number or a single element tuple,
+             in which case the height and width of movement are both `stride`, or a tuple of two int numbers that
+             represent height and width of movement respectively. Default: ``1``.
+         pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+             ``"same"``, ``"valid"`` or ``"pad"``. Default: ``"valid"``.
+
+             - ``"same"``: Pad the input around its edges so that the shape of input and output
+               are the same when `stride` is set to ``1``.
+               The amount of padding is calculated by the operator internally. If the amount is even, it is
+               uniformly distributed around the input; if it is odd, the excess amount goes to the
+               right/bottom side. If this mode is set, `padding` must be 0.
+             - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+               possible height and width. Extra pixels that could not complete a full stride will
+               be discarded. If this mode is set, `padding` must be 0.
+             - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+               in the height and width directions is determined by the `padding` parameter.
+               If this mode is set, `padding` must be greater than or equal to 0.
+
+         padding (Union(int, tuple[int], list[int])): Pooling padding value; only ``"pad"`` mode can be set to
+             non-zero. Default: ``0``. `padding` can only be an integer or a tuple/list containing one or two
+             integers. If `padding` is an integer or a tuple/list containing one integer, it will be padded
+             `padding` times in the four directions of the input. If `padding` is a tuple/list containing two
+             integers, it will be padded `padding[0]` times in the up-down direction of the input and `padding[1]`
+             times in the left-right direction of the input.
+         ceil_mode (bool): If ``True``, use ceil to compute the output shape instead of floor. Default: ``False``.
+         count_include_pad (bool): If ``True``, averaging calculation will include the zero-padding.
+             Default: ``True``.
+         divisor_override (int): If it is specified as a non-zero parameter, this parameter will be used as the
+             divisor in the average calculation. Otherwise, `kernel_size` will be used as the divisor.
+             Default: ``None``.
+         data_format (str): The optional value for data format, is ``'NHWC'`` or ``'NCHW'``.
+             Default: ``'NCHW'``.
+
+     Inputs:
+         - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`.
+
+     Outputs:
+         Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})` or :math:`(C_{out}, H_{out}, W_{out})`.
+
+         If `pad_mode` is set to ``"pad"``, the output shape calculation formula is as follows:
+
+     .. math::
+         H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] -
+             \text{kernel_size}[0]}{\text{stride}[0]} + 1\right\rfloor
+
+     .. math::
+         W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] -
+             \text{kernel_size}[1]}{\text{stride}[1]} + 1\right\rfloor
+
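+     For example (an illustrative calculation): with :math:`H_{in} = 8`, `kernel_size=4`, `stride=1` and
+     `padding=2`, the formula gives
+     :math:`H_{out} = \left\lfloor (8 + 2 \times 2 - 4)/1 + 1 \right\rfloor = 9`,
+     which matches the second example below.
+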
+     Raises:
+         TypeError: If `kernel_size` or `stride` is neither int nor tuple.
+         ValueError: If `pad_mode` is not ``"valid"``, ``"same"`` or ``"pad"``, case-insensitive.
+         ValueError: If `data_format` is neither ``'NCHW'`` nor ``'NHWC'``.
+         ValueError: If `padding`, `ceil_mode`, `count_include_pad` or `divisor_override` is used
+             or `pad_mode` is ``"pad"`` when `data_format` is 'NHWC'.
+         ValueError: If `kernel_size` or `stride` is less than 1.
+         ValueError: If length of `padding` tuple/list is not 1 or 2.
+         ValueError: If length of shape of `x` is not equal to 3 or 4.
+         ValueError: If `divisor_override` is less than or equal to 0.
+         ValueError: If `padding` is non-zero when `pad_mode` is not ``"pad"``.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore as ms
+         >>> import numpy as np
+         >>> pool = ms.nn.AvgPool2d(kernel_size=3, stride=1)
+         >>> x = ms.Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), ms.float32)
+         >>> output = pool(x)
+         >>> print(output.shape)
+         (1, 2, 2, 2)
+         >>> x = ms.ops.randn(6, 6, 8, 8)
+         >>> pool2 = ms.nn.AvgPool2d(4, stride=1, pad_mode="pad", padding=2, divisor_override=5)
+         >>> output2 = pool2(x)
+         >>> print(output2.shape)
+         (6, 6, 9, 9)
+     """
+
+     def __init__(self,
+                  kernel_size=1,
+                  stride=1,
+                  pad_mode="valid",
+                  padding=0,
+                  ceil_mode=False,
+                  count_include_pad=True,
+                  divisor_override=None,
+                  data_format="NCHW"):
+         """Initialize AvgPool2d."""
+         super(AvgPool2d, self).__init__(kernel_size, stride, pad_mode, data_format)
+         self.ascend_910b_target = (MSContext.get_instance().get_ascend_soc_version() in ['ascend910b', 'ascend910_93'])
+         if pad_mode.upper() == 'PAD' or padding != 0 or ceil_mode or not count_include_pad \
+                 or divisor_override is not None:
+             if self.ascend_910b_target:
+                 raise ValueError(f"For '{self.cls_name}', the pad_mode 'PAD' is not supported on Ascend910B or "
+                                  f"Ascend910_93 now, it will be supported in the future.")
+             if self.format == "NHWC":
+                 raise ValueError(f"For '{self.cls_name}', the 'NHWC' format is not supported when 'pad_mode' is "
+                                  f"'pad', 'padding' is not 0, 'ceil_mode' is not False, 'count_include_pad' is "
+                                  f"not True or 'divisor_override' is not None, but got pad_mode:{pad_mode}, "
+                                  f"padding:{padding}, ceil_mode:{ceil_mode}, count_include_pad:{count_include_pad}, "
+                                  f"divisor_override:{divisor_override}.")
+             self.is_expand = True
+             if divisor_override is not None and divisor_override <= 0:
+                 raise ValueError(
+                     f"For '{self.cls_name}', the 'divisor_override' must be > 0, but got {divisor_override}.")
+             divisor_override = 0 if divisor_override is None else divisor_override
+             padding = _cal_padding(padding, self.cls_name, 2)
+
+             if isinstance(self.kernel_size, tuple):
+                 _check_tuple_length(self.kernel_size, 'kernel_size', 2, self.cls_name)
+                 kernel_size = (1,) + self.kernel_size
+             elif isinstance(self.kernel_size, int):
+                 kernel_size = (1, self.kernel_size, self.kernel_size)
+
+             if isinstance(self.stride, tuple):
+                 _check_tuple_length(self.stride, 'stride', 2, self.cls_name)
+                 stride = (1,) + self.stride
+             elif isinstance(self.stride, int):
+                 stride = (1, self.stride, self.stride)
+             self.avg_pool = P.AvgPool3D(kernel_size=kernel_size, strides=stride, pad_mode=pad_mode, pad=padding,
+                                         ceil_mode=ceil_mode,
+                                         count_include_pad=count_include_pad, divisor_override=divisor_override)
+         else:
+             self.is_expand = False
+             self.avg_pool = P.AvgPool(kernel_size=self.kernel_size,
+                                       strides=self.stride,
+                                       pad_mode=self.pad_mode,
+                                       data_format=self.format)
+
+     def construct(self, x):
+         expand_batch = False
+         if x.ndim == 3:
+             x = x.unsqueeze(0)
+             expand_batch = True
+         if self.is_expand:
+             x = x.unsqueeze(2)
+             out = self.avg_pool(x)
+             res = out.squeeze(2)
+         else:
+             res = self.avg_pool(x)
+         if expand_batch:
+             res = res.squeeze(0)
+         return res
+
+
+ class AvgPool1d(_PoolNd):
+     r"""
+     Applies a 1D average pooling over an input Tensor which can be regarded as a composition of 1D input planes.
+
+     Typically the input is of shape :math:`(N_{in}, C_{in}, L_{in})`, AvgPool1d outputs
+     regional average in the :math:`(L_{in})`-dimension. Given `kernel_size`
+     :math:`l_{ker}` and `stride` :math:`s_0`, the operation is as follows:
+
+     .. math::
+         \text{output}(N_i, C_j, l) = \frac{1}{l_{ker}} \sum_{n=0}^{l_{ker}-1}
+         \text{input}(N_i, C_j, s_0 \times l + n)
+
+     Note:
+         This interface currently does not support Atlas A2 training series products.
+
+     Args:
+         kernel_size (int): The size of kernel window used to take the average value. Default: ``1``.
+         stride (int): The distance of kernel moving, an int number that represents
+             the width of movement. Default: ``1``.
+         pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+             ``"same"``, ``"valid"`` or ``"pad"``. Default: ``"valid"``.
+
+             - ``"same"``: Pad the input at the beginning and end so that the shape of input and output
+               are the same when `stride` is set to ``1``.
+               The amount of padding is calculated by the operator internally. If the amount is even, it is
+               uniformly distributed around the input; if it is odd, the excess padding goes to the right side.
+               If this mode is set, `padding` must be 0.
+             - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+               possible length. Extra pixels that could not complete a full stride will
+               be discarded. If this mode is set, `padding` must be 0.
+             - ``"pad"``: Pad the input with a specified amount. In this mode, the amount of padding
+               at the beginning and end is determined by the `padding` parameter.
+               If this mode is set, `padding` must be greater than or equal to 0.
+
+         padding (Union(int, tuple[int], list[int])): Pooling padding value; only ``"pad"`` mode can be set to
+             non-zero. Default: ``0``. `padding` can only be an integer or a tuple/list containing a single
+             integer, in which case `padding` times or `padding[0]` times are padded on both sides of the input.
+         ceil_mode (bool): If ``True``, use ceil to compute the output shape instead of floor. Default: ``False``.
+         count_include_pad (bool): If ``True``, averaging calculation will include the zero-padding.
+             Default: ``True``.
+
+     Inputs:
+         - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`.
+
+     Outputs:
+         Tensor of shape :math:`(N, C_{out}, L_{out})` or :math:`(C_{out}, L_{out})`.
+
+         If `pad_mode` is set to ``"pad"``, the output shape calculation formula is as follows:
+
+     .. math::
+         L_{out} = \left\lfloor \frac{L_{in} +
+             2 \times \text{padding} - \text{kernel_size}}{\text{stride}} + 1\right\rfloor
+
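+     For example (an illustrative calculation): with :math:`L_{in} = 8`, `kernel_size=4`, `stride=1` and
+     `padding=2`, the formula gives
+     :math:`L_{out} = \left\lfloor (8 + 2 \times 2 - 4)/1 + 1 \right\rfloor = 9`,
+     which matches the second example below.
+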
+     Raises:
+         TypeError: If `kernel_size` or `stride` is not an int.
+         ValueError: If `pad_mode` is not ``"valid"``, ``"same"`` or ``"pad"``, case-insensitive.
+         ValueError: If `kernel_size` or `stride` is less than 1.
+         ValueError: If length of `padding` tuple/list is not 1.
+         ValueError: If length of shape of `x` is not equal to 2 or 3.
+         ValueError: If `padding` is non-zero when `pad_mode` is not ``"pad"``.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore as ms
+         >>> import numpy as np
+         >>> pool = ms.nn.AvgPool1d(kernel_size=6, stride=1)
+         >>> x = ms.Tensor(np.random.randint(0, 10, [1, 3, 6]), ms.float32)
+         >>> output = pool(x)
+         >>> result = output.shape
+         >>> print(result)
+         (1, 3, 1)
+         >>> pool2 = ms.nn.AvgPool1d(4, stride=1, ceil_mode=True, pad_mode="pad", padding=2)
+         >>> x1 = ms.ops.randn(6, 6, 8)
+         >>> output = pool2(x1)
+         >>> print(output.shape)
+         (6, 6, 9)
+     """
+
+     def __init__(self,
+                  kernel_size=1,
+                  stride=1,
+                  pad_mode="valid",
+                  padding=0,
+                  ceil_mode=False,
+                  count_include_pad=True):
+         """Initialize AvgPool1d."""
+         super(AvgPool1d, self).__init__(kernel_size, stride, pad_mode)
+         validator.check_int(self.kernel_size, 1, validator.GE, "kernel_size", self.cls_name)
+         validator.check_int(self.stride, 1, validator.GE, "stride", self.cls_name)
+         if pad_mode.upper() == 'PAD' or padding != 0 or ceil_mode or not count_include_pad:
+             padding = _cal_padding(padding, self.cls_name, 1)
+             self.is_expand_3d = True
+             kernel_size = (1, 1, self.kernel_size)
+             stride = (1, 1, self.stride)
+             self.avg_pool = P.AvgPool3D(kernel_size=kernel_size, strides=stride, pad_mode=pad_mode, pad=padding,
+                                         ceil_mode=ceil_mode,
+                                         count_include_pad=count_include_pad)
+         else:
+             self.is_expand_3d = False
+             self.kernel_size = (1, self.kernel_size)
+             self.stride = (1, self.stride)
+             self.avg_pool = P.AvgPool(kernel_size=self.kernel_size,
+                                       strides=self.stride,
+                                       pad_mode=self.pad_mode)
+             self.shape = F.shape
+             self.reduce_mean = P.ReduceMean(keep_dims=True)
+             self.slice = P.Slice()
+             self.expand = P.ExpandDims()
+             self.squeeze = P.Squeeze(2)
+
+     def construct(self, x):
+         expand_batch = False
+         if x.ndim == 2:
+             x = x.unsqueeze(0)
+             expand_batch = True
+         if self.is_expand_3d:
+             x = x.unsqueeze(2).unsqueeze(3)
+             x = self.avg_pool(x)
+             x = x.squeeze(3).squeeze(2)
+         else:
+             _shape_check(self.shape(x), self.cls_name)
+             batch, channel, width = self.shape(x)
+             if width == self.kernel_size[1]:
+                 x = self.reduce_mean(x, 2)
+             elif width - self.kernel_size[1] < self.stride[1]:
+                 x = self.slice(x, (0, 0, 0), (batch, channel, self.kernel_size[1]))
+                 x = self.reduce_mean(x, 2)
+             else:
+                 x = self.expand(x, 2)
+                 x = self.avg_pool(x)
+                 x = self.squeeze(x)
+         if expand_batch:
+             x = x.squeeze(0)
+         return x
+
+
+ @_primexpr
+ def _adaptive_shape_check(in_shape, output_size, prim_name):
+     """Check shape."""
+     msg_prefix = f"For {prim_name}, the"
+     if len(in_shape) != 3:
+         raise ValueError(f"{msg_prefix} input must have 3 dimensions, but got {len(in_shape)}.")
+     if in_shape[2] < output_size:
+         raise ValueError(f"{msg_prefix} input's last dimension must be greater than or equal to "
+                          f"output size {output_size}, but got {in_shape[2]}.")
+     if in_shape[2] % output_size != 0:
+         raise ValueError(f"{msg_prefix} input's last dimension must be divisible by "
+                          f"output size {output_size}, but got {in_shape[2]}.")
+
+
+ @constexpr
+ def _adaptive_dtype_check(x_dtype, prim_name):
+     """Check dtype."""
+     if x_dtype not in [mstype.float16, mstype.float32]:
+         raise TypeError(f"For {prim_name}, the x_dtype must be float16 or float32, "
+                         f"but got {x_dtype}.")
+
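+ # How the adaptive 1D cells below derive a regular pooling (illustrative sketch):
+ # given input width W and target output_size L with W divisible by L, they use
+ # stride = W // L and kernel_size = W - (L - 1) * stride, so each of the L output
+ # positions covers one contiguous, equally sized segment of the input. For
+ # example, W = 6 and L = 2 give stride = 3 and kernel_size = 3.
+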
+
+ class AdaptiveAvgPool1d(Cell):
+     r"""
+     Applies a 1D adaptive average pooling over an input Tensor which can be regarded as
+     a composition of 1D input planes.
+
+     Typically, the input is of shape :math:`(N_{in}, C_{in}, L_{in})`,
+     AdaptiveAvgPool1d outputs regional average in the :math:`L_{in}`-dimension.
+     The output is of shape :math:`(N_{in}, C_{in}, L_{out})`,
+     where :math:`L_{out}` is defined by `output_size`.
+
+     Note:
+         :math:`L_{in}` must be divisible by `output_size`.
+
+     Args:
+         output_size (int): The target output size :math:`L_{out}`.
+
+     Inputs:
+         - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})`, with float16 or float32 data type.
+
+     Outputs:
+         Tensor of shape :math:`(N, C_{in}, L_{out})`, has the same type as `input`.
+
+     Raises:
+         TypeError: If `output_size` is not an int.
+         TypeError: If `input` is neither float16 nor float32.
+         ValueError: If `output_size` is less than 1.
+         ValueError: If length of shape of `input` is not equal to 3.
+         ValueError: If the last dimension of `input` is smaller than `output_size`.
+         ValueError: If the last dimension of `input` is not divisible by `output_size`.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore as ms
+         >>> import numpy as np
+         >>> pool = ms.nn.AdaptiveAvgPool1d(output_size=2)
+         >>> input = ms.Tensor(np.random.randint(0, 10, [1, 3, 6]), ms.float32)
+         >>> output = pool(input)
+         >>> result = output.shape
+         >>> print(result)
+         (1, 3, 2)
+     """
+
+     def __init__(self, output_size):
+         """Initialize AdaptiveAvgPool1d."""
+         super(AdaptiveAvgPool1d, self).__init__()
+         validator.check_value_type('output_size', output_size, [int], self.cls_name)
+         validator.check_int(output_size, 1, validator.GE, "output_size", self.cls_name)
+         self.shape = F.shape
+         self.expand = P.ExpandDims()
+         self.squeeze = P.Squeeze(2)
+         self.output_size = output_size
+         self.dtype = P.DType()
+
+     def construct(self, input):
+         _adaptive_shape_check(self.shape(input), self.output_size, self.cls_name)
+         _adaptive_dtype_check(self.dtype(input), self.cls_name)
+
+         _, _, width = self.shape(input)
+         stride = width // self.output_size
+         kernel_size = width - (self.output_size - 1) * stride
+
+         stride = (1, width // self.output_size)
+         kernel_size = (1, kernel_size)
+
+         input = self.expand(input, 2)
+         avg_pool = P.AvgPool(kernel_size=kernel_size, strides=stride)
+         input = avg_pool(input)
+         input = self.squeeze(input)
+
+         return input
+
+
+ class AdaptiveAvgPool2d(Cell):
+     r"""
+     This operator applies a 2D adaptive average pooling to an input signal composed of multiple input planes.
+     That is, for any input size, the size of the specified output is H x W.
+     The number of output features is equal to the number of input features.
+
+     The input and output data format can be "NCHW" and "CHW". N is the batch size, C is the number of channels,
+     H is the feature height, and W is the feature width.
+
+     .. math::
+         \begin{align}
+         h_{start} &= floor(i * H_{in} / H_{out})\\
+         h_{end} &= ceil((i + 1) * H_{in} / H_{out})\\
+         w_{start} &= floor(j * W_{in} / W_{out})\\
+         w_{end} &= ceil((j + 1) * W_{in} / W_{out})\\
+         Output(i,j) &= \frac{\sum Input[h_{start}:h_{end}, w_{start}:w_{end}]}{(h_{end}- h_{start})
+         * (w_{end}- w_{start})}
+         \end{align}
+
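+     For example (an illustrative calculation): with :math:`H_{in} = 3` and :math:`H_{out} = 2`, output row
+     :math:`i = 0` averages input rows :math:`h_{start} = floor(0 * 3 / 2) = 0` through
+     :math:`h_{end} = ceil(1 * 3 / 2) = 2` (exclusive), and output row :math:`i = 1` averages rows 1 through 3
+     (exclusive), so adjacent pooling regions may overlap.
+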
+     Args:
+         output_size (Union[int, tuple]): The target output size is H x W.
+             `output_size` can be a tuple consisting of int type H and W, or a single H for H x H, or None.
+             If it is None, it means the output size is the same as the input size.
+
+     Inputs:
+         - **input** (Tensor) - The input of AdaptiveAvgPool2d, which is a 3D or 4D tensor,
+           with float16, float32 or float64 data type.
+
+     Outputs:
+         Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
+
+     Raises:
+         ValueError: If `output_size` is a tuple and the length of `output_size` is not 2.
+         TypeError: If `input` is not a Tensor.
+         TypeError: If dtype of `input` is not float16, float32 or float64.
+         ValueError: If the dimension of `input` is less than or equal to the dimension of `output_size`.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore as ms
+         >>> import numpy as np
+         >>> pool = ms.nn.AdaptiveAvgPool2d(2)
+         >>> input_x = ms.Tensor(np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
+         ...                               [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
+         ...                               [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]), ms.float32)
+         >>> output = pool(input_x)
+         >>> result = output.shape
+         >>> print(result)
+         (3, 2, 2)
+     """
+
+     def __init__(self, output_size):
+         """Initialize AdaptiveAvgPool2d."""
+         super(AdaptiveAvgPool2d, self).__init__()
+         self.adaptive_avgpool2d = P.AdaptiveAvgPool2D(output_size)
+
+     def construct(self, input):
+         return self.adaptive_avgpool2d(input)
+
+
+ class AdaptiveAvgPool3d(Cell):
+     r"""
+     This operator applies a 3D adaptive average pooling to an input signal composed of multiple input planes.
+     That is, for any input size, the size of the specified output is :math:`(D, H, W)`.
+     The number of output features is equal to the number of input planes.
+
+     Suppose the last 3 dimension size of input is :math:`(inD, inH, inW)`, then the last 3 dimension size of
+     output is :math:`(outD, outH, outW)`.
+
+     .. math::
+         \begin{array}{ll} \\
+             \forall \quad od \in [0,outD-1], oh \in [0,outH-1], ow \in [0,outW-1]\\
+             output[od,oh,ow] = \\
+             \qquad mean(input[istartD:iendD+1,istartH:iendH+1,istartW:iendW+1])\\
+             where,\\
+             \qquad istartD= \left\lceil \frac{od * inD}{outD} \right\rceil \\
+             \qquad iendD=\left\lfloor \frac{(od+1)* inD}{outD} \right\rfloor \\
+             \qquad istartH=\left\lceil \frac{oh * inH}{outH} \right\rceil \\
+             \qquad iendH=\left\lfloor \frac{(oh+1) * inH}{outH} \right\rfloor \\
+             \qquad istartW=\left\lceil \frac{ow * inW}{outW} \right\rceil \\
+             \qquad iendW=\left\lfloor \frac{(ow+1) * inW}{outW} \right\rfloor
+         \end{array}
+
+     Args:
+         output_size (Union[int, tuple]): The target output size. `output_size` can be a tuple :math:`(D, H, W)`,
+             or an int D for :math:`(D, D, D)`. :math:`D`, :math:`H` and :math:`W` can be int or None
+             which means the output size is the same as that of the input.
+
+     Inputs:
+         - **input** (Tensor) - The input of AdaptiveAvgPool3d, which is a 5D or 4D Tensor,
+           with float16, float32 or float64 data type.
+
+     Outputs:
+         Tensor, with the same type as the `input`.
+
+     Raises:
+         TypeError: If `input` is not a Tensor.
+         TypeError: If dtype of `input` is not float16, float32 or float64.
+         ValueError: If the dimension of `input` is not 4D or 5D.
+         ValueError: If `output_size` value is not positive.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore as ms
+         >>> import numpy as np
+         >>> # case 1: output_size=(3, 3, 4)
+         >>> output_size=(3, 3, 4)
+         >>> input_x_val = np.random.randn(4, 3, 5, 6, 7)
+         >>> input_x = ms.Tensor(input_x_val, ms.float32)
+         >>> net = ms.nn.AdaptiveAvgPool3d(output_size)
+         >>> output = net(input_x)
+         >>> print(output.shape)
+         (4, 3, 3, 3, 4)
+         >>> # case 2: output_size=5
+         >>> output_size=5
+         >>> input_x_val = np.random.randn(2, 3, 8, 6, 12)
+         >>> input_x = ms.Tensor(input_x_val, ms.float32)
+         >>> net = ms.nn.AdaptiveAvgPool3d(output_size)
+         >>> output = net(input_x)
+         >>> print(output.shape)
+         (2, 3, 5, 5, 5)
+         >>> # case 3: output_size=(None, 4, 5)
+         >>> output_size=(None, 4, 5)
+         >>> input_x_val = np.random.randn(4, 1, 9, 10, 8)
+         >>> input_x = ms.Tensor(input_x_val, ms.float32)
+         >>> net = ms.nn.AdaptiveAvgPool3d(output_size)
+         >>> output = net(input_x)
+         >>> print(output.shape)
+         (4, 1, 9, 4, 5)
+     """
+
+     def __init__(self, output_size):
+         """Initialize AdaptiveAvgPool3d."""
+         super(AdaptiveAvgPool3d, self).__init__()
+         self.adaptive_avg_pool3d = AdaptiveAvgPool3D(output_size)
+
+     def construct(self, input):
+         return self.adaptive_avg_pool3d(input)
+
+
+ class AdaptiveMaxPool1d(Cell):
+     r"""
+     Applies a 1D adaptive maximum pooling over an input Tensor which can be regarded as
+     a composition of 1D input planes.
+
+     Typically, the input is of shape :math:`(N_{in}, C_{in}, L_{in})`,
+     AdaptiveMaxPool1d outputs regional maximum in the :math:`L_{in}`-dimension. The output is of
+     shape :math:`(N_{in}, C_{in}, L_{out})`, where :math:`L_{out}` is defined by `output_size`.
+
+     Note:
+         :math:`L_{in}` must be divisible by `output_size`.
+
+     Args:
+         output_size (int): The target output size :math:`L_{out}`.
+
+     Inputs:
+         - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})`, with float16 or float32 data type.
+
+     Outputs:
+         Tensor of shape :math:`(N, C_{in}, L_{out})`, has the same type as `x`.
+
+     Raises:
+         TypeError: If `x` is neither float16 nor float32.
+         TypeError: If `output_size` is not an int.
+         ValueError: If `output_size` is less than 1.
+         ValueError: If the last dimension of `x` is smaller than `output_size`.
+         ValueError: If the last dimension of `x` is not divisible by `output_size`.
+         ValueError: If length of shape of `x` is not equal to 3.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore as ms
+         >>> import numpy as np
+         >>> pool = ms.nn.AdaptiveMaxPool1d(output_size=3)
+         >>> x = ms.Tensor(np.random.randint(0, 10, [1, 3, 6]), ms.float32)
+         >>> output = pool(x)
+         >>> result = output.shape
+         >>> print(result)
+         (1, 3, 3)
+     """
+
+     def __init__(self, output_size):
+         """Initialize AdaptiveMaxPool1d."""
+         super(AdaptiveMaxPool1d, self).__init__()
+         validator.check_int(output_size, 1, validator.GE, "output_size", self.cls_name)
+         validator.check_value_type('output_size', output_size, [int], self.cls_name)
+         self.expand = P.ExpandDims()
+         self.squeeze = P.Squeeze(2)
+         self.output_size = output_size
+         self.shape = F.shape
+         self.dtype = P.DType()
+
+     def construct(self, x):
+         _adaptive_shape_check(self.shape(x), self.output_size, self.cls_name)
+         _adaptive_dtype_check(self.dtype(x), self.cls_name)
+
+         _, _, width = self.shape(x)
+         stride = width // self.output_size
+         kernel_size = width - (self.output_size - 1) * stride
+
+         stride = (1, width // self.output_size)
+         kernel_size = (1, kernel_size)
+
+         max_pool = P.MaxPool(kernel_size=kernel_size, strides=stride)
+         x = self.expand(x, 2)
+         x = max_pool(x)
+         x = self.squeeze(x)
+
+         return x
+
+
+ class AdaptiveMaxPool2d(Cell):
+     r"""
+     This operator applies a 2D adaptive max pooling to an input signal composed of multiple input planes.
+     That is, for any input size, the size of the specified output is H x W.
+     The number of output features is equal to the number of input planes.
+
+     The input and output data format can be "NCHW" and "CHW". N is the batch size, C is the number of channels,
+     H is the feature height, and W is the feature width.
+
+     For max adaptive pool2d:
+
+     .. math::
+
+         \begin{align}
+         h_{start} &= floor(i * H_{in} / H_{out})\\
+         h_{end} &= ceil((i + 1) * H_{in} / H_{out})\\
+         w_{start} &= floor(j * W_{in} / W_{out})\\
+         w_{end} &= ceil((j + 1) * W_{in} / W_{out})\\
+         Output(i,j) &= {\max Input[h_{start}:h_{end}, w_{start}:w_{end}]}
+         \end{align}
+
+     Note:
+         Ascend platform only supports float16 type for input.
+
+     Args:
+         output_size (Union[int, tuple]): The target output size. `output_size` can be a tuple :math:`(H, W)`,
+             or an int H for :math:`(H, H)`. :math:`H` and :math:`W` can be int or None.
+             If it is None, it means the output size is the same as the input size.
+         return_indices (bool): If `return_indices` is ``True``, the indices of max value would be output.
+             Default: ``False``.
+
+     Inputs:
+         - **input** (Tensor) - The input of AdaptiveMaxPool2d, which is a 3D or 4D tensor,
+           with float16, float32 or float64 data type.
+
+     Outputs:
+         Tensor, with the same type as the `input`.
+         Shape of the output is :math:`input\_shape[:len(input\_shape) - len(out\_shape)] + out\_shape`.
+
+     Raises:
+         TypeError: If `output_size` is not int or tuple.
+         TypeError: If `input` is not a tensor.
+         TypeError: If `return_indices` is not a bool.
+         TypeError: If dtype of `input` is not float16, float32 or float64.
+         ValueError: If `output_size` is a tuple and the length of `output_size` is not 2.
+         ValueError: If the data format of `input` is neither NCHW nor CHW.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore as ms
+         >>> import numpy as np
+         >>> # case 1: output_size=(None, 2)
+         >>> input = ms.Tensor(np.array([[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
+         ...                              [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
+         ...                              [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]]), ms.float32)
+         >>> adaptive_max_pool_2d = ms.nn.AdaptiveMaxPool2d((None, 2))
+         >>> output = adaptive_max_pool_2d(input)
+         >>> print(output)
+         [[[[2. 3.]
+            [5. 6.]
+            [8. 9.]]
+           [[2. 3.]
+            [5. 6.]
+            [8. 9.]]
+           [[2. 3.]
+            [5. 6.]
+            [8. 9.]]]]
+         >>> # case 2: output_size=2
+         >>> adaptive_max_pool_2d = ms.nn.AdaptiveMaxPool2d(2)
+         >>> output = adaptive_max_pool_2d(input)
+         >>> print(output)
+         [[[[5. 6.]
+            [8. 9.]]
+           [[5. 6.]
+            [8. 9.]]
+           [[5. 6.]
+            [8. 9.]]]]
+         >>> # case 3: output_size=(1, 2)
+         >>> adaptive_max_pool_2d = ms.nn.AdaptiveMaxPool2d((1, 2))
+         >>> output = adaptive_max_pool_2d(input)
+         >>> print(output)
+         [[[[8. 9.]]
+           [[8. 9.]]
+           [[8. 9.]]]]
+     """
+
+     def __init__(self, output_size, return_indices=False):
+         """Initialize AdaptiveMaxPool2d."""
+         super(AdaptiveMaxPool2d, self).__init__()
+         validator.check_value_type('return_indices', return_indices, [bool], self.cls_name)
+         self.adaptive_max_pool2d = AdaptiveMaxPool2D(output_size)
+         self.return_indices = return_indices
+
+     def construct(self, input):
+         output = self.adaptive_max_pool2d(input)
+         if self.return_indices:
+             return output
+         return output[0]
+
+
+ class AdaptiveMaxPool3d(Cell):
+     r"""
+     Calculates the 3D adaptive max pooling for an input Tensor.
+     That is, for any input size, the size of the specified output is :math:`(D, H, W)`.
+
+     Args:
+         output_size (Union[int, tuple]): The specified output size, which is a positive integer that represents
+             depth, height and width, or a tuple of three positive integers that represent depth, height and
+             width respectively. If it is None, the output size and input size of the corresponding dimension are
+             the same.
+         return_indices (bool, optional): If `return_indices` is ``True``, the indices of max value would be
+             output. Otherwise, the indices will not be returned. Default: ``False``.
+
+     Inputs:
+         - **input** (Tensor) - Tensor, has shape of :math:`(C, D, H, W)` or :math:`(N, C, D, H, W)`.
+
+     Outputs:
+         - **y** (Tensor) - Tensor, has the same number of dims and data type as the `input`.
+         - **argmax** (Tensor) - Tensor, the indices of the maximum values along with the outputs, has the same
+           shape as `y` and a dtype of int32. Returned only when `return_indices` is ``True``.
+
+     Raises:
+         TypeError: If `input` is not a Tensor.
+         ValueError: If the dimensions number of `input` is not 4 or 5.
+         TypeError: If dtype of `input` is not int, uint or float.
+         ValueError: If `output_size` is neither an int nor a tuple with shape :math:`(3,)`.
+
+     Supported Platforms:
+         ``GPU`` ``CPU``
+
+     Examples:
+         >>> import mindspore as ms
+         >>> import numpy as np
+         >>> input = ms.Tensor(np.arange(0, 36).reshape((1, 3, 3, 4)).astype(np.float32))
+         >>> output_size = (1, 1, 2)
+         >>> net = ms.nn.AdaptiveMaxPool3d(output_size, True)
+         >>> output = net(input)
+         >>> print(output[0].asnumpy())
+         [[[[33. 35.]]]]
+         >>> print(output[1].asnumpy())
+         [[[[33 35]]]]
+     """
+
+     def __init__(self, output_size, return_indices=False):
+         """Initialize AdaptiveMaxPool3d."""
+         super(AdaptiveMaxPool3d, self).__init__()
+         if isinstance(output_size, int):
+             output_size = (output_size, output_size, output_size)
+         self.output_size = Tensor(output_size, dtype=mstype.int32)
+         self.return_indices = return_indices
+         self.adaptive_max_pool3d = AdaptiveMaxPool3D()
+
+     def construct(self, input):
+         output = self.adaptive_max_pool3d(input, self.output_size)
+         if self.return_indices:
+             return output
+         return output[0]
+
+
+ class FractionalMaxPool2d(Cell):
+     r"""
+     Applies the 2D FractionalMaxPool operation over `input`. The output Tensor shape can be determined by either
+     `output_size` or `output_ratio`, and the step size is determined by `_random_samples`. `output_size` takes
+     precedence when `output_size` and `output_ratio` are set at the same time, and `output_size` and
+     `output_ratio` cannot both be ``None``.
+
+     Refer to the paper `Fractional MaxPooling by Ben Graham <https://arxiv.org/abs/1412.6071>`_ for more details.
+
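+     For example (an illustrative calculation): with :math:`H_{in} = W_{in} = 5` and
+     `output_ratio=(0.5, 0.5)`, the output spatial size is
+     :math:`(\lfloor 5 \times 0.5 \rfloor, \lfloor 5 \times 0.5 \rfloor) = (2, 2)`,
+     which matches the second example below.
+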
1846
+ Args:
1847
+ kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
1848
+ is an int number that represents height and width of the kernel, or a tuple
1849
+ of two int numbers that represent height and width respectively.
1850
+ The value must be a positive integer.
1851
+ output_size (Union[int, tuple[int]], optional): The Shape of the target `output_size`,
1852
+ is a positive int that represents height and width, or a tuple of two positive integers that represent
1853
+ height and width respectively. The value must be a positive integer. If None, the shape of the target will
1854
+ be determined by `output_ratio`. Default: ``None`` .
1855
+ output_ratio (Union[float, tuple[float]], optional): The ratio of target output shape to input shape.
1856
+ Specifying the size of the output tensor by using a ratio of the input size.
1857
+ Data type : float16, float32, float64, and value is between (0, 1). If None, the shape of the target will be
1858
+ determined by `output_size`. Default: ``None`` .
1859
+ return_indices (bool, optional): Whether to return the indices of max value. Default: ``False`` .
1860
+ _random_samples (Tensor, optional): The random step of FractionalMaxPool2d, which is a 3D tensor.
1861
+ Tensor of data type: float16, float32, double, and value is between [0, 1).
1862
+ Supported shape :math:`(N, C, 2)` or :math:`(1, C, 2)`.
1863
+ Default: ``None``, the values of `_random_samples`
1864
+ will be randomly distributed using uniform distribution over an interval [0,1).
1865
+
1866
+ Inputs:
1867
+ - **input** (Tensor) - Tensor of shape :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`,
1868
+ with float16, float32, float64, int32, int64 data type.
1869
+
1870
+ Outputs:
1871
+ - **y** (Tensor) - Has the same type as the `input`.
1872
+ Has the shape :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})` ,
1873
+ where :math:`(H_{out}, W_{out})` = `output_size`
1874
+ or :math:`(H_{out}, W_{out})` = `output_ratio` * :math:`(H_{in}, W_{in})`.
1875
+ - **argmax** (Tensor) - The indices along with the outputs, which is a Tensor, with the same shape as the
1876
+ `y` and int64 data type. It will be returned only when `return_indices` is True.
1877
+
1878
+ Raises:
1879
+ TypeError: If data type of `input` is not one of the following: float16, float32, float64, int32, int64.
1880
+ TypeError: If data type of `_random_samples` is not one of the following: float16, float32, float64.
1881
+ ValueError: If `kernel_size` is not a number and `kernel_size` is not a tuple of length 2.
1882
+ ValueError: If `output_size` is not a number and `output_size` is not a tuple of length 2.
1883
+ ValueError: If the sum of `kernel_size` , `output_size` and -1 is larger than the corresponding
1884
+ dimension of `input`.
1885
+ ValueError: If the dimension of `_random_samples` is not 3.
1886
+ ValueError: if `output_size` and `output_ratio` are None at the same time.
1887
+ ValueError: If the first dimension size of `input` and `_random_samples` is not equal.
1888
+ ValueError: If the second dimension size of `input` and `_random_samples` is not equal.
1889
+ ValueError: If the third dimension size of `_random_samples` is not 2.
1890
+
+ Supported Platforms:
+ ``CPU``
+
+ Examples:
+ >>> # the kernel_size is an int number and the output_size is a tuple.
+ >>> import numpy as np
+ >>> import mindspore as ms
+ >>> input = ms.Tensor(np.array([0.3220, 0.9545, 0.7879, 0.0975, 0.3698,
+ ... 0.5135, 0.5740, 0.3435, 0.1895, 0.8764,
+ ... 0.9581, 0.4760, 0.9014, 0.8522, 0.3664,
+ ... 0.4980, 0.9673, 0.9879, 0.6988, 0.9022,
+ ... 0.9304, 0.1558, 0.0153, 0.1559, 0.9852]).reshape([1, 1, 5, 5]), ms.float32)
+ >>> _random_samples = ms.Tensor(np.array([[[0.8, 0.8]]]), ms.float32)
+ >>> net = ms.nn.FractionalMaxPool2d(kernel_size=2, output_size=(2, 2), _random_samples=_random_samples,
+ ... return_indices=True)
+ >>> y, argmax = net(input)
+ >>> print(y)
+ [[[[0.9545 0.8764]
+ [0.9673 0.9852]]]]
+ >>> print(argmax)
+ [[[[ 1 9]
+ [16 24]]]]
+ >>> net = ms.nn.FractionalMaxPool2d(kernel_size=2, output_ratio=(0.5, 0.5), _random_samples=_random_samples,
+ ... return_indices=True)
+ >>> y, argmax = net(input)
+ >>> print(y)
+ [[[[0.9545 0.8764]
+ [0.9673 0.9852]]]]
+ >>> print(argmax)
+ [[[[ 1 9]
+ [16 24]]]]
+ """
1923
+
+ def __init__(self, kernel_size, output_size=None, output_ratio=None, return_indices=False, _random_samples=None):
+ """Initialize FractionalMaxPool2d."""
+ super(FractionalMaxPool2d, self).__init__()
+ self.kernel_size = kernel_size
+ self.output_size = output_size
+ self.output_ratio = output_ratio
+ self.return_indices = return_indices
+ self._random_samples = _random_samples
+
+ def construct(self, input):
+ return ops.fractional_max_pool2d(input, self.kernel_size, self.output_size, self.output_ratio,
+ self.return_indices, self._random_samples)
+
+
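Note: the mapping from `output_ratio` to the output shape is plain floor arithmetic. A minimal sketch of that relationship (an illustration assuming floor rounding, which the 5x5 docstring example above is consistent with):

>>> H_in, W_in = 5, 5
>>> output_ratio = (0.5, 0.5)
>>> H_out, W_out = int(H_in * output_ratio[0]), int(W_in * output_ratio[1])
>>> print((H_out, W_out))
(2, 2)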
1938
+ class FractionalMaxPool3d(Cell):
+ r"""
+ Applies the 3D FractionalMaxPool operation over `input`. The output Tensor shape can be determined by either
+ `output_size` or `output_ratio`, and the step size is determined by `_random_samples`. `output_size` takes
+ precedence when `output_size` and `output_ratio` are set at the same time,
+ and `output_size` and `output_ratio` can not be ``None`` at the same time.
+
+ Refer to the paper `Fractional MaxPooling by Ben Graham <https://arxiv.org/abs/1412.6071>`_ for more details.
+
+ The input and output data format can be "NCDHW". N is the batch size, C is the number of channels,
+ D is the feature depth, H is the feature height, and W is the feature width.
+
+ Args:
+ kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value, a positive int
+ that represents depth, height and width of the kernel, or a tuple of three positive integers that represent
+ depth, height and width respectively.
+ output_size (Union[int, tuple[int]], optional): The target output shape,
+ an int number that represents depth, height and width, or a tuple of three positive integers that
+ represent depth, height and width respectively. If ``None`` , the shape of the target will be determined
+ by `output_ratio`. Default: ``None`` .
+ output_ratio (Union[float, tuple[float]], optional): The ratio of target output shape to input shape.
+ Specifying the size of the output tensor by using a ratio of the input size.
+ Data type: float16, float32, float64, and value is between (0, 1). If ``None`` , the shape of the target
+ will be determined by `output_size`. Default: ``None`` .
+ return_indices (bool, optional): Whether to return the indices of the max values. Default: ``False`` .
+ _random_samples (Tensor, optional): The random step of FractionalMaxPool3d, which is a 3D tensor.
+ Tensor of data type: float16, float32, double, and value is between [0, 1).
+ Supported shape :math:`(N, C, 3)` or :math:`(1, C, 3)` . Default: ``None``, the values of `_random_samples`
+ will be drawn from a uniform distribution over the interval [0, 1).
1967
+
+ Inputs:
+ - **input** (Tensor) - The input of FractionalMaxPool3d, which is a 4D or 5D tensor.
+ Tensor of data type: float16, float32, float64.
+ Supported shape :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
+
+ Outputs:
+ - **y** (Tensor) - A tensor, the output of FractionalMaxPool3d.
+ Has the same data type as `input`.
+ Has the shape :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})` ,
+ where :math:`(D_{out}, H_{out}, W_{out})` = `output_size`
+ or :math:`(D_{out}, H_{out}, W_{out})` = `output_ratio` * :math:`(D_{in}, H_{in}, W_{in})` .
+
+ - **argmax** (Tensor) - The indices that accompany the outputs, a Tensor with the same shape as
+ `y` and int32 data type. It is returned only when `return_indices` is True.
+
+ Raises:
+ TypeError: If `input` is not a 4D or 5D tensor.
+ TypeError: If `_random_samples` is not a 3D tensor.
+ TypeError: If data type of `input` is not float16, float32 or float64.
+ TypeError: If dtype of `_random_samples` is not float16, float32 or float64.
+ TypeError: If dtype of `argmax` is not int32 or int64.
+ TypeError: If dtype of `_random_samples` is different from that of `input`.
+ ValueError: If `output_size` is a tuple and its length is not 3.
+ ValueError: If `kernel_size` is a tuple and its length is not 3.
+ ValueError: If numbers in `output_size` or `kernel_size` are not positive.
+ ValueError: If `output_size` and `output_ratio` are ``None`` at the same time.
+ ValueError: If the first dimension sizes of `input` and `_random_samples` are not equal.
+ ValueError: If the second dimension sizes of `input` and `_random_samples` are not equal.
+ ValueError: If the third dimension size of `_random_samples` is not 3.
1997
+
+ Supported Platforms:
+ ``GPU`` ``CPU``
+
+ Examples:
+ >>> import numpy as np
+ >>> import mindspore as ms
+ >>> x = ms.Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])
+ ... .reshape([1, 1, 2, 2, 4]), ms.float32)
+ >>> _random_samples = ms.Tensor(np.array([0.7, 0.7, 0.7]).reshape([1, 1, 3]), ms.float32)
+ >>> net = ms.nn.FractionalMaxPool3d(kernel_size=(1, 1, 1), output_size=(1, 1, 3),
+ ... _random_samples=_random_samples, return_indices=True)
+ >>> output, argmax = net(x)
+ >>> print(output)
+ [[[[[13. 14. 16.]]]]]
+ >>> print(argmax)
+ [[[[[12 13 15]]]]]
+ >>> net = ms.nn.FractionalMaxPool3d(kernel_size=(1, 1, 1), output_ratio=(0.5, 0.5, 0.5),
+ ... _random_samples=_random_samples, return_indices=True)
+ >>> output, argmax = net(x)
+ >>> print(output)
+ [[[[[13. 16.]]]]]
+ >>> print(argmax)
+ [[[[[12 15]]]]]
+ """
2022
+
+ def __init__(self, kernel_size, output_size=None, output_ratio=None, return_indices=False, _random_samples=None):
+ """Initialize FractionalMaxPool3d."""
+ super(FractionalMaxPool3d, self).__init__()
+ self.kernel_size = kernel_size
+ self.output_size = output_size
+ self.output_ratio = output_ratio
+ self.return_indices = return_indices
+ self._random_samples = _random_samples
+
+ def construct(self, input):
+ return ops.fractional_max_pool3d(input, self.kernel_size, self.output_size, self.output_ratio,
+ self.return_indices, self._random_samples)
+
+
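The same per-dimension floor arithmetic extends to the 3D case. A minimal sketch, again assuming floor rounding, using the `(1, 1, 2, 2, 4)` input from the docstring example above:

>>> dims = (2, 2, 4)
>>> output_ratio = (0.5, 0.5, 0.5)
>>> print(tuple(int(s * r) for s, r in zip(dims, output_ratio)))
(1, 1, 2)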
2037
+ class MaxUnpool1d(Cell):
+ r"""
+ Computes the inverse of :class:`mindspore.nn.MaxPool1d`.
+
+ MaxUnpool1d keeps the maximal values and sets all non-maximal positions to zero. Typically the input
+ is of shape :math:`(N, C, H_{in})` or :math:`(C, H_{in})`, and the output is of shape
+ :math:`(N, C, H_{out})` or :math:`(C, H_{out})`. The operation is as follows.
+
+ .. math::
+ \begin{array}{ll} \\
+ H_{out} = (H_{in} - 1) \times stride[0] - 2 \times padding[0] + kernel\_size[0] \\
+ \end{array}
2050
+
+ Args:
+ kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value.
+ stride (Union[int, tuple[int]]): The distance of kernel moving.
+ If stride is ``None``, then stride is equal to kernel_size. Default: ``None`` .
+ padding (Union[int, tuple[int]]): The pad value to be filled. Default: ``0`` .
+
+ Inputs:
+ - **x** (Tensor) - The input Tensor to invert.
+ Tensor of shape :math:`(N, C, H_{in})` or :math:`(C, H_{in})`.
+ - **indices** (Tensor) - Indices of the max values.
+ Tensor of the same shape as input `x`.
+ Values of indices must belong to :math:`[0, H_{in} - 1]`.
+ Data type must be int32 or int64.
+ - **output_size** (tuple[int], optional) - The output size. Default: ``None`` .
+ If output_size is ``None``, then the shape of output is computed by kernel_size, stride and padding.
+ If output_size is not ``None``, then output_size must be :math:`(N, C, H)` , :math:`(C, H)` or
+ :math:`(H)` and output_size must belong to
+ :math:`[(N, C, H_{out} - stride[0]), (N, C, H_{out} + stride[0])]`.
+
+ Outputs:
+ Tensor, with shape :math:`(N, C, H_{out})` or :math:`(C, H_{out})`,
+ with the same data type as `x`.
+
+ Raises:
+ TypeError: If data type of `x` or `indices` is not supported.
+ TypeError: If `kernel_size`, `stride` or `padding` is neither an int nor a tuple.
+ ValueError: If numbers in `stride`, `padding` (0 and (0) are also supported) or `kernel_size` are not positive.
+ ValueError: If the shapes of `x` and `indices` are not equal.
+ ValueError: If the dimension of `x` is not 2 or 3.
+ ValueError: If type of `output_size` is not tuple.
+ ValueError: If the length of `output_size` is not 0, 2 or 3.
+ ValueError: If `output_size` is not close to the output size computed by attr `kernel_size`, `stride`, `padding`.
2083
+
+ Supported Platforms:
+ ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore as ms
+ >>> import numpy as np
+ >>> x = ms.Tensor(np.array([[2, 4, 6, 8]]).astype(np.float32))
+ >>> indices = ms.Tensor(np.array([[1, 3, 5, 7]]).astype(np.int64))
+ >>> maxunpool1d = ms.nn.MaxUnpool1d(kernel_size=2, stride=2, padding=0)
+ >>> output = maxunpool1d(x, indices)
+ >>> print(output.asnumpy())
+ [[0. 2. 0. 4. 0. 6. 0. 8.]]
+ """
2097
+
+ def __init__(self, kernel_size, stride=None, padding=0):
+ """Initialize MaxUnpool1d."""
+ super(MaxUnpool1d, self).__init__()
+ if stride is None:
+ stride = kernel_size
+ self.kernel_size = kernel_size
+ self.stride = stride
+ self.padding = padding
+
+ def construct(self, x, indices, output_size=None):
+ if output_size is None:
+ output_size = ()
+ else:
+ if not isinstance(output_size, tuple):
+ raise ValueError(f"For MaxUnpool1d, output_size must be tuple, but got {type(output_size)}.")
+ if not output_size:
+ raise ValueError("For MaxUnpool1d, the length of output_size must be positive, but got 0.")
+ out = ops.max_unpool1d(x, indices, self.kernel_size, stride=self.stride, padding=self.padding,
+ output_size=output_size)
+ return out
+
+
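As a sanity check on the shape formula above, plugging the docstring example's values into it reproduces the printed output length of 8 (plain Python arithmetic, no MindSpore call involved):

>>> H_in, stride, padding, kernel_size = 4, 2, 0, 2
>>> print((H_in - 1) * stride - 2 * padding + kernel_size)
8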
2120
+ class MaxUnpool2d(Cell):
+ r"""
+ Computes the inverse of :class:`mindspore.nn.MaxPool2d`.
+
+ MaxUnpool2d keeps the maximal values and sets all non-maximal positions to zero. Typically the input
+ is of shape :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`, and the output is of
+ shape :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`. The operation is as follows.
+
+ .. math::
+ \begin{array}{ll} \\
+ H_{out} = (H_{in} - 1) \times stride[0] - 2 \times padding[0] + kernel\_size[0] \\
+ W_{out} = (W_{in} - 1) \times stride[1] - 2 \times padding[1] + kernel\_size[1] \\
+ \end{array}
2133
+
+ Args:
+ kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
+ an int number that represents height and width of the kernel, or a tuple
+ of two int numbers that represent height and width respectively.
+ stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
+ the height and width of movement are both stride, or a tuple of two int numbers that
+ represent height and width of movement respectively.
+ If stride is ``None``, then stride is equal to kernel_size. Default: ``None`` .
+ padding (Union[int, tuple[int]]): The pad value to be filled. Default: ``0`` . If `padding` is an integer,
+ the paddings of height and width are the same, equal to padding. If `padding` is a tuple of two
+ integers, the padding of height and width equal to padding[0] and padding[1] correspondingly.
+
+ Inputs:
+ - **x** (Tensor) - The input Tensor to invert.
+ Tensor of shape :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
+ - **indices** (Tensor) - Indices of the max values.
+ Tensor of the same shape as input `x`.
+ Values of indices must belong to :math:`[0, H_{in} \times W_{in} - 1]`.
+ Data type must be int32 or int64.
+ - **output_size** (tuple[int], optional) - The output size. Default: ``None`` .
+ If output_size is ``None``, then the shape of output is computed by kernel_size, stride and padding.
+ If output_size is not ``None``, then output_size must be :math:`(N, C, H, W)`, :math:`(C, H, W)` or
+ :math:`(H, W)` and output_size must belong to
+ :math:`[(N, C, H_{out} - stride[0], W_{out} - stride[1]), (N, C, H_{out} + stride[0], W_{out} + stride[1])]`.
+
+ Outputs:
+ Tensor, with shape :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`,
+ with the same data type as `x`.
+
+ Raises:
+ TypeError: If data type of `x` or `indices` is not supported.
+ TypeError: If `kernel_size`, `stride` or `padding` is neither an int nor a tuple.
+ ValueError: If numbers in `stride`, `padding` (0 and (0, 0) are also supported) or `kernel_size` are not positive.
+ ValueError: If the shapes of `x` and `indices` are not equal.
+ ValueError: If `kernel_size`, `stride` or `padding` is a tuple whose length is not equal to 2.
+ ValueError: If the dimension of `x` is not 3 or 4.
+ ValueError: If type of `output_size` is not tuple.
+ ValueError: If the length of `output_size` is not 0, 3 or 4.
+ ValueError: If `output_size` is not close to the output size computed by attr `kernel_size`, `stride`, `padding`.
2173
+
+ Supported Platforms:
+ ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore as ms
+ >>> import numpy as np
+ >>> x = ms.Tensor(np.array([[[[0, 1], [8, 9]]]]).astype(np.float32))
+ >>> indices = ms.Tensor(np.array([[[[0, 1], [2, 3]]]]).astype(np.int64))
+ >>> maxunpool2d = ms.nn.MaxUnpool2d(kernel_size=1, stride=1, padding=0)
+ >>> output = maxunpool2d(x, indices)
+ >>> print(output.asnumpy())
+ [[[[0. 1.]
+ [8. 9.]]]]
+ """
2188
+
+ def __init__(self, kernel_size, stride=None, padding=0):
+ """Initialize MaxUnpool2d."""
+ super(MaxUnpool2d, self).__init__()
+ if stride is None:
+ stride = kernel_size
+ self.kernel_size = kernel_size
+ self.stride = stride
+ self.padding = padding
+
+ def construct(self, x, indices, output_size=None):
+ if output_size is None:
+ output_size = ()
+ else:
+ if not isinstance(output_size, tuple):
+ raise ValueError(f"For MaxUnpool2d, output_size must be tuple, but got {type(output_size)}.")
+ if not output_size:
+ raise ValueError("For MaxUnpool2d, the length of output_size must be positive, but got 0.")
+ out = ops.max_unpool2d(x, indices, self.kernel_size, stride=self.stride, padding=self.padding,
+ output_size=output_size)
+ return out
+
+
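In practice `indices` usually comes from the matching pooling layer rather than being written by hand. A hypothetical round-trip sketch; it assumes this build's `nn.MaxPool2d` supports `return_indices=True` together with `pad_mode='pad'`, which should be verified against the installed version:

>>> import numpy as np
>>> import mindspore as ms
>>> x = ms.Tensor(np.arange(16).reshape(1, 1, 4, 4).astype(np.float32))
>>> pool = ms.nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='pad', return_indices=True)
>>> pooled, indices = pool(x)  # keep the argmax locations needed to invert pooling
>>> unpool = ms.nn.MaxUnpool2d(kernel_size=2, stride=2, padding=0)
>>> restored = unpool(pooled, indices)  # zeros everywhere except the max positions
>>> print(restored.shape)
(1, 1, 4, 4)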
2211
+ class MaxUnpool3d(Cell):
+ r"""
+ Computes the inverse of :class:`mindspore.nn.MaxPool3d`.
+
+ MaxUnpool3d keeps the maximal values and sets all non-maximal positions to zero.
+ Typically the input is of shape :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`,
+ and the output is of shape :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`.
+ The operation is as follows.
+
+ .. math::
+ \begin{array}{ll} \\
+ D_{out} = (D_{in} - 1) \times stride[0] - 2 \times padding[0] + kernel\_size[0] \\
+ H_{out} = (H_{in} - 1) \times stride[1] - 2 \times padding[1] + kernel\_size[1] \\
+ W_{out} = (W_{in} - 1) \times stride[2] - 2 \times padding[2] + kernel\_size[2] \\
+ \end{array}
2226
+
+ Args:
+ kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
+ an int number that represents depth, height and width of the kernel, or a tuple
+ of three int numbers that represent depth, height and width respectively.
+ stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
+ the depth, height and width of movement are both stride, or a tuple of three int numbers that
+ represent depth, height and width of movement respectively.
+ If stride is ``None``, then stride is equal to kernel_size. Default: ``None`` .
+ padding (Union[int, tuple[int]]): The pad value to be filled. Default: ``0`` . If `padding` is an integer,
+ the paddings of depth, height and width are the same, equal to padding. If `padding` is a tuple of three
+ integers, the padding of depth, height and width equal to padding[0], padding[1] and padding[2]
+ correspondingly.
+
+ Inputs:
+ - **x** (Tensor) - The input Tensor to invert.
+ Tensor of shape :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
+ - **indices** (Tensor) - Indices of the max values.
+ Tensor of the same shape as input `x`.
+ Values of indices must belong to :math:`[0, D_{in} \times H_{in} \times W_{in} - 1]`.
+ Data type must be int32 or int64.
+ - **output_size** (tuple[int], optional) - The output size. Default: ``None`` .
+ If output_size is ``None``, then the shape of output is computed by kernel_size, stride and padding.
+ If output_size is not ``None``, then output_size must be :math:`(N, C, D, H, W)` , :math:`(C, D, H, W)` or
+ :math:`(D, H, W)` and output_size must belong to
+ :math:`[(N, C, D_{out} - stride[0], H_{out} - stride[1], W_{out} - stride[2]),
+ (N, C, D_{out} + stride[0], H_{out} + stride[1], W_{out} + stride[2])]`.
+
+ Outputs:
+ Tensor, with shape :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
+ with the same data type as `x`.
+
+ Raises:
+ TypeError: If data type of `x` or `indices` is not supported.
+ TypeError: If `kernel_size`, `stride` or `padding` is neither an int nor a tuple.
+ ValueError: If numbers in `stride`, `padding` (0 and (0, 0, 0) are also supported) or `kernel_size` are not positive.
+ ValueError: If the shapes of `x` and `indices` are not equal.
+ ValueError: If `kernel_size`, `stride` or `padding` is a tuple whose length is not equal to 3.
+ ValueError: If the dimension of `x` is not 4 or 5.
+ ValueError: If the length of `output_size` is not 0, 4 or 5.
+ ValueError: If type of `output_size` is not tuple.
+ ValueError: If `output_size` is not close to the output size computed by attr `kernel_size`, `stride`, `padding`.
2268
+
+ Supported Platforms:
+ ``GPU`` ``CPU``
+
+ Examples:
+ >>> import mindspore as ms
+ >>> import numpy as np
+ >>> x = ms.Tensor(np.array([[[[[0, 1], [8, 9]]]]]).astype(np.float32))
+ >>> indices = ms.Tensor(np.array([[[[[0, 1], [2, 3]]]]]).astype(np.int64))
+ >>> maxunpool3d = ms.nn.MaxUnpool3d(kernel_size=1, stride=1, padding=0)
+ >>> output = maxunpool3d(x, indices)
+ >>> print(output.asnumpy())
+ [[[[[0. 1.]
+ [8. 9.]]]]]
+ """
2283
+
+ def __init__(self, kernel_size, stride=None, padding=0):
+ """Initialize MaxUnpool3d."""
+ super(MaxUnpool3d, self).__init__()
+ if stride is None:
+ stride = kernel_size
+ self.kernel_size = kernel_size
+ self.stride = stride
+ self.padding = padding
+
+ def construct(self, x, indices, output_size=None):
+ if output_size is None:
+ output_size = ()
+ else:
+ if not isinstance(output_size, tuple):
+ raise ValueError(f"For MaxUnpool3d, output_size must be tuple, but got {type(output_size)}.")
+ if not output_size:
+ raise ValueError("For MaxUnpool3d, the length of output_size must be positive, but got 0.")
+ out = ops.max_unpool3d(x, indices, self.kernel_size, stride=self.stride, padding=self.padding,
+ output_size=output_size)
+ return out
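The unpooling shape formula generalizes unchanged across the three spatial dimensions. A minimal sketch checking the docstring example above ((1, 1, 1, 2, 2) input, kernel_size=1, stride=1, padding=0) in plain Python:

>>> def unpool_dim(size, stride, padding, kernel):
...     return (size - 1) * stride - 2 * padding + kernel
>>> print([unpool_dim(s, 1, 0, 1) for s in (1, 2, 2)])
[1, 2, 2]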