mindspore 1.10.0__cp37-none-any.whl → 2.0.0rc1__cp37-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (944)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Third_Party_Open_Source_Software_Notice +9064 -0
  3. mindspore/__init__.py +9 -4
  4. mindspore/_akg/akg/composite/build_module.py +11 -0
  5. mindspore/_akg/akg/config/repository_cuda.json +11 -0
  6. mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -3
  7. mindspore/_c_dataengine.cpython-37m-aarch64-linux-gnu.so +0 -0
  8. mindspore/_c_expression.cpython-37m-aarch64-linux-gnu.so +0 -0
  9. mindspore/_c_mindrecord.cpython-37m-aarch64-linux-gnu.so +0 -0
  10. mindspore/_check_jit_forbidden_api.py +102 -0
  11. mindspore/_checkparam.py +1066 -1001
  12. mindspore/_extends/builtin_operations.py +32 -4
  13. mindspore/_extends/graph_kernel/model/graph_split.py +66 -222
  14. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +12 -9
  15. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +119 -26
  16. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -50
  17. mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -6
  18. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -25
  19. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
  20. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -27
  21. mindspore/_extends/parse/__init__.py +5 -3
  22. mindspore/_extends/parse/namespace.py +17 -2
  23. mindspore/_extends/parse/parser.py +193 -34
  24. mindspore/_extends/parse/resources.py +7 -8
  25. mindspore/_extends/parse/standard_method.py +1780 -435
  26. mindspore/_extends/parse/trope.py +3 -1
  27. mindspore/_mindspore_offline_debug.cpython-37m-aarch64-linux-gnu.so +0 -0
  28. mindspore/amp.py +53 -58
  29. mindspore/bin/cache_admin +0 -0
  30. mindspore/bin/cache_server +0 -0
  31. mindspore/boost/adasum.py +3 -2
  32. mindspore/boost/boost.py +2 -2
  33. mindspore/boost/boost_cell_wrapper.py +46 -26
  34. mindspore/boost/dim_reduce.py +6 -5
  35. mindspore/boost/grad_accumulation.py +2 -1
  36. mindspore/boost/group_loss_scale_manager.py +1 -1
  37. mindspore/common/__init__.py +11 -10
  38. mindspore/common/_decorator.py +2 -0
  39. mindspore/common/_register_for_adapter.py +55 -0
  40. mindspore/common/_stub_tensor.py +201 -0
  41. mindspore/common/_utils.py +57 -0
  42. mindspore/common/api.py +582 -297
  43. mindspore/common/dtype.py +66 -18
  44. mindspore/common/dump.py +2 -2
  45. mindspore/common/initializer.py +38 -1
  46. mindspore/common/jit_config.py +25 -13
  47. mindspore/common/mutable.py +53 -24
  48. mindspore/common/parameter.py +60 -37
  49. mindspore/common/seed.py +8 -24
  50. mindspore/common/sparse_tensor.py +927 -0
  51. mindspore/common/tensor.py +1627 -3900
  52. mindspore/communication/__init__.py +10 -5
  53. mindspore/communication/_comm_helper.py +78 -214
  54. mindspore/communication/_hccl_management.py +2 -1
  55. mindspore/communication/management.py +136 -47
  56. mindspore/config/op_info.config +501 -1008
  57. mindspore/config/super_bar_config.json +512 -0
  58. mindspore/context.py +291 -56
  59. mindspore/dataset/__init__.py +12 -8
  60. mindspore/dataset/audio/__init__.py +9 -9
  61. mindspore/dataset/audio/transforms.py +1090 -228
  62. mindspore/dataset/audio/utils.py +87 -39
  63. mindspore/dataset/audio/validators.py +223 -1
  64. mindspore/dataset/callback/ds_callback.py +17 -15
  65. mindspore/dataset/core/config.py +246 -17
  66. mindspore/dataset/core/py_util_helpers.py +4 -3
  67. mindspore/dataset/core/validator_helpers.py +10 -10
  68. mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
  69. mindspore/dataset/debug/debug_hook.py +65 -0
  70. mindspore/dataset/debug/pre_defined_hook.py +67 -0
  71. mindspore/dataset/engine/__init__.py +7 -3
  72. mindspore/dataset/engine/cache_client.py +9 -9
  73. mindspore/dataset/engine/datasets.py +648 -477
  74. mindspore/dataset/engine/datasets_audio.py +165 -167
  75. mindspore/dataset/engine/datasets_standard_format.py +93 -67
  76. mindspore/dataset/engine/datasets_text.py +492 -342
  77. mindspore/dataset/engine/datasets_user_defined.py +85 -50
  78. mindspore/dataset/engine/datasets_vision.py +1224 -699
  79. mindspore/dataset/engine/graphdata.py +134 -69
  80. mindspore/dataset/engine/iterators.py +50 -9
  81. mindspore/dataset/engine/offload.py +52 -31
  82. mindspore/dataset/engine/samplers.py +27 -24
  83. mindspore/dataset/engine/serializer_deserializer.py +14 -15
  84. mindspore/dataset/engine/validators.py +213 -52
  85. mindspore/dataset/text/__init__.py +10 -8
  86. mindspore/dataset/text/transforms.py +152 -57
  87. mindspore/dataset/text/utils.py +98 -49
  88. mindspore/dataset/text/validators.py +25 -0
  89. mindspore/dataset/transforms/__init__.py +4 -2
  90. mindspore/dataset/transforms/c_transforms.py +11 -13
  91. mindspore/dataset/transforms/py_transforms.py +2 -2
  92. mindspore/dataset/transforms/py_transforms_util.py +10 -0
  93. mindspore/dataset/transforms/transforms.py +13 -15
  94. mindspore/dataset/transforms/validators.py +7 -7
  95. mindspore/dataset/utils/__init__.py +2 -1
  96. mindspore/dataset/utils/browse_dataset.py +13 -13
  97. mindspore/dataset/utils/line_reader.py +121 -0
  98. mindspore/dataset/vision/__init__.py +8 -7
  99. mindspore/dataset/vision/c_transforms.py +125 -126
  100. mindspore/dataset/vision/py_transforms.py +37 -37
  101. mindspore/dataset/vision/py_transforms_util.py +23 -20
  102. mindspore/dataset/vision/transforms.py +316 -315
  103. mindspore/dataset/vision/utils.py +313 -17
  104. mindspore/dataset/vision/validators.py +6 -6
  105. mindspore/default_config.py +0 -1
  106. mindspore/{compression → experimental}/__init__.py +6 -5
  107. mindspore/experimental/map_parameter.py +275 -0
  108. mindspore/include/OWNERS +0 -1
  109. mindspore/include/api/callback/callback.h +9 -13
  110. mindspore/include/api/callback/ckpt_saver.h +2 -2
  111. mindspore/include/api/callback/loss_monitor.h +2 -2
  112. mindspore/include/api/callback/lr_scheduler.h +5 -5
  113. mindspore/include/api/callback/time_monitor.h +2 -2
  114. mindspore/include/api/callback/train_accuracy.h +4 -6
  115. mindspore/include/api/cfg.h +19 -6
  116. mindspore/include/api/context.h +70 -9
  117. mindspore/include/api/delegate.h +8 -1
  118. mindspore/include/api/dual_abi_helper.h +8 -24
  119. mindspore/include/api/metrics/accuracy.h +2 -2
  120. mindspore/include/api/metrics/metrics.h +4 -3
  121. mindspore/include/api/model.h +9 -4
  122. mindspore/include/api/model_group.h +68 -0
  123. mindspore/include/api/model_parallel_runner.h +17 -17
  124. mindspore/include/api/net.h +12 -11
  125. mindspore/include/api/serialization.h +20 -4
  126. mindspore/include/api/status.h +7 -1
  127. mindspore/include/api/types.h +25 -21
  128. mindspore/include/api/visible.h +4 -0
  129. mindspore/include/c_api/model_c.h +5 -0
  130. mindspore/include/c_api/status_c.h +1 -1
  131. mindspore/include/dataset/config.h +1 -1
  132. mindspore/include/dataset/constants.h +14 -0
  133. mindspore/include/dataset/text.h +59 -0
  134. mindspore/include/dataset/vision.h +56 -117
  135. mindspore/include/dataset/vision_lite.h +102 -0
  136. mindspore/include/mindapi/base/type_id.h +42 -3
  137. mindspore/lib/libdnnl.so.2 +0 -0
  138. mindspore/lib/libicudata.so.69 +0 -0
  139. mindspore/lib/libicui18n.so.69 +0 -0
  140. mindspore/lib/libicuuc.so.69 +0 -0
  141. mindspore/lib/libmindspore.so +0 -0
  142. mindspore/lib/libmindspore_backend.so +0 -0
  143. mindspore/lib/libmindspore_common.so +0 -0
  144. mindspore/lib/libmindspore_core.so +0 -0
  145. mindspore/lib/libmindspore_glog.so.0 +0 -0
  146. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  147. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  148. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  149. mindspore/lib/libmindspore_shared_lib.so +0 -0
  150. mindspore/lib/libmpi_adapter.so +0 -0
  151. mindspore/lib/libmpi_collective.so +0 -0
  152. mindspore/lib/libnnacl.so +0 -0
  153. mindspore/lib/libopencv_core.so.4.5 +0 -0
  154. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  155. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  156. mindspore/lib/libps_cache.so +0 -0
  157. mindspore/lib/plugin/ascend/libakg.so +0 -0
  158. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  159. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  160. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  161. mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
  162. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  163. mindspore/lib/{libakg.so → plugin/cpu/libakg.so} +0 -0
  164. mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
  165. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  166. mindspore/log.py +28 -28
  167. mindspore/mindrecord/common/exceptions.py +2 -4
  168. mindspore/mindrecord/filereader.py +19 -1
  169. mindspore/mindrecord/filewriter.py +250 -88
  170. mindspore/mindrecord/mindpage.py +13 -13
  171. mindspore/mindrecord/shardheader.py +15 -15
  172. mindspore/mindrecord/shardreader.py +9 -0
  173. mindspore/mindrecord/shardwriter.py +29 -29
  174. mindspore/mindrecord/tools/cifar100_to_mr.py +9 -9
  175. mindspore/mindrecord/tools/cifar10_to_mr.py +9 -9
  176. mindspore/mindrecord/tools/csv_to_mr.py +4 -4
  177. mindspore/mindrecord/tools/imagenet_to_mr.py +70 -65
  178. mindspore/mindrecord/tools/mnist_to_mr.py +41 -41
  179. mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
  180. mindspore/nn/__init__.py +1 -5
  181. mindspore/nn/cell.py +297 -234
  182. mindspore/nn/dynamic_lr.py +1 -1
  183. mindspore/nn/grad/cell_grad.py +17 -42
  184. mindspore/nn/layer/__init__.py +7 -4
  185. mindspore/nn/layer/activation.py +131 -88
  186. mindspore/nn/layer/basic.py +313 -613
  187. mindspore/nn/layer/channel_shuffle.py +103 -0
  188. mindspore/nn/layer/combined.py +1 -1
  189. mindspore/nn/layer/container.py +52 -6
  190. mindspore/nn/layer/conv.py +112 -43
  191. mindspore/nn/layer/dense.py +10 -9
  192. mindspore/nn/layer/embedding.py +36 -34
  193. mindspore/nn/layer/image.py +123 -27
  194. mindspore/nn/layer/math.py +108 -107
  195. mindspore/nn/layer/normalization.py +212 -366
  196. mindspore/nn/layer/padding.py +370 -42
  197. mindspore/nn/layer/pooling.py +1443 -219
  198. mindspore/nn/layer/rnn_cells.py +11 -16
  199. mindspore/nn/layer/rnns.py +38 -39
  200. mindspore/nn/layer/thor_layer.py +24 -25
  201. mindspore/nn/layer/timedistributed.py +5 -5
  202. mindspore/nn/layer/transformer.py +701 -0
  203. mindspore/nn/learning_rate_schedule.py +8 -8
  204. mindspore/nn/loss/__init__.py +9 -6
  205. mindspore/nn/loss/loss.py +678 -142
  206. mindspore/nn/metrics.py +53 -0
  207. mindspore/nn/optim/_dist_optimizer_registry.py +2 -2
  208. mindspore/nn/optim/ada_grad.py +8 -8
  209. mindspore/nn/optim/adadelta.py +2 -3
  210. mindspore/nn/optim/adafactor.py +18 -14
  211. mindspore/nn/optim/adam.py +429 -87
  212. mindspore/nn/optim/adamax.py +5 -6
  213. mindspore/nn/optim/adasum.py +10 -8
  214. mindspore/nn/optim/asgd.py +7 -7
  215. mindspore/nn/optim/ftrl.py +81 -11
  216. mindspore/nn/optim/lamb.py +7 -8
  217. mindspore/nn/optim/lars.py +4 -4
  218. mindspore/nn/optim/lazyadam.py +82 -7
  219. mindspore/nn/optim/momentum.py +8 -7
  220. mindspore/nn/optim/optimizer.py +19 -10
  221. mindspore/nn/optim/proximal_ada_grad.py +6 -5
  222. mindspore/nn/optim/rmsprop.py +3 -3
  223. mindspore/nn/optim/rprop.py +20 -16
  224. mindspore/nn/optim/sgd.py +21 -15
  225. mindspore/nn/optim/thor.py +23 -21
  226. mindspore/nn/probability/__init__.py +0 -2
  227. mindspore/nn/probability/bijector/bijector.py +7 -6
  228. mindspore/nn/probability/bijector/invert.py +4 -2
  229. mindspore/nn/probability/bijector/softplus.py +2 -2
  230. mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
  231. mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
  232. mindspore/nn/probability/distribution/__init__.py +6 -0
  233. mindspore/nn/probability/distribution/_utils/custom_ops.py +3 -2
  234. mindspore/nn/probability/distribution/_utils/utils.py +11 -17
  235. mindspore/nn/probability/distribution/bernoulli.py +6 -6
  236. mindspore/nn/probability/distribution/beta.py +1 -1
  237. mindspore/nn/probability/distribution/categorical.py +9 -9
  238. mindspore/nn/probability/distribution/cauchy.py +8 -8
  239. mindspore/nn/probability/distribution/distribution.py +12 -6
  240. mindspore/nn/probability/distribution/exponential.py +5 -5
  241. mindspore/nn/probability/distribution/gamma.py +3 -3
  242. mindspore/nn/probability/distribution/geometric.py +6 -5
  243. mindspore/nn/probability/distribution/gumbel.py +5 -5
  244. mindspore/nn/probability/distribution/half_normal.py +133 -0
  245. mindspore/nn/probability/distribution/laplace.py +128 -0
  246. mindspore/nn/probability/distribution/log_normal.py +0 -1
  247. mindspore/nn/probability/distribution/logistic.py +4 -5
  248. mindspore/nn/probability/distribution/normal.py +11 -15
  249. mindspore/nn/probability/distribution/poisson.py +6 -2
  250. mindspore/nn/probability/distribution/student_t.py +150 -0
  251. mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
  252. mindspore/nn/probability/distribution/uniform.py +5 -5
  253. mindspore/nn/reinforcement/_tensors_queue.py +3 -3
  254. mindspore/nn/reinforcement/tensor_array.py +2 -2
  255. mindspore/nn/sparse/sparse.py +8 -1
  256. mindspore/nn/wrap/cell_wrapper.py +55 -27
  257. mindspore/nn/wrap/grad_reducer.py +20 -11
  258. mindspore/nn/wrap/loss_scale.py +47 -30
  259. mindspore/numpy/array_creations.py +33 -22
  260. mindspore/numpy/array_ops.py +46 -42
  261. mindspore/numpy/logic_ops.py +6 -27
  262. mindspore/numpy/math_ops.py +26 -19
  263. mindspore/numpy/utils.py +1 -8
  264. mindspore/numpy/utils_const.py +112 -62
  265. mindspore/ops/__init__.py +6 -3
  266. mindspore/ops/_constants.py +0 -6
  267. mindspore/ops/_grad/__init__.py +2 -1
  268. mindspore/ops/_grad/grad_array_ops.py +209 -152
  269. mindspore/ops/_grad/grad_base.py +55 -17
  270. mindspore/ops/_grad/grad_clip_ops.py +11 -3
  271. mindspore/ops/_grad/grad_comm_ops.py +58 -47
  272. mindspore/ops/_grad/grad_implementations.py +21 -61
  273. mindspore/ops/_grad/grad_inner_ops.py +48 -6
  274. mindspore/ops/_grad/grad_math_ops.py +306 -161
  275. mindspore/ops/_grad/grad_nn_ops.py +192 -181
  276. mindspore/ops/_grad/grad_other_ops.py +1 -1
  277. mindspore/ops/_grad/grad_quant_ops.py +5 -5
  278. mindspore/ops/_grad/grad_sequence_ops.py +296 -0
  279. mindspore/ops/_grad/grad_sparse.py +15 -9
  280. mindspore/ops/_grad_experimental/__init__.py +1 -0
  281. mindspore/ops/_grad_experimental/grad_array_ops.py +441 -55
  282. mindspore/ops/_grad_experimental/grad_image_ops.py +25 -7
  283. mindspore/ops/_grad_experimental/grad_inner_ops.py +3 -44
  284. mindspore/ops/_grad_experimental/grad_linalg_ops.py +16 -21
  285. mindspore/ops/_grad_experimental/grad_math_ops.py +979 -49
  286. mindspore/ops/_grad_experimental/grad_nn_ops.py +78 -8
  287. mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
  288. mindspore/ops/_grad_experimental/grad_sparse_ops.py +197 -13
  289. mindspore/ops/_op_impl/__init__.py +3 -3
  290. mindspore/ops/_op_impl/_custom_op/__init__.py +0 -1
  291. mindspore/ops/_op_impl/_custom_op/_basic.py +0 -1
  292. mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
  293. mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +4 -2
  294. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
  295. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
  296. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +5 -5
  297. mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
  298. mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
  299. mindspore/ops/_op_impl/_custom_op/correction_mul.py +3 -3
  300. mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
  301. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +4 -8
  302. mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
  303. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
  304. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
  305. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
  306. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
  307. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
  308. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
  309. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
  310. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
  311. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
  312. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
  313. mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
  314. mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
  315. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
  316. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
  317. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
  318. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
  319. mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
  320. mindspore/ops/_op_impl/_custom_op/matmul_dds_grad_impl.py +0 -1
  321. mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -1
  322. mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
  323. mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
  324. mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
  325. mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
  326. mindspore/ops/_op_impl/aicpu/__init__.py +238 -3
  327. mindspore/ops/_op_impl/aicpu/abs.py +36 -0
  328. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d.py +34 -0
  329. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
  330. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d.py +39 -0
  331. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d_grad.py +39 -0
  332. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d_grad.py +37 -0
  333. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d.py +42 -0
  334. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d_grad.py +152 -0
  335. mindspore/ops/_op_impl/aicpu/add.py +43 -0
  336. mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
  337. mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
  338. mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
  339. mindspore/ops/_op_impl/aicpu/arg_max.py +75 -0
  340. mindspore/ops/_op_impl/aicpu/arg_min.py +75 -0
  341. mindspore/ops/_op_impl/aicpu/argmin_with_value.py +43 -0
  342. mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -0
  343. mindspore/ops/_op_impl/aicpu/batch_norm_grad_grad.py +49 -0
  344. mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
  345. mindspore/ops/_op_impl/aicpu/bessel_i0.py +31 -0
  346. mindspore/ops/_op_impl/aicpu/bias_add.py +44 -0
  347. mindspore/ops/_op_impl/aicpu/bias_add_grad.py +43 -0
  348. mindspore/ops/_op_impl/aicpu/bincount.py +33 -0
  349. mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/cauchy.py} +17 -10
  350. mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
  351. mindspore/ops/_op_impl/aicpu/cholesky.py +1 -1
  352. mindspore/ops/_op_impl/{cpu/bias_add.py → aicpu/choleskygrad.py} +9 -7
  353. mindspore/ops/_op_impl/aicpu/combined_non_max_suppression.py +42 -0
  354. mindspore/ops/_op_impl/aicpu/concat_offset.py +42 -0
  355. mindspore/ops/_op_impl/aicpu/concat_offset_v1.py +31 -0
  356. mindspore/ops/_op_impl/aicpu/conj.py +11 -0
  357. mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_image.py +38 -0
  358. mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +36 -0
  359. mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
  360. mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +2 -2
  361. mindspore/ops/_op_impl/aicpu/dense_to_sparse_set_operation.py +48 -0
  362. mindspore/ops/_op_impl/aicpu/diag.py +36 -0
  363. mindspore/ops/_op_impl/aicpu/diag_part.py +36 -0
  364. mindspore/ops/_op_impl/aicpu/diagonal.py +35 -0
  365. mindspore/ops/_op_impl/{cpu/bias_add_grad.py → aicpu/digamma.py} +9 -7
  366. mindspore/ops/_op_impl/aicpu/eig.py +35 -0
  367. mindspore/ops/_op_impl/aicpu/fft_with_size.py +41 -0
  368. mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
  369. mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
  370. mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
  371. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
  372. mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
  373. mindspore/ops/_op_impl/aicpu/glu.py +33 -0
  374. mindspore/ops/_op_impl/aicpu/glu_grad.py +34 -0
  375. mindspore/ops/_op_impl/aicpu/greater.py +41 -0
  376. mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
  377. mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
  378. mindspore/ops/_op_impl/{tbe/scatter_add_ds.py → aicpu/inplace_index_add.py} +17 -21
  379. mindspore/ops/_op_impl/aicpu/instance_norm_v2.py +41 -0
  380. mindspore/ops/_op_impl/aicpu/instance_norm_v2_grad.py +44 -0
  381. mindspore/ops/_op_impl/aicpu/layer_norm_grad_grad.py +47 -0
  382. mindspore/ops/_op_impl/aicpu/less.py +41 -0
  383. mindspore/ops/_op_impl/aicpu/less_equal.py +41 -0
  384. mindspore/ops/_op_impl/aicpu/lgamma.py +32 -0
  385. mindspore/ops/_op_impl/aicpu/log_normal_reverse.py +33 -0
  386. mindspore/ops/_op_impl/aicpu/logit.py +33 -0
  387. mindspore/ops/_op_impl/aicpu/logit_grad.py +34 -0
  388. mindspore/ops/_op_impl/aicpu/masked_fill.py +42 -0
  389. mindspore/ops/_op_impl/aicpu/masked_scatter.py +39 -0
  390. mindspore/ops/_op_impl/aicpu/matmul.py +39 -0
  391. mindspore/ops/_op_impl/aicpu/matrix_logarithm.py +31 -0
  392. mindspore/ops/_op_impl/aicpu/matrix_power.py +32 -0
  393. mindspore/ops/_op_impl/aicpu/matrix_solve_ls.py +36 -0
  394. mindspore/ops/_op_impl/aicpu/matrix_triangular_solve.py +36 -0
  395. mindspore/ops/_op_impl/aicpu/mirror_pad.py +2 -0
  396. mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
  397. mindspore/ops/_op_impl/aicpu/mul.py +3 -1
  398. mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
  399. mindspore/ops/_op_impl/aicpu/multinomial_with_replacement.py +35 -0
  400. mindspore/ops/_op_impl/aicpu/nan_to_num.py +34 -0
  401. mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
  402. mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
  403. mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
  404. mindspore/ops/_op_impl/aicpu/polar.py +32 -0
  405. mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
  406. mindspore/ops/_op_impl/aicpu/qr.py +36 -0
  407. mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
  408. mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
  409. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
  410. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_tensor.py +74 -0
  411. mindspore/ops/_op_impl/aicpu/random_shuffle.py +3 -0
  412. mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
  413. mindspore/ops/_op_impl/aicpu/range.py +36 -0
  414. mindspore/ops/_op_impl/aicpu/reciprocal.py +34 -0
  415. mindspore/ops/_op_impl/aicpu/reciprocal_grad.py +35 -0
  416. mindspore/ops/_op_impl/aicpu/reduce_sum.py +57 -0
  417. mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
  418. mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
  419. mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
  420. mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
  421. mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
  422. mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
  423. mindspore/ops/_op_impl/aicpu/search_sorted.py +12 -6
  424. mindspore/ops/_op_impl/aicpu/self_adjoint_eig.py +34 -0
  425. mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
  426. mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
  427. mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
  428. mindspore/ops/_op_impl/aicpu/slice_grad.py +76 -0
  429. mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
  430. mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
  431. mindspore/ops/_op_impl/aicpu/sort.py +39 -0
  432. mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
  433. mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
  434. mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows.py +63 -0
  435. mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows_grad.py +45 -0
  436. mindspore/ops/_op_impl/aicpu/sparse_matrix_mat_mul.py +56 -0
  437. mindspore/ops/_op_impl/{tbe/slice_ds.py → aicpu/sparse_segment_sum.py} +16 -24
  438. mindspore/ops/_op_impl/aicpu/sparse_segment_sum_with_num_segments.py +68 -0
  439. mindspore/ops/_op_impl/aicpu/sparse_slice.py +63 -0
  440. mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +61 -0
  441. mindspore/ops/_op_impl/aicpu/squared_difference.py +2 -0
  442. mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +93 -0
  443. mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +66 -0
  444. mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
  445. mindspore/ops/_op_impl/{tbe/gather_v2.py → aicpu/tile.py} +24 -24
  446. mindspore/ops/_op_impl/aicpu/tridiagonal_solve.py +35 -0
  447. mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
  448. mindspore/ops/_op_impl/aicpu/triu_indices.py +34 -0
  449. mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
  450. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
  451. mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
  452. mindspore/ops/_op_impl/cpu/__init__.py +1 -2
  453. mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
  454. mindspore/ops/_op_impl/cpu/maximum_grad.py +2 -0
  455. mindspore/{compression/common/__init__.py → ops/_op_impl/cpu/pyexecute.py} +13 -8
  456. mindspore/ops/_op_impl/cpu/reduce_sum.py +8 -0
  457. mindspore/ops/_op_impl/cpu/sparse_slice.py +62 -0
  458. mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +60 -0
  459. mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
  460. mindspore/ops/_op_impl/tbe/__init__.py +27 -608
  461. mindspore/ops/_op_impl/tbe/addcdiv_ds.py +42 -0
  462. mindspore/ops/_op_impl/tbe/addcmul_ds.py +44 -0
  463. mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
  464. mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
  465. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
  466. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -1
  467. mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
  468. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
  469. mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +41 -0
  470. mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +1 -0
  471. mindspore/ops/_op_impl/tbe/bias_add_grad.py +2 -0
  472. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
  473. mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +40 -0
  474. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
  475. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
  476. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
  477. mindspore/ops/_op_impl/tbe/cast.py +0 -2
  478. mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
  479. mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -2
  480. mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -2
  481. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
  482. mindspore/ops/_op_impl/tbe/deformable_offsets.py +1 -0
  483. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +1 -1
  484. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
  485. mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
  486. mindspore/ops/_op_impl/tbe/greater.py +2 -0
  487. mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
  488. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -1
  489. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
  490. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
  491. mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -6
  492. mindspore/ops/_op_impl/tbe/{greater_ds.py → reduce_all_ds.py} +13 -16
  493. mindspore/ops/_op_impl/tbe/reduce_any_ds.py +39 -0
  494. mindspore/ops/_op_impl/tbe/roi_align_ds.py +44 -0
  495. mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +44 -0
  496. mindspore/ops/_op_impl/tbe/scatter_add.py +2 -0
  497. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +2 -2
  498. mindspore/ops/_op_impl/tbe/slice.py +26 -15
  499. mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
  500. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
  501. mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +1 -0
  502. mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
  503. mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +1 -1
  504. mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +2 -0
  505. mindspore/ops/_primitive_cache.py +3 -2
  506. mindspore/ops/_register_for_op.py +11 -0
  507. mindspore/ops/_utils/__init__.py +1 -1
  508. mindspore/ops/_utils/utils.py +20 -41
  509. mindspore/ops/_vmap/__init__.py +2 -2
  510. mindspore/ops/_vmap/vmap_array_ops.py +170 -78
  511. mindspore/ops/_vmap/vmap_base.py +24 -10
  512. mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
  513. mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
  514. mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -9
  515. mindspore/ops/_vmap/vmap_image_ops.py +52 -0
  516. mindspore/ops/_vmap/vmap_math_ops.py +77 -6
  517. mindspore/ops/_vmap/vmap_nn_ops.py +78 -29
  518. mindspore/ops/_vmap/vmap_other_ops.py +3 -1
  519. mindspore/ops/_vmap/vmap_random_ops.py +55 -3
  520. mindspore/ops/_vmap/vmap_sparse_ops.py +1 -0
  521. mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
  522. mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
  523. mindspore/ops/bprop_mindir/ApproximateEqual_bprop.mindir +18 -19
  524. mindspore/ops/bprop_mindir/Argmax_bprop.mindir +13 -12
  525. mindspore/ops/bprop_mindir/Argmin_bprop.mindir +14 -13
  526. mindspore/ops/bprop_mindir/AssignSub_bprop.mindir +17 -18
  527. mindspore/ops/bprop_mindir/Assign_bprop.mindir +16 -16
  528. mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
  529. mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
  530. mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
  531. mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +13 -12
  532. mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
  533. mindspore/ops/bprop_mindir/BatchToSpaceND_bprop.mindir +28 -0
  534. mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
  535. mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
  536. mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +306 -0
  537. mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +12 -8
  538. mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
  539. mindspore/ops/bprop_mindir/Concat_bprop.mindir +0 -0
  540. mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
  541. mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
  542. mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
  543. mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
  544. mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
  545. mindspore/ops/bprop_mindir/DType_bprop.mindir +12 -12
  546. mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
  547. mindspore/ops/bprop_mindir/Depend_bprop.mindir +12 -13
  548. mindspore/ops/bprop_mindir/DepthToSpace_bprop.mindir +23 -0
  549. mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
  550. mindspore/ops/bprop_mindir/DiagPart_bprop.mindir +15 -0
  551. mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
  552. mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
  553. mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -24
  554. mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -14
  555. mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
  556. mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
  557. mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
  558. mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
  559. mindspore/ops/bprop_mindir/DynamicShape_bprop.mindir +12 -12
  560. mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
  561. mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
  562. mindspore/ops/bprop_mindir/Equal_bprop.mindir +18 -19
  563. mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +58 -0
  564. mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
  565. mindspore/ops/bprop_mindir/Flatten_bprop.mindir +54 -0
  566. mindspore/ops/bprop_mindir/FloorDiv_bprop.mindir +18 -15
  567. mindspore/ops/bprop_mindir/GatherD_bprop.mindir +26 -0
  568. mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +57 -0
  569. mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
  570. mindspore/ops/bprop_mindir/GreaterEqual_bprop.mindir +17 -18
  571. mindspore/ops/bprop_mindir/Greater_bprop.mindir +18 -19
  572. mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
  573. mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
  574. mindspore/ops/bprop_mindir/IOU_bprop.mindir +18 -19
  575. mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
  576. mindspore/ops/bprop_mindir/IsFinite_bprop.mindir +13 -12
  577. mindspore/ops/bprop_mindir/IsInf_bprop.mindir +13 -10
  578. mindspore/ops/bprop_mindir/IsNan_bprop.mindir +14 -11
  579. mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
  580. mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
  581. mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
  582. mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
  583. mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
  584. mindspore/ops/bprop_mindir/LessEqual_bprop.mindir +18 -19
  585. mindspore/ops/bprop_mindir/Less_bprop.mindir +17 -18
  586. mindspore/ops/bprop_mindir/LinSpace_bprop.mindir +22 -19
  587. mindspore/ops/bprop_mindir/Load_bprop.mindir +12 -13
  588. mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
  589. mindspore/ops/bprop_mindir/LogicalAnd_bprop.mindir +17 -18
  590. mindspore/ops/bprop_mindir/LogicalNot_bprop.mindir +14 -13
  591. mindspore/ops/bprop_mindir/MaskedSelect_bprop.mindir +21 -0
  592. mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
  593. mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
  594. mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
  595. mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
  596. mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
  597. mindspore/ops/bprop_mindir/Maximum_bprop.mindir +0 -0
  598. mindspore/ops/bprop_mindir/Minimum_bprop.mindir +0 -0
  599. mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
  600. mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
  601. mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
  602. mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
  603. mindspore/ops/bprop_mindir/NonZero_bprop.mindir +14 -0
  604. mindspore/ops/bprop_mindir/NotEqual_bprop.mindir +18 -19
  605. mindspore/ops/bprop_mindir/OneHot_bprop.mindir +25 -23
  606. mindspore/ops/bprop_mindir/OnesLike_bprop.mindir +13 -13
  607. mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
  608. mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
  609. mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
  610. mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
  611. mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
  612. mindspore/ops/bprop_mindir/Range_bprop.mindir +21 -19
  613. mindspore/ops/bprop_mindir/Rank_bprop.mindir +11 -11
  614. mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
  615. mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
  616. mindspore/ops/bprop_mindir/ReduceAll_bprop.mindir +18 -17
  617. mindspore/ops/bprop_mindir/ReduceAny_bprop.mindir +18 -17
  618. mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +19 -23
  619. mindspore/ops/bprop_mindir/Reshape_bprop.mindir +60 -0
  620. mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
  621. mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +89 -0
  622. mindspore/ops/bprop_mindir/ReverseSequence_bprop.mindir +52 -0
  623. mindspore/ops/bprop_mindir/ReverseV2_bprop.mindir +22 -0
  624. mindspore/ops/bprop_mindir/Round_bprop.mindir +14 -13
  625. mindspore/ops/bprop_mindir/ScatterMax_bprop.mindir +0 -0
  626. mindspore/ops/bprop_mindir/ScatterMin_bprop.mindir +0 -0
  627. mindspore/ops/bprop_mindir/ScatterNdUpdate_bprop.mindir +22 -0
  628. mindspore/ops/bprop_mindir/ScatterNd_bprop.mindir +24 -0
  629. mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +22 -0
  630. mindspore/ops/bprop_mindir/ScatterUpdate_bprop.mindir +0 -0
  631. mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
  632. mindspore/ops/bprop_mindir/Select_bprop.mindir +30 -34
  633. mindspore/ops/bprop_mindir/Shape_bprop.mindir +12 -12
  634. mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
  635. mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
  636. mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
  637. mindspore/ops/bprop_mindir/Sign_bprop.mindir +13 -12
  638. mindspore/ops/bprop_mindir/Slice_bprop.mindir +26 -0
  639. mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
  640. mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  641. mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
  642. mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
  643. mindspore/ops/bprop_mindir/Sort_bprop.mindir +0 -0
  644. mindspore/ops/bprop_mindir/SpaceToBatchND_bprop.mindir +28 -0
  645. mindspore/ops/bprop_mindir/SpaceToDepth_bprop.mindir +23 -0
  646. mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
  647. mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  648. mindspore/ops/bprop_mindir/Split_bprop.mindir +22 -0
  649. mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +54 -0
  650. mindspore/ops/bprop_mindir/StridedSliceGrad_bprop.mindir +95 -0
  651. mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +98 -0
  652. mindspore/ops/bprop_mindir/Switch_bprop.mindir +28 -32
  653. mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
  654. mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
  655. mindspore/ops/bprop_mindir/TensorScatterAdd_bprop.mindir +22 -0
  656. mindspore/ops/bprop_mindir/TensorScatterUpdate_bprop.mindir +29 -0
  657. mindspore/ops/bprop_mindir/TensorShape_bprop.mindir +14 -0
  658. mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
  659. mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
  660. mindspore/ops/bprop_mindir/TransShape_bprop.mindir +23 -0
  661. mindspore/ops/bprop_mindir/TruncateDiv_bprop.mindir +18 -15
  662. mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +11 -13
  663. mindspore/ops/bprop_mindir/Unique_bprop.mindir +16 -0
  664. mindspore/ops/bprop_mindir/Unstack_bprop.mindir +22 -0
  665. mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
  666. mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
  667. mindspore/ops/bprop_mindir/ZerosLike_bprop.mindir +13 -12
  668. mindspore/ops/bprop_mindir/__init__.py +1 -4
  669. mindspore/ops/bprop_mindir/generate_mindir.py +32 -20
  670. mindspore/ops/composite/__init__.py +12 -13
  671. mindspore/ops/composite/base.py +261 -254
  672. mindspore/ops/composite/env_ops.py +41 -0
  673. mindspore/ops/composite/math_ops.py +197 -156
  674. mindspore/ops/composite/multitype_ops/_compile_utils.py +428 -176
  675. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +188 -87
  676. mindspore/ops/composite/multitype_ops/add_impl.py +23 -1
  677. mindspore/ops/composite/multitype_ops/div_impl.py +3 -3
  678. mindspore/ops/composite/multitype_ops/equal_impl.py +1 -0
  679. mindspore/ops/composite/multitype_ops/floordiv_impl.py +1 -1
  680. mindspore/ops/composite/multitype_ops/getitem_impl.py +52 -5
  681. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
  682. mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
  683. mindspore/ops/composite/multitype_ops/in_impl.py +15 -3
  684. mindspore/ops/composite/multitype_ops/less_equal_impl.py +33 -2
  685. mindspore/ops/composite/multitype_ops/less_impl.py +33 -0
  686. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -2
  687. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  688. mindspore/ops/composite/multitype_ops/mod_impl.py +1 -1
  689. mindspore/ops/composite/multitype_ops/mul_impl.py +21 -7
  690. mindspore/ops/composite/multitype_ops/not_in_impl.py +15 -3
  691. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
  692. mindspore/ops/composite/multitype_ops/pow_impl.py +1 -0
  693. mindspore/ops/composite/multitype_ops/setitem_impl.py +62 -70
  694. mindspore/ops/composite/multitype_ops/sub_impl.py +3 -3
  695. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +41 -4
  696. mindspore/ops/function/__init__.py +323 -8
  697. mindspore/ops/function/array_func.py +3511 -780
  698. mindspore/ops/function/clip_func.py +329 -0
  699. mindspore/ops/function/debug_func.py +6 -6
  700. mindspore/ops/function/grad/__init__.py +5 -1
  701. mindspore/ops/function/grad/grad_func.py +736 -65
  702. mindspore/ops/function/image_func.py +270 -0
  703. mindspore/ops/function/linalg_func.py +268 -8
  704. mindspore/ops/function/math_func.py +8032 -3164
  705. mindspore/ops/function/nn_func.py +5619 -1855
  706. mindspore/ops/function/other_func.py +115 -0
  707. mindspore/ops/function/parameter_func.py +11 -10
  708. mindspore/ops/function/random_func.py +939 -77
  709. mindspore/ops/function/sparse_func.py +249 -84
  710. mindspore/ops/function/sparse_unary_func.py +2303 -0
  711. mindspore/ops/function/spectral_func.py +146 -0
  712. mindspore/ops/function/vmap_func.py +114 -0
  713. mindspore/ops/functional.py +182 -254
  714. mindspore/ops/op_info_register.py +79 -34
  715. mindspore/ops/operations/__init__.py +210 -118
  716. mindspore/ops/operations/_csr_ops.py +7 -7
  717. mindspore/ops/operations/_embedding_cache_ops.py +25 -15
  718. mindspore/ops/operations/_grad_ops.py +447 -322
  719. mindspore/ops/operations/_inner_ops.py +547 -176
  720. mindspore/ops/operations/_map_tensor_ops.py +112 -0
  721. mindspore/ops/operations/_ms_kernel.py +29 -27
  722. mindspore/ops/operations/_ocr_ops.py +11 -11
  723. mindspore/ops/operations/_opaque_predicate_registry.py +41 -0
  724. mindspore/ops/operations/_quant_ops.py +186 -101
  725. mindspore/ops/operations/_rl_inner_ops.py +122 -61
  726. mindspore/ops/operations/_scalar_ops.py +466 -0
  727. mindspore/ops/operations/_sequence_ops.py +1047 -0
  728. mindspore/ops/operations/_tensor_array.py +10 -11
  729. mindspore/ops/operations/_thor_ops.py +4 -4
  730. mindspore/ops/operations/array_ops.py +1428 -1226
  731. mindspore/ops/operations/comm_ops.py +180 -117
  732. mindspore/ops/operations/control_ops.py +4 -2
  733. mindspore/ops/operations/custom_ops.py +185 -98
  734. mindspore/ops/operations/debug_ops.py +92 -54
  735. mindspore/ops/operations/image_ops.py +406 -211
  736. mindspore/ops/operations/inner_ops.py +42 -53
  737. mindspore/ops/operations/linalg_ops.py +32 -29
  738. mindspore/ops/operations/math_ops.py +2076 -897
  739. mindspore/ops/operations/nn_ops.py +1282 -1252
  740. mindspore/ops/operations/other_ops.py +124 -278
  741. mindspore/ops/operations/random_ops.py +345 -178
  742. mindspore/ops/operations/rl_ops.py +8 -9
  743. mindspore/ops/operations/sparse_ops.py +502 -157
  744. mindspore/ops/operations/spectral_ops.py +107 -0
  745. mindspore/ops/primitive.py +192 -15
  746. mindspore/ops/vm_impl_registry.py +23 -2
  747. mindspore/parallel/__init__.py +6 -1
  748. mindspore/parallel/_auto_parallel_context.py +199 -92
  749. mindspore/parallel/_cell_wrapper.py +4 -2
  750. mindspore/parallel/_cost_model_context.py +3 -0
  751. mindspore/parallel/_dp_allreduce_fusion.py +2 -1
  752. mindspore/parallel/_offload_context.py +185 -0
  753. mindspore/parallel/_parallel_serialization.py +167 -28
  754. mindspore/parallel/_ps_context.py +9 -5
  755. mindspore/parallel/_recovery_context.py +1 -1
  756. mindspore/parallel/_tensor.py +9 -1
  757. mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
  758. mindspore/{nn/transformer → parallel/_transformer}/layers.py +59 -37
  759. mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
  760. mindspore/{nn/transformer → parallel/_transformer}/moe.py +160 -35
  761. mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
  762. mindspore/{nn/transformer → parallel/_transformer}/transformer.py +235 -196
  763. mindspore/parallel/_utils.py +47 -7
  764. mindspore/parallel/algo_parameter_config.py +5 -1
  765. mindspore/parallel/checkpoint_transform.py +329 -0
  766. mindspore/parallel/shard.py +229 -0
  767. mindspore/profiler/__init__.py +2 -1
  768. mindspore/profiler/common/util.py +4 -3
  769. mindspore/profiler/common/validator/validate_path.py +2 -2
  770. mindspore/profiler/envprofiling.py +249 -0
  771. mindspore/profiler/parser/aicpu_data_parser.py +38 -39
  772. mindspore/profiler/parser/ascend_timeline_generator.py +497 -0
  773. mindspore/profiler/parser/base_timeline_generator.py +471 -0
  774. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +684 -0
  775. mindspore/profiler/parser/framework_parser.py +42 -16
  776. mindspore/profiler/parser/hccl_parser.py +158 -158
  777. mindspore/profiler/parser/hwts_log_parser.py +7 -6
  778. mindspore/profiler/parser/integrator.py +18 -1579
  779. mindspore/profiler/parser/minddata_analyzer.py +8 -8
  780. mindspore/profiler/parser/msadvisor_analyzer.py +14 -27
  781. mindspore/profiler/parser/msadvisor_parser.py +2 -4
  782. mindspore/profiler/parser/optime_parser.py +17 -18
  783. mindspore/profiler/parser/profiler_info.py +108 -0
  784. mindspore/profiler/parser/step_trace_parser.py +1 -1
  785. mindspore/profiler/profiling.py +396 -194
  786. mindspore/rewrite/__init__.py +6 -2
  787. mindspore/rewrite/api/node.py +51 -110
  788. mindspore/rewrite/api/node_type.py +10 -6
  789. mindspore/rewrite/api/pattern_engine.py +51 -7
  790. mindspore/rewrite/api/scoped_value.py +64 -53
  791. mindspore/rewrite/api/symbol_tree.py +108 -61
  792. mindspore/rewrite/api/tree_node_helper.py +2 -3
  793. mindspore/{compression/quant/__init__.py → rewrite/ast_creator_register.py} +20 -11
  794. mindspore/rewrite/ast_helpers/__init__.py +6 -3
  795. mindspore/rewrite/ast_helpers/ast_creator.py +115 -0
  796. mindspore/rewrite/ast_helpers/ast_finder.py +99 -1
  797. mindspore/rewrite/ast_helpers/ast_modifier.py +17 -4
  798. mindspore/rewrite/ast_helpers/ast_replacer.py +1 -1
  799. mindspore/rewrite/ast_transformers/__init__.py +0 -1
  800. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +46 -5
  801. mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +6 -3
  802. mindspore/rewrite/common/__init__.py +2 -0
  803. mindspore/rewrite/common/event.py +1 -1
  804. mindspore/rewrite/common/observable.py +1 -1
  805. mindspore/rewrite/common/observer.py +1 -1
  806. mindspore/rewrite/common/rewrite_elog.py +35 -0
  807. mindspore/rewrite/namer.py +2 -2
  808. mindspore/rewrite/namespace.py +14 -4
  809. mindspore/rewrite/node.py +161 -13
  810. mindspore/rewrite/parser.py +0 -1
  811. mindspore/rewrite/parser_register.py +0 -1
  812. mindspore/rewrite/parsers/arguments_parser.py +3 -2
  813. mindspore/rewrite/parsers/assign_parser.py +267 -67
  814. mindspore/rewrite/parsers/attribute_parser.py +56 -0
  815. mindspore/rewrite/parsers/class_def_parser.py +191 -108
  816. mindspore/rewrite/parsers/constant_parser.py +101 -0
  817. mindspore/rewrite/parsers/container_parser.py +88 -0
  818. mindspore/rewrite/parsers/for_parser.py +28 -15
  819. mindspore/rewrite/parsers/function_def_parser.py +21 -5
  820. mindspore/rewrite/parsers/if_parser.py +11 -28
  821. mindspore/rewrite/parsers/module_parser.py +9 -6
  822. mindspore/rewrite/parsers/return_parser.py +3 -2
  823. mindspore/rewrite/sparsify/__init__.py +0 -0
  824. mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
  825. mindspore/rewrite/sparsify/sparsify.py +109 -0
  826. mindspore/rewrite/sparsify/utils.py +173 -0
  827. mindspore/rewrite/symbol_tree.py +322 -109
  828. mindspore/rewrite/symbol_tree_builder.py +45 -8
  829. mindspore/rewrite/symbol_tree_dumper.py +0 -1
  830. mindspore/rewrite/topological_manager.py +1 -2
  831. mindspore/run_check/_check_version.py +209 -112
  832. mindspore/run_check/run_check.py +2 -1
  833. mindspore/scipy/linalg.py +13 -117
  834. mindspore/scipy/ops.py +5 -71
  835. mindspore/scipy/ops_grad.py +1 -25
  836. mindspore/scipy/ops_wrapper.py +1 -1
  837. mindspore/scipy/optimize/_bfgs.py +1 -1
  838. mindspore/scipy/optimize/_lagrange.py +200 -0
  839. mindspore/scipy/optimize/line_search.py +3 -2
  840. mindspore/scipy/optimize/minimize.py +43 -6
  841. mindspore/scipy/sparse/__init__.py +2 -2
  842. mindspore/scipy/sparse/linalg.py +5 -465
  843. mindspore/scipy/utils.py +2 -1
  844. mindspore/scipy/utils_const.py +7 -1
  845. mindspore/train/__init__.py +6 -4
  846. mindspore/train/_utils.py +28 -5
  847. mindspore/train/amp.py +321 -50
  848. mindspore/train/callback/__init__.py +3 -1
  849. mindspore/train/callback/_backup_and_restore.py +120 -0
  850. mindspore/train/callback/_callback.py +8 -8
  851. mindspore/train/callback/_checkpoint.py +12 -9
  852. mindspore/train/callback/_early_stop.py +13 -7
  853. mindspore/train/callback/_history.py +8 -8
  854. mindspore/train/callback/_lambda_callback.py +6 -6
  855. mindspore/train/callback/_landscape.py +36 -38
  856. mindspore/train/callback/_loss_monitor.py +12 -6
  857. mindspore/train/callback/_lr_scheduler_callback.py +2 -4
  858. mindspore/train/callback/_on_request_exit.py +212 -0
  859. mindspore/train/callback/_reduce_lr_on_plateau.py +13 -7
  860. mindspore/train/callback/_summary_collector.py +27 -19
  861. mindspore/train/callback/_time_monitor.py +13 -7
  862. mindspore/train/checkpoint_pb2.py +68 -8
  863. mindspore/train/data_sink.py +122 -33
  864. mindspore/train/dataset_helper.py +28 -87
  865. mindspore/train/loss_scale_manager.py +4 -7
  866. mindspore/{nn → train}/metrics/__init__.py +20 -20 (see the import migration sketch after this list)
  867. mindspore/{nn → train}/metrics/accuracy.py +12 -10
  868. mindspore/{nn → train}/metrics/auc.py +4 -4
  869. mindspore/{nn → train}/metrics/bleu_score.py +4 -4
  870. mindspore/{nn → train}/metrics/confusion_matrix.py +10 -8
  871. mindspore/{nn → train}/metrics/cosine_similarity.py +4 -4
  872. mindspore/{nn → train}/metrics/dice.py +6 -5
  873. mindspore/{nn → train}/metrics/error.py +7 -5
  874. mindspore/{nn → train}/metrics/fbeta.py +9 -7
  875. mindspore/{nn → train}/metrics/hausdorff_distance.py +8 -6
  876. mindspore/{nn → train}/metrics/loss.py +4 -3
  877. mindspore/{nn → train}/metrics/mean_surface_distance.py +6 -5
  878. mindspore/{nn → train}/metrics/metric.py +6 -5
  879. mindspore/{nn → train}/metrics/occlusion_sensitivity.py +4 -3
  880. mindspore/{nn → train}/metrics/perplexity.py +5 -4
  881. mindspore/{nn → train}/metrics/precision.py +5 -4
  882. mindspore/{nn → train}/metrics/recall.py +5 -4
  883. mindspore/{nn → train}/metrics/roc.py +7 -6
  884. mindspore/{nn → train}/metrics/root_mean_square_surface_distance.py +6 -5
  885. mindspore/{nn → train}/metrics/topk.py +7 -5
  886. mindspore/train/mind_ir_pb2.py +339 -32
  887. mindspore/train/model.py +113 -84
  888. mindspore/train/serialization.py +547 -167
  889. mindspore/train/summary/_summary_adapter.py +1 -1
  890. mindspore/train/summary/summary_record.py +43 -12
  891. mindspore/train/train_thor/convert_utils.py +7 -1
  892. mindspore/train/train_thor/dataset_helper.py +3 -3
  893. mindspore/train/train_thor/model_thor.py +0 -4
  894. mindspore/version.py +1 -1
  895. {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +4 -3
  896. {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +899 -675
  897. mindspore/compression/common/constant.py +0 -124
  898. mindspore/compression/export/__init__.py +0 -19
  899. mindspore/compression/export/quant_export.py +0 -514
  900. mindspore/compression/quant/qat.py +0 -636
  901. mindspore/compression/quant/quant_utils.py +0 -462
  902. mindspore/compression/quant/quantizer.py +0 -68
  903. mindspore/nn/layer/quant.py +0 -1868
  904. mindspore/nn/layer/rnn_utils.py +0 -90
  905. mindspore/nn/probability/dpn/__init__.py +0 -22
  906. mindspore/nn/probability/dpn/vae/__init__.py +0 -25
  907. mindspore/nn/probability/dpn/vae/cvae.py +0 -138
  908. mindspore/nn/probability/dpn/vae/vae.py +0 -122
  909. mindspore/nn/probability/infer/__init__.py +0 -22
  910. mindspore/nn/probability/infer/variational/elbo.py +0 -70
  911. mindspore/nn/probability/infer/variational/svi.py +0 -84
  912. mindspore/nn/probability/toolbox/__init__.py +0 -22
  913. mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
  914. mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -363
  915. mindspore/nn/probability/transforms/__init__.py +0 -22
  916. mindspore/nn/probability/transforms/transform_bnn.py +0 -262
  917. mindspore/nn/probability/zhusuan/__init__.py +0 -18
  918. mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
  919. mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
  920. mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
  921. mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
  922. mindspore/ops/_op_impl/tbe/bias_add_grad_ds.py +0 -52
  923. mindspore/ops/_op_impl/tbe/scatter_nd_add_ds.py +0 -43
  924. mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -20
  925. mindspore/ops/bprop_mindir/Identity_bprop.mindir +0 -9
  926. mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -20
  927. mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -16
  928. mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -17
  929. mindspore/ops/bprop_mindir/stop_gradient_bprop.mindir +0 -12
  930. mindspore/ops/composite/array_ops.py +0 -210
  931. mindspore/ops/composite/clip_ops.py +0 -238
  932. mindspore/ops/composite/random_ops.py +0 -426
  933. mindspore/ops/composite/vmap_ops.py +0 -38
  934. mindspore/ops/operations/sponge_ops.py +0 -3531
  935. mindspore/ops/operations/sponge_update_ops.py +0 -2546
  936. mindspore/parallel/nn/__init__.py +0 -42
  937. mindspore/parallel/nn/loss.py +0 -22
  938. mindspore/parallel/nn/moe.py +0 -21
  939. mindspore/parallel/nn/op_parallel_config.py +0 -22
  940. mindspore/parallel/nn/transformer.py +0 -31
  941. mindspore/run_check/_check_deps_version.py +0 -84
  942. {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
  943. {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
  944. {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
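
Two of the more consequential moves in the list above are the metrics package (entries 866-885, mindspore/nn/metrics → mindspore/train/metrics) and the transformer code (entries 757-762, mindspore/nn/transformer → mindspore/parallel/_transformer, now private). A minimal sketch of what the metrics move implies for user imports, assuming the public import path mirrors the file move (the new mindspore/nn/metrics.py shim in entry 206 presumably keeps the old path working with a deprecation warning):

```python
import numpy as np
from mindspore import Tensor
# MindSpore 1.10 import path:
#   from mindspore.nn.metrics import Accuracy
# MindSpore 2.0.0rc1 path, mirroring the file move above:
from mindspore.train import Accuracy

metric = Accuracy('classification')
metric.clear()
# Two samples: predicted class probabilities vs. integer labels.
metric.update(Tensor(np.array([[0.2, 0.8], [0.9, 0.1]])),
              Tensor(np.array([1, 0])))
print(metric.eval())  # 1.0, both predictions match the labels
```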
@@ -28,16 +28,17 @@ from mindspore.common.initializer import initializer
 from mindspore.ops import operations as P
 from mindspore.ops import functional as F
 from mindspore.ops.operations import _inner_ops as inner
-from mindspore.ops.primitive import constexpr, Primitive
+from mindspore.ops.primitive import constexpr, Primitive, _primexpr
 from mindspore.common.parameter import Parameter
 from mindspore._extends import cell_attr_register
-from mindspore._checkparam import Rel, Validator
+from mindspore import _checkparam as Validator
 from mindspore.nn.cell import Cell
 from mindspore.nn.layer.activation import get_activation
+from mindspore.common._decorator import deprecated
 
 __all__ = ['Dropout', 'Flatten', 'Dense', 'ClipByNorm', 'Norm', 'OneHot', 'Pad', 'Unfold', 'Tril', 'Triu',
-           'ResizeBilinear', 'MatrixDiag', 'MatrixDiagPart', 'MatrixSetDiag', 'L1Regularizer', 'Dropout2d',
-           'Dropout3d', 'Roll', 'Identity', 'Unflatten']
+           'ResizeBilinear', 'MatrixDiag', 'MatrixDiagPart', 'MatrixSetDiag', 'L1Regularizer', 'Dropout1d',
+           'Dropout2d', 'Dropout3d', 'Upsample', 'Roll', 'Identity', 'Unflatten']
 
 
 class L1Regularizer(Cell):
@@ -87,15 +88,18 @@ class L1Regularizer(Cell):
         super(L1Regularizer, self).__init__()
         Validator.check_value_type("scale", scale, [int, float], self.cls_name)
         if scale <= 0:
-            raise ValueError(f"For '{self.cls_name}', the 'scale' must be greater than 0, but got {scale}.")
+            raise ValueError(
+                f"For '{self.cls_name}', the 'scale' must be greater than 0, but got {scale}.")
         if math.isinf(scale) or math.isnan(scale):
-            raise ValueError(f"For '{self.cls_name}', the 'scale' can not be INF or NAN, but got {scale}.")
+            raise ValueError(
+                f"For '{self.cls_name}', the 'scale' can not be INF or NAN, but got {scale}.")
         self.abs = P.Abs()
         self.reduce_sum = P.ReduceSum()
         self.scale = Tensor(scale, dtype=mstype.float32)
 
     def construct(self, weights):
-        const_utils.check_type_valid(F.dtype(weights), mstype.number_type, 'weights')
+        const_utils.check_type_valid(
+            F.dtype(weights), mstype.number_type, 'weights')
         l1_regularization = self.scale * self.reduce_sum(self.abs(weights))
         return l1_regularization
 
@@ -104,12 +108,9 @@ class Dropout(Cell):
      r"""
      Dropout layer for the input.

-     Randomly set some elements of the input tensor to zero with probability :math:`1 - keep\_prob` during training
-     using samples from a Bernoulli distribution.
-
-     The outputs are scaled by a factor of :math:`\frac{1}{keep\_prob}` during training so
-     that the output layer remains at a similar scale. During inference, this
-     layer returns the same tensor as the `x`.
+     Dropout is a regularization method. During training, the operator randomly sets some
+     neuron outputs to 0 with the given drop probability.
+     During inference, this layer returns the same Tensor as the input `x`.

      This technique is proposed in paper `Dropout: A Simple Way to Prevent Neural Networks from Overfitting
      <http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf>`_ and proved to be effective to reduce
@@ -118,25 +119,30 @@ class Dropout(Cell):
      <https://arxiv.org/pdf/1207.0580.pdf>`_.

      Note:
-         Each channel will be zeroed out independently on every construct call.
-         Parameter `dtype` will be removed in a future version. It is not recommended to define this parameter.
+         - Each channel will be zeroed out independently on every construct call.
+         - Parameter `keep_prob` will be removed in a future version, please use parameter `p` instead.
+           Parameter `p` is the probability that an element of the input tensor is zeroed.
+         - Parameter `dtype` will be removed in a future version. It is not recommended to define this parameter.

      Args:
-         keep_prob (float): The keep rate, greater than 0 and less equal than 1. E.g. rate=0.9,
-             dropping out 10% of input units. Default: 0.5.
-         dtype (:class:`mindspore.dtype`): Data type of `x`. Default: mindspore.float32.
+         keep_prob (float): Deprecated. The keep rate, greater than 0 and less than or equal to 1.
+             E.g. `keep_prob`=0.9, dropping out 10% of input neurons. Default: 0.5.
+         p (Union[float, int, None]): The dropout rate, greater than or equal to 0 and less than 1.
+             E.g. `p`=0.9, dropping out 90% of input neurons. Default: None.
+         dtype (:class:`mindspore.dtype`): Data type of `input`. Default: mindspore.float32.

      Inputs:
          - **x** (Tensor) - The input of Dropout with data type of float16 or float32.
-           The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

      Outputs:
          Tensor, output tensor with the same shape as the `x`.

      Raises:
          TypeError: If `keep_prob` is not a float.
+         TypeError: If the dtype of `p` is not float or int.
          TypeError: If dtype of `x` is not neither float16 nor float32.
          ValueError: If `keep_prob` is not in range (0, 1].
+         ValueError: If `p` is not in range [0, 1).
          ValueError: If length of shape of `x` is less than 1.

      Supported Platforms:
@@ -144,42 +150,116 @@ class Dropout(Cell):

      Examples:
          >>> x = Tensor(np.ones([2, 2, 3]), mindspore.float32)
-         >>> net = nn.Dropout(keep_prob=0.8)
+         >>> net = nn.Dropout(p=0.2)
          >>> net.set_train()
-         Dropout<keep_prob=0.8>
          >>> output = net(x)
          >>> print(output.shape)
          (2, 2, 3)
      """

-     def __init__(self, keep_prob=0.5, dtype=mstype.float32):
+     def __init__(self, keep_prob=0.5, p=None, dtype=mstype.float32):
          """Initialize Dropout."""
          super(Dropout, self).__init__()
-         Validator.check_value_type('keep_prob', keep_prob, [float], self.cls_name)
-         if keep_prob <= 0 or keep_prob > 1:
-             raise ValueError(f"For '{self.cls_name}', the 'keep_prob' must be a number in range (0, 1], "
-                              f"but got {keep_prob}.")
-         Validator.check_subclass("dtype", dtype, mstype.number_type, self.cls_name)
          if dtype != mstype.float32:
-             logger.info("This parameter `dtype` will be deleted or invisible in the future. Please don't use it.")
+             logger.warning(
+                 "This parameter `dtype` will be deleted or invisible in the future. Please don't use it.")
+         if p is None:
+             logger.warning("For Dropout, this parameter `keep_prob` will be deprecated, please use `p` instead.")
+             Validator.check_value_type('keep_prob', keep_prob, [float], self.cls_name)
+             if keep_prob <= 0 or keep_prob > 1:
+                 raise ValueError(f"For '{self.cls_name}', the 'keep_prob' must be a number in range (0, 1], "
+                                  f"but got {keep_prob}.")
+             seed0, seed1 = _get_graph_seed(0, "dropout")
+             self.dropout = P.Dropout(keep_prob, seed0, seed1)
+         else:
+             Validator.check_value_type('p', p, [float, int], self.cls_name)
+             if p < 0 or p >= 1:
+                 raise ValueError(f"For '{self.cls_name}', the 'p' must be a number in range [0, 1), "
+                                  f"but got {p}.")
+             seed0, seed1 = _get_graph_seed(0, "dropout")
+             self.dropout = P.Dropout(1.0 - p, seed0, seed1)
+         self.p = p
          self.keep_prob = keep_prob
-         seed0, seed1 = _get_graph_seed(0, "dropout")
-         self.seed0 = seed0
-         self.seed1 = seed1
-         self.dropout = P.Dropout(keep_prob, seed0, seed1)

      def construct(self, x):
-         if not self.training:
-             return x
-
-         if self.keep_prob == 1:
+         if not self.training or self.keep_prob == 1 or self.p == 0:
              return x

          out, _ = self.dropout(x)
          return out

      def extend_repr(self):
-         return 'keep_prob={}'.format(self.keep_prob)
+         if self.p is None:
+             logger.warning("For Dropout, this parameter `keep_prob` will be deprecated, please use `p` instead.")
+             return f'keep_prob={self.keep_prob}'
+         return f'p={self.p}'
+
+
+ class Dropout1d(Cell):
+     r"""
+     During training, randomly zeroes entire channels of the input tensor with probability `p`
+     from a Bernoulli distribution (For a 3-dimensional tensor with a shape of :math:`(N, C, L)`,
+     the channel feature map refers to a 1-dimensional feature map with the shape of :math:`L`).
+
+     For example, the :math:`j\_th` channel of the :math:`i\_th` sample in the batched input is a
+     1D tensor input[i,j].
+     Each channel will be zeroed out independently on every forward call with probability `p` using samples
+     from a Bernoulli distribution.
+
+     The paper `Dropout: A Simple Way to Prevent Neural Networks from Overfitting
+     <http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf>`_ proposed this technique, and it is
+     proved to effectively reduce overfitting and prevent neuron co-adaptation.
+     For more details, refer to `Improving neural networks by preventing co-adaptation of feature detectors
+     <https://arxiv.org/pdf/1207.0580.pdf>`_ .
+
+     `Dropout1d` can improve the independence between channel feature maps.
+
+     Args:
+         p (float, optional): The dropping probability of a channel, between 0 and 1, e.g. `p` = 0.8,
+             which means an 80% chance of being set to 0. Default: 0.5.
+
+     Inputs:
+         - **x** (Tensor) - A tensor with shape :math:`(N, C, L)` or :math:`(C, L)`, where `N` is the batch size,
+           `C` is the number of channels, `L` is the feature length. The data type must be int8, int16, int32,
+           int64, float16, float32 or float64.
+
+     Outputs:
+         Tensor, output, with the same shape and data type as `x`.
+
+     Raises:
+         TypeError: If `x` is not a Tensor.
+         TypeError: If the data type of `p` is not float.
+         ValueError: If `p` is out of the range `[0.0, 1.0]`.
+         ValueError: If `x` shape is not `2D` or `3D`.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> import numpy as np
+         >>> import mindspore as ms
+         >>> from mindspore import nn, Tensor
+         >>> op = nn.Dropout1d(p=0.6)
+         >>> op.training = True
+         >>> a = Tensor(np.ones((3, 3)), ms.float32)
+         >>> output = op(a)
+     """
+
+     def __init__(self, p=0.5):
+         """Initialize Dropout1d."""
+         super(Dropout1d, self).__init__()
+         Validator.check_value_type('p', p, [float], self.cls_name)
+         if p < 0 or p > 1:
+             raise ValueError(f"For '{self.cls_name}', the 'p' must be a number in range [0, 1], "
+                              f"but got {p}.")
+         self.prob = p
+
+     def construct(self, x):
+         if not self.training or self.prob == 0:
+             return x
+
+         out = F.dropout1d(x, self.prob)
+         return out


  class Dropout2d(Cell):
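Migration note (editor): when updating call sites, remember the two knobs are complements. `keep_prob` is the probability of keeping an element; the new `p` is the probability of zeroing it. A small sketch of equivalent layers under the 2.0 API shown above (assuming a mindspore 2.0 runtime):

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, nn

x = Tensor(np.ones([2, 2, 3]), ms.float32)

legacy = nn.Dropout(keep_prob=0.8)  # deprecated spelling, logs a warning
modern = nn.Dropout(p=0.2)          # same drop rate, since p = 1 - keep_prob

modern.set_train()                  # dropout is only active in training mode
out = modern(x)                     # ~20% of elements zeroed, the rest rescaled
print(out.shape)                    # (2, 2, 3)
```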
@@ -219,10 +299,7 @@ class Dropout2d(Cell):
          self.dropout2d = P.Dropout2D(self.keep_prob)

      def construct(self, x):
-         if not self.training:
-             return x
-
-         if self.keep_prob == 1:
+         if not self.training or self.keep_prob == 1:
              return x

          out, _ = self.dropout2d(x)
@@ -270,10 +347,7 @@ class Dropout3d(Cell):
          self.dropout3d = P.Dropout3D(self.keep_prob)

      def construct(self, x):
-         if not self.training:
-             return x
-
-         if self.keep_prob == 1:
+         if not self.training or self.keep_prob == 1:
              return x

          out, _ = self.dropout3d(x)
@@ -283,22 +357,65 @@ class Dropout3d(Cell):
          return 'p={}'.format(self.keep_prob)


+ class Upsample(Cell):
+     r"""
+     For details, please refer to :func:`mindspore.ops.interpolate`.
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+
+     Examples:
+         >>> x = Tensor([[[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]]])
+         >>> upsample = nn.Upsample(size=(5, 5))
+         >>> out = upsample(x)
+         >>> print(x.asnumpy())
+         [[[[1. 2. 3. 4.]
+            [5. 6. 7. 8.]]]]
+         >>> print(out.asnumpy())
+         [[[[1. 1. 2. 3. 4.]
+            [1. 1. 2. 3. 4.]
+            [1. 1. 2. 3. 4.]
+            [5. 5. 6. 7. 8.]
+            [5. 5. 6. 7. 8.]]]]
+         >>> print(out.shape)
+         (1, 1, 5, 5)
+     """
+
+     def __init__(self, size=None, scale_factor=None, mode="nearest", align_corners=None, recompute_scale_factor=None):
+         """Initialize Upsample."""
+         super(Upsample, self).__init__()
+         self.size = size
+         self.scale_factor = scale_factor
+         self.mode = mode
+         self.align_corners = align_corners
+         self.recompute_scale_factor = recompute_scale_factor
+
+     def construct(self, x):
+         out = F.interpolate(x, self.size, self.scale_factor, self.mode,
+                             self.align_corners, self.recompute_scale_factor)
+         return out
+
+
  class Flatten(Cell):
      r"""
-     Flatten the dimensions other than the 0th dimension of the input Tensor.
+     Flatten the input Tensor along dimensions from `start_dim` to `end_dim`.
+
+     Args:
+         start_dim (int, optional): The first dimension to flatten. Default: 1.
+         end_dim (int, optional): The last dimension to flatten. Default: -1.

      Inputs:
-         - **x** (Tensor) - The input Tensor to be flattened. The data type is
-           `number <https://www.mindspore.cn/docs/en/r1.10/api_python/mindspore.html#mindspore.dtype>`_ .
-           The shape is :math:`(N, *)` , where :math:`*` means any number of additional dimensions
-           and the shape can't be ().
+         - **x** (Tensor) - The input Tensor to be flattened.

      Outputs:
-         Tensor, the shape of the output tensor is :math:`(N, X)`, where :math:`X` is
-         the product of the remaining dimensions.
+         Tensor. If no dimensions are flattened, returns the original `x`, otherwise returns the flattened Tensor.
+         If `x` is a 0-dimensional Tensor, a 1-dimensional Tensor will be returned.

      Raises:
-         TypeError: If `x` is not a subclass of Tensor.
+         TypeError: If `x` is not a Tensor.
+         TypeError: If `start_dim` or `end_dim` is not int.
+         ValueError: If `start_dim` is greater than `end_dim` after canonicalization.
+         ValueError: If `start_dim` or `end_dim` is not in range of [-x.dim, x.dim-1].

      Supported Platforms:
          ``Ascend`` ``GPU`` ``CPU``
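Migration note (editor): as the `construct` above makes explicit, the new `nn.Upsample` is a thin stateful wrapper over `ops.interpolate`; the two forms below should be interchangeable (a sketch, assuming the 2.0 functional API the docstring refers to):

```python
import mindspore as ms
from mindspore import Tensor, nn, ops

x = Tensor([[[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]]], ms.float32)

# Cell form: configuration is stored once and reused like any layer.
up = nn.Upsample(size=(5, 5), mode="nearest")
y1 = up(x)

# Functional form: the same computation as a single call.
y2 = ops.interpolate(x, size=(5, 5), mode="nearest")

print(y1.shape, y2.shape)  # (1, 1, 5, 5) (1, 1, 5, 5)
```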
@@ -316,16 +433,25 @@ class Flatten(Cell):
          after flatten the output shape is (2, 4)
      """

-     def __init__(self):
+     def __init__(self, start_dim=1, end_dim=-1):
          """Initialize Flatten."""
          super(Flatten, self).__init__()
+         self.start_dim = start_dim
+         self.end_dim = end_dim

      def construct(self, x):
-         return F.reshape(x, (F.shape(x)[0], -1))
+         x_rank = F.rank(x)
+         ndim = x_rank if x_rank != 0 else 1
+         if self.start_dim < -ndim or self.start_dim >= ndim:
+             const_utils.raise_value_error("'start_dim' out of range.")
+         if self.end_dim < -ndim or self.end_dim >= ndim:
+             const_utils.raise_value_error("'end_dim' out of range.")
+         return F.flatten(x, start_dim=self.start_dim, end_dim=self.end_dim)


- @constexpr
+ @_primexpr
  def check_dense_input_shape(x, prim_name=None):
+     """Check the shape of inputs."""
      msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
      if len(x) < 2:
          raise ValueError(f"{msg_prefix} dimension of 'x' should not be less than 2, but got {len(x)}.")
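Migration note (editor): a quick sketch of the new partial-flatten behaviour; the defaults reproduce the 1.10 "keep dim 0, merge the rest" semantics:

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, nn

x = Tensor(np.ones((2, 3, 4, 5)), ms.float32)

print(nn.Flatten()(x).shape)                        # (2, 60), same as 1.10
print(nn.Flatten(start_dim=2)(x).shape)             # (2, 3, 20)
print(nn.Flatten(start_dim=1, end_dim=2)(x).shape)  # (2, 12, 5)
```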
@@ -339,13 +465,13 @@ class Identity(Cell):
          - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is Number.

      Outputs:
-         Tensor, the shape of tensor and the data type are the same as `input_x`, :math:`(x_1, x_2, ..., x_R)`.
+         Tensor, the shape of tensor and the data type are the same as `x`.

      Raises:
          TypeError: If `x` is not a Tensor.

      Supported Platforms:
-         ``Ascend`` ``CPU`` ``GPU``
+         ``Ascend`` ``GPU`` ``CPU``

      Examples:
          >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
@@ -386,7 +512,7 @@ class Dense(Cell):
              is same as `x`. The values of str refer to the function `initializer`. Default: 'normal'.
          bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter. The dtype is
              same as `x`. The values of str refer to the function `initializer`. Default: 'zeros'.
-         has_bias (bool): Specifies whether the layer uses a bias vector. Default: True.
+         has_bias (bool): Specifies whether the layer uses a bias vector :math:`\text{bias}`. Default: True.
          activation (Union[str, Cell, Primitive, None]): activate function applied to the output of the fully connected
              layer. Both activation name, e.g. 'relu', and mindspore activation function, e.g. mindspore.ops.ReLU(),
              are supported. Default: None.
@@ -428,9 +554,12 @@ class Dense(Cell):
                   activation=None):
          """Initialize Dense."""
          super(Dense, self).__init__()
-         self.in_channels = Validator.check_positive_int(in_channels, "in_channels", self.cls_name)
-         self.out_channels = Validator.check_positive_int(out_channels, "out_channels", self.cls_name)
-         self.has_bias = Validator.check_bool(has_bias, "has_bias", self.cls_name)
+         self.in_channels = Validator.check_positive_int(
+             in_channels, "in_channels", self.cls_name)
+         self.out_channels = Validator.check_positive_int(
+             out_channels, "out_channels", self.cls_name)
+         self.has_bias = Validator.check_bool(
+             has_bias, "has_bias", self.cls_name)
          self.reshape = P.Reshape()
          self.shape_op = P.Shape()

@@ -441,7 +570,8 @@ class Dense(Cell):
                               f"be equal to 2, and the first dim must be equal to 'out_channels', and the "
                               f"second dim must be equal to 'in_channels'. But got 'weight_init': {weight_init}, "
                               f"'out_channels': {out_channels}, 'in_channels': {in_channels}.")
-         self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")
+         self.weight = Parameter(initializer(
+             weight_init, [out_channels, in_channels]), name="weight")

          self.bias = None
          if self.has_bias:
@@ -450,11 +580,13 @@ class Dense(Cell):
                  raise ValueError(f"For '{self.cls_name}', bias init shape error. The ndim of 'bias_init' must "
                                   f"be equal to 1, and the first dim must be equal to 'out_channels'. But got "
                                   f"'bias_init': {bias_init}, 'out_channels': {out_channels}.")
-             self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias")
+             self.bias = Parameter(initializer(
+                 bias_init, [out_channels]), name="bias")
              self.bias_add = P.BiasAdd()

          self.matmul = P.MatMul(transpose_b=True)
-         self.activation = get_activation(activation) if isinstance(activation, str) else activation
+         self.activation = get_activation(activation) if isinstance(
+             activation, str) else activation
          if activation is not None and not isinstance(self.activation, (Cell, Primitive)):
              raise TypeError(f"For '{self.cls_name}', the 'activation' must be str or Cell or Primitive, but got "
                              f"{type(activation).__name__}.")
@@ -471,12 +603,13 @@ class Dense(Cell):
          if self.activation_flag:
              x = self.activation(x)
          if len(x_shape) != 2:
-             out_shape = x_shape[:-1] + (-1,)
+             out_shape = x_shape[:-1] + (F.shape(x)[-1],)
              x = self.reshape(x, out_shape)
          return x

      def extend_repr(self):
-         s = 'input_channels={}, output_channels={}'.format(self.in_channels, self.out_channels)
+         s = 'input_channels={}, output_channels={}'.format(
+             self.in_channels, self.out_channels)
          if self.has_bias:
              s += ', has_bias={}'.format(self.has_bias)
          if self.activation_flag:
@@ -488,14 +621,15 @@ class Dense(Cell):
  def _is_equal_one(x):
      if x is None:
          return False
-     return bool(x.asnumpy().mean() == 1.0)
+     return F.equal(F.reduce_mean(x), 1.0)


  @constexpr
  def _dtype_check(x_dtype, prim_name=None):
      msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
      if x_dtype not in [mstype.float32, mstype.float16]:
-         raise TypeError(f"{msg_prefix} x_dtype must be float32 or float16, but got {x_dtype}.")
+         raise TypeError(
+             f"{msg_prefix} x_dtype must be float32 or float16, but got {x_dtype}.")


  @constexpr
@@ -565,66 +699,16 @@ class ClipByNorm(Cell):

  class Norm(Cell):
      r"""
-     Computes the norm of vectors, currently including Euclidean norm, i.e., :math:`L_2`-norm.
-
-     .. math::
-
-         norm(x) = \sqrt{\sum_{i=1}^{n} (x_i^2)}
-
-     Args:
-         axis (Union[tuple, int]): The axis over which to compute vector norms. Default: ().
-         keep_dims (bool): If true, the axis indicated in `axis` are kept with size 1. Otherwise,
-             the dimensions in `axis` are removed from the output shape. Default: False.
-
-     Inputs:
-         - **x** (Tensor) - Tensor which is not empty. The data type should be float16 or float32.
-           :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-
-     Outputs:
-         Tensor, output tensor with dimensions in 'axis' reduced to 1 will be returned if 'keep_dims' is True;
-         otherwise a Tensor with dimensions in 'axis' removed is returned. The data type is the same with `x`.
-
-     Raises:
-         TypeError: If `axis` is neither an int nor a tuple.
-         TypeError: If `keep_dims` is not a bool.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU`` ``CPU``
-
-     Examples:
-         >>> net = nn.Norm(axis=0)
-         >>> x = Tensor(np.array([[4, 4, 9, 1], [2, 1, 3, 6]]), mindspore.float32)
-         >>> print(x.shape)
-         (2, 4)
-         >>> output = net(x)
-         >>> print(output)
-         [4.472136 4.1231055 9.486833 6.0827627]
-         >>> print(output.shape)
-         (4,)
-         >>> net = nn.Norm(axis=0, keep_dims=True)
-         >>> x = Tensor(np.array([[4, 4, 9, 1], [2, 1, 3, 6]]), mindspore.float32)
-         >>> print(x.shape)
-         (2, 4)
-         >>> output = net(x)
-         >>> print(output)
-         [4.472136 4.1231055 9.486833 6.0827627]
-         >>> print(output.shape)
-         (1, 4)
-         >>> net = nn.Norm(axis=1)
-         >>> x = Tensor(np.array([[4, 4, 9, 1], [2, 1, 3, 6]]), mindspore.float32)
-         >>> print(x.shape)
-         (2, 4)
-         >>> output = net(x)
-         >>> print(output)
-         [10.677078 7.071068]
-         >>> print(output.shape)
-         (2,)
+     'nn.Norm' is deprecated from version 2.0 and will be removed in a future version,
+     use 'ops.norm' instead.
      """

+     @deprecated("2.0", "ops.norm", False)
      def __init__(self, axis=(), keep_dims=False):
          """Initialize Norm."""
          super(Norm, self).__init__()
-         Validator.check_value_type("keep_dims", keep_dims, [bool], self.cls_name)
+         Validator.check_value_type(
+             "keep_dims", keep_dims, [bool], self.cls_name)
          self.axis = axis
          self.keep_dims = keep_dims
          self.reduce_sum = P.ReduceSum(True)
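Migration note (editor): a sketch of replacing the deprecated cell with the functional API, assuming the 2.0 `ops.norm` signature (`ord`/`dim`/`keepdim`); verify against your installed version.

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor(np.array([[4, 4, 9, 1], [2, 1, 3, 6]]), ms.float32)

# nn.Norm(axis=0)(x) in 1.10 becomes an L2 vector norm over dim 0:
out = ops.norm(x, dim=0)
print(out)  # [4.472136  4.1231055 9.486833  6.0827627]

# keep_dims=True maps to keepdim=True, preserving a size-1 axis:
print(ops.norm(x, dim=0, keepdim=True).shape)  # (1, 4)
```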
@@ -644,119 +728,11 @@ class OneHot(Cell):

  class OneHot(Cell):
      """
-     Returns a one-hot tensor.
-
-     The locations represented by indices in argument `indices` take value on_value,
-     while all other locations take value off_value.
-
-     Note:
-         If the input indices is rank :math:`N`, the output will have rank :math:`N+1`. The new
-         axis is created at dimension `axis`.
-
-         If `indices` is a scalar, the output shape will be a vector of length `depth`.
-
-         If `indices` is a vector of length `features`, the output shape will be:
-
-         .. code-block::
-
-             features * depth if axis == -1
-
-             depth * features if axis == 0
-
-         If `indices` is a matrix with shape `[batch, features]`, the output shape will be:
-
-         .. code-block::
-
-             batch * features * depth if axis == -1
-
-             batch * depth * features if axis == 1
-
-             depth * batch * features if axis == 0
-
-     Args:
-         axis (int): Features x depth if axis is -1, depth x features
-             if axis is 0. Default: -1.
-         depth (int): A scalar defining the depth of the one hot dimension. Default: 1.
-         on_value (float): A scalar defining the value to fill in output[i][j]
-             when indices[j] = i. Default: 1.0.
-         off_value (float): A scalar defining the value to fill in output[i][j]
-             when indices[j] != i. Default: 0.0.
-         dtype (:class:`mindspore.dtype`): Data type of 'on_value' and 'off_value', not the
-             data type of indices. Default: mindspore.float32.
-
-     Inputs:
-         - **indices** (Tensor) - A tensor of indices with data type of int32 or int64.
-           The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-
-     Outputs:
-         Tensor, the one-hot tensor of data type `dtype` with dimension at `axis` expanded to `depth` and filled with
-         on_value and off_value. The dimension of the `Outputs` is equal to the dimension of the `indices` plus one.
-
-     Raises:
-         TypeError: If `axis` or `depth` is not an int.
-         TypeError: If dtype of `indices` is neither int32 nor int64.
-         ValueError: If `axis` is not in range [-1, len(indices_shape)].
-         ValueError: If `depth` is less than 0.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU`` ``CPU``
-
-     Examples:
-         >>> # 1st sample: add new coordinates at axis 1
-         >>> net = nn.OneHot(depth=4, axis=1)
-         >>> indices = Tensor([[1, 3], [0, 2]], dtype=mindspore.int32)
-         >>> output = net(indices)
-         >>> print(output)
-         [[[0. 0.]
-           [1. 0.]
-           [0. 0.]
-           [0. 1.]]
-          [[1. 0.]
-           [0. 0.]
-           [0. 1.]
-           [0. 0.]]]
-         >>> # The results are shown below:
-         >>> print(output.shape)
-         (2, 4, 2)
-         >>> # 2nd sample: add new coordinates at axis 0
-         >>> net = nn.OneHot(depth=4, axis=0)
-         >>> indices = Tensor([[1, 3], [0, 2]], dtype=mindspore.int32)
-         >>> output = net(indices)
-         >>> print(output)
-         [[[0. 0.]
-           [1. 0.]]
-          [[1. 0.]
-           [0. 0.]]
-          [[0. 0.]
-           [0. 1.]]
-          [[0. 1.]
-           [0. 0.]]]
-         >>> # The results are shown below:
-         >>> print(output.shape)
-         (4, 2, 2)
-         >>> # 3rd sample: add new coordinates at the last dimension.
-         >>> net = nn.OneHot(depth=4, axis=-1)
-         >>> indices = Tensor([[1, 3], [0, 2]], dtype=mindspore.int32)
-         >>> output = net(indices)
-         >>> # The results are shown below:
-         >>> print(output)
-         [[[0. 1. 0. 0.]
-           [0. 0. 0. 1.]]
-          [[1. 0. 0. 0.]
-           [0. 0. 1. 0.]]]
-         >>> print(output.shape)
-         (2, 2, 4)
-         >>> indices = Tensor([1, 3, 0, 2], dtype=mindspore.int32)
-         >>> output = net(indices)
-         >>> print(output)
-         [[0. 1. 0. 0.]
-          [0. 0. 0. 1.]
-          [1. 0. 0. 0.]
-          [0. 0. 1. 0.]]
-         >>> print(output.shape)
-         (4, 4)
+     'nn.OneHot' is deprecated from version 2.0 and will be removed in a future version,
+     use 'ops.one_hot' instead.
      """

+     @deprecated("2.0", "ops.one_hot", False)
      def __init__(self, axis=-1, depth=1, on_value=1.0, off_value=0.0, dtype=mstype.float32):
          """Initialize OneHot."""
          super(OneHot, self).__init__()
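Migration note (editor): a sketch for the deprecation above, assuming the 2.0 functional `ops.one_hot`, which takes explicit on/off value Tensors like the underlying primitive:

```python
import mindspore as ms
from mindspore import Tensor, ops

indices = Tensor([1, 3, 0, 2], dtype=ms.int32)

# nn.OneHot(depth=4, axis=-1)(indices) in 1.10 becomes:
on_value = Tensor(1.0, ms.float32)
off_value = Tensor(0.0, ms.float32)
out = ops.one_hot(indices, 4, on_value, off_value, axis=-1)
print(out.shape)  # (4, 4)
```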
@@ -775,11 +751,11 @@ class Pad(Cell):
      Pads the input tensor according to the paddings and mode.

      Args:
-         paddings (tuple): The shape of parameter `paddings` is (N, 2). N is the rank of input data. All elements of
-             paddings are int type. For `D` th dimension of the `x`, paddings[D, 0] indicates how many sizes to be
-             extended ahead of the `D` th dimension of the input tensor, and paddings[D, 1] indicates how many sizes to
-             be extended behind of the `D` th dimension of the input tensor. The padded size of each dimension D of the
-             output is: :math:`paddings[D, 0] + input\_x.dim\_size(D) + paddings[D, 1]`,
+         paddings (tuple): The shape of parameter `paddings` is :math:`(N, 2)` . N is the rank of input data. All
+             elements of paddings are int type. For `D` th dimension of the `x`, paddings[D, 0] indicates how many
+             sizes to be extended ahead of the `D` th dimension of the input tensor, and paddings[D, 1] indicates how
+             many sizes to be extended behind of the `D` th dimension of the input tensor. The padded size of each
+             dimension D of the output is: :math:`paddings[D, 0] + input\_x.dim\_size(D) + paddings[D, 1]`,
              e.g.:

              .. code-block::
@@ -815,7 +791,7 @@ class Pad(Cell):

      Raises:
          TypeError: If `paddings` is not a tuple.
-         ValueError: If length of `paddings` is more than 4 or its shape is not (N, 2).
+         ValueError: If length of `paddings` is more than 4 or its shape is not :math:`(N, 2)` .
          ValueError: If `mode` is not one of 'CONSTANT', 'REFLECT', 'SYMMETRIC'.

      Supported Platforms:
@@ -910,7 +886,8 @@ class Pad(Cell):
          super(Pad, self).__init__()
          self.mode = mode
          self.paddings = paddings
-         Validator.check_string(self.mode, ["CONSTANT", "REFLECT", "SYMMETRIC"], 'mode', self.cls_name)
+         Validator.check_string(
+             self.mode, ["CONSTANT", "REFLECT", "SYMMETRIC"], 'mode', self.cls_name)
          if not isinstance(paddings, tuple):
              raise TypeError(f"For '{self.cls_name}', the type of 'paddings' must be tuple, "
                              f"but got {type(paddings).__name__}.")
@@ -940,66 +917,32 @@ def bilinear(shape, size, scale, align_corners, prim_name=None):
      """Check input and calculate shape"""
      msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
      if not isinstance(align_corners, bool):
-         raise TypeError(f"{msg_prefix} type of 'align_corners' must be boolean, "
-                         f"but got {type(align_corners).__name__}.")
+         raise TypeError(
+             f"{msg_prefix} type of 'align_corners' must be bool, but got {type(align_corners).__name__}.")
      if size is None and scale is None:
          raise ValueError(f"{msg_prefix} 'size' and 'scale' both none.")
      if size is not None and scale is not None:
          raise ValueError(f"{msg_prefix} 'size' and 'scale' both not none.")
      if size is not None:
          if not isinstance(size, (tuple, list)):
-             raise ValueError(f"{msg_prefix} 'size' must be tuple or list or None, but got {type(size).__name__}.")
-         Validator.check_int(len(size), 2, Rel.EQ, "size", "bilinear")
-         Validator.check_int(size[0], 1, Rel.GE, "size[0]", "bilinear")
-         Validator.check_int(size[1], 1, Rel.GE, "size[1]", "bilinear")
+             raise ValueError(
+                 f"{msg_prefix} 'size' must be tuple or list or None, but got {type(size).__name__}.")
+         Validator.check_int(len(size), 2, Validator.EQ, "size", "bilinear")
+         Validator.check_int(size[0], 1, Validator.GE, "size[0]", "bilinear")
+         Validator.check_int(size[1], 1, Validator.GE, "size[1]", "bilinear")
          return size
-     Validator.check_int(scale, 1, Rel.GE, "scale factor", "bilinear")
+     Validator.check_int(scale, 1, Validator.GE, "scale factor", "bilinear")
      ret = (scale * shape[2], scale * shape[3])
      return ret


  class ResizeBilinear(Cell):
      r"""
-     Samples the input tensor to the given size or scale_factor by using bilinear interpolate.
-
-     Args:
-         half_pixel_centers (bool): Whether half pixel center. If set to True, `align_corners` should be False.
-             Default: False.
-
-     Inputs:
-         - **x** (Tensor) - Tensor to be resized. Input tensor must be a 4-D tensor with shape
-           :math:`(batch, channels, height, width)`, with data type of float16 or float32.
-         - **size** (Union[tuple[int], list[int], None]): A tuple or list of 2 int elements
-           :math:`(new\_height, new\_width)`,the new size of the tensor.
-           One and only one of size and scale_factor can be set to None. Default: None.
-         - **scale_factor** (int, None): The scale factor of new size of the tensor. The value should be positive
-           integer. One and only one of size and scale_factor can be set to None. Default: None.
-         - **align_corners** (bool): If true, rescale input by :math:`(new\_height - 1) / (height - 1)`, which exactly
-           aligns the 4 corners of images and resized images. If false, rescale by :math:`new\_height / height`.
-           Default: False.
-
-     Outputs:
-         Resized tensor.
-         If size is set, the result is 4-D tensor with shape :math:`(batch, channels, new\_height, new\_width)`,
-         and the data type is the same as `x`.
-         If scale is set, the result is 4-D tensor with shape
-         :math:`(batch, channels, scale\_factor * height, scale\_factor * width)` and the data type is the same as `x`.
-
-     Raises:
-         TypeError: If `size` is not one of tuple, list, None.
-         TypeError: If `scale_factor` is neither int nor None.
-         TypeError: If `align_corners` is not a bool.
-         TypeError: If `half_pixel_centers` is not a bool.
-         TypeError: If `align_corners` and `half_pixel_centers` are all True.
-         TypeError: If `half_pixel_centers` is True and device_target not Ascend.
-         TypeError: If dtype of `x` is neither float16 nor float32.
-         ValueError: If `size` and `scale_factor` are both None or not None.
-         ValueError: If length of shape of `x` is not equal to 4.
-         ValueError: If `scale_factor` is an int which is less than 0.
-         ValueError: If `size` is a list or tuple whose length is not equal to 2.
+     'nn.ResizeBilinear' is deprecated from version 2.0 and will be removed in a future version,
+     use :class:`mindspore.ops.ResizeBilinearV2` or :func:`mindspore.ops.interpolate` instead.

      Supported Platforms:
-         ``Ascend`` ``CPU`` ``GPU``
+         Deprecated

      Examples:
          >>> x = Tensor([[[[1, 2, 3, 4], [5, 6, 7, 8]]]], mindspore.float32)
@@ -1021,11 +964,15 @@ class ResizeBilinear(Cell):
      def __init__(self, half_pixel_centers=False):
          """Initialize ResizeBilinear."""
          super(ResizeBilinear, self).__init__()
+         logger.warning("'nn.ResizeBilinear' is deprecated from version 2.0 and will be removed in a "
+                        "future version, use 'ops.ResizeBilinearV2' or 'ops.interpolate' instead.")
          self.half_pixel_centers = half_pixel_centers

      def construct(self, x, size=None, scale_factor=None, align_corners=False):
-         shape = bilinear(x.shape, size, scale_factor, align_corners, self.cls_name)
-         resize_bilinear = P.ResizeBilinear(shape, align_corners, self.half_pixel_centers)
+         shape = bilinear(x.shape, size, scale_factor,
+                          align_corners, self.cls_name)
+         resize_bilinear = P.ResizeBilinear(
+             shape, align_corners, self.half_pixel_centers)
          return resize_bilinear(x)


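Migration note (editor): for the common bilinear-resize case, the deprecated cell roughly maps onto `ops.interpolate` (a sketch; `half_pixel_centers=True` has no one-line equivalent, so check the chosen `mode`/`align_corners` against your pixel-alignment needs):

```python
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor([[[[1, 2, 3, 4], [5, 6, 7, 8]]]], ms.float32)

# nn.ResizeBilinear()(x, size=(5, 5)) in 1.10 roughly maps to:
out = ops.interpolate(x, size=(5, 5), mode="bilinear", align_corners=False)
print(out.shape)  # (1, 1, 5, 5)
```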
@@ -1056,11 +1003,9 @@ class Unfold(Cell):
          Tensor, a 4-D tensor whose data type is same as `x`,
          and the shape is [out_batch, out_depth, out_row, out_col] where `out_batch` is the same as the `in_batch`.

-         :math:`out\_depth = ksize\_row * ksize\_col * in\_depth`
-
-         :math:`out\_row = (in\_row - (ksize\_row + (ksize\_row - 1) * (rate\_row - 1))) // stride\_row + 1`
-
-         :math:`out\_col = (in\_col - (ksize\_col + (ksize\_col - 1) * (rate\_col - 1))) // stride\_col + 1`
+         - :math:`out\_depth = ksize\_row * ksize\_col * in\_depth`
+         - :math:`out\_row = (in\_row - (ksize\_row + (ksize\_row - 1) * (rate\_row - 1))) // stride\_row + 1`
+         - :math:`out\_col = (in\_col - (ksize\_col + (ksize\_col - 1) * (rate\_col - 1))) // stride\_col + 1`

      Raises:
          TypeError: If `ksizes`, `strides` or `rates` is neither a tuple nor list.
@@ -1091,7 +1036,8 @@ class Unfold(Cell):
          super(Unfold, self).__init__()

          def _check_tuple_or_list(arg_name, arg_val, prim_name):
-             Validator.check_value_type(f"{arg_name}s", ksizes, [tuple, list], self.cls_name)
+             Validator.check_value_type(f"{arg_name}s", ksizes, [
+                 tuple, list], self.cls_name)
              if len(arg_val) != 4 or arg_val[0] != 1 or arg_val[3] != 1:
                  raise ValueError(f"For '{prim_name}' the format of '{arg_name}s' must be [1, {arg_name}_row, "
                                   f"{arg_name}_col, 1], but got {arg_val}.")
@@ -1106,102 +1052,29 @@ class Unfold(Cell):
          ksizes = ksizes[0], ksizes[3], ksizes[1], ksizes[2]
          strides = strides[0], strides[3], strides[1], strides[2]
          rates = rates[0], rates[3], rates[1], rates[2]
-         self.extract_image_patches = inner.ExtractImagePatches(ksizes, strides, rates, padding)
+         self.extract_image_patches = inner.ExtractImagePatches(
+             ksizes, strides, rates, padding)

      def construct(self, input_x):
          result = self.extract_image_patches(input_x)
          return result


- @constexpr
+ @_primexpr
  def tril(x_shape, x_dtype, k):
-     Validator.check_int(len(x_shape), 1, Rel.GE, "x rank", "tril")
+     Validator.check_int(len(x_shape), 1, Validator.GE, "x rank", "tril")
      Validator.check_is_int(k, "k value", "tril")
-     mask = np.tril(np.ones(x_shape), k)
-     return Tensor(mask, x_dtype)
+     value = F.cast(P.Tril(diagonal=k)(F.ones(x_shape, x_dtype)), x_dtype)
+     return value


  class Tril(Cell):
      """
-     Returns a tensor, the elements above the specified main diagonal are set to zero.
-
-     Divide the matrix elements into upper and lower triangles along the main diagonal (including diagonals).
-
-     The parameter `k` controls the choice of diagonal.
-     If `k` = 0, split along the main diagonal and keep all the elements of the lower triangle.
-     If `k` > 0, select the diagonal `k` along the main diagonal upwards, and keep all the elements of the lower
-     triangle.
-     If `k` < 0, select the diagonal `k` along the main diagonal down, and keep all the elements of the lower
-     triangle.
-
-     Inputs:
-         - **x** (Tensor) - The input tensor. The data type is
-           `number <https://www.mindspore.cn/docs/en/r1.10/api_python/mindspore.html#mindspore.dtype>`_.
-         - **k** (Int) - The index of diagonal. Default: 0. If the dimensions of the input matrix are d1 and d2,
-           the range of k should be in [-min(d1, d2)+1, min(d1, d2)-1], and the output value will be the same as the
-           input `x` when `k` is out of range.
-
-     Outputs:
-         Tensor, has the same shape and type as input `x`.
-
-     Raises:
-         TypeError: If `k` is not an int.
-         ValueError: If length of shape of `x` is less than 1.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU`` ``CPU``
-
-     Examples:
-         >>> # case1: k = 0
-         >>> x = Tensor(np.array([[ 1, 2, 3, 4],
-         ...                      [ 5, 6, 7, 8],
-         ...                      [10, 11, 12, 13],
-         ...                      [14, 15, 16, 17]]))
-         >>> tril = nn.Tril()
-         >>> result = tril(x)
-         >>> print(result)
-         [[ 1  0  0  0]
-          [ 5  6  0  0]
-          [10 11 12  0]
-          [14 15 16 17]]
-         >>> # case2: k = 1
-         >>> x = Tensor(np.array([[ 1, 2, 3, 4],
-         ...                      [ 5, 6, 7, 8],
-         ...                      [10, 11, 12, 13],
-         ...                      [14, 15, 16, 17]]))
-         >>> tril = nn.Tril()
-         >>> result = tril(x, 1)
-         >>> print(result)
-         [[ 1  2  0  0]
-          [ 5  6  7  0]
-          [10 11 12 13]
-          [14 15 16 17]]
-         >>> # case3: k = 2
-         >>> x = Tensor(np.array([[ 1, 2, 3, 4],
-         ...                      [ 5, 6, 7, 8],
-         ...                      [10, 11, 12, 13],
-         ...                      [14, 15, 16, 17]]))
-         >>> tril = nn.Tril()
-         >>> result = tril(x, 2)
-         >>> print(result)
-         [[ 1  2  3  0]
-          [ 5  6  7  8]
-          [10 11 12 13]
-          [14 15 16 17]]
-         >>> # case4: k = -1
-         >>> x = Tensor(np.array([[ 1, 2, 3, 4],
-         ...                      [ 5, 6, 7, 8],
-         ...                      [10, 11, 12, 13],
-         ...                      [14, 15, 16, 17]]))
-         >>> tril = nn.Tril()
-         >>> result = tril(x, -1)
-         >>> print(result)
-         [[ 0  0  0  0]
-          [ 5  0  0  0]
-          [10 11  0  0]
-          [14 15 16  0]]
+     'nn.Tril' is deprecated from version 2.0 and will be removed in a future version,
+     use 'ops.tril' instead.
      """

+     @deprecated("2.0", "ops.tril", False)
      def __init__(self):
          """Initialize Tril."""
          super(Tril, self).__init__()
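Migration note (editor): a sketch of the functional replacement named by the deprecation message, assuming `ops.tril(input, diagonal=0)` in 2.0:

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ms.float32)

# nn.Tril()(x, k) in 1.10 becomes ops.tril(x, diagonal=k):
print(ops.tril(x))              # keep the main diagonal and below
print(ops.tril(x, diagonal=1))  # also keep one diagonal above
```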
@@ -1211,90 +1084,26 @@ class Tril(Cell):

      def construct(self, x, k=0):
          assist = tril(x.shape, self.dtype(x), k)
-         result = self.mul(self.cast(x, mstype.float32), self.cast(assist, mstype.float32))
+         result = self.mul(self.cast(x, mstype.float32),
+                           self.cast(assist, mstype.float32))
          return self.cast(result, self.dtype(x))


- @constexpr
+ @_primexpr
  def triu(x_shape, x_dtype, k):
-     Validator.check_int(len(x_shape), 1, Rel.GE, "x rank", "triu")
+     Validator.check_int(len(x_shape), 1, Validator.GE, "x rank", "triu")
      Validator.check_is_int(k, "k value", "triu")
-     mask = np.triu(np.ones(x_shape), k)
-     return Tensor(mask, x_dtype)
+     value = F.cast(P.Triu(k)(F.ones(x_shape, x_dtype)), x_dtype)
+     return value


  class Triu(Cell):
      """
-     Returns a tensor with elements below the kth diagonal zeroed.
-
-     The upper triangular part of the matrix is defined as the elements on and above the diagonal.
-
-     The parameter `k` controls the diagonal to be considered. If `k` = 0, all elements on and above the main diagonal
-     are retained. Positive values do not include as many diagonals above the main diagonal. Similarly,
-     negative values include as many diagonals below the main diagonal.
-
-     Inputs:
-         - **x** (Tensor) - The input tensor. The data type is Number.
-           :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-         - **k** (Int) - The index of diagonal. Default: 0
-
-     Outputs:
-         Tensor, has the same type and shape as input `x`.
-
-     Raises:
-         TypeError: If `k` is not an int.
-         ValueError: If length of shape of `x` is less than 1.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU`` ``CPU``
-
-     Examples:
-         >>> x = Tensor(np.array([[ 1, 2, 3, 4],
-         ...                      [ 5, 6, 7, 8],
-         ...                      [10, 11, 12, 13],
-         ...                      [14, 15, 16, 17]]))
-         >>> triu = nn.Triu()
-         >>> result = triu(x)
-         >>> print(result)
-         [[ 1  2  3  4]
-          [ 0  6  7  8]
-          [ 0  0 12 13]
-          [ 0  0  0 17]]
-         >>> x = Tensor(np.array([[ 1, 2, 3, 4],
-         ...                      [ 5, 6, 7, 8],
-         ...                      [10, 11, 12, 13],
-         ...                      [14, 15, 16, 17]]))
-         >>> triu = nn.Triu()
-         >>> result = triu(x, 1)
-         >>> print(result)
-         [[ 0  2  3  4]
-          [ 0  0  7  8]
-          [ 0  0  0 13]
-          [ 0  0  0  0]]
-         >>> x = Tensor(np.array([[ 1, 2, 3, 4],
-         ...                      [ 5, 6, 7, 8],
-         ...                      [10, 11, 12, 13],
-         ...                      [14, 15, 16, 17]]))
-         >>> triu = nn.Triu()
-         >>> result = triu(x, 2)
-         >>> print(result)
-         [[ 0  0  3  4]
-          [ 0  0  0  8]
-          [ 0  0  0  0]
-          [ 0  0  0  0]]
-         >>> x = Tensor(np.array([[ 1, 2, 3, 4],
-         ...                      [ 5, 6, 7, 8],
-         ...                      [10, 11, 12, 13],
-         ...                      [14, 15, 16, 17]]))
-         >>> triu = nn.Triu()
-         >>> result = triu(x, -1)
-         >>> print(result)
-         [[ 1  2  3  4]
-          [ 5  6  7  8]
-          [ 0 11 12 13]
-          [ 0  0 16 17]]
+     'nn.Triu' is deprecated from version 2.0 and will be removed in a future version,
+     use 'ops.triu' instead.
      """

+     @deprecated("2.0", "ops.triu", False)
      def __init__(self):
          """Initialize Triu."""
          super(Triu, self).__init__()
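Migration note (editor): the mirror-image replacement applies to the upper triangle; a sketch assuming `ops.triu(input, diagonal=0)`:

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ms.float32)

# nn.Triu()(x, k) in 1.10 becomes ops.triu(x, diagonal=k):
print(ops.triu(x, diagonal=-1))  # also keep one diagonal below the main one
```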
@@ -1304,87 +1113,47 @@ class Triu(Cell):

      def construct(self, x, k=0):
          assist = triu(x.shape, self.dtype(x), k)
-         result = self.mul(self.cast(x, mstype.float32), self.cast(assist, mstype.float32))
+         result = self.mul(self.cast(x, mstype.float32),
+                           self.cast(assist, mstype.float32))
          return self.cast(result, self.dtype(x))


- @constexpr
+ @_primexpr
  def _get_matrix_diag_assist(x_shape, x_dtype):
-     Validator.check_int(len(x_shape), 1, Rel.GE, "x rank", "_get_matrix_diag_assist")
-     base_eye = np.eye(x_shape[-1], x_shape[-1]).reshape(-1)
-     assist = np.tile(base_eye, x_shape[:-1]).reshape(x_shape + (x_shape[-1],))
-     return Tensor(assist, x_dtype)
+     """Get matrix diag assist"""
+     Validator.check_int(len(x_shape), 1, Validator.GE, "x rank", "_get_matrix_diag_assist")
+     base_eye = F.reshape(
+         F.eye(x_shape[-1], x_shape[-1], x_dtype), (x_shape[-1] * x_shape[-1],))
+     if len(x_shape) == 1:
+         assist = F.reshape(base_eye, x_shape + (x_shape[-1],))
+     else:
+         assist = F.reshape(
+             F.tile(base_eye, x_shape[:-1]), x_shape + (x_shape[-1],))
+     value = F.cast(assist, x_dtype)
+     return value


  @constexpr
  def _get_matrix_diag_part_assist(x_shape, x_dtype):
-     Validator.check_int(len(x_shape), 2, Rel.GE, "x rank", "_get_matrix_diag_part_assist")
-     base_eye = np.eye(x_shape[-2], x_shape[-1]).reshape(-1)
-     assist = np.tile(base_eye, x_shape[:-2]).reshape(x_shape)
-     return Tensor(assist, x_dtype)
+     """Get matrix diag part assist"""
+     Validator.check_int(len(x_shape), 2, Validator.GE, "x rank", "_get_matrix_diag_part_assist")
+     base_eye = F.reshape(
+         F.eye(x_shape[-2], x_shape[-1], x_dtype), (x_shape[-2] * x_shape[-1],))
+     if len(x_shape) <= 2:
+         assist = F.reshape(base_eye, x_shape)
+     else:
+         assist = F.reshape(F.tile(base_eye, x_shape[:-2]), x_shape)
+     value = F.cast(assist, x_dtype)
+     return value


  class MatrixDiag(Cell):
      r"""
-     Returns a batched diagonal tensor with a given batched diagonal values.
-
-     Assume `x` has :math:`k` dimensions :math:`[I, J, K, ..., N]`, then the output is a tensor of rank
-     :math:`k+1` with dimensions :math:`[I, J, K, ..., N, N]` where:
-     :math:`output[i, j, k, ..., m, n] = 1\{m=n\} * x[i, j, k, ..., n]`.
-
-     Inputs:
-         - **x** (Tensor) - The diagonal values. It can be one of the following data types:
-           float32, float16, int32, int8, and uint8.
-           The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
-
-     Outputs:
-         Tensor, has the same type as input `x`. The shape must be x.shape + (x.shape[-1], ).
-
-     Raises:
-         TypeError: If dtype of `x` is not one of float32, float16, int32, int8 or uint8.
-
-     Supported Platforms:
-         ``Ascend``
-
-     Examples:
-         >>> x = Tensor(np.array([1, -1]), mindspore.float32)
-         >>> matrix_diag = nn.MatrixDiag()
-         >>> output = matrix_diag(x)
-         >>> print(x.shape)
-         (2,)
-         >>> print(output)
-         [[ 1.  0.]
-          [ 0. -1.]]
-         >>> print(output.shape)
-         (2, 2)
-         >>> x = Tensor(np.array([[1, -1], [1, -1]]), mindspore.float32)
-         >>> matrix_diag = nn.MatrixDiag()
-         >>> output = matrix_diag(x)
-         >>> print(x.shape)
-         (2, 2)
-         >>> print(output)
-         [[[ 1.  0.]
-           [ 0. -1.]]
-          [[ 1.  0.]
-           [ 0. -1.]]]
-         >>> print(output.shape)
-         (2, 2, 2)
-         >>> x = Tensor(np.array([[1, -1, 1], [1, -1, 1]]), mindspore.float32)
-         >>> matrix_diag = nn.MatrixDiag()
-         >>> output = matrix_diag(x)
-         >>> print(x.shape)
-         (2, 3)
-         >>> print(output)
-         [[[ 1.  0.  0.]
-           [ 0. -1.  0.]
-           [ 0.  0.  1.]]
-          [[ 1.  0.  0.]
-           [ 0. -1.  0.]
-           [ 0.  0.  1.]]]
-         >>> print(output.shape)
-         (2, 3, 3)
+     'nn.MatrixDiag' is deprecated from version 2.0 and will be removed in a future version,
+     use 'ops.diag' instead.
      """

+     @deprecated("2.0", "ops.diag", False)
      def __init__(self):
          """Initialize MatrixDiag."""
          super(MatrixDiag, self).__init__()
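Migration note (editor): `ops.diag` covers the 1-D case of the removed cell (a sketch; for batched inputs the 1.10 cell built a diagonal matrix per trailing dimension, which `ops.diag` does not reproduce one-for-one, so batched callers should check the 2.0 docs for a suitable replacement):

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

v = Tensor(np.array([1.0, -1.0]), ms.float32)

# nn.MatrixDiag()(v) in 1.10 becomes, for a 1-D input:
print(ops.diag(v))
# [[ 1.  0.]
#  [ 0. -1.]]
```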
@@ -1401,47 +1170,11 @@ class MatrixDiag(Cell):

  class MatrixDiagPart(Cell):
      r"""
-     Returns the batched diagonal part of a batched tensor.
-
-     Assume `x` has :math:`k` dimensions :math:`[I, J, K, ..., M, N]`, then the output is a tensor of rank
-     :math:`k-1` with dimensions :math:`[I, J, K, ..., min(M, N)]` where:
-     :math:`output[i, j, k, ..., n] = x[i, j, k, ..., n, n]`.
-
-     Inputs:
-         - **x** (Tensor) - The batched tensor. It can be one of the following data types:
-           float32, float16, int32, int8, and uint8.
-
-     Outputs:
-         Tensor, has the same type as input `x`. The shape must be x.shape[:-2] + [min(x.shape[-2:])].
-
-     Raises:
-         TypeError: If dtype of `x` is not one of float32, float16, int32, int8 or uint8.
-
-     Supported Platforms:
-         ``Ascend``
-
-     Examples:
-         >>> import mindspore
-         >>> from mindspore import Tensor, nn
-         >>> x = Tensor([[[-1, 0], [0, 1]],
-         ...             [[-1, 0], [0, 1]],
-         ...             [[-1, 0], [0, 1]]], mindspore.float32)
-         >>> matrix_diag_part = nn.MatrixDiagPart()
-         >>> output = matrix_diag_part(x)
-         >>> print(output)
-         [[-1.  1.]
-          [-1.  1.]
-          [-1.  1.]]
-         >>> x = Tensor([[-1, 0, 0, 1],
-         ...             [-1, 0, 0, 1],
-         ...             [-1, 0, 0, 1],
-         ...             [-1, 0, 0, 1]], mindspore.float32)
-         >>> matrix_diag_part = nn.MatrixDiagPart()
-         >>> output = matrix_diag_part(x)
-         >>> print(output)
-         [-1.  0.  0.  1.]
+     'nn.MatrixDiagPart' is deprecated from version 2.0 and will be removed in a future version,
+     use 'ops.diagonal' instead.
      """

+     @deprecated("2.0", "ops.diagonal", False)
      def __init__(self):
          """Initialize MatrixDiagPart."""
          super(MatrixDiagPart, self).__init__()
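Migration note (editor): a sketch assuming the 2.0 `ops.diagonal(input, offset, dim1, dim2)` signature; taking the diagonal over the last two dimensions should match the old batched behaviour:

```python
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor([[[-1, 0], [0, 1]],
            [[-1, 0], [0, 1]]], ms.float32)

# nn.MatrixDiagPart()(x) in 1.10 roughly maps to:
print(ops.diagonal(x, dim1=-2, dim2=-1))
# [[-1.  1.]
#  [-1.  1.]]
```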
@@ -1465,7 +1198,7 @@ class MatrixSetDiag(Cell):
      :math:`[I, J, K, ..., M, N]`, where:

      .. math::
-         output[i, j, k, ..., m, n] = diagnoal[i, j, k, ..., n]\ for\ m == n
+         output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]\ for\ m == n

      .. math::
          output[i, j, k, ..., m, n] = x[i, j, k, ..., m, n]\ for\ m != n
@@ -1517,59 +1250,23 @@ class MatrixSetDiag(Cell):

  @constexpr
  def _check_input_dim(axis, dim, cls_name):
-     Validator.check_int_range(axis, -dim, dim, Rel.INC_LEFT, 'axis', cls_name)
+     Validator.check_int_range(axis, -dim, dim, Validator.INC_LEFT, 'axis', cls_name)


  class Roll(Cell):
      """
-     Rolls the elements of a tensor along an axis.
-
-     The elements are shifted positively (towards larger indices) by the offset of `shift` along the dimension of `axis`.
-     Negative `shift` values will shift elements in the opposite direction. Elements that roll passed the last position
-     will wrap around to the first and vice versa. Multiple shifts along multiple axes may be specified.
-
-     Args:
-         shift (Union[list(int), tuple(int), int]): Specifies the number of places by which elements are shifted
-             positively (towards larger indices) along the specified dimension. Negative shifts will roll the elements
-             in the opposite direction.
-         axis (Union[list(int), tuple(int), int]): Specifies the dimension indexes of shape to be rolled.
-
-     Inputs:
-         - **input_x** (Tensor) - Input tensor.
-
-     Outputs:
-         Tensor, has the same shape and type as `input_x`.
-
-     Raises:
-         TypeError: If `shift` is not an int, a tuple or a list.
-         TypeError: If `axis` is not an int, a tuple or a list.
-         TypeError: If element of `shift` is not an int.
-         TypeError: If element of `axis` is not an int.
-         ValueError: If axis is out of the range [-len(input_x.shape), len(input_x.shape)).
-         ValueError: If length of shape of `shift` is not equal to length of shape of `axis`.
-
-     Supported Platforms:
-         ``Ascend`` ``GPU``
-
-     Examples:
-         >>> input_x = Tensor(np.array([0, 1, 2, 3, 4]).astype(np.float32))
-         >>> op = nn.Roll(shift=2, axis=0)
-         >>> output = op(input_x)
-         >>> print(output)
-         [3. 4. 0. 1. 2.]
-         >>> input_x = Tensor(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]).astype(np.float32))
-         >>> op = nn.Roll(shift=[1, -2], axis=[0, 1])
-         >>> output = op(input_x)
-         >>> print(output)
-         [[7. 8. 9. 5. 6.]
-          [2. 3. 4. 0. 1.]]
+     'nn.Roll' is deprecated from version 2.0 and will be removed in a future version,
+     use 'ops.roll' instead.
      """

+     @deprecated("2.0", "ops.roll", False)
      def __init__(self, shift, axis):
          """Initialize Roll"""
          super(Roll, self).__init__()
-         Validator.check_value_type("shift", shift, [int, tuple, list], self.cls_name)
-         Validator.check_value_type("axis", axis, [int, tuple, list], self.cls_name)
+         Validator.check_value_type(
+             "shift", shift, [int, tuple, list], self.cls_name)
+         Validator.check_value_type(
+             "axis", axis, [int, tuple, list], self.cls_name)
          self.shape_op = P.Shape()
          self.shift = shift
          self.axis = axis
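Migration note (editor): a sketch assuming the 2.0 functional `ops.roll(input, shifts, dims)`:

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor(np.array([0, 1, 2, 3, 4]).astype(np.float32))

# nn.Roll(shift=2, axis=0)(x) in 1.10 becomes:
print(ops.roll(x, shifts=2, dims=0))  # [3. 4. 0. 1. 2.]
```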
@@ -1581,13 +1278,13 @@ class Roll(Cell):
          if not isinstance(self.shift, (list, tuple)):
              self.shift = [self.shift]
          if context.get_context("device_target") == "GPU":
-             Validator.check_int(len(self.shift), 1, Rel.GE, "shift", "Roll")
-             Validator.check_int(len(self.axis), 1, Rel.GE, "axis", "Roll")
+             Validator.check_int(len(self.shift), 1, Validator.GE, "shift", "Roll")
+             Validator.check_int(len(self.axis), 1, Validator.GE, "axis", "Roll")
              for s_axis in self.axis:
                  Validator.check_is_int(s_axis, "axis", "Roll")
              for s_shift in self.shift:
                  Validator.check_is_int(s_shift, "shift", "Roll")
-             self.roll = inner.Roll(self.shift, self.axis)
+             self.roll = P.Roll(self.shift, self.axis)
              self.gpu = True
              if len(self.shift) != len(self.axis):
                  raise ValueError(f"For '{self.cls_name}', the shape of 'shift' and the shape of 'axis' must be "
@@ -1595,14 +1292,16 @@ class Roll(Cell):
                                   f"and the length of 'axis' {len(self.axis)}.")
          else:
              if not isinstance(self.axis, (list, tuple)):
-                 self.op_list.append((inner.Roll(shift=self.shift, axis=0), self.axis))
+                 self.op_list.append(
+                     (P.Roll(shift=self.shift, axis=0), self.axis))
              else:
                  if len(self.shift) != len(self.axis):
                      raise ValueError(f"For '{self.cls_name}', the shape of 'shift' and the shape of 'axis' must be "
                                       f"the same, but got the length of 'shift' {len(self.shift)} "
                                       f"and the length of 'axis' {len(self.axis)}.")
                  for idx, _ in enumerate(self.axis):
-                     self.op_list.append((inner.Roll(shift=self.shift[idx], axis=0), self.axis[idx]))
+                     self.op_list.append(
+                         (P.Roll(shift=self.shift[idx], axis=0), self.axis[idx]))

      def construct(self, input_x):
          dim = len(self.shape_op(input_x))
@@ -1628,12 +1327,12 @@ class Roll(Cell):
  class Unflatten(Cell):
      r"""
      Summary:
-         Unflattens a tensor dim according to axis and unflattened_size.
+         Unflattens a Tensor dim according to `axis` and `unflattened_size`.

      Args:
-         axis (int): specifies the dimension of the input tensor to be unflattened.
-         unflattened_size (Union(tuple[int], list[int])): is the new shape of the unflattened dimension of
-             the tensor and it can be a tuple of ints or a list of ints. The product of unflattened_size
+         axis (int): specifies the dimension of the input Tensor to be unflattened.
+         unflattened_size (Union(tuple[int], list[int])): the new shape of the unflattened dimension of
+             the Tensor and it can be a tuple of ints or a list of ints. The product of `unflattened_size`
              must equal to input_shape[axis].

      Inputs:
@@ -1645,7 +1344,7 @@ class Unflatten(Cell):
      Raises:
          TypeError: If `axis` is not int.
          TypeError: If `unflattened_size` is neither tuple of ints nor list of ints.
-         TypeError: If the value specified by `axis` is not equal to product of `unflattened_size`.
+         TypeError: If the product of `unflattened_size` is not equal to input_shape[axis].

      Supported Platforms:
          ``Ascend`` ``GPU`` ``CPU``
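Editor's sketch of the documented contract, since the hunk above states the shape rule but shows no call site: the product of `unflattened_size` must equal the size of dimension `axis`.

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor, nn

x = Tensor(np.arange(30).reshape(3, 10), ms.float32)

# Split dimension 1 (size 10) into (2, 5); 2 * 5 must equal 10.
unflatten = nn.Unflatten(1, (2, 5))
print(unflatten(x).shape)  # (3, 2, 5)
```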
@@ -1666,7 +1365,8 @@ class Unflatten(Cell):
          self.shape = P.Shape()
          self.reshape = P.Reshape()
          Validator.check_is_int(axis, 'axis', 'Unflatten')
-         Validator.check_value_type('unflattended_size', unflattened_size, (list, tuple), 'Unflatten')
+         Validator.check_value_type(
+             'unflattended_size', unflattened_size, (list, tuple), 'Unflatten')
          self.axis = axis
          if isinstance(unflattened_size, list):
              unflattened_size = tuple(unflattened_size)
@@ -1678,5 +1378,5 @@ class Unflatten(Cell):
          new_shape += input_shape[: self.axis]
          new_shape += self.unflattened_size
          if self.axis != -1:
-             new_shape += input_shape[self.axis+1 :]
+             new_shape += input_shape[self.axis + 1:]
          return self.reshape(input_x, new_shape)