mindspore 2.0.0a0__cp37-none-any.whl → 2.0.0rc1__cp37-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (693)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Third_Party_Open_Source_Software_Notice +9064 -0
  3. mindspore/__init__.py +4 -2
  4. mindspore/_akg/akg/composite/build_module.py +11 -0
  5. mindspore/_akg/akg/config/repository_cuda.json +11 -0
  6. mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -3
  7. mindspore/_c_dataengine.cpython-37m-aarch64-linux-gnu.so +0 -0
  8. mindspore/_c_expression.cpython-37m-aarch64-linux-gnu.so +0 -0
  9. mindspore/_c_mindrecord.cpython-37m-aarch64-linux-gnu.so +0 -0
  10. mindspore/_check_jit_forbidden_api.py +102 -0
  11. mindspore/_checkparam.py +1066 -1001
  12. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +4 -3
  13. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -48
  14. mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -4
  15. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -4
  16. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
  17. mindspore/_extends/parse/__init__.py +5 -3
  18. mindspore/_extends/parse/namespace.py +16 -1
  19. mindspore/_extends/parse/parser.py +107 -22
  20. mindspore/_extends/parse/resources.py +0 -7
  21. mindspore/_extends/parse/standard_method.py +885 -413
  22. mindspore/_mindspore_offline_debug.cpython-37m-aarch64-linux-gnu.so +0 -0
  23. mindspore/amp.py +52 -57
  24. mindspore/bin/cache_admin +0 -0
  25. mindspore/bin/cache_server +0 -0
  26. mindspore/boost/boost.py +2 -2
  27. mindspore/boost/boost_cell_wrapper.py +38 -20
  28. mindspore/boost/dim_reduce.py +3 -3
  29. mindspore/boost/group_loss_scale_manager.py +1 -1
  30. mindspore/common/__init__.py +4 -6
  31. mindspore/common/_decorator.py +2 -0
  32. mindspore/common/_register_for_adapter.py +55 -0
  33. mindspore/common/_stub_tensor.py +201 -0
  34. mindspore/common/_utils.py +41 -7
  35. mindspore/common/api.py +215 -141
  36. mindspore/common/dtype.py +8 -1
  37. mindspore/common/dump.py +2 -2
  38. mindspore/common/initializer.py +4 -2
  39. mindspore/common/jit_config.py +17 -13
  40. mindspore/common/mutable.py +33 -13
  41. mindspore/common/parameter.py +23 -21
  42. mindspore/common/seed.py +8 -24
  43. mindspore/common/sparse_tensor.py +62 -41
  44. mindspore/common/tensor.py +852 -1154
  45. mindspore/communication/__init__.py +2 -2
  46. mindspore/communication/_comm_helper.py +11 -4
  47. mindspore/communication/management.py +22 -21
  48. mindspore/config/op_info.config +501 -1008
  49. mindspore/config/super_bar_config.json +512 -0
  50. mindspore/context.py +201 -23
  51. mindspore/dataset/__init__.py +6 -6
  52. mindspore/dataset/audio/__init__.py +7 -7
  53. mindspore/dataset/audio/transforms.py +670 -30
  54. mindspore/dataset/audio/utils.py +47 -4
  55. mindspore/dataset/audio/validators.py +223 -1
  56. mindspore/dataset/callback/ds_callback.py +2 -2
  57. mindspore/dataset/core/config.py +210 -14
  58. mindspore/dataset/core/validator_helpers.py +2 -2
  59. mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
  60. mindspore/dataset/debug/debug_hook.py +65 -0
  61. mindspore/dataset/debug/pre_defined_hook.py +67 -0
  62. mindspore/dataset/engine/__init__.py +7 -3
  63. mindspore/dataset/engine/cache_client.py +1 -1
  64. mindspore/dataset/engine/datasets.py +322 -66
  65. mindspore/dataset/engine/datasets_audio.py +80 -76
  66. mindspore/dataset/engine/datasets_standard_format.py +51 -38
  67. mindspore/dataset/engine/datasets_text.py +232 -118
  68. mindspore/dataset/engine/datasets_user_defined.py +41 -17
  69. mindspore/dataset/engine/datasets_vision.py +746 -225
  70. mindspore/dataset/engine/graphdata.py +75 -10
  71. mindspore/dataset/engine/iterators.py +45 -5
  72. mindspore/dataset/engine/offload.py +48 -28
  73. mindspore/dataset/engine/validators.py +117 -8
  74. mindspore/dataset/text/__init__.py +6 -5
  75. mindspore/dataset/text/transforms.py +86 -3
  76. mindspore/dataset/text/utils.py +6 -4
  77. mindspore/dataset/text/validators.py +25 -0
  78. mindspore/dataset/transforms/__init__.py +3 -2
  79. mindspore/dataset/transforms/c_transforms.py +1 -1
  80. mindspore/dataset/transforms/transforms.py +2 -2
  81. mindspore/dataset/utils/__init__.py +2 -1
  82. mindspore/dataset/utils/line_reader.py +121 -0
  83. mindspore/dataset/vision/__init__.py +2 -3
  84. mindspore/dataset/vision/c_transforms.py +9 -9
  85. mindspore/dataset/vision/py_transforms.py +5 -5
  86. mindspore/dataset/vision/py_transforms_util.py +2 -0
  87. mindspore/dataset/vision/transforms.py +160 -161
  88. mindspore/dataset/vision/utils.py +3 -3
  89. mindspore/experimental/map_parameter.py +38 -26
  90. mindspore/include/OWNERS +0 -1
  91. mindspore/include/api/callback/callback.h +9 -13
  92. mindspore/include/api/callback/ckpt_saver.h +2 -2
  93. mindspore/include/api/callback/loss_monitor.h +2 -2
  94. mindspore/include/api/callback/lr_scheduler.h +5 -5
  95. mindspore/include/api/callback/time_monitor.h +2 -2
  96. mindspore/include/api/callback/train_accuracy.h +4 -6
  97. mindspore/include/api/cfg.h +19 -6
  98. mindspore/include/api/context.h +44 -9
  99. mindspore/include/api/delegate.h +1 -1
  100. mindspore/include/api/metrics/accuracy.h +2 -2
  101. mindspore/include/api/metrics/metrics.h +4 -3
  102. mindspore/include/api/model.h +9 -4
  103. mindspore/include/api/model_parallel_runner.h +2 -2
  104. mindspore/include/api/net.h +12 -11
  105. mindspore/include/api/serialization.h +19 -3
  106. mindspore/include/api/types.h +3 -3
  107. mindspore/include/dataset/constants.h +7 -0
  108. mindspore/include/dataset/text.h +59 -0
  109. mindspore/include/mindapi/base/type_id.h +1 -0
  110. mindspore/lib/libdnnl.so.2 +0 -0
  111. mindspore/lib/libicudata.so.69 +0 -0
  112. mindspore/lib/libicui18n.so.69 +0 -0
  113. mindspore/lib/libicuuc.so.69 +0 -0
  114. mindspore/lib/libmindspore.so +0 -0
  115. mindspore/lib/libmindspore_backend.so +0 -0
  116. mindspore/lib/libmindspore_common.so +0 -0
  117. mindspore/lib/libmindspore_core.so +0 -0
  118. mindspore/lib/libmindspore_glog.so.0 +0 -0
  119. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  120. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  121. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  122. mindspore/lib/libmindspore_shared_lib.so +0 -0
  123. mindspore/lib/libmpi_adapter.so +0 -0
  124. mindspore/lib/libmpi_collective.so +0 -0
  125. mindspore/lib/libnnacl.so +0 -0
  126. mindspore/lib/libopencv_core.so.4.5 +0 -0
  127. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  128. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  129. mindspore/lib/libps_cache.so +0 -0
  130. mindspore/lib/plugin/ascend/libakg.so +0 -0
  131. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  132. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  133. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  134. mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
  135. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  136. mindspore/lib/plugin/cpu/libakg.so +0 -0
  137. mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
  138. mindspore/lib/plugin/{libmindspore_ascend.so → libmindspore_ascend.so.2} +0 -0
  139. mindspore/log.py +1 -1
  140. mindspore/mindrecord/filereader.py +18 -0
  141. mindspore/mindrecord/filewriter.py +197 -34
  142. mindspore/mindrecord/shardreader.py +9 -0
  143. mindspore/mindrecord/shardwriter.py +1 -1
  144. mindspore/mindrecord/tools/cifar100_to_mr.py +3 -3
  145. mindspore/mindrecord/tools/cifar10_to_mr.py +3 -3
  146. mindspore/mindrecord/tools/csv_to_mr.py +3 -3
  147. mindspore/mindrecord/tools/imagenet_to_mr.py +16 -11
  148. mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
  149. mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
  150. mindspore/nn/__init__.py +0 -4
  151. mindspore/nn/cell.py +204 -132
  152. mindspore/nn/dynamic_lr.py +1 -1
  153. mindspore/nn/grad/cell_grad.py +7 -6
  154. mindspore/nn/layer/__init__.py +5 -4
  155. mindspore/nn/layer/activation.py +40 -89
  156. mindspore/nn/layer/basic.py +255 -624
  157. mindspore/nn/layer/channel_shuffle.py +7 -6
  158. mindspore/nn/layer/combined.py +1 -1
  159. mindspore/nn/layer/container.py +41 -4
  160. mindspore/nn/layer/conv.py +64 -28
  161. mindspore/nn/layer/dense.py +9 -8
  162. mindspore/nn/layer/embedding.py +27 -25
  163. mindspore/nn/layer/image.py +53 -46
  164. mindspore/nn/layer/math.py +97 -105
  165. mindspore/nn/layer/normalization.py +117 -86
  166. mindspore/nn/layer/padding.py +185 -95
  167. mindspore/nn/layer/pooling.py +817 -414
  168. mindspore/nn/layer/rnn_cells.py +10 -15
  169. mindspore/nn/layer/rnns.py +37 -38
  170. mindspore/nn/layer/thor_layer.py +11 -12
  171. mindspore/nn/layer/timedistributed.py +5 -5
  172. mindspore/nn/layer/transformer.py +701 -0
  173. mindspore/nn/learning_rate_schedule.py +8 -8
  174. mindspore/nn/loss/__init__.py +5 -4
  175. mindspore/nn/loss/loss.py +334 -199
  176. mindspore/nn/optim/ada_grad.py +6 -6
  177. mindspore/nn/optim/adadelta.py +2 -3
  178. mindspore/nn/optim/adafactor.py +4 -5
  179. mindspore/nn/optim/adam.py +126 -62
  180. mindspore/nn/optim/adamax.py +3 -4
  181. mindspore/nn/optim/adasum.py +6 -6
  182. mindspore/nn/optim/asgd.py +2 -2
  183. mindspore/nn/optim/ftrl.py +67 -38
  184. mindspore/nn/optim/lamb.py +4 -5
  185. mindspore/nn/optim/lars.py +2 -2
  186. mindspore/nn/optim/lazyadam.py +43 -4
  187. mindspore/nn/optim/momentum.py +6 -5
  188. mindspore/nn/optim/optimizer.py +3 -1
  189. mindspore/nn/optim/proximal_ada_grad.py +2 -2
  190. mindspore/nn/optim/rmsprop.py +1 -1
  191. mindspore/nn/optim/rprop.py +8 -9
  192. mindspore/nn/optim/sgd.py +19 -13
  193. mindspore/nn/optim/thor.py +10 -15
  194. mindspore/nn/probability/__init__.py +0 -2
  195. mindspore/nn/probability/bijector/bijector.py +4 -4
  196. mindspore/nn/probability/bijector/invert.py +1 -1
  197. mindspore/nn/probability/bijector/softplus.py +2 -2
  198. mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
  199. mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
  200. mindspore/nn/probability/distribution/_utils/utils.py +9 -15
  201. mindspore/nn/probability/distribution/bernoulli.py +3 -3
  202. mindspore/nn/probability/distribution/beta.py +1 -1
  203. mindspore/nn/probability/distribution/categorical.py +5 -7
  204. mindspore/nn/probability/distribution/cauchy.py +3 -3
  205. mindspore/nn/probability/distribution/distribution.py +2 -2
  206. mindspore/nn/probability/distribution/exponential.py +2 -2
  207. mindspore/nn/probability/distribution/gamma.py +3 -3
  208. mindspore/nn/probability/distribution/geometric.py +1 -1
  209. mindspore/nn/probability/distribution/gumbel.py +3 -3
  210. mindspore/nn/probability/distribution/half_normal.py +15 -11
  211. mindspore/nn/probability/distribution/laplace.py +16 -13
  212. mindspore/nn/probability/distribution/logistic.py +2 -2
  213. mindspore/nn/probability/distribution/normal.py +1 -1
  214. mindspore/nn/probability/distribution/poisson.py +1 -1
  215. mindspore/nn/probability/distribution/student_t.py +20 -15
  216. mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
  217. mindspore/nn/probability/distribution/uniform.py +2 -2
  218. mindspore/nn/reinforcement/_tensors_queue.py +3 -3
  219. mindspore/nn/reinforcement/tensor_array.py +2 -2
  220. mindspore/nn/sparse/sparse.py +2 -2
  221. mindspore/nn/wrap/cell_wrapper.py +27 -10
  222. mindspore/nn/wrap/grad_reducer.py +2 -2
  223. mindspore/nn/wrap/loss_scale.py +40 -24
  224. mindspore/numpy/array_creations.py +33 -22
  225. mindspore/numpy/array_ops.py +35 -30
  226. mindspore/numpy/logic_ops.py +6 -27
  227. mindspore/numpy/math_ops.py +22 -19
  228. mindspore/numpy/utils.py +1 -1
  229. mindspore/numpy/utils_const.py +108 -58
  230. mindspore/ops/_constants.py +0 -6
  231. mindspore/ops/_grad/__init__.py +2 -1
  232. mindspore/ops/_grad/grad_array_ops.py +86 -117
  233. mindspore/ops/_grad/grad_base.py +23 -1
  234. mindspore/ops/_grad/grad_clip_ops.py +2 -3
  235. mindspore/ops/_grad/grad_comm_ops.py +34 -24
  236. mindspore/ops/_grad/grad_implementations.py +9 -45
  237. mindspore/ops/_grad/grad_inner_ops.py +47 -4
  238. mindspore/ops/_grad/grad_math_ops.py +142 -117
  239. mindspore/ops/_grad/grad_nn_ops.py +71 -165
  240. mindspore/ops/_grad/grad_sequence_ops.py +296 -0
  241. mindspore/ops/_grad/grad_sparse.py +7 -6
  242. mindspore/ops/_grad_experimental/__init__.py +1 -0
  243. mindspore/ops/_grad_experimental/grad_array_ops.py +150 -15
  244. mindspore/ops/_grad_experimental/grad_image_ops.py +16 -7
  245. mindspore/ops/_grad_experimental/grad_inner_ops.py +1 -22
  246. mindspore/ops/_grad_experimental/grad_linalg_ops.py +4 -11
  247. mindspore/ops/_grad_experimental/grad_math_ops.py +210 -89
  248. mindspore/ops/_grad_experimental/grad_nn_ops.py +26 -22
  249. mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
  250. mindspore/ops/_grad_experimental/grad_sparse_ops.py +49 -8
  251. mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
  252. mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +2 -2
  253. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
  254. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
  255. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +4 -4
  256. mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
  257. mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
  258. mindspore/ops/_op_impl/_custom_op/correction_mul.py +2 -2
  259. mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
  260. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -5
  261. mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
  262. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
  263. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
  264. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
  265. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
  266. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
  267. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
  268. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
  269. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
  270. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
  271. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
  272. mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
  273. mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
  274. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
  275. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
  276. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
  277. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
  278. mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
  279. mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -4
  280. mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
  281. mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
  282. mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
  283. mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
  284. mindspore/ops/_op_impl/aicpu/__init__.py +236 -4
  285. mindspore/ops/_op_impl/aicpu/abs.py +36 -0
  286. mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_v1.py → adaptive_avg_pool_2d.py} +6 -5
  287. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
  288. mindspore/ops/_op_impl/aicpu/add.py +43 -0
  289. mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
  290. mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
  291. mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
  292. mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -43
  293. mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
  294. mindspore/{compression/common/__init__.py → ops/_op_impl/aicpu/bessel_i0.py} +15 -8
  295. mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
  296. mindspore/ops/_op_impl/aicpu/conj.py +11 -0
  297. mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +0 -3
  298. mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
  299. mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
  300. mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_grad_v1.py → digamma.py} +7 -9
  301. mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
  302. mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
  303. mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
  304. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
  305. mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
  306. mindspore/ops/_op_impl/aicpu/greater.py +41 -0
  307. mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
  308. mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
  309. mindspore/ops/_op_impl/aicpu/less.py +41 -0
  310. mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/lgamma.py} +16 -10
  311. mindspore/ops/_op_impl/aicpu/mirror_pad.py +0 -4
  312. mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
  313. mindspore/ops/_op_impl/aicpu/mul.py +3 -1
  314. mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
  315. mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
  316. mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
  317. mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
  318. mindspore/ops/_op_impl/aicpu/polar.py +32 -0
  319. mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
  320. mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
  321. mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
  322. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
  323. mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
  324. mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
  325. mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
  326. mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
  327. mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
  328. mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
  329. mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
  330. mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
  331. mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
  332. mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
  333. mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
  334. mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
  335. mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
  336. mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
  337. mindspore/ops/_op_impl/aicpu/sparse_slice.py +4 -0
  338. mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +6 -0
  339. mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
  340. mindspore/ops/_op_impl/aicpu/trans_data.py +1 -0
  341. mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
  342. mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
  343. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
  344. mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
  345. mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
  346. mindspore/ops/_op_impl/cpu/sparse_slice.py +4 -0
  347. mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +6 -0
  348. mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
  349. mindspore/ops/_op_impl/tbe/__init__.py +27 -611
  350. mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
  351. mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
  352. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
  353. mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +1 -0
  354. mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
  355. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
  356. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
  357. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
  358. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
  359. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
  360. mindspore/ops/_op_impl/tbe/cast.py +0 -2
  361. mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
  362. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
  363. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +2 -2
  364. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
  365. mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
  366. mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
  367. mindspore/ops/_op_impl/tbe/matmul_ds.py +2 -0
  368. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
  369. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
  370. mindspore/ops/_op_impl/tbe/scatter_mul.py +2 -0
  371. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -2
  372. mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
  373. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
  374. mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
  375. mindspore/ops/_register_for_op.py +1 -0
  376. mindspore/ops/_utils/__init__.py +1 -2
  377. mindspore/ops/_utils/utils.py +19 -40
  378. mindspore/ops/_vmap/vmap_array_ops.py +116 -38
  379. mindspore/ops/_vmap/vmap_base.py +16 -9
  380. mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
  381. mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
  382. mindspore/ops/_vmap/vmap_grad_nn_ops.py +7 -5
  383. mindspore/ops/_vmap/vmap_image_ops.py +12 -5
  384. mindspore/ops/_vmap/vmap_math_ops.py +46 -5
  385. mindspore/ops/_vmap/vmap_nn_ops.py +15 -21
  386. mindspore/ops/_vmap/vmap_random_ops.py +1 -1
  387. mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
  388. mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
  389. mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
  390. mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
  391. mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
  392. mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
  393. mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
  394. mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
  395. mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +220 -106
  396. mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
  397. mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
  398. mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
  399. mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
  400. mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
  401. mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
  402. mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
  403. mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
  404. mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
  405. mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
  406. mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -23
  407. mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -17
  408. mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
  409. mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
  410. mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
  411. mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
  412. mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
  413. mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
  414. mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +39 -41
  415. mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
  416. mindspore/ops/bprop_mindir/Flatten_bprop.mindir +41 -43
  417. mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +51 -57
  418. mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
  419. mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
  420. mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
  421. mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
  422. mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
  423. mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
  424. mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
  425. mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
  426. mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
  427. mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
  428. mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
  429. mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
  430. mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
  431. mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
  432. mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
  433. mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
  434. mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
  435. mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
  436. mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
  437. mindspore/ops/bprop_mindir/OneHot_bprop.mindir +24 -25
  438. mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
  439. mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
  440. mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
  441. mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
  442. mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
  443. mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
  444. mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
  445. mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +18 -19
  446. mindspore/ops/bprop_mindir/Reshape_bprop.mindir +53 -53
  447. mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
  448. mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +77 -85
  449. mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
  450. mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
  451. mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
  452. mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
  453. mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
  454. mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  455. mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
  456. mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
  457. mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  458. mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +37 -39
  459. mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +70 -72
  460. mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
  461. mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
  462. mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
  463. mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
  464. mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +17 -17
  465. mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
  466. mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
  467. mindspore/ops/bprop_mindir/generate_mindir.py +2 -0
  468. mindspore/ops/composite/__init__.py +7 -8
  469. mindspore/ops/composite/base.py +101 -47
  470. mindspore/ops/composite/math_ops.py +188 -158
  471. mindspore/ops/composite/multitype_ops/_compile_utils.py +415 -170
  472. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +142 -87
  473. mindspore/ops/composite/multitype_ops/add_impl.py +6 -1
  474. mindspore/ops/composite/multitype_ops/div_impl.py +2 -3
  475. mindspore/ops/composite/multitype_ops/getitem_impl.py +31 -3
  476. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
  477. mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
  478. mindspore/ops/composite/multitype_ops/in_impl.py +9 -0
  479. mindspore/ops/composite/multitype_ops/less_equal_impl.py +31 -0
  480. mindspore/ops/composite/multitype_ops/less_impl.py +31 -0
  481. mindspore/ops/composite/multitype_ops/mul_impl.py +21 -5
  482. mindspore/ops/composite/multitype_ops/not_in_impl.py +9 -0
  483. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
  484. mindspore/ops/composite/multitype_ops/setitem_impl.py +21 -3
  485. mindspore/ops/composite/multitype_ops/sub_impl.py +1 -1
  486. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +35 -4
  487. mindspore/ops/function/__init__.py +152 -8
  488. mindspore/ops/function/array_func.py +2555 -674
  489. mindspore/ops/function/clip_func.py +209 -13
  490. mindspore/ops/function/debug_func.py +2 -2
  491. mindspore/ops/function/grad/__init__.py +2 -1
  492. mindspore/ops/function/grad/grad_func.py +147 -62
  493. mindspore/ops/function/image_func.py +54 -38
  494. mindspore/ops/function/linalg_func.py +167 -16
  495. mindspore/ops/function/math_func.py +4849 -1492
  496. mindspore/ops/function/nn_func.py +2573 -988
  497. mindspore/ops/function/other_func.py +115 -0
  498. mindspore/ops/function/parameter_func.py +3 -3
  499. mindspore/ops/function/random_func.py +790 -73
  500. mindspore/ops/function/sparse_func.py +98 -78
  501. mindspore/ops/function/sparse_unary_func.py +54 -53
  502. mindspore/ops/function/spectral_func.py +27 -24
  503. mindspore/ops/function/vmap_func.py +22 -2
  504. mindspore/ops/functional.py +97 -37
  505. mindspore/ops/op_info_register.py +70 -28
  506. mindspore/ops/operations/__init__.py +47 -14
  507. mindspore/ops/operations/_csr_ops.py +7 -7
  508. mindspore/ops/operations/_embedding_cache_ops.py +5 -5
  509. mindspore/ops/operations/_grad_ops.py +276 -187
  510. mindspore/ops/operations/_inner_ops.py +319 -113
  511. mindspore/ops/operations/_ms_kernel.py +10 -8
  512. mindspore/ops/operations/_ocr_ops.py +9 -9
  513. mindspore/ops/operations/_opaque_predicate_registry.py +4 -0
  514. mindspore/ops/operations/_quant_ops.py +137 -102
  515. mindspore/ops/operations/_rl_inner_ops.py +121 -60
  516. mindspore/ops/operations/_scalar_ops.py +466 -0
  517. mindspore/ops/operations/_sequence_ops.py +1004 -2
  518. mindspore/ops/operations/_tensor_array.py +10 -11
  519. mindspore/ops/operations/_thor_ops.py +1 -1
  520. mindspore/ops/operations/array_ops.py +801 -466
  521. mindspore/ops/operations/comm_ops.py +51 -49
  522. mindspore/ops/operations/control_ops.py +2 -2
  523. mindspore/ops/operations/custom_ops.py +123 -44
  524. mindspore/ops/operations/debug_ops.py +24 -24
  525. mindspore/ops/operations/image_ops.py +240 -153
  526. mindspore/ops/operations/inner_ops.py +34 -50
  527. mindspore/ops/operations/linalg_ops.py +31 -9
  528. mindspore/ops/operations/math_ops.py +988 -757
  529. mindspore/ops/operations/nn_ops.py +965 -819
  530. mindspore/ops/operations/other_ops.py +51 -40
  531. mindspore/ops/operations/random_ops.py +204 -122
  532. mindspore/ops/operations/rl_ops.py +8 -9
  533. mindspore/ops/operations/sparse_ops.py +254 -93
  534. mindspore/ops/operations/spectral_ops.py +35 -3
  535. mindspore/ops/primitive.py +111 -9
  536. mindspore/parallel/_auto_parallel_context.py +189 -83
  537. mindspore/parallel/_offload_context.py +185 -0
  538. mindspore/parallel/_parallel_serialization.py +99 -7
  539. mindspore/parallel/_ps_context.py +9 -5
  540. mindspore/parallel/_recovery_context.py +1 -1
  541. mindspore/parallel/_tensor.py +7 -1
  542. mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
  543. mindspore/{nn/transformer → parallel/_transformer}/layers.py +6 -37
  544. mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
  545. mindspore/{nn/transformer → parallel/_transformer}/moe.py +20 -16
  546. mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
  547. mindspore/{nn/transformer → parallel/_transformer}/transformer.py +48 -111
  548. mindspore/parallel/_utils.py +1 -2
  549. mindspore/parallel/algo_parameter_config.py +1 -1
  550. mindspore/parallel/checkpoint_transform.py +37 -34
  551. mindspore/parallel/shard.py +17 -18
  552. mindspore/profiler/common/validator/validate_path.py +2 -2
  553. mindspore/profiler/envprofiling.py +69 -47
  554. mindspore/profiler/parser/ascend_timeline_generator.py +49 -42
  555. mindspore/profiler/parser/base_timeline_generator.py +49 -56
  556. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +98 -78
  557. mindspore/profiler/parser/hwts_log_parser.py +1 -1
  558. mindspore/profiler/parser/integrator.py +15 -14
  559. mindspore/profiler/parser/minddata_analyzer.py +2 -2
  560. mindspore/profiler/parser/msadvisor_analyzer.py +12 -25
  561. mindspore/profiler/parser/msadvisor_parser.py +2 -4
  562. mindspore/profiler/parser/optime_parser.py +17 -18
  563. mindspore/profiler/parser/profiler_info.py +2 -1
  564. mindspore/profiler/profiling.py +218 -186
  565. mindspore/rewrite/__init__.py +3 -1
  566. mindspore/rewrite/api/node.py +1 -114
  567. mindspore/rewrite/api/node_type.py +3 -0
  568. mindspore/rewrite/api/pattern_engine.py +31 -1
  569. mindspore/rewrite/api/scoped_value.py +4 -4
  570. mindspore/rewrite/api/symbol_tree.py +3 -78
  571. mindspore/rewrite/api/tree_node_helper.py +1 -1
  572. mindspore/rewrite/ast_creator_register.py +1 -0
  573. mindspore/rewrite/ast_helpers/__init__.py +2 -2
  574. mindspore/rewrite/ast_helpers/ast_creator.py +1 -2
  575. mindspore/rewrite/ast_helpers/ast_finder.py +65 -0
  576. mindspore/rewrite/ast_helpers/ast_modifier.py +11 -3
  577. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +18 -2
  578. mindspore/rewrite/namespace.py +0 -2
  579. mindspore/rewrite/node.py +157 -11
  580. mindspore/rewrite/parsers/assign_parser.py +231 -53
  581. mindspore/rewrite/parsers/class_def_parser.py +187 -109
  582. mindspore/rewrite/parsers/for_parser.py +24 -14
  583. mindspore/rewrite/parsers/function_def_parser.py +21 -4
  584. mindspore/rewrite/parsers/if_parser.py +6 -2
  585. mindspore/rewrite/sparsify/__init__.py +0 -0
  586. mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
  587. mindspore/rewrite/sparsify/sparsify.py +109 -0
  588. mindspore/rewrite/sparsify/utils.py +173 -0
  589. mindspore/rewrite/symbol_tree.py +256 -133
  590. mindspore/rewrite/symbol_tree_builder.py +38 -1
  591. mindspore/run_check/_check_version.py +69 -63
  592. mindspore/run_check/run_check.py +2 -1
  593. mindspore/scipy/linalg.py +10 -114
  594. mindspore/scipy/ops.py +2 -2
  595. mindspore/scipy/ops_wrapper.py +1 -1
  596. mindspore/scipy/optimize/_bfgs.py +1 -1
  597. mindspore/scipy/optimize/_lagrange.py +200 -0
  598. mindspore/scipy/optimize/line_search.py +3 -2
  599. mindspore/scipy/optimize/minimize.py +41 -2
  600. mindspore/scipy/sparse/__init__.py +2 -2
  601. mindspore/scipy/sparse/linalg.py +4 -464
  602. mindspore/scipy/utils.py +1 -1
  603. mindspore/scipy/utils_const.py +7 -1
  604. mindspore/train/__init__.py +1 -1
  605. mindspore/train/_utils.py +28 -5
  606. mindspore/train/amp.py +273 -102
  607. mindspore/train/callback/_backup_and_restore.py +5 -5
  608. mindspore/train/callback/_callback.py +2 -2
  609. mindspore/train/callback/_checkpoint.py +3 -3
  610. mindspore/train/callback/_early_stop.py +3 -3
  611. mindspore/train/callback/_lambda_callback.py +2 -2
  612. mindspore/train/callback/_landscape.py +29 -31
  613. mindspore/train/callback/_loss_monitor.py +3 -3
  614. mindspore/train/callback/_on_request_exit.py +3 -3
  615. mindspore/train/callback/_reduce_lr_on_plateau.py +4 -4
  616. mindspore/train/callback/_summary_collector.py +23 -16
  617. mindspore/train/callback/_time_monitor.py +3 -3
  618. mindspore/train/checkpoint_pb2.py +68 -8
  619. mindspore/train/data_sink.py +15 -3
  620. mindspore/train/dataset_helper.py +10 -15
  621. mindspore/train/loss_scale_manager.py +8 -11
  622. mindspore/train/metrics/__init__.py +1 -1
  623. mindspore/train/metrics/bleu_score.py +1 -1
  624. mindspore/train/metrics/confusion_matrix.py +1 -1
  625. mindspore/train/metrics/cosine_similarity.py +1 -1
  626. mindspore/train/metrics/dice.py +2 -2
  627. mindspore/train/metrics/fbeta.py +1 -1
  628. mindspore/train/metrics/hausdorff_distance.py +4 -3
  629. mindspore/train/metrics/mean_surface_distance.py +2 -2
  630. mindspore/train/metrics/occlusion_sensitivity.py +1 -1
  631. mindspore/train/metrics/perplexity.py +1 -1
  632. mindspore/train/metrics/precision.py +1 -1
  633. mindspore/train/metrics/recall.py +1 -1
  634. mindspore/train/metrics/roc.py +2 -2
  635. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  636. mindspore/train/mind_ir_pb2.py +116 -37
  637. mindspore/train/model.py +45 -28
  638. mindspore/train/serialization.py +295 -188
  639. mindspore/train/summary/_summary_adapter.py +1 -1
  640. mindspore/train/summary/summary_record.py +43 -13
  641. mindspore/train/train_thor/convert_utils.py +2 -2
  642. mindspore/train/train_thor/dataset_helper.py +3 -3
  643. mindspore/version.py +1 -1
  644. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +3 -2
  645. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +648 -574
  646. mindspore/compression/__init__.py +0 -19
  647. mindspore/compression/common/constant.py +0 -124
  648. mindspore/compression/export/__init__.py +0 -19
  649. mindspore/compression/export/quant_export.py +0 -515
  650. mindspore/compression/quant/__init__.py +0 -28
  651. mindspore/compression/quant/qat.py +0 -634
  652. mindspore/compression/quant/quant_utils.py +0 -462
  653. mindspore/compression/quant/quantizer.py +0 -68
  654. mindspore/nn/layer/quant.py +0 -1868
  655. mindspore/nn/layer/rnn_utils.py +0 -90
  656. mindspore/nn/probability/dpn/__init__.py +0 -22
  657. mindspore/nn/probability/dpn/vae/__init__.py +0 -25
  658. mindspore/nn/probability/dpn/vae/cvae.py +0 -140
  659. mindspore/nn/probability/dpn/vae/vae.py +0 -124
  660. mindspore/nn/probability/infer/__init__.py +0 -22
  661. mindspore/nn/probability/infer/variational/elbo.py +0 -70
  662. mindspore/nn/probability/infer/variational/svi.py +0 -84
  663. mindspore/nn/probability/toolbox/__init__.py +0 -22
  664. mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
  665. mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -364
  666. mindspore/nn/probability/transforms/__init__.py +0 -22
  667. mindspore/nn/probability/transforms/transform_bnn.py +0 -262
  668. mindspore/nn/probability/zhusuan/__init__.py +0 -18
  669. mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
  670. mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
  671. mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
  672. mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
  673. mindspore/ops/_op_impl/aicpu/parallel_concat.py +0 -42
  674. mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
  675. mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -19
  676. mindspore/ops/bprop_mindir/Cast_bprop.mindir +0 -19
  677. mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -19
  678. mindspore/ops/bprop_mindir/MatMul_bprop.mindir +0 -0
  679. mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -17
  680. mindspore/ops/bprop_mindir/Transpose_bprop.mindir +0 -0
  681. mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -15
  682. mindspore/ops/composite/array_ops.py +0 -241
  683. mindspore/ops/composite/clip_ops.py +0 -134
  684. mindspore/ops/composite/random_ops.py +0 -426
  685. mindspore/ops/composite/vmap_ops.py +0 -38
  686. mindspore/parallel/nn/__init__.py +0 -42
  687. mindspore/parallel/nn/loss.py +0 -22
  688. mindspore/parallel/nn/moe.py +0 -21
  689. mindspore/parallel/nn/op_parallel_config.py +0 -22
  690. mindspore/parallel/nn/transformer.py +0 -31
  691. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
  692. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
  693. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
mindspore/nn/layer/basic.py +255 -624 (excerpt)

@@ -28,16 +28,17 @@ from mindspore.common.initializer import initializer
 from mindspore.ops import operations as P
 from mindspore.ops import functional as F
 from mindspore.ops.operations import _inner_ops as inner
-from mindspore.ops.primitive import constexpr, Primitive
+from mindspore.ops.primitive import constexpr, Primitive, _primexpr
 from mindspore.common.parameter import Parameter
 from mindspore._extends import cell_attr_register
-from mindspore._checkparam import Rel, Validator
+from mindspore import _checkparam as Validator
 from mindspore.nn.cell import Cell
 from mindspore.nn.layer.activation import get_activation
+from mindspore.common._decorator import deprecated
 
 __all__ = ['Dropout', 'Flatten', 'Dense', 'ClipByNorm', 'Norm', 'OneHot', 'Pad', 'Unfold', 'Tril', 'Triu',
            'ResizeBilinear', 'MatrixDiag', 'MatrixDiagPart', 'MatrixSetDiag', 'L1Regularizer', 'Dropout1d',
-           'Dropout2d', 'Dropout3d', 'Roll', 'Identity', 'Unflatten']
+           'Dropout2d', 'Dropout3d', 'Upsample', 'Roll', 'Identity', 'Unflatten']
 
 
 class L1Regularizer(Cell):
@@ -87,15 +88,18 @@ class L1Regularizer(Cell):
         super(L1Regularizer, self).__init__()
         Validator.check_value_type("scale", scale, [int, float], self.cls_name)
         if scale <= 0:
-            raise ValueError(f"For '{self.cls_name}', the 'scale' must be greater than 0, but got {scale}.")
+            raise ValueError(
+                f"For '{self.cls_name}', the 'scale' must be greater than 0, but got {scale}.")
         if math.isinf(scale) or math.isnan(scale):
-            raise ValueError(f"For '{self.cls_name}', the 'scale' can not be INF or NAN, but got {scale}.")
+            raise ValueError(
+                f"For '{self.cls_name}', the 'scale' can not be INF or NAN, but got {scale}.")
         self.abs = P.Abs()
         self.reduce_sum = P.ReduceSum()
         self.scale = Tensor(scale, dtype=mstype.float32)
 
     def construct(self, weights):
-        const_utils.check_type_valid(F.dtype(weights), mstype.number_type, 'weights')
+        const_utils.check_type_valid(
+            F.dtype(weights), mstype.number_type, 'weights')
         l1_regularization = self.scale * self.reduce_sum(self.abs(weights))
         return l1_regularization
 
@@ -104,12 +108,9 @@ class Dropout(Cell):
     r"""
     Dropout layer for the input.
 
-    Randomly set some elements of the input tensor to zero with probability :math:`1 - keep\_prob` during training
-    using samples from a Bernoulli distribution.
-
-    The outputs are scaled by a factor of :math:`\frac{1}{keep\_prob}` during training so
-    that the output layer remains at a similar scale. During inference, this
-    layer returns the same tensor as the `x`.
+    Dropout is a regularization method. The operator randomly sets some neurons output to 0
+    according to the probability of discarding the probability of discarding.
+    During the reasoning, this layer returns the same Tensor as the `x`.
 
     This technique is proposed in paper `Dropout: A Simple Way to Prevent Neural Networks from Overfitting
     <http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf>`_ and proved to be effective to reduce
@@ -118,25 +119,30 @@ class Dropout(Cell):
     <https://arxiv.org/pdf/1207.0580.pdf>`_.
 
     Note:
-        Each channel will be zeroed out independently on every construct call.
-        Parameter `dtype` will be removed in a future version. It is not recommended to define this parameter.
+        - Each channel will be zeroed out independently on every construct call.
+        - Parameter `keep_prob` will be removed in a future version, please use parameter `p` instead.
+          Parameter `p` means the probability of the element of the input tensor to be zeroed.
+        - Parameter `dtype` will be removed in a future version. It is not recommended to define this parameter.
 
     Args:
-        keep_prob (float): The keep rate, greater than 0 and less equal than 1. E.g. rate=0.9,
-            dropping out 10% of input units. Default: 0.5.
-        dtype (:class:`mindspore.dtype`): Data type of `x`. Default: mindspore.float32.
+        keep_prob (float): Deprecated. The keep rate, greater than 0 and less equal than 1.
+            E.g. rate=0.9, dropping out 10% of input neurons. Default: 0.5.
+        p (Union[float, int, None]): The dropout rate, greater than or equal to 0 and less than 1.
+            E.g. rate=0.9, dropping out 90% of input neurons. Default: None.
+        dtype (:class:`mindspore.dtype`): Data type of `input`. Default: mindspore.float32.
 
     Inputs:
         - **x** (Tensor) - The input of Dropout with data type of float16 or float32.
-          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
 
     Outputs:
         Tensor, output tensor with the same shape as the `x`.
 
     Raises:
         TypeError: If `keep_prob` is not a float.
+        TypeError: If the dtype of `p` is not float or int.
         TypeError: If dtype of `x` is not neither float16 nor float32.
         ValueError: If `keep_prob` is not in range (0, 1].
+        ValueError: If `p` is not in range [0, 1).
         ValueError: If length of shape of `x` is less than 1.
 
     Supported Platforms:
@@ -144,48 +150,55 @@ class Dropout(Cell):
 
     Examples:
         >>> x = Tensor(np.ones([2, 2, 3]), mindspore.float32)
-        >>> net = nn.Dropout(keep_prob=0.8)
+        >>> net = nn.Dropout(p=0.2)
         >>> net.set_train()
-        Dropout<keep_prob=0.8>
         >>> output = net(x)
         >>> print(output.shape)
         (2, 2, 3)
     """
 
-    def __init__(self, keep_prob=0.5, dtype=mstype.float32):
+    def __init__(self, keep_prob=0.5, p=None, dtype=mstype.float32):
         """Initialize Dropout."""
         super(Dropout, self).__init__()
-        Validator.check_value_type('keep_prob', keep_prob, [float], self.cls_name)
-        if keep_prob <= 0 or keep_prob > 1:
-            raise ValueError(f"For '{self.cls_name}', the 'keep_prob' must be a number in range (0, 1], "
-                             f"but got {keep_prob}.")
-        Validator.check_subclass("dtype", dtype, mstype.number_type, self.cls_name)
         if dtype != mstype.float32:
-            logger.info("This parameter `dtype` will be deleted or invisible in the future. Please don't use it.")
+            logger.warning(
+                "This parameter `dtype` will be deleted or invisible in the future. Please don't use it.")
+        if p is None:
+            logger.warning("For Dropout, this parameter `keep_prob` will be deprecated, please use `p` instead.")
+            Validator.check_value_type('keep_prob', keep_prob, [float], self.cls_name)
+            if keep_prob <= 0 or keep_prob > 1:
+                raise ValueError(f"For '{self.cls_name}', the 'keep_prob' must be a number in range (0, 1], "
+                                 f"but got {keep_prob}.")
+            seed0, seed1 = _get_graph_seed(0, "dropout")
+            self.dropout = P.Dropout(keep_prob, seed0, seed1)
+        else:
+            Validator.check_value_type('p', p, [float, int], self.cls_name)
+            if p < 0 or p >= 1:
+                raise ValueError(f"For '{self.cls_name}', the 'p' must be a number in range [0, 1), "
+                                 f"but got {p}.")
+            seed0, seed1 = _get_graph_seed(0, "dropout")
+            self.dropout = P.Dropout(1.0 - p, seed0, seed1)
+        self.p = p
         self.keep_prob = keep_prob
-        seed0, seed1 = _get_graph_seed(0, "dropout")
-        self.seed0 = seed0
-        self.seed1 = seed1
-        self.dropout = P.Dropout(keep_prob, seed0, seed1)
 
     def construct(self, x):
-        if not self.training:
-            return x
-
-        if self.keep_prob == 1:
+        if not self.training or self.keep_prob == 1 or self.p == 0:
             return x
 
         out, _ = self.dropout(x)
         return out
 
     def extend_repr(self):
-        return 'keep_prob={}'.format(self.keep_prob)
+        if self.p is None:
+            logger.warning("For Dropout, this parameter `keep_prob` will be deprecated, please use `p` instead.")
+            return f'keep_prob={self.keep_prob}'
+        return f'p={self.p}'
 
 
 class Dropout1d(Cell):
     r"""
     During training, randomly zeroes entire channels of the input tensor with probability `p`
-    from a Bernoulli distribution (For a 3-dimensional tensor with a shape of :math:`NCL`,
+    from a Bernoulli distribution (For a 3-dimensional tensor with a shape of :math:`(N, C, L)`,
     the channel feature map refers to a 1-dimensional feature map with the shape of :math:`L`).
     For example, the :math:`j\_th` channel of the :math:`i\_th` sample in the batched input is a to-be-processed
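The Dropout hunk above replaces `keep_prob` with a `p` argument: `p` is the probability of zeroing an element, so `keep_prob=0.8` corresponds to `p=0.2`, and passing only `keep_prob` still works in rc1 but logs a deprecation warning. A minimal migration sketch based on the rc1 signature shown in the hunk (the tensor values are illustrative only):

    import numpy as np
    import mindspore as ms
    from mindspore import nn, Tensor

    x = Tensor(np.ones([2, 2, 3]), ms.float32)

    # 2.0.0a0 style: keep rate (still accepted in rc1, with a deprecation warning)
    old_style = nn.Dropout(keep_prob=0.8)

    # 2.0.0rc1 style: p is the drop probability, i.e. p = 1 - keep_prob
    new_style = nn.Dropout(p=0.2)
    new_style.set_train()          # dropout is only applied in training mode
    output = new_style(x)          # same shape as x: (2, 2, 3)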
@@ -193,8 +206,8 @@ class Dropout1d(Cell):
     Each channel will be zeroed out independently on every forward call with probability `p` using samples
     from a Bernoulli distribution.
 
-    The parper `Dropout: A Simple Way to Prevent Neural Networks from Overfitting
-    <http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf>`_ mentioned this technologyAnd it is proved that
+    The paper `Dropout: A Simple Way to Prevent Neural Networks from Overfitting
+    <http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf>`_ mentioned this technology, And it is proved that
     it can effectively reduce over fitting and prevent neuronal coadaptation.
     For more details, refer to `Improving neural networks by preventing co-adaptation of feature detectors
     <https://arxiv.org/pdf/1207.0580.pdf>`_ .
@@ -202,8 +215,8 @@ class Dropout1d(Cell):
     `Dropout1d` can improve the independence between channel feature maps.
 
     Args:
-        p (float): The dropping probability of a channel, between 0 and 1, e.g. `p` = 0.8,
-            which means an 80% chance of clearing. Default: 0.5.
+        p (float, optional): The dropping probability of a channel, between 0 and 1, e.g. `p` = 0.8,
+            which means an 80% chance of being set to 0. Default: 0.5.
 
     Inputs:
         - **x** (Tensor) - A tensor with shape :math:`(N, C, L)` or :math:`(C, L)`, where `N` is the batch size,
@@ -215,7 +228,6 @@ class Dropout1d(Cell):
 
     Raises:
         TypeError: If `x` is not a Tensor.
-        TypeError: If dtype of `x` is not int8, int16, int32, int64, float16, float32 or float64.
         TypeError: If the data type of `p` is not float.
         ValueError: If `p` is out of the range `[0.0, 1.0]`.
         ValueError: If `x` shape is not `2D` or `3D`.
@@ -224,11 +236,13 @@ class Dropout1d(Cell):
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> dropout = nn.Dropout1d(p=0.5)
-        >>> x = Tensor(np.ones([4, 3]), mindspore.float32)
-        >>> output = dropout(x)
-        >>> print(output.shape)
-        (4, 3)
+        >>> import numpy as np
+        >>> import mindspore as ms
+        >>> from mindspore import nn, Tensor
+        >>> op = nn.Dropout1d(p=0.6)
+        >>> op.training = True
+        >>> a = Tensor(np.ones((3, 3)), ms.float32)
+        >>> output = op(a)
     """
 
     def __init__(self, p=0.5):
@@ -241,10 +255,7 @@ class Dropout1d(Cell):
         self.prob = p
 
     def construct(self, x):
-        if not self.training:
-            return x
-
-        if self.prob == 0:
+        if not self.training or self.prob == 0:
             return x
 
         out = F.dropout1d(x, self.prob)
@@ -288,10 +299,7 @@ class Dropout2d(Cell):
         self.dropout2d = P.Dropout2D(self.keep_prob)
 
     def construct(self, x):
-        if not self.training:
-            return x
-
-        if self.keep_prob == 1:
+        if not self.training or self.keep_prob == 1:
             return x
 
         out, _ = self.dropout2d(x)
@@ -339,10 +347,7 @@ class Dropout3d(Cell):
         self.dropout3d = P.Dropout3D(self.keep_prob)
 
     def construct(self, x):
-        if not self.training:
-            return x
-
-        if self.keep_prob == 1:
+        if not self.training or self.keep_prob == 1:
             return x
 
         out, _ = self.dropout3d(x)
@@ -352,22 +357,65 @@ class Dropout3d(Cell):
         return 'p={}'.format(self.keep_prob)
 
 
+class Upsample(Cell):
+    r"""
+    For details, please refer to :func:`mindspore.ops.interpolate`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> x = Tensor([[[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]]])
+        >>> upsample = nn.Upsample(size=(5, 5))
+        >>> out = upsample(x)
+        >>> print(x.asnumpy())
+        [[[[1. 2. 3. 4.]
+           [5. 6. 7. 8.]]]]
+        >>> print(out.asnumpy())
+        [[[[1. 1. 2. 3. 4.]
+           [1. 1. 2. 3. 4.]
+           [1. 1. 2. 3. 4.]
+           [5. 5. 6. 7. 8.]
+           [5. 5. 6. 7. 8.]]]]
+        >>> print(out.shape)
+        (1, 1, 5, 5)
+    """
+
+    def __init__(self, size=None, scale_factor=None, mode="nearest", align_corners=None, recompute_scale_factor=None):
+        """Initialize Upsample."""
+        super(Upsample, self).__init__()
+        self.size = size
+        self.scale_factor = scale_factor
+        self.mode = mode
+        self.align_corners = align_corners
+        self.recompute_scale_factor = recompute_scale_factor
+
+    def construct(self, x):
+        out = F.interpolate(x, self.size, self.scale_factor, self.mode,
+                            self.align_corners, self.recompute_scale_factor)
+        return out
+
+
 class Flatten(Cell):
     r"""
-    Flatten the dimensions other than the 0th dimension of the input Tensor.
+    Flatten the input Tensor along dimensions from `start_dim` to `end_dim`.
+
+    Args:
+        start_dim (int, optional): The first dimension to flatten. Default: 1.
+        end_dim (int, optional): The last dimension to flatten. Default: -1.
 
     Inputs:
-        - **x** (Tensor) - The input Tensor to be flattened. The data type is
-          `number <https://www.mindspore.cn/docs/en/r2.0.0-alpha/api_python/mindspore.html#mindspore.dtype>`_ .
-          The shape is :math:`(N, *)` , where :math:`*` means any number of additional dimensions
-          and the shape can't be ().
+        - **x** (Tensor) - The input Tensor to be flattened.
 
     Outputs:
-        Tensor, the shape of the output tensor is :math:`(N, X)`, where :math:`X` is
-        the product of the remaining dimensions.
+        Tensor. If no dimensions are flattened, returns the original `x`, otherwise return the flattened Tensor.
+        If `x` is a 0-dimensional Tensor, a 1-dimensional Tensor will be returned.
 
     Raises:
-        TypeError: If `x` is not a subclass of Tensor.
+        TypeError: If `x` is not a Tensor.
+        TypeError: If `start_dim` or `end_dim` is not int.
+        ValueError: If `start_dim` is greater than `end_dim` after canonicalized.
+        ValueError: If `start_dim` or `end_dim` is not in range of [-x.dim, x.dim-1].
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
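The new `nn.Upsample` cell introduced in this hunk is a thin wrapper that simply forwards its constructor arguments to `mindspore.ops.interpolate` in `construct`. A short usage sketch, following the docstring example shown above (nearest-neighbour resize to a fixed 5x5 output):

    import mindspore as ms
    from mindspore import nn, Tensor

    x = Tensor([[[[1.0, 2.0, 3.0, 4.0],
                  [5.0, 6.0, 7.0, 8.0]]]], ms.float32)
    upsample = nn.Upsample(size=(5, 5), mode="nearest")
    out = upsample(x)
    print(out.shape)   # (1, 1, 5, 5)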
@@ -385,16 +433,25 @@ class Flatten(Cell):
         after flatten the output shape is (2, 4)
     """
 
-    def __init__(self):
+    def __init__(self, start_dim=1, end_dim=-1):
         """Initialize Flatten."""
         super(Flatten, self).__init__()
+        self.start_dim = start_dim
+        self.end_dim = end_dim
 
     def construct(self, x):
-        return F.reshape(x, (F.shape(x)[0], -1))
+        x_rank = F.rank(x)
+        ndim = x_rank if x_rank != 0 else 1
+        if self.start_dim < -ndim or self.start_dim >= ndim:
+            const_utils.raise_value_error("'start_dim' out of range.")
+        if self.end_dim < -ndim or self.end_dim >= ndim:
+            const_utils.raise_value_error("'end_dim' out of range.")
+        return F.flatten(x, start_dim=self.start_dim, end_dim=self.end_dim)
 
 
-@constexpr
+@_primexpr
 def check_dense_input_shape(x, prim_name=None):
+    """ check the shape of inputs"""
     msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
     if len(x) < 2:
         raise ValueError(f"{msg_prefix} dimension of 'x' should not be less than 2, but got {len(x)}.")
@@ -408,13 +465,13 @@ class Identity(Cell):
408
465
  - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is Number.
409
466
 
410
467
  Outputs:
411
- Tensor, the shape of tensor and the data type are the same as `input_x`, :math:`(x_1, x_2, ..., x_R)`.
468
+ Tensor, the shape of tensor and the data type are the same as `x`.
412
469
 
413
470
  Raises:
414
471
  TypeError: If `x` is not a Tensor.
415
472
 
416
473
  Supported Platforms:
417
- ``Ascend`` ``CPU`` ``GPU``
474
+ ``Ascend`` ``GPU`` ``CPU``
418
475
 
419
476
  Examples:
420
477
  >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
@@ -455,7 +512,7 @@ class Dense(Cell):
455
512
  is same as `x`. The values of str refer to the function `initializer`. Default: 'normal'.
456
513
  bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter. The dtype is
457
514
  same as `x`. The values of str refer to the function `initializer`. Default: 'zeros'.
458
- has_bias (bool): Specifies whether the layer uses a bias vector. Default: True.
515
+ has_bias (bool): Specifies whether the layer uses a bias vector :math:`\text{bias}`. Default: True.
459
516
  activation (Union[str, Cell, Primitive, None]): activate function applied to the output of the fully connected
460
517
  layer. Both activation name, e.g. 'relu', and mindspore activation function, e.g. mindspore.ops.ReLU(),
461
518
  are supported. Default: None.
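A minimal usage sketch of `nn.Dense` with the arguments documented above (illustrative sizes and activation choice, assuming a MindSpore 2.0 install):

    import numpy as np
    from mindspore import Tensor, nn

    x = Tensor(np.ones((4, 16), dtype=np.float32))             # illustrative batch of 4, 16 features
    dense = nn.Dense(16, 8, has_bias=True, activation='relu')  # weight_init/bias_init keep their defaults
    print(dense(x).shape)  # (4, 8)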
@@ -497,9 +554,12 @@ class Dense(Cell):
497
554
  activation=None):
498
555
  """Initialize Dense."""
499
556
  super(Dense, self).__init__()
500
- self.in_channels = Validator.check_positive_int(in_channels, "in_channels", self.cls_name)
501
- self.out_channels = Validator.check_positive_int(out_channels, "out_channels", self.cls_name)
502
- self.has_bias = Validator.check_bool(has_bias, "has_bias", self.cls_name)
557
+ self.in_channels = Validator.check_positive_int(
558
+ in_channels, "in_channels", self.cls_name)
559
+ self.out_channels = Validator.check_positive_int(
560
+ out_channels, "out_channels", self.cls_name)
561
+ self.has_bias = Validator.check_bool(
562
+ has_bias, "has_bias", self.cls_name)
503
563
  self.reshape = P.Reshape()
504
564
  self.shape_op = P.Shape()
505
565
 
@@ -510,7 +570,8 @@ class Dense(Cell):
510
570
  f"be equal to 2, and the first dim must be equal to 'out_channels', and the "
511
571
  f"second dim must be equal to 'in_channels'. But got 'weight_init': {weight_init}, "
512
572
  f"'out_channels': {out_channels}, 'in_channels': {in_channels}.")
513
- self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")
573
+ self.weight = Parameter(initializer(
574
+ weight_init, [out_channels, in_channels]), name="weight")
514
575
 
515
576
  self.bias = None
516
577
  if self.has_bias:
@@ -519,11 +580,13 @@ class Dense(Cell):
519
580
  raise ValueError(f"For '{self.cls_name}', bias init shape error. The ndim of 'bias_init' must "
520
581
  f"be equal to 1, and the first dim must be equal to 'out_channels'. But got "
521
582
  f"'bias_init': {bias_init}, 'out_channels': {out_channels}.")
522
- self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias")
583
+ self.bias = Parameter(initializer(
584
+ bias_init, [out_channels]), name="bias")
523
585
  self.bias_add = P.BiasAdd()
524
586
 
525
587
  self.matmul = P.MatMul(transpose_b=True)
526
- self.activation = get_activation(activation) if isinstance(activation, str) else activation
588
+ self.activation = get_activation(activation) if isinstance(
589
+ activation, str) else activation
527
590
  if activation is not None and not isinstance(self.activation, (Cell, Primitive)):
528
591
  raise TypeError(f"For '{self.cls_name}', the 'activation' must be str or Cell or Primitive, but got "
529
592
  f"{type(activation).__name__}.")
@@ -540,12 +603,13 @@ class Dense(Cell):
540
603
  if self.activation_flag:
541
604
  x = self.activation(x)
542
605
  if len(x_shape) != 2:
543
- out_shape = x_shape[:-1] + (-1,)
606
+ out_shape = x_shape[:-1] + (F.shape(x)[-1],)
544
607
  x = self.reshape(x, out_shape)
545
608
  return x
546
609
 
547
610
  def extend_repr(self):
548
- s = 'input_channels={}, output_channels={}'.format(self.in_channels, self.out_channels)
611
+ s = 'input_channels={}, output_channels={}'.format(
612
+ self.in_channels, self.out_channels)
549
613
  if self.has_bias:
550
614
  s += ', has_bias={}'.format(self.has_bias)
551
615
  if self.activation_flag:
@@ -557,14 +621,15 @@ class Dense(Cell):
557
621
  def _is_equal_one(x):
558
622
  if x is None:
559
623
  return False
560
- return bool(x.asnumpy().mean() == 1.0)
624
+ return F.equal(F.reduce_mean(x), 1.0)
561
625
 
562
626
 
563
627
  @constexpr
564
628
  def _dtype_check(x_dtype, prim_name=None):
565
629
  msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
566
630
  if x_dtype not in [mstype.float32, mstype.float16]:
567
- raise TypeError(f"{msg_prefix} x_dtype must be float32 or float16, but got {x_dtype}.")
631
+ raise TypeError(
632
+ f"{msg_prefix} x_dtype must be float32 or float16, but got {x_dtype}.")
568
633
 
569
634
 
570
635
  @constexpr
@@ -634,66 +699,16 @@ class ClipByNorm(Cell):
634
699
 
635
700
  class Norm(Cell):
636
701
  r"""
637
- Computes the norm of vectors, currently including Euclidean norm, i.e., :math:`L_2`-norm.
638
-
639
- .. math::
640
-
641
- norm(x) = \sqrt{\sum_{i=1}^{n} (x_i^2)}
642
-
643
- Args:
644
- axis (Union[tuple, int]): The axis over which to compute vector norms. Default: ().
645
- keep_dims (bool): If true, the axis indicated in `axis` are kept with size 1. Otherwise,
646
- the dimensions in `axis` are removed from the output shape. Default: False.
647
-
648
- Inputs:
649
- - **x** (Tensor) - Tensor which is not empty. The data type should be float16 or float32.
650
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
651
-
652
- Outputs:
653
- Tensor, output tensor with dimensions in 'axis' reduced to 1 will be returned if 'keep_dims' is True;
654
- otherwise a Tensor with dimensions in 'axis' removed is returned. The data type is the same with `x`.
655
-
656
- Raises:
657
- TypeError: If `axis` is neither an int nor a tuple.
658
- TypeError: If `keep_dims` is not a bool.
659
-
660
- Supported Platforms:
661
- ``Ascend`` ``GPU`` ``CPU``
662
-
663
- Examples:
664
- >>> net = nn.Norm(axis=0)
665
- >>> x = Tensor(np.array([[4, 4, 9, 1], [2, 1, 3, 6]]), mindspore.float32)
666
- >>> print(x.shape)
667
- (2, 4)
668
- >>> output = net(x)
669
- >>> print(output)
670
- [4.472136 4.1231055 9.486833 6.0827627]
671
- >>> print(output.shape)
672
- (4,)
673
- >>> net = nn.Norm(axis=0, keep_dims=True)
674
- >>> x = Tensor(np.array([[4, 4, 9, 1], [2, 1, 3, 6]]), mindspore.float32)
675
- >>> print(x.shape)
676
- (2, 4)
677
- >>> output = net(x)
678
- >>> print(output)
679
- [4.472136 4.1231055 9.486833 6.0827627]
680
- >>> print(output.shape)
681
- (1, 4)
682
- >>> net = nn.Norm(axis=1)
683
- >>> x = Tensor(np.array([[4, 4, 9, 1], [2, 1, 3, 6]]), mindspore.float32)
684
- >>> print(x.shape)
685
- (2, 4)
686
- >>> output = net(x)
687
- >>> print(output)
688
- [10.677078 7.071068]
689
- >>> print(output.shape)
690
- (2,)
702
+ 'nn.Norm' is deprecated from version 2.0 and will be removed in a future version,
703
+ use 'ops.norm' instead.
691
704
  """
692
705
 
706
+ @deprecated("2.0", "ops.norm", False)
693
707
  def __init__(self, axis=(), keep_dims=False):
694
708
  """Initialize Norm."""
695
709
  super(Norm, self).__init__()
696
- Validator.check_value_type("keep_dims", keep_dims, [bool], self.cls_name)
710
+ Validator.check_value_type(
711
+ "keep_dims", keep_dims, [bool], self.cls_name)
697
712
  self.axis = axis
698
713
  self.keep_dims = keep_dims
699
714
  self.reduce_sum = P.ReduceSum(True)
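The deprecation notice above points to `ops.norm`. A hedged migration sketch; the input values come from the removed docstring example, while the `dim` keyword follows the 2.0 functional API as documented and should be verified against the installed version:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([[4, 4, 9, 1], [2, 1, 3, 6]]), mindspore.float32)
    # Roughly equivalent to the old nn.Norm(axis=0)(x); the parameter name is an assumption.
    out = ops.norm(x, dim=0)
    print(out)  # approximately [4.472136 4.1231055 9.486833 6.0827627]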
@@ -713,119 +728,11 @@ class Norm(Cell):
713
728
 
714
729
  class OneHot(Cell):
715
730
  """
716
- Returns a one-hot tensor.
717
-
718
- The locations represented by indices in argument `indices` take value on_value,
719
- while all other locations take value off_value.
720
-
721
- Note:
722
- If the input indices is rank :math:`N`, the output will have rank :math:`N+1`. The new
723
- axis is created at dimension `axis`.
724
-
725
- If `indices` is a scalar, the output shape will be a vector of length `depth`.
726
-
727
- If `indices` is a vector of length `features`, the output shape will be:
728
-
729
- .. code-block::
730
-
731
- features * depth if axis == -1
732
-
733
- depth * features if axis == 0
734
-
735
- If `indices` is a matrix with shape `[batch, features]`, the output shape will be:
736
-
737
- .. code-block::
738
-
739
- batch * features * depth if axis == -1
740
-
741
- batch * depth * features if axis == 1
742
-
743
- depth * batch * features if axis == 0
744
-
745
- Args:
746
- axis (int): Features x depth if axis is -1, depth x features
747
- if axis is 0. Default: -1.
748
- depth (int): A scalar defining the depth of the one hot dimension. Default: 1.
749
- on_value (float): A scalar defining the value to fill in output[i][j]
750
- when indices[j] = i. Default: 1.0.
751
- off_value (float): A scalar defining the value to fill in output[i][j]
752
- when indices[j] != i. Default: 0.0.
753
- dtype (:class:`mindspore.dtype`): Data type of 'on_value' and 'off_value', not the
754
- data type of indices. Default: mindspore.float32.
755
-
756
- Inputs:
757
- - **indices** (Tensor) - A tensor of indices with data type of int32 or int64.
758
- The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
759
-
760
- Outputs:
761
- Tensor, the one-hot tensor of data type `dtype` with dimension at `axis` expanded to `depth` and filled with
762
- on_value and off_value. The dimension of the `Outputs` is equal to the dimension of the `indices` plus one.
763
-
764
- Raises:
765
- TypeError: If `axis` or `depth` is not an int.
766
- TypeError: If dtype of `indices` is neither int32 nor int64.
767
- ValueError: If `axis` is not in range [-1, len(indices_shape)].
768
- ValueError: If `depth` is less than 0.
769
-
770
- Supported Platforms:
771
- ``Ascend`` ``GPU`` ``CPU``
772
-
773
- Examples:
774
- >>> # 1st sample: add new coordinates at axis 1
775
- >>> net = nn.OneHot(depth=4, axis=1)
776
- >>> indices = Tensor([[1, 3], [0, 2]], dtype=mindspore.int32)
777
- >>> output = net(indices)
778
- >>> print(output)
779
- [[[0. 0.]
780
- [1. 0.]
781
- [0. 0.]
782
- [0. 1.]]
783
- [[1. 0.]
784
- [0. 0.]
785
- [0. 1.]
786
- [0. 0.]]]
787
- >>> # The results are shown below:
788
- >>> print(output.shape)
789
- (2, 4, 2)
790
- >>> # 2nd sample: add new coordinates at axis 0
791
- >>> net = nn.OneHot(depth=4, axis=0)
792
- >>> indices = Tensor([[1, 3], [0, 2]], dtype=mindspore.int32)
793
- >>> output = net(indices)
794
- >>> print(output)
795
- [[[0. 0.]
796
- [1. 0.]]
797
- [[1. 0.]
798
- [0. 0.]]
799
- [[0. 0.]
800
- [0. 1.]]
801
- [[0. 1.]
802
- [0. 0.]]]
803
- >>> # The results are shown below:
804
- >>> print(output.shape)
805
- (4, 2, 2)
806
- >>> # 3rd sample: add new coordinates at the last dimension.
807
- >>> net = nn.OneHot(depth=4, axis=-1)
808
- >>> indices = Tensor([[1, 3], [0, 2]], dtype=mindspore.int32)
809
- >>> output = net(indices)
810
- >>> # The results are shown below:
811
- >>> print(output)
812
- [[[0. 1. 0. 0.]
813
- [0. 0. 0. 1.]]
814
- [[1. 0. 0. 0.]
815
- [0. 0. 1. 0.]]]
816
- >>> print(output.shape)
817
- (2, 2, 4)
818
- >>> indices = Tensor([1, 3, 0, 2], dtype=mindspore.int32)
819
- >>> output = net(indices)
820
- >>> print(output)
821
- [[0. 1. 0. 0.]
822
- [0. 0. 0. 1.]
823
- [1. 0. 0. 0.]
824
- [0. 0. 1. 0.]]
825
- >>> print(output.shape)
826
- (4, 4)
731
+ 'nn.OneHot' is deprecated from version 2.0 and will be removed in a future version,
732
+ use 'ops.one_hot' instead.
827
733
  """
828
734
 
735
+ @deprecated("2.0", "ops.one_hot", False)
829
736
  def __init__(self, axis=-1, depth=1, on_value=1.0, off_value=0.0, dtype=mstype.float32):
830
737
  """Initialize OneHot."""
831
738
  super(OneHot, self).__init__()
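Likewise, `nn.OneHot` is replaced by `ops.one_hot`. A minimal sketch; the indices reuse the removed docstring example, and passing `on_value`/`off_value` as scalar Tensors is an assumption about the 2.0 functional signature:

    import mindspore
    from mindspore import Tensor, ops

    indices = Tensor([1, 3, 0, 2], mindspore.int32)
    on_value = Tensor(1.0, mindspore.float32)    # assumed Tensor form of on_value
    off_value = Tensor(0.0, mindspore.float32)   # assumed Tensor form of off_value
    out = ops.one_hot(indices, 4, on_value, off_value)  # depth=4, new axis appended last
    print(out.shape)  # (4, 4)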
@@ -844,11 +751,11 @@ class Pad(Cell):
844
751
  Pads the input tensor according to the paddings and mode.
845
752
 
846
753
  Args:
847
- paddings (tuple): The shape of parameter `paddings` is (N, 2). N is the rank of input data. All elements of
848
- paddings are int type. For `D` th dimension of the `x`, paddings[D, 0] indicates how many sizes to be
849
- extended ahead of the `D` th dimension of the input tensor, and paddings[D, 1] indicates how many sizes to
850
- be extended behind of the `D` th dimension of the input tensor. The padded size of each dimension D of the
851
- output is: :math:`paddings[D, 0] + input\_x.dim\_size(D) + paddings[D, 1]`,
754
+ paddings (tuple): The shape of parameter `paddings` is :math:`(N, 2)` . N is the rank of input data. All
755
+ elements of paddings are int type. For `D` th dimension of the `x`, paddings[D, 0] indicates how many
756
+ sizes to be extended ahead of the `D` th dimension of the input tensor, and paddings[D, 1] indicates how
757
+ many sizes to be extended behind of the `D` th dimension of the input tensor. The padded size of each
758
+ dimension D of the output is: :math:`paddings[D, 0] + input\_x.dim\_size(D) + paddings[D, 1]`,
852
759
  e.g.:
853
760
 
854
761
  .. code-block::
@@ -884,7 +791,7 @@ class Pad(Cell):
884
791
 
885
792
  Raises:
886
793
  TypeError: If `paddings` is not a tuple.
887
- ValueError: If length of `paddings` is more than 4 or its shape is not (N, 2).
794
+ ValueError: If length of `paddings` is more than 4 or its shape is not :math:`(N, 2)` .
888
795
  ValueError: If `mode` is not one of 'CONSTANT', 'REFLECT', 'SYMMETRIC'.
889
796
 
890
797
  Supported Platforms:
@@ -979,7 +886,8 @@ class Pad(Cell):
979
886
  super(Pad, self).__init__()
980
887
  self.mode = mode
981
888
  self.paddings = paddings
982
- Validator.check_string(self.mode, ["CONSTANT", "REFLECT", "SYMMETRIC"], 'mode', self.cls_name)
889
+ Validator.check_string(
890
+ self.mode, ["CONSTANT", "REFLECT", "SYMMETRIC"], 'mode', self.cls_name)
983
891
  if not isinstance(paddings, tuple):
984
892
  raise TypeError(f"For '{self.cls_name}', the type of 'paddings' must be tuple, "
985
893
  f"but got {type(paddings).__name__}.")
@@ -1009,66 +917,32 @@ def bilinear(shape, size, scale, align_corners, prim_name=None):
1009
917
  """Check input and calculate shape"""
1010
918
  msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
1011
919
  if not isinstance(align_corners, bool):
1012
- raise TypeError(f"{msg_prefix} type of 'align_corners' must be boolean, "
1013
- f"but got {type(align_corners).__name__}.")
920
+ raise TypeError(
921
+ f"{msg_prefix} type of 'align_corners' must be bool, but got {type(align_corners).__name__}.")
1014
922
  if size is None and scale is None:
1015
923
  raise ValueError(f"{msg_prefix} 'size' and 'scale' both none.")
1016
924
  if size is not None and scale is not None:
1017
925
  raise ValueError(f"{msg_prefix} 'size' and 'scale' both not none.")
1018
926
  if size is not None:
1019
927
  if not isinstance(size, (tuple, list)):
1020
- raise ValueError(f"{msg_prefix} 'size' must be tuple or list or None, but got {type(size).__name__}.")
1021
- Validator.check_int(len(size), 2, Rel.EQ, "size", "bilinear")
1022
- Validator.check_int(size[0], 1, Rel.GE, "size[0]", "bilinear")
1023
- Validator.check_int(size[1], 1, Rel.GE, "size[1]", "bilinear")
928
+ raise ValueError(
929
+ f"{msg_prefix} 'size' must be tuple or list or None, but got {type(size).__name__}.")
930
+ Validator.check_int(len(size), 2, Validator.EQ, "size", "bilinear")
931
+ Validator.check_int(size[0], 1, Validator.GE, "size[0]", "bilinear")
932
+ Validator.check_int(size[1], 1, Validator.GE, "size[1]", "bilinear")
1024
933
  return size
1025
- Validator.check_int(scale, 1, Rel.GE, "scale factor", "bilinear")
934
+ Validator.check_int(scale, 1, Validator.GE, "scale factor", "bilinear")
1026
935
  ret = (scale * shape[2], scale * shape[3])
1027
936
  return ret
1028
937
 
1029
938
 
1030
939
  class ResizeBilinear(Cell):
1031
940
  r"""
1032
- Samples the input tensor to the given size or scale_factor by using bilinear interpolate.
1033
-
1034
- Args:
1035
- half_pixel_centers (bool): Whether half pixel center. If set to True, `align_corners` should be False.
1036
- Default: False.
1037
-
1038
- Inputs:
1039
- - **x** (Tensor) - Tensor to be resized. Input tensor must be a 4-D tensor with shape
1040
- :math:`(batch, channels, height, width)`, with data type of float16 or float32.
1041
- - **size** (Union[tuple[int], list[int], None]): A tuple or list of 2 int elements
1042
- :math:`(new\_height, new\_width)`,the new size of the tensor.
1043
- One and only one of size and scale_factor can be set to None. Default: None.
1044
- - **scale_factor** (int, None): The scale factor of new size of the tensor. The value should be positive
1045
- integer. One and only one of size and scale_factor can be set to None. Default: None.
1046
- - **align_corners** (bool): If true, rescale input by :math:`(new\_height - 1) / (height - 1)`, which exactly
1047
- aligns the 4 corners of images and resized images. If false, rescale by :math:`new\_height / height`.
1048
- Default: False.
1049
-
1050
- Outputs:
1051
- Resized tensor.
1052
- If size is set, the result is 4-D tensor with shape :math:`(batch, channels, new\_height, new\_width)`,
1053
- and the data type is the same as `x`.
1054
- If scale is set, the result is 4-D tensor with shape
1055
- :math:`(batch, channels, scale\_factor * height, scale\_factor * width)` and the data type is the same as `x`.
1056
-
1057
- Raises:
1058
- TypeError: If `size` is not one of tuple, list, None.
1059
- TypeError: If `scale_factor` is neither int nor None.
1060
- TypeError: If `align_corners` is not a bool.
1061
- TypeError: If `half_pixel_centers` is not a bool.
1062
- TypeError: If `align_corners` and `half_pixel_centers` are all True.
1063
- TypeError: If `half_pixel_centers` is True and device_target not Ascend.
1064
- TypeError: If dtype of `x` is neither float16 nor float32.
1065
- ValueError: If `size` and `scale_factor` are both None or not None.
1066
- ValueError: If length of shape of `x` is not equal to 4.
1067
- ValueError: If `scale_factor` is an int which is less than 0.
1068
- ValueError: If `size` is a list or tuple whose length is not equal to 2.
941
+ 'nn.ResizeBilinear' is deprecated from version 2.0 and will be removed in a future version,
942
+ use :class:`mindspore.ops.ResizeBilinearV2` or :func:`mindspore.ops.interpolate` instead.
1069
943
 
1070
944
  Supported Platforms:
1071
- ``Ascend`` ``CPU`` ``GPU``
945
+ Deprecated
1072
946
 
1073
947
  Examples:
1074
948
  >>> x = Tensor([[[[1, 2, 3, 4], [5, 6, 7, 8]]]], mindspore.float32)
@@ -1090,11 +964,15 @@ class ResizeBilinear(Cell):
1090
964
  def __init__(self, half_pixel_centers=False):
1091
965
  """Initialize ResizeBilinear."""
1092
966
  super(ResizeBilinear, self).__init__()
967
+ logger.warning("'nn.ResizeBilinear' is deprecated from version 2.0 and will be removed in a "
968
+ "future version, use 'ops.ResizeBilinearV2' or 'ops.interpolate' instead.")
1093
969
  self.half_pixel_centers = half_pixel_centers
1094
970
 
1095
971
  def construct(self, x, size=None, scale_factor=None, align_corners=False):
1096
- shape = bilinear(x.shape, size, scale_factor, align_corners, self.cls_name)
1097
- resize_bilinear = P.ResizeBilinear(shape, align_corners, self.half_pixel_centers)
972
+ shape = bilinear(x.shape, size, scale_factor,
973
+ align_corners, self.cls_name)
974
+ resize_bilinear = P.ResizeBilinear(
975
+ shape, align_corners, self.half_pixel_centers)
1098
976
  return resize_bilinear(x)
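Per the notice above, existing `nn.ResizeBilinear()(x, size=...)` calls can be migrated to `ops.interpolate` with `mode="bilinear"`. A hedged sketch; the input reuses the docstring example kept above, the target size is illustrative, and exact keyword support should be checked against the installed 2.0 build:

    import mindspore
    from mindspore import Tensor, ops

    x = Tensor([[[[1, 2, 3, 4], [5, 6, 7, 8]]]], mindspore.float32)  # from the docstring example
    # Replacement for the deprecated nn.ResizeBilinear()(x, size=(5, 5)).
    y = ops.interpolate(x, size=(5, 5), mode="bilinear", align_corners=False)
    print(y.shape)  # (1, 1, 5, 5)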
1099
977
 
1100
978
 
@@ -1125,11 +1003,9 @@ class Unfold(Cell):
1125
1003
  Tensor, a 4-D tensor whose data type is same as `x`,
1126
1004
  and the shape is [out_batch, out_depth, out_row, out_col] where `out_batch` is the same as the `in_batch`.
1127
1005
 
1128
- :math:`out\_depth = ksize\_row * ksize\_col * in\_depth`
1129
-
1130
- :math:`out\_row = (in\_row - (ksize\_row + (ksize\_row - 1) * (rate\_row - 1))) // stride\_row + 1`
1131
-
1132
- :math:`out\_col = (in\_col - (ksize\_col + (ksize\_col - 1) * (rate\_col - 1))) // stride\_col + 1`
1006
+ - :math:`out\_depth = ksize\_row * ksize\_col * in\_depth`
1007
+ - :math:`out\_row = (in\_row - (ksize\_row + (ksize\_row - 1) * (rate\_row - 1))) // stride\_row + 1`
1008
+ - :math:`out\_col = (in\_col - (ksize\_col + (ksize\_col - 1) * (rate\_col - 1))) // stride\_col + 1`
1133
1009
 
1134
1010
  Raises:
1135
1011
  TypeError: If `ksizes`, `strides` or `rates` is neither a tuple nor list.
@@ -1160,7 +1036,8 @@ class Unfold(Cell):
1160
1036
  super(Unfold, self).__init__()
1161
1037
 
1162
1038
  def _check_tuple_or_list(arg_name, arg_val, prim_name):
1163
- Validator.check_value_type(f"{arg_name}s", ksizes, [tuple, list], self.cls_name)
1039
+ Validator.check_value_type(f"{arg_name}s", ksizes, [
1040
+ tuple, list], self.cls_name)
1164
1041
  if len(arg_val) != 4 or arg_val[0] != 1 or arg_val[3] != 1:
1165
1042
  raise ValueError(f"For '{prim_name}' the format of '{arg_name}s' must be [1, {arg_name}_row, "
1166
1043
  f"{arg_name}_col, 1], but got {arg_val}.")
@@ -1175,102 +1052,29 @@ class Unfold(Cell):
1175
1052
  ksizes = ksizes[0], ksizes[3], ksizes[1], ksizes[2]
1176
1053
  strides = strides[0], strides[3], strides[1], strides[2]
1177
1054
  rates = rates[0], rates[3], rates[1], rates[2]
1178
- self.extract_image_patches = inner.ExtractImagePatches(ksizes, strides, rates, padding)
1055
+ self.extract_image_patches = inner.ExtractImagePatches(
1056
+ ksizes, strides, rates, padding)
1179
1057
 
1180
1058
  def construct(self, input_x):
1181
1059
  result = self.extract_image_patches(input_x)
1182
1060
  return result
1183
1061
 
1184
1062
 
1185
- @constexpr
1063
+ @_primexpr
1186
1064
  def tril(x_shape, x_dtype, k):
1187
- Validator.check_int(len(x_shape), 1, Rel.GE, "x rank", "tril")
1065
+ Validator.check_int(len(x_shape), 1, Validator.GE, "x rank", "tril")
1188
1066
  Validator.check_is_int(k, "k value", "tril")
1189
- mask = np.tril(np.ones(x_shape), k)
1190
- return Tensor(mask, x_dtype)
1067
+ value = F.cast(P.Tril(diagonal=k)(F.ones(x_shape, x_dtype)), x_dtype)
1068
+ return value
1191
1069
 
1192
1070
 
1193
1071
  class Tril(Cell):
1194
1072
  """
1195
- Returns a tensor, the elements above the specified main diagonal are set to zero.
1196
-
1197
- Divide the matrix elements into upper and lower triangles along the main diagonal (including diagonals).
1198
-
1199
- The parameter `k` controls the choice of diagonal.
1200
- If `k` = 0, split along the main diagonal and keep all the elements of the lower triangle.
1201
- If `k` > 0, select the diagonal `k` along the main diagonal upwards, and keep all the elements of the lower
1202
- triangle.
1203
- If `k` < 0, select the diagonal `k` along the main diagonal down, and keep all the elements of the lower
1204
- triangle.
1205
-
1206
- Inputs:
1207
- - **x** (Tensor) - The input tensor. The data type is
1208
- `number <https://www.mindspore.cn/docs/en/r2.0.0-alpha/api_python/mindspore.html#mindspore.dtype>`_.
1209
- - **k** (Int) - The index of diagonal. Default: 0. If the dimensions of the input matrix are d1 and d2,
1210
- the range of k should be in [-min(d1, d2)+1, min(d1, d2)-1], and the output value will be the same as the
1211
- input `x` when `k` is out of range.
1212
-
1213
- Outputs:
1214
- Tensor, has the same shape and type as input `x`.
1215
-
1216
- Raises:
1217
- TypeError: If `k` is not an int.
1218
- ValueError: If length of shape of `x` is less than 1.
1219
-
1220
- Supported Platforms:
1221
- ``Ascend`` ``GPU`` ``CPU``
1222
-
1223
- Examples:
1224
- >>> # case1: k = 0
1225
- >>> x = Tensor(np.array([[ 1, 2, 3, 4],
1226
- ... [ 5, 6, 7, 8],
1227
- ... [10, 11, 12, 13],
1228
- ... [14, 15, 16, 17]]))
1229
- >>> tril = nn.Tril()
1230
- >>> result = tril(x)
1231
- >>> print(result)
1232
- [[ 1 0 0 0]
1233
- [ 5 6 0 0]
1234
- [10 11 12 0]
1235
- [14 15 16 17]]
1236
- >>> # case2: k = 1
1237
- >>> x = Tensor(np.array([[ 1, 2, 3, 4],
1238
- ... [ 5, 6, 7, 8],
1239
- ... [10, 11, 12, 13],
1240
- ... [14, 15, 16, 17]]))
1241
- >>> tril = nn.Tril()
1242
- >>> result = tril(x, 1)
1243
- >>> print(result)
1244
- [[ 1 2 0 0]
1245
- [ 5 6 7 0]
1246
- [10 11 12 13]
1247
- [14 15 16 17]]
1248
- >>> # case3: k = 2
1249
- >>> x = Tensor(np.array([[ 1, 2, 3, 4],
1250
- ... [ 5, 6, 7, 8],
1251
- ... [10, 11, 12, 13],
1252
- ... [14, 15, 16, 17]]))
1253
- >>> tril = nn.Tril()
1254
- >>> result = tril(x, 2)
1255
- >>> print(result)
1256
- [[ 1 2 3 0]
1257
- [ 5 6 7 8]
1258
- [10 11 12 13]
1259
- [14 15 16 17]]
1260
- >>> # case4: k = -1
1261
- >>> x = Tensor(np.array([[ 1, 2, 3, 4],
1262
- ... [ 5, 6, 7, 8],
1263
- ... [10, 11, 12, 13],
1264
- ... [14, 15, 16, 17]]))
1265
- >>> tril = nn.Tril()
1266
- >>> result = tril(x, -1)
1267
- >>> print(result)
1268
- [[ 0 0 0 0]
1269
- [ 5 0 0 0]
1270
- [10 11 0 0]
1271
- [14 15 16 0]]
1073
+ 'nn.Tril' is deprecated from version 2.0 and will be removed in a future version,
1074
+ use 'ops.tril' instead.
1272
1075
  """
1273
1076
 
1077
+ @deprecated("2.0", "ops.tril", False)
1274
1078
  def __init__(self):
1275
1079
  """Initialize Tril."""
1276
1080
  super(Tril, self).__init__()
@@ -1280,90 +1084,26 @@ class Tril(Cell):
1280
1084
 
1281
1085
  def construct(self, x, k=0):
1282
1086
  assist = tril(x.shape, self.dtype(x), k)
1283
- result = self.mul(self.cast(x, mstype.float32), self.cast(assist, mstype.float32))
1087
+ result = self.mul(self.cast(x, mstype.float32),
1088
+ self.cast(assist, mstype.float32))
1284
1089
  return self.cast(result, self.dtype(x))
1285
1090
 
1286
1091
 
1287
- @constexpr
1092
+ @_primexpr
1288
1093
  def triu(x_shape, x_dtype, k):
1289
- Validator.check_int(len(x_shape), 1, Rel.GE, "x rank", "triu")
1094
+ Validator.check_int(len(x_shape), 1, Validator.GE, "x rank", "triu")
1290
1095
  Validator.check_is_int(k, "k value", "triu")
1291
- mask = np.triu(np.ones(x_shape), k)
1292
- return Tensor(mask, x_dtype)
1096
+ value = F.cast(P.Triu(k)(F.ones(x_shape, x_dtype)), x_dtype)
1097
+ return value
1293
1098
 
1294
1099
 
1295
1100
  class Triu(Cell):
1296
1101
  """
1297
- Returns a tensor with elements below the kth diagonal zeroed.
1298
-
1299
- The upper triangular part of the matrix is defined as the elements on and above the diagonal.
1300
-
1301
- The parameter `k` controls the diagonal to be considered. If `k` = 0, all elements on and above the main diagonal
1302
- are retained. Positive values do not include as many diagonals above the main diagonal. Similarly,
1303
- negative values include as many diagonals below the main diagonal.
1304
-
1305
- Inputs:
1306
- - **x** (Tensor) - The input tensor. The data type is Number.
1307
- :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
1308
- - **k** (Int) - The index of diagonal. Default: 0
1309
-
1310
- Outputs:
1311
- Tensor, has the same type and shape as input `x`.
1312
-
1313
- Raises:
1314
- TypeError: If `k` is not an int.
1315
- ValueError: If length of shape of `x` is less than 1.
1316
-
1317
- Supported Platforms:
1318
- ``Ascend`` ``GPU`` ``CPU``
1319
-
1320
- Examples:
1321
- >>> x = Tensor(np.array([[ 1, 2, 3, 4],
1322
- ... [ 5, 6, 7, 8],
1323
- ... [10, 11, 12, 13],
1324
- ... [14, 15, 16, 17]]))
1325
- >>> triu = nn.Triu()
1326
- >>> result = triu(x)
1327
- >>> print(result)
1328
- [[ 1 2 3 4]
1329
- [ 0 6 7 8]
1330
- [ 0 0 12 13]
1331
- [ 0 0 0 17]]
1332
- >>> x = Tensor(np.array([[ 1, 2, 3, 4],
1333
- ... [ 5, 6, 7, 8],
1334
- ... [10, 11, 12, 13],
1335
- ... [14, 15, 16, 17]]))
1336
- >>> triu = nn.Triu()
1337
- >>> result = triu(x, 1)
1338
- >>> print(result)
1339
- [[ 0 2 3 4]
1340
- [ 0 0 7 8]
1341
- [ 0 0 0 13]
1342
- [ 0 0 0 0]]
1343
- >>> x = Tensor(np.array([[ 1, 2, 3, 4],
1344
- ... [ 5, 6, 7, 8],
1345
- ... [10, 11, 12, 13],
1346
- ... [14, 15, 16, 17]]))
1347
- >>> triu = nn.Triu()
1348
- >>> result = triu(x, 2)
1349
- >>> print(result)
1350
- [[ 0 0 3 4]
1351
- [ 0 0 0 8]
1352
- [ 0 0 0 0]
1353
- [ 0 0 0 0]]
1354
- >>> x = Tensor(np.array([[ 1, 2, 3, 4],
1355
- ... [ 5, 6, 7, 8],
1356
- ... [10, 11, 12, 13],
1357
- ... [14, 15, 16, 17]]))
1358
- >>> triu = nn.Triu()
1359
- >>> result = triu(x, -1)
1360
- >>> print(result)
1361
- [[ 1 2 3 4]
1362
- [ 5 6 7 8]
1363
- [ 0 11 12 13]
1364
- [ 0 0 16 17]]
1102
+ 'nn.Triu' is deprecated from version 2.0 and will be removed in a future version,
1103
+ use 'ops.triu' instead.
1365
1104
  """
1366
1105
 
1106
+ @deprecated("2.0", "ops.triu", False)
1367
1107
  def __init__(self):
1368
1108
  """Initialize Triu."""
1369
1109
  super(Triu, self).__init__()
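Both deprecation notices above point to functional replacements. A minimal sketch of `ops.tril` and `ops.triu`; the input matrix reuses the removed docstring examples, and the diagonal offset is passed positionally as an assumption about the 2.0 signature:

    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8],
                         [10, 11, 12, 13], [14, 15, 16, 17]]).astype(np.float32))
    print(ops.tril(x))      # zero out everything above the main diagonal
    print(ops.triu(x, 1))   # zero out the main diagonal and everything below it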
@@ -1373,87 +1113,47 @@ class Triu(Cell):
1373
1113
 
1374
1114
  def construct(self, x, k=0):
1375
1115
  assist = triu(x.shape, self.dtype(x), k)
1376
- result = self.mul(self.cast(x, mstype.float32), self.cast(assist, mstype.float32))
1116
+ result = self.mul(self.cast(x, mstype.float32),
1117
+ self.cast(assist, mstype.float32))
1377
1118
  return self.cast(result, self.dtype(x))
1378
1119
 
1379
1120
 
1380
- @constexpr
1121
+ @_primexpr
1381
1122
  def _get_matrix_diag_assist(x_shape, x_dtype):
1382
- Validator.check_int(len(x_shape), 1, Rel.GE, "x rank", "_get_matrix_diag_assist")
1383
- base_eye = np.eye(x_shape[-1], x_shape[-1]).reshape(-1)
1384
- assist = np.tile(base_eye, x_shape[:-1]).reshape(x_shape + (x_shape[-1],))
1385
- return Tensor(assist, x_dtype)
1123
+ """Get matrix diag assist"""
1124
+ Validator.check_int(len(x_shape), 1, Validator.GE, "x rank", "_get_matrix_diag_assist")
1125
+ base_eye = F.reshape(
1126
+ F.eye(x_shape[-1], x_shape[-1], x_dtype), (x_shape[-1] * x_shape[-1],))
1127
+ if len(x_shape) == 1:
1128
+ assist = F.reshape(base_eye, x_shape + (x_shape[-1],))
1129
+ else:
1130
+ assist = F.reshape(
1131
+ F.tile(base_eye, x_shape[:-1]), x_shape + (x_shape[-1],))
1132
+ value = F.cast(assist, x_dtype)
1133
+ return value
1386
1134
 
1387
1135
 
1388
1136
  @constexpr
1389
1137
  def _get_matrix_diag_part_assist(x_shape, x_dtype):
1390
- Validator.check_int(len(x_shape), 2, Rel.GE, "x rank", "_get_matrix_diag_part_assist")
1391
- base_eye = np.eye(x_shape[-2], x_shape[-1]).reshape(-1)
1392
- assist = np.tile(base_eye, x_shape[:-2]).reshape(x_shape)
1393
- return Tensor(assist, x_dtype)
1138
+ """Get matrix diag part assist"""
1139
+ Validator.check_int(len(x_shape), 2, Validator.GE, "x rank", "_get_matrix_diag_part_assist")
1140
+ base_eye = F.reshape(
1141
+ F.eye(x_shape[-2], x_shape[-1], x_dtype), (x_shape[-2] * x_shape[-1],))
1142
+ if len(x_shape) <= 2:
1143
+ assist = F.reshape(base_eye, x_shape)
1144
+ else:
1145
+ assist = F.reshape(F.tile(base_eye, x_shape[:-2]), x_shape)
1146
+ value = F.cast(assist, x_dtype)
1147
+ return value
1394
1148
 
1395
1149
 
1396
1150
  class MatrixDiag(Cell):
1397
1151
  r"""
1398
- Returns a batched diagonal tensor with a given batched diagonal values.
1399
-
1400
- Assume `x` has :math:`k` dimensions :math:`[I, J, K, ..., N]`, then the output is a tensor of rank
1401
- :math:`k+1` with dimensions :math:`[I, J, K, ..., N, N]` where:
1402
- :math:`output[i, j, k, ..., m, n] = 1\{m=n\} * x[i, j, k, ..., n]`.
1403
-
1404
- Inputs:
1405
- - **x** (Tensor) - The diagonal values. It can be one of the following data types:
1406
- float32, float16, int32, int8, and uint8.
1407
- The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
1408
-
1409
- Outputs:
1410
- Tensor, has the same type as input `x`. The shape must be x.shape + (x.shape[-1], ).
1411
-
1412
- Raises:
1413
- TypeError: If dtype of `x` is not one of float32, float16, int32, int8 or uint8.
1414
-
1415
- Supported Platforms:
1416
- ``Ascend``
1417
-
1418
- Examples:
1419
- >>> x = Tensor(np.array([1, -1]), mindspore.float32)
1420
- >>> matrix_diag = nn.MatrixDiag()
1421
- >>> output = matrix_diag(x)
1422
- >>> print(x.shape)
1423
- (2,)
1424
- >>> print(output)
1425
- [[ 1. 0.]
1426
- [ 0. -1.]]
1427
- >>> print(output.shape)
1428
- (2, 2)
1429
- >>> x = Tensor(np.array([[1, -1], [1, -1]]), mindspore.float32)
1430
- >>> matrix_diag = nn.MatrixDiag()
1431
- >>> output = matrix_diag(x)
1432
- >>> print(x.shape)
1433
- (2, 2)
1434
- >>> print(output)
1435
- [[[ 1. 0.]
1436
- [ 0. -1.]]
1437
- [[ 1. 0.]
1438
- [ 0. -1.]]]
1439
- >>> print(output.shape)
1440
- (2, 2, 2)
1441
- >>> x = Tensor(np.array([[1, -1, 1], [1, -1, 1]]), mindspore.float32)
1442
- >>> matrix_diag = nn.MatrixDiag()
1443
- >>> output = matrix_diag(x)
1444
- >>> print(x.shape)
1445
- (2, 3)
1446
- >>> print(output)
1447
- [[[ 1. 0. 0.]
1448
- [ 0. -1. 0.]
1449
- [ 0. 0. 1.]]
1450
- [[ 1. 0. 0.]
1451
- [ 0. -1. 0.]
1452
- [ 0. 0. 1.]]]
1453
- >>> print(output.shape)
1454
- (2, 3, 3)
1152
+ 'nn.MatrixDiag' is deprecated from version 2.0 and will be removed in a future version,
1153
+ use 'ops.diag' instead.
1455
1154
  """
1456
1155
 
1156
+ @deprecated("2.0", "ops.diag", False)
1457
1157
  def __init__(self):
1458
1158
  """Initialize MatrixDiag."""
1459
1159
  super(MatrixDiag, self).__init__()
@@ -1470,47 +1170,11 @@ class MatrixDiag(Cell):
1470
1170
 
1471
1171
  class MatrixDiagPart(Cell):
1472
1172
  r"""
1473
- Returns the batched diagonal part of a batched tensor.
1474
-
1475
- Assume `x` has :math:`k` dimensions :math:`[I, J, K, ..., M, N]`, then the output is a tensor of rank
1476
- :math:`k-1` with dimensions :math:`[I, J, K, ..., min(M, N)]` where:
1477
- :math:`output[i, j, k, ..., n] = x[i, j, k, ..., n, n]`.
1478
-
1479
- Inputs:
1480
- - **x** (Tensor) - The batched tensor. It can be one of the following data types:
1481
- float32, float16, int32, int8, and uint8.
1482
-
1483
- Outputs:
1484
- Tensor, has the same type as input `x`. The shape must be x.shape[:-2] + [min(x.shape[-2:])].
1485
-
1486
- Raises:
1487
- TypeError: If dtype of `x` is not one of float32, float16, int32, int8 or uint8.
1488
-
1489
- Supported Platforms:
1490
- ``Ascend``
1491
-
1492
- Examples:
1493
- >>> import mindspore
1494
- >>> from mindspore import Tensor, nn
1495
- >>> x = Tensor([[[-1, 0], [0, 1]],
1496
- ... [[-1, 0], [0, 1]],
1497
- ... [[-1, 0], [0, 1]]], mindspore.float32)
1498
- >>> matrix_diag_part = nn.MatrixDiagPart()
1499
- >>> output = matrix_diag_part(x)
1500
- >>> print(output)
1501
- [[-1. 1.]
1502
- [-1. 1.]
1503
- [-1. 1.]]
1504
- >>> x = Tensor([[-1, 0, 0, 1],
1505
- ... [-1, 0, 0, 1],
1506
- ... [-1, 0, 0, 1],
1507
- ... [-1, 0, 0, 1]], mindspore.float32)
1508
- >>> matrix_diag_part = nn.MatrixDiagPart()
1509
- >>> output = matrix_diag_part(x)
1510
- >>> print(output)
1511
- [-1. 0. 0. 1.]
1173
+ 'nn.MatrixDiagPart' is deprecated from version 2.0 and will be removed in a future version,
1174
+ use 'ops.diagonal' instead.
1512
1175
  """
1513
1176
 
1177
+ @deprecated("2.0", "ops.diagonal", False)
1514
1178
  def __init__(self):
1515
1179
  """Initialize MatrixDiagPart."""
1516
1180
  super(MatrixDiagPart, self).__init__()
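Per the two notices above, a combined sketch of the functional replacements; the inputs reuse the removed docstring examples, and `ops.diagonal`'s default offset and dims are an assumption about the 2.0 API:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    v = Tensor(np.array([1, -1]), mindspore.float32)            # diagonal values from the old example
    print(ops.diag(v))                                          # [[ 1.  0.] [ 0. -1.]]
    m = Tensor(np.array([[-1, 0], [0, 1]]), mindspore.float32)  # matrix from the old example
    print(ops.diagonal(m))                                      # main diagonal: [-1.  1.]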
@@ -1586,59 +1250,23 @@ class MatrixSetDiag(Cell):
1586
1250
 
1587
1251
  @constexpr
1588
1252
  def _check_input_dim(axis, dim, cls_name):
1589
- Validator.check_int_range(axis, -dim, dim, Rel.INC_LEFT, 'axis', cls_name)
1253
+ Validator.check_int_range(axis, -dim, dim, Validator.INC_LEFT, 'axis', cls_name)
1590
1254
 
1591
1255
 
1592
1256
  class Roll(Cell):
1593
1257
  """
1594
- Rolls the elements of a tensor along an axis.
1595
-
1596
- The elements are shifted positively (towards larger indices) by the offset of `shift` along the dimension of `axis`.
1597
- Negative `shift` values will shift elements in the opposite direction. Elements that roll passed the last position
1598
- will wrap around to the first and vice versa. Multiple shifts along multiple axes may be specified.
1599
-
1600
- Args:
1601
- shift (Union[list(int), tuple(int), int]): Specifies the number of places by which elements are shifted
1602
- positively (towards larger indices) along the specified dimension. Negative shifts will roll the elements
1603
- in the opposite direction.
1604
- axis (Union[list(int), tuple(int), int]): Specifies the dimension indexes of shape to be rolled.
1605
-
1606
- Inputs:
1607
- - **input_x** (Tensor) - Input tensor.
1608
-
1609
- Outputs:
1610
- Tensor, has the same shape and type as `input_x`.
1611
-
1612
- Raises:
1613
- TypeError: If `shift` is not an int, a tuple or a list.
1614
- TypeError: If `axis` is not an int, a tuple or a list.
1615
- TypeError: If element of `shift` is not an int.
1616
- TypeError: If element of `axis` is not an int.
1617
- ValueError: If axis is out of the range [-len(input_x.shape), len(input_x.shape)).
1618
- ValueError: If length of shape of `shift` is not equal to length of shape of `axis`.
1619
-
1620
- Supported Platforms:
1621
- ``Ascend`` ``GPU``
1622
-
1623
- Examples:
1624
- >>> input_x = Tensor(np.array([0, 1, 2, 3, 4]).astype(np.float32))
1625
- >>> op = nn.Roll(shift=2, axis=0)
1626
- >>> output = op(input_x)
1627
- >>> print(output)
1628
- [3. 4. 0. 1. 2.]
1629
- >>> input_x = Tensor(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]).astype(np.float32))
1630
- >>> op = nn.Roll(shift=[1, -2], axis=[0, 1])
1631
- >>> output = op(input_x)
1632
- >>> print(output)
1633
- [[7. 8. 9. 5. 6.]
1634
- [2. 3. 4. 0. 1.]]
1258
+ 'nn.Roll' is deprecated from version 2.0 and will be removed in a future version,
1259
+ use 'ops.roll' instead.
1635
1260
  """
1636
1261
 
1262
+ @deprecated("2.0", "ops.roll", False)
1637
1263
  def __init__(self, shift, axis):
1638
1264
  """Initialize Roll"""
1639
1265
  super(Roll, self).__init__()
1640
- Validator.check_value_type("shift", shift, [int, tuple, list], self.cls_name)
1641
- Validator.check_value_type("axis", axis, [int, tuple, list], self.cls_name)
1266
+ Validator.check_value_type(
1267
+ "shift", shift, [int, tuple, list], self.cls_name)
1268
+ Validator.check_value_type(
1269
+ "axis", axis, [int, tuple, list], self.cls_name)
1642
1270
  self.shape_op = P.Shape()
1643
1271
  self.shift = shift
1644
1272
  self.axis = axis
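The replacement for the deprecated class above is `ops.roll`. A minimal sketch; the input and expected output reuse the removed docstring example, and since the old class advertised Ascend/GPU support only, backend availability for the functional form should be checked:

    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.array([0, 1, 2, 3, 4]).astype(np.float32))
    print(ops.roll(x, 2, 0))   # expected [3. 4. 0. 1. 2.]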
@@ -1650,8 +1278,8 @@ class Roll(Cell):
1650
1278
  if not isinstance(self.shift, (list, tuple)):
1651
1279
  self.shift = [self.shift]
1652
1280
  if context.get_context("device_target") == "GPU":
1653
- Validator.check_int(len(self.shift), 1, Rel.GE, "shift", "Roll")
1654
- Validator.check_int(len(self.axis), 1, Rel.GE, "axis", "Roll")
1281
+ Validator.check_int(len(self.shift), 1, Validator.GE, "shift", "Roll")
1282
+ Validator.check_int(len(self.axis), 1, Validator.GE, "axis", "Roll")
1655
1283
  for s_axis in self.axis:
1656
1284
  Validator.check_is_int(s_axis, "axis", "Roll")
1657
1285
  for s_shift in self.shift:
@@ -1664,14 +1292,16 @@ class Roll(Cell):
1664
1292
  f"and the length of 'axis' {len(self.axis)}.")
1665
1293
  else:
1666
1294
  if not isinstance(self.axis, (list, tuple)):
1667
- self.op_list.append((P.Roll(shift=self.shift, axis=0), self.axis))
1295
+ self.op_list.append(
1296
+ (P.Roll(shift=self.shift, axis=0), self.axis))
1668
1297
  else:
1669
1298
  if len(self.shift) != len(self.axis):
1670
1299
  raise ValueError(f"For '{self.cls_name}', the shape of 'shift' and the shape of 'axis' must be "
1671
1300
  f"the same, but got the length of 'shift' {len(self.shift)} "
1672
1301
  f"and the length of 'axis' {len(self.axis)}.")
1673
1302
  for idx, _ in enumerate(self.axis):
1674
- self.op_list.append((P.Roll(shift=self.shift[idx], axis=0), self.axis[idx]))
1303
+ self.op_list.append(
1304
+ (P.Roll(shift=self.shift[idx], axis=0), self.axis[idx]))
1675
1305
 
1676
1306
  def construct(self, input_x):
1677
1307
  dim = len(self.shape_op(input_x))
@@ -1697,12 +1327,12 @@ class Roll(Cell):
1697
1327
  class Unflatten(Cell):
1698
1328
  r"""
1699
1329
  Summary:
1700
- Unflattens a tensor dim according to axis and unflattened_size.
1330
+ Unflattens a Tensor dim according to `axis` and `unflattened_size`.
1701
1331
 
1702
1332
  Args:
1703
- axis (int): specifies the dimension of the input tensor to be unflattened.
1704
- unflattened_size (Union(tuple[int], list[int])): is the new shape of the unflattened dimension of
1705
- the tensor and it can be a tuple of ints or a list of ints. The product of unflattened_size
1333
+ axis (int): specifies the dimension of the input Tensor to be unflattened.
1334
+ unflattened_size (Union(tuple[int], list[int])): the new shape of the unflattened dimension of
1335
+ the Tensor and it can be a tuple of ints or a list of ints. The product of `unflattened_size`
1706
1336
  must equal to input_shape[axis].
1707
1337
 
1708
1338
  Inputs:
@@ -1714,7 +1344,7 @@ class Unflatten(Cell):
1714
1344
  Raises:
1715
1345
  TypeError: If `axis` is not int.
1716
1346
  TypeError: If `unflattened_size` is neither tuple of ints nor list of ints.
1717
- TypeError: If the value specified by `axis` is not equal to product of `unflattened_size`.
1347
+ TypeError: If the product of `unflattened_size` does not equal input_shape[axis].
1718
1348
 
1719
1349
  Supported Platforms:
1720
1350
  ``Ascend`` ``GPU`` ``CPU``
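A minimal usage sketch of `nn.Unflatten` as documented above (illustrative shape, assuming a MindSpore 2.0 install):

    import numpy as np
    from mindspore import Tensor, nn

    x = Tensor(np.arange(24, dtype=np.float32).reshape(2, 12))  # illustrative input
    unflatten = nn.Unflatten(1, (3, 4))   # split axis 1 (size 12) into (3, 4)
    print(unflatten(x).shape)             # (2, 3, 4)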
@@ -1735,7 +1365,8 @@ class Unflatten(Cell):
1735
1365
  self.shape = P.Shape()
1736
1366
  self.reshape = P.Reshape()
1737
1367
  Validator.check_is_int(axis, 'axis', 'Unflatten')
1738
- Validator.check_value_type('unflattended_size', unflattened_size, (list, tuple), 'Unflatten')
1368
+ Validator.check_value_type(
1369
+ 'unflattended_size', unflattened_size, (list, tuple), 'Unflatten')
1739
1370
  self.axis = axis
1740
1371
  if isinstance(unflattened_size, list):
1741
1372
  unflattened_size = tuple(unflattened_size)