mindspore 2.0.0a0__cp38-cp38-win_amd64.whl → 2.0.0rc1__cp38-cp38-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of mindspore might be problematic.

Files changed (655)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +4 -2
  3. mindspore/_c_dataengine.cp38-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp38-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp38-win_amd64.pyd +0 -0
  6. mindspore/_check_jit_forbidden_api.py +102 -0
  7. mindspore/_checkparam.py +1066 -1001
  8. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +4 -3
  9. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -48
  10. mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -4
  11. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -4
  12. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
  13. mindspore/_extends/parse/__init__.py +5 -3
  14. mindspore/_extends/parse/namespace.py +16 -1
  15. mindspore/_extends/parse/parser.py +107 -22
  16. mindspore/_extends/parse/resources.py +0 -7
  17. mindspore/_extends/parse/standard_method.py +885 -413
  18. mindspore/amp.py +52 -57
  19. mindspore/boost/boost.py +2 -2
  20. mindspore/boost/boost_cell_wrapper.py +38 -20
  21. mindspore/boost/dim_reduce.py +3 -3
  22. mindspore/boost/group_loss_scale_manager.py +1 -1
  23. mindspore/common/__init__.py +4 -6
  24. mindspore/common/_decorator.py +2 -0
  25. mindspore/common/_register_for_adapter.py +55 -0
  26. mindspore/common/_stub_tensor.py +201 -0
  27. mindspore/common/_utils.py +41 -7
  28. mindspore/common/api.py +215 -141
  29. mindspore/common/dtype.py +8 -1
  30. mindspore/common/dump.py +2 -2
  31. mindspore/common/initializer.py +4 -2
  32. mindspore/common/jit_config.py +17 -13
  33. mindspore/common/mutable.py +33 -13
  34. mindspore/common/parameter.py +23 -21
  35. mindspore/common/seed.py +8 -24
  36. mindspore/common/sparse_tensor.py +62 -41
  37. mindspore/common/tensor.py +852 -1154
  38. mindspore/communication/__init__.py +2 -2
  39. mindspore/communication/_comm_helper.py +11 -4
  40. mindspore/communication/management.py +22 -21
  41. mindspore/config/op_info.config +501 -1008
  42. mindspore/context.py +201 -23
  43. mindspore/dataset/__init__.py +6 -6
  44. mindspore/dataset/audio/__init__.py +7 -7
  45. mindspore/dataset/audio/transforms.py +670 -30
  46. mindspore/dataset/audio/utils.py +47 -4
  47. mindspore/dataset/audio/validators.py +223 -1
  48. mindspore/dataset/callback/ds_callback.py +2 -2
  49. mindspore/dataset/core/config.py +210 -14
  50. mindspore/dataset/core/validator_helpers.py +2 -2
  51. mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
  52. mindspore/dataset/debug/debug_hook.py +65 -0
  53. mindspore/dataset/debug/pre_defined_hook.py +67 -0
  54. mindspore/dataset/engine/__init__.py +7 -3
  55. mindspore/dataset/engine/cache_client.py +1 -1
  56. mindspore/dataset/engine/datasets.py +322 -66
  57. mindspore/dataset/engine/datasets_audio.py +80 -76
  58. mindspore/dataset/engine/datasets_standard_format.py +51 -38
  59. mindspore/dataset/engine/datasets_text.py +232 -118
  60. mindspore/dataset/engine/datasets_user_defined.py +41 -17
  61. mindspore/dataset/engine/datasets_vision.py +746 -225
  62. mindspore/dataset/engine/graphdata.py +75 -10
  63. mindspore/dataset/engine/iterators.py +45 -5
  64. mindspore/dataset/engine/offload.py +48 -28
  65. mindspore/dataset/engine/validators.py +117 -8
  66. mindspore/dataset/text/__init__.py +6 -5
  67. mindspore/dataset/text/transforms.py +86 -3
  68. mindspore/dataset/text/utils.py +6 -4
  69. mindspore/dataset/text/validators.py +25 -0
  70. mindspore/dataset/transforms/__init__.py +3 -2
  71. mindspore/dataset/transforms/c_transforms.py +1 -1
  72. mindspore/dataset/transforms/transforms.py +2 -2
  73. mindspore/dataset/utils/__init__.py +2 -1
  74. mindspore/dataset/utils/line_reader.py +121 -0
  75. mindspore/dataset/vision/__init__.py +2 -3
  76. mindspore/dataset/vision/c_transforms.py +9 -9
  77. mindspore/dataset/vision/py_transforms.py +5 -5
  78. mindspore/dataset/vision/py_transforms_util.py +2 -0
  79. mindspore/dataset/vision/transforms.py +160 -161
  80. mindspore/dataset/vision/utils.py +3 -3
  81. mindspore/experimental/map_parameter.py +38 -26
  82. mindspore/include/OWNERS +0 -1
  83. mindspore/include/api/callback/callback.h +9 -13
  84. mindspore/include/api/callback/ckpt_saver.h +2 -2
  85. mindspore/include/api/callback/loss_monitor.h +2 -2
  86. mindspore/include/api/callback/lr_scheduler.h +5 -5
  87. mindspore/include/api/callback/time_monitor.h +2 -2
  88. mindspore/include/api/callback/train_accuracy.h +4 -6
  89. mindspore/include/api/cfg.h +19 -6
  90. mindspore/include/api/context.h +44 -9
  91. mindspore/include/api/delegate.h +1 -1
  92. mindspore/include/api/metrics/accuracy.h +2 -2
  93. mindspore/include/api/metrics/metrics.h +4 -3
  94. mindspore/include/api/model.h +9 -4
  95. mindspore/include/api/model_parallel_runner.h +2 -2
  96. mindspore/include/api/net.h +12 -11
  97. mindspore/include/api/serialization.h +19 -3
  98. mindspore/include/api/types.h +3 -3
  99. mindspore/include/dataset/constants.h +7 -0
  100. mindspore/include/dataset/text.h +59 -0
  101. mindspore/jpeg62.dll +0 -0
  102. mindspore/log.py +1 -1
  103. mindspore/mindrecord/filereader.py +18 -0
  104. mindspore/mindrecord/filewriter.py +197 -34
  105. mindspore/mindrecord/shardreader.py +9 -0
  106. mindspore/mindrecord/shardwriter.py +1 -1
  107. mindspore/mindrecord/tools/cifar100_to_mr.py +3 -3
  108. mindspore/mindrecord/tools/cifar10_to_mr.py +3 -3
  109. mindspore/mindrecord/tools/csv_to_mr.py +3 -3
  110. mindspore/mindrecord/tools/imagenet_to_mr.py +16 -11
  111. mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
  112. mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
  113. mindspore/mindspore_backend.dll +0 -0
  114. mindspore/mindspore_common.dll +0 -0
  115. mindspore/mindspore_core.dll +0 -0
  116. mindspore/mindspore_glog.dll +0 -0
  117. mindspore/mindspore_shared_lib.dll +0 -0
  118. mindspore/nn/__init__.py +0 -4
  119. mindspore/nn/cell.py +204 -132
  120. mindspore/nn/dynamic_lr.py +1 -1
  121. mindspore/nn/grad/cell_grad.py +7 -6
  122. mindspore/nn/layer/__init__.py +5 -4
  123. mindspore/nn/layer/activation.py +40 -89
  124. mindspore/nn/layer/basic.py +255 -624
  125. mindspore/nn/layer/channel_shuffle.py +7 -6
  126. mindspore/nn/layer/combined.py +1 -1
  127. mindspore/nn/layer/container.py +41 -4
  128. mindspore/nn/layer/conv.py +64 -28
  129. mindspore/nn/layer/dense.py +9 -8
  130. mindspore/nn/layer/embedding.py +27 -25
  131. mindspore/nn/layer/image.py +53 -46
  132. mindspore/nn/layer/math.py +97 -105
  133. mindspore/nn/layer/normalization.py +117 -86
  134. mindspore/nn/layer/padding.py +185 -95
  135. mindspore/nn/layer/pooling.py +817 -414
  136. mindspore/nn/layer/rnn_cells.py +10 -15
  137. mindspore/nn/layer/rnns.py +37 -38
  138. mindspore/nn/layer/thor_layer.py +11 -12
  139. mindspore/nn/layer/timedistributed.py +5 -5
  140. mindspore/nn/layer/transformer.py +701 -0
  141. mindspore/nn/learning_rate_schedule.py +8 -8
  142. mindspore/nn/loss/__init__.py +5 -4
  143. mindspore/nn/loss/loss.py +334 -199
  144. mindspore/nn/optim/ada_grad.py +6 -6
  145. mindspore/nn/optim/adadelta.py +2 -3
  146. mindspore/nn/optim/adafactor.py +4 -5
  147. mindspore/nn/optim/adam.py +126 -62
  148. mindspore/nn/optim/adamax.py +3 -4
  149. mindspore/nn/optim/adasum.py +6 -6
  150. mindspore/nn/optim/asgd.py +2 -2
  151. mindspore/nn/optim/ftrl.py +67 -38
  152. mindspore/nn/optim/lamb.py +4 -5
  153. mindspore/nn/optim/lars.py +2 -2
  154. mindspore/nn/optim/lazyadam.py +43 -4
  155. mindspore/nn/optim/momentum.py +6 -5
  156. mindspore/nn/optim/optimizer.py +3 -1
  157. mindspore/nn/optim/proximal_ada_grad.py +2 -2
  158. mindspore/nn/optim/rmsprop.py +1 -1
  159. mindspore/nn/optim/rprop.py +8 -9
  160. mindspore/nn/optim/sgd.py +19 -13
  161. mindspore/nn/optim/thor.py +10 -15
  162. mindspore/nn/probability/__init__.py +0 -2
  163. mindspore/nn/probability/bijector/bijector.py +4 -4
  164. mindspore/nn/probability/bijector/invert.py +1 -1
  165. mindspore/nn/probability/bijector/softplus.py +2 -2
  166. mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
  167. mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
  168. mindspore/nn/probability/distribution/_utils/utils.py +9 -15
  169. mindspore/nn/probability/distribution/bernoulli.py +3 -3
  170. mindspore/nn/probability/distribution/beta.py +1 -1
  171. mindspore/nn/probability/distribution/categorical.py +5 -7
  172. mindspore/nn/probability/distribution/cauchy.py +3 -3
  173. mindspore/nn/probability/distribution/distribution.py +2 -2
  174. mindspore/nn/probability/distribution/exponential.py +2 -2
  175. mindspore/nn/probability/distribution/gamma.py +3 -3
  176. mindspore/nn/probability/distribution/geometric.py +1 -1
  177. mindspore/nn/probability/distribution/gumbel.py +3 -3
  178. mindspore/nn/probability/distribution/half_normal.py +15 -11
  179. mindspore/nn/probability/distribution/laplace.py +16 -13
  180. mindspore/nn/probability/distribution/logistic.py +2 -2
  181. mindspore/nn/probability/distribution/normal.py +1 -1
  182. mindspore/nn/probability/distribution/poisson.py +1 -1
  183. mindspore/nn/probability/distribution/student_t.py +20 -15
  184. mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
  185. mindspore/nn/probability/distribution/uniform.py +2 -2
  186. mindspore/nn/reinforcement/_tensors_queue.py +3 -3
  187. mindspore/nn/reinforcement/tensor_array.py +2 -2
  188. mindspore/nn/sparse/sparse.py +2 -2
  189. mindspore/nn/wrap/cell_wrapper.py +27 -10
  190. mindspore/nn/wrap/grad_reducer.py +2 -2
  191. mindspore/nn/wrap/loss_scale.py +40 -24
  192. mindspore/numpy/array_creations.py +33 -22
  193. mindspore/numpy/array_ops.py +35 -30
  194. mindspore/numpy/logic_ops.py +6 -27
  195. mindspore/numpy/math_ops.py +22 -19
  196. mindspore/numpy/utils.py +1 -1
  197. mindspore/numpy/utils_const.py +108 -58
  198. mindspore/opencv_core452.dll +0 -0
  199. mindspore/opencv_imgcodecs452.dll +0 -0
  200. mindspore/opencv_imgproc452.dll +0 -0
  201. mindspore/ops/_constants.py +0 -6
  202. mindspore/ops/_grad/__init__.py +2 -1
  203. mindspore/ops/_grad/grad_array_ops.py +86 -117
  204. mindspore/ops/_grad/grad_base.py +23 -1
  205. mindspore/ops/_grad/grad_clip_ops.py +2 -3
  206. mindspore/ops/_grad/grad_comm_ops.py +34 -24
  207. mindspore/ops/_grad/grad_implementations.py +9 -45
  208. mindspore/ops/_grad/grad_inner_ops.py +47 -4
  209. mindspore/ops/_grad/grad_math_ops.py +142 -117
  210. mindspore/ops/_grad/grad_nn_ops.py +71 -165
  211. mindspore/ops/_grad/grad_sequence_ops.py +296 -0
  212. mindspore/ops/_grad/grad_sparse.py +7 -6
  213. mindspore/ops/_grad_experimental/__init__.py +1 -0
  214. mindspore/ops/_grad_experimental/grad_array_ops.py +150 -15
  215. mindspore/ops/_grad_experimental/grad_image_ops.py +16 -7
  216. mindspore/ops/_grad_experimental/grad_inner_ops.py +1 -22
  217. mindspore/ops/_grad_experimental/grad_linalg_ops.py +4 -11
  218. mindspore/ops/_grad_experimental/grad_math_ops.py +210 -89
  219. mindspore/ops/_grad_experimental/grad_nn_ops.py +26 -22
  220. mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
  221. mindspore/ops/_grad_experimental/grad_sparse_ops.py +49 -8
  222. mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
  223. mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +2 -2
  224. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
  225. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
  226. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +4 -4
  227. mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
  228. mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
  229. mindspore/ops/_op_impl/_custom_op/correction_mul.py +2 -2
  230. mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
  231. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -5
  232. mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
  233. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
  234. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
  235. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
  236. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
  237. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
  238. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
  239. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
  240. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
  241. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
  242. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
  243. mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
  244. mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
  245. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
  246. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
  247. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
  248. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
  249. mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
  250. mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -4
  251. mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
  252. mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
  253. mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
  254. mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
  255. mindspore/ops/_op_impl/aicpu/__init__.py +236 -4
  256. mindspore/ops/_op_impl/aicpu/abs.py +36 -0
  257. mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_v1.py → adaptive_avg_pool_2d.py} +6 -5
  258. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
  259. mindspore/ops/_op_impl/aicpu/add.py +43 -0
  260. mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
  261. mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
  262. mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
  263. mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -43
  264. mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
  265. mindspore/{compression/common/__init__.py → ops/_op_impl/aicpu/bessel_i0.py} +15 -8
  266. mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
  267. mindspore/ops/_op_impl/aicpu/conj.py +11 -0
  268. mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +0 -3
  269. mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
  270. mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
  271. mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_grad_v1.py → digamma.py} +7 -9
  272. mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
  273. mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
  274. mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
  275. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
  276. mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
  277. mindspore/ops/_op_impl/aicpu/greater.py +41 -0
  278. mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
  279. mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
  280. mindspore/ops/_op_impl/aicpu/less.py +41 -0
  281. mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/lgamma.py} +16 -10
  282. mindspore/ops/_op_impl/aicpu/mirror_pad.py +0 -4
  283. mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
  284. mindspore/ops/_op_impl/aicpu/mul.py +3 -1
  285. mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
  286. mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
  287. mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
  288. mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
  289. mindspore/ops/_op_impl/aicpu/polar.py +32 -0
  290. mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
  291. mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
  292. mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
  293. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
  294. mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
  295. mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
  296. mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
  297. mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
  298. mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
  299. mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
  300. mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
  301. mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
  302. mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
  303. mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
  304. mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
  305. mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
  306. mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
  307. mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
  308. mindspore/ops/_op_impl/aicpu/sparse_slice.py +4 -0
  309. mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +6 -0
  310. mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
  311. mindspore/ops/_op_impl/aicpu/trans_data.py +1 -0
  312. mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
  313. mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
  314. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
  315. mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
  316. mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
  317. mindspore/ops/_op_impl/cpu/sparse_slice.py +4 -0
  318. mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +6 -0
  319. mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
  320. mindspore/ops/_op_impl/tbe/__init__.py +27 -611
  321. mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
  322. mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
  323. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
  324. mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +1 -0
  325. mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
  326. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
  327. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
  328. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
  329. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
  330. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
  331. mindspore/ops/_op_impl/tbe/cast.py +0 -2
  332. mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
  333. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
  334. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +2 -2
  335. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
  336. mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
  337. mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
  338. mindspore/ops/_op_impl/tbe/matmul_ds.py +2 -0
  339. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
  340. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
  341. mindspore/ops/_op_impl/tbe/scatter_mul.py +2 -0
  342. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -2
  343. mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
  344. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
  345. mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
  346. mindspore/ops/_register_for_op.py +1 -0
  347. mindspore/ops/_utils/__init__.py +1 -2
  348. mindspore/ops/_utils/utils.py +19 -40
  349. mindspore/ops/_vmap/vmap_array_ops.py +116 -38
  350. mindspore/ops/_vmap/vmap_base.py +16 -9
  351. mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
  352. mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
  353. mindspore/ops/_vmap/vmap_grad_nn_ops.py +7 -5
  354. mindspore/ops/_vmap/vmap_image_ops.py +12 -5
  355. mindspore/ops/_vmap/vmap_math_ops.py +46 -5
  356. mindspore/ops/_vmap/vmap_nn_ops.py +15 -21
  357. mindspore/ops/_vmap/vmap_random_ops.py +1 -1
  358. mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
  359. mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
  360. mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
  361. mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
  362. mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
  363. mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
  364. mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
  365. mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
  366. mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +220 -106
  367. mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
  368. mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
  369. mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
  370. mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
  371. mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
  372. mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
  373. mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
  374. mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
  375. mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
  376. mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
  377. mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -23
  378. mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -17
  379. mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
  380. mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
  381. mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
  382. mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
  383. mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
  384. mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
  385. mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +39 -41
  386. mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
  387. mindspore/ops/bprop_mindir/Flatten_bprop.mindir +41 -43
  388. mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +51 -57
  389. mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
  390. mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
  391. mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
  392. mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
  393. mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
  394. mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
  395. mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
  396. mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
  397. mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
  398. mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
  399. mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
  400. mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
  401. mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
  402. mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
  403. mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
  404. mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
  405. mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
  406. mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
  407. mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
  408. mindspore/ops/bprop_mindir/OneHot_bprop.mindir +24 -25
  409. mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
  410. mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
  411. mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
  412. mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
  413. mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
  414. mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
  415. mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
  416. mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +18 -19
  417. mindspore/ops/bprop_mindir/Reshape_bprop.mindir +53 -53
  418. mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
  419. mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +77 -85
  420. mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
  421. mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
  422. mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
  423. mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
  424. mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
  425. mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  426. mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
  427. mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
  428. mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  429. mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +37 -39
  430. mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +70 -72
  431. mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
  432. mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
  433. mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
  434. mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
  435. mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +17 -17
  436. mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
  437. mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
  438. mindspore/ops/bprop_mindir/generate_mindir.py +2 -0
  439. mindspore/ops/composite/__init__.py +7 -8
  440. mindspore/ops/composite/base.py +101 -47
  441. mindspore/ops/composite/math_ops.py +188 -158
  442. mindspore/ops/composite/multitype_ops/_compile_utils.py +415 -170
  443. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +142 -87
  444. mindspore/ops/composite/multitype_ops/add_impl.py +6 -1
  445. mindspore/ops/composite/multitype_ops/div_impl.py +2 -3
  446. mindspore/ops/composite/multitype_ops/getitem_impl.py +31 -3
  447. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
  448. mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
  449. mindspore/ops/composite/multitype_ops/in_impl.py +9 -0
  450. mindspore/ops/composite/multitype_ops/less_equal_impl.py +31 -0
  451. mindspore/ops/composite/multitype_ops/less_impl.py +31 -0
  452. mindspore/ops/composite/multitype_ops/mul_impl.py +21 -5
  453. mindspore/ops/composite/multitype_ops/not_in_impl.py +9 -0
  454. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
  455. mindspore/ops/composite/multitype_ops/setitem_impl.py +21 -3
  456. mindspore/ops/composite/multitype_ops/sub_impl.py +1 -1
  457. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +35 -4
  458. mindspore/ops/function/__init__.py +152 -8
  459. mindspore/ops/function/array_func.py +2555 -674
  460. mindspore/ops/function/clip_func.py +209 -13
  461. mindspore/ops/function/debug_func.py +2 -2
  462. mindspore/ops/function/grad/__init__.py +2 -1
  463. mindspore/ops/function/grad/grad_func.py +147 -62
  464. mindspore/ops/function/image_func.py +54 -38
  465. mindspore/ops/function/linalg_func.py +167 -16
  466. mindspore/ops/function/math_func.py +4849 -1492
  467. mindspore/ops/function/nn_func.py +2573 -988
  468. mindspore/ops/function/other_func.py +115 -0
  469. mindspore/ops/function/parameter_func.py +3 -3
  470. mindspore/ops/function/random_func.py +790 -73
  471. mindspore/ops/function/sparse_func.py +98 -78
  472. mindspore/ops/function/sparse_unary_func.py +54 -53
  473. mindspore/ops/function/spectral_func.py +27 -24
  474. mindspore/ops/function/vmap_func.py +22 -2
  475. mindspore/ops/functional.py +97 -37
  476. mindspore/ops/op_info_register.py +70 -28
  477. mindspore/ops/operations/__init__.py +47 -14
  478. mindspore/ops/operations/_csr_ops.py +7 -7
  479. mindspore/ops/operations/_embedding_cache_ops.py +5 -5
  480. mindspore/ops/operations/_grad_ops.py +276 -187
  481. mindspore/ops/operations/_inner_ops.py +319 -113
  482. mindspore/ops/operations/_ms_kernel.py +10 -8
  483. mindspore/ops/operations/_ocr_ops.py +9 -9
  484. mindspore/ops/operations/_opaque_predicate_registry.py +4 -0
  485. mindspore/ops/operations/_quant_ops.py +137 -102
  486. mindspore/ops/operations/_rl_inner_ops.py +121 -60
  487. mindspore/ops/operations/_scalar_ops.py +466 -0
  488. mindspore/ops/operations/_sequence_ops.py +1004 -2
  489. mindspore/ops/operations/_tensor_array.py +10 -11
  490. mindspore/ops/operations/_thor_ops.py +1 -1
  491. mindspore/ops/operations/array_ops.py +801 -466
  492. mindspore/ops/operations/comm_ops.py +51 -49
  493. mindspore/ops/operations/control_ops.py +2 -2
  494. mindspore/ops/operations/custom_ops.py +123 -44
  495. mindspore/ops/operations/debug_ops.py +24 -24
  496. mindspore/ops/operations/image_ops.py +240 -153
  497. mindspore/ops/operations/inner_ops.py +34 -50
  498. mindspore/ops/operations/linalg_ops.py +31 -9
  499. mindspore/ops/operations/math_ops.py +988 -757
  500. mindspore/ops/operations/nn_ops.py +965 -819
  501. mindspore/ops/operations/other_ops.py +51 -40
  502. mindspore/ops/operations/random_ops.py +204 -122
  503. mindspore/ops/operations/rl_ops.py +8 -9
  504. mindspore/ops/operations/sparse_ops.py +254 -93
  505. mindspore/ops/operations/spectral_ops.py +35 -3
  506. mindspore/ops/primitive.py +111 -9
  507. mindspore/parallel/_auto_parallel_context.py +189 -83
  508. mindspore/parallel/_offload_context.py +185 -0
  509. mindspore/parallel/_parallel_serialization.py +99 -7
  510. mindspore/parallel/_ps_context.py +9 -5
  511. mindspore/parallel/_recovery_context.py +1 -1
  512. mindspore/parallel/_tensor.py +7 -1
  513. mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
  514. mindspore/{nn/transformer → parallel/_transformer}/layers.py +6 -37
  515. mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
  516. mindspore/{nn/transformer → parallel/_transformer}/moe.py +20 -16
  517. mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
  518. mindspore/{nn/transformer → parallel/_transformer}/transformer.py +48 -111
  519. mindspore/parallel/_utils.py +1 -2
  520. mindspore/parallel/algo_parameter_config.py +1 -1
  521. mindspore/parallel/checkpoint_transform.py +37 -34
  522. mindspore/parallel/shard.py +17 -18
  523. mindspore/profiler/common/validator/validate_path.py +2 -2
  524. mindspore/profiler/envprofiling.py +69 -47
  525. mindspore/profiler/parser/ascend_timeline_generator.py +49 -42
  526. mindspore/profiler/parser/base_timeline_generator.py +49 -56
  527. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +98 -78
  528. mindspore/profiler/parser/hwts_log_parser.py +1 -1
  529. mindspore/profiler/parser/integrator.py +15 -14
  530. mindspore/profiler/parser/minddata_analyzer.py +2 -2
  531. mindspore/profiler/parser/msadvisor_analyzer.py +12 -25
  532. mindspore/profiler/parser/msadvisor_parser.py +2 -4
  533. mindspore/profiler/parser/optime_parser.py +17 -18
  534. mindspore/profiler/parser/profiler_info.py +2 -1
  535. mindspore/profiler/profiling.py +218 -186
  536. mindspore/rewrite/__init__.py +3 -1
  537. mindspore/rewrite/api/node.py +1 -114
  538. mindspore/rewrite/api/node_type.py +3 -0
  539. mindspore/rewrite/api/pattern_engine.py +31 -1
  540. mindspore/rewrite/api/scoped_value.py +4 -4
  541. mindspore/rewrite/api/symbol_tree.py +3 -78
  542. mindspore/rewrite/api/tree_node_helper.py +1 -1
  543. mindspore/rewrite/ast_creator_register.py +1 -0
  544. mindspore/rewrite/ast_helpers/__init__.py +2 -2
  545. mindspore/rewrite/ast_helpers/ast_creator.py +1 -2
  546. mindspore/rewrite/ast_helpers/ast_finder.py +65 -0
  547. mindspore/rewrite/ast_helpers/ast_modifier.py +11 -3
  548. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +18 -2
  549. mindspore/rewrite/namespace.py +0 -2
  550. mindspore/rewrite/node.py +157 -11
  551. mindspore/rewrite/parsers/assign_parser.py +231 -53
  552. mindspore/rewrite/parsers/class_def_parser.py +187 -109
  553. mindspore/rewrite/parsers/for_parser.py +24 -14
  554. mindspore/rewrite/parsers/function_def_parser.py +21 -4
  555. mindspore/rewrite/parsers/if_parser.py +6 -2
  556. mindspore/rewrite/sparsify/__init__.py +0 -0
  557. mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
  558. mindspore/rewrite/sparsify/sparsify.py +109 -0
  559. mindspore/rewrite/sparsify/utils.py +173 -0
  560. mindspore/rewrite/symbol_tree.py +256 -133
  561. mindspore/rewrite/symbol_tree_builder.py +38 -1
  562. mindspore/run_check/_check_version.py +69 -63
  563. mindspore/run_check/run_check.py +2 -1
  564. mindspore/tinyxml2.dll +0 -0
  565. mindspore/train/__init__.py +1 -1
  566. mindspore/train/_utils.py +28 -5
  567. mindspore/train/amp.py +273 -102
  568. mindspore/train/callback/_backup_and_restore.py +5 -5
  569. mindspore/train/callback/_callback.py +2 -2
  570. mindspore/train/callback/_checkpoint.py +3 -3
  571. mindspore/train/callback/_early_stop.py +3 -3
  572. mindspore/train/callback/_lambda_callback.py +2 -2
  573. mindspore/train/callback/_landscape.py +29 -31
  574. mindspore/train/callback/_loss_monitor.py +3 -3
  575. mindspore/train/callback/_on_request_exit.py +3 -3
  576. mindspore/train/callback/_reduce_lr_on_plateau.py +4 -4
  577. mindspore/train/callback/_summary_collector.py +23 -16
  578. mindspore/train/callback/_time_monitor.py +3 -3
  579. mindspore/train/checkpoint_pb2.py +68 -8
  580. mindspore/train/data_sink.py +15 -3
  581. mindspore/train/dataset_helper.py +10 -15
  582. mindspore/train/loss_scale_manager.py +8 -11
  583. mindspore/train/metrics/__init__.py +1 -1
  584. mindspore/train/metrics/bleu_score.py +1 -1
  585. mindspore/train/metrics/confusion_matrix.py +1 -1
  586. mindspore/train/metrics/cosine_similarity.py +1 -1
  587. mindspore/train/metrics/dice.py +2 -2
  588. mindspore/train/metrics/fbeta.py +1 -1
  589. mindspore/train/metrics/hausdorff_distance.py +4 -3
  590. mindspore/train/metrics/mean_surface_distance.py +2 -2
  591. mindspore/train/metrics/occlusion_sensitivity.py +1 -1
  592. mindspore/train/metrics/perplexity.py +1 -1
  593. mindspore/train/metrics/precision.py +1 -1
  594. mindspore/train/metrics/recall.py +1 -1
  595. mindspore/train/metrics/roc.py +2 -2
  596. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  597. mindspore/train/mind_ir_pb2.py +116 -37
  598. mindspore/train/model.py +45 -28
  599. mindspore/train/serialization.py +295 -188
  600. mindspore/train/summary/_summary_adapter.py +1 -1
  601. mindspore/train/summary/summary_record.py +43 -13
  602. mindspore/train/train_thor/convert_utils.py +2 -2
  603. mindspore/train/train_thor/dataset_helper.py +3 -3
  604. mindspore/turbojpeg.dll +0 -0
  605. mindspore/version.py +1 -1
  606. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +3 -2
  607. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +610 -541
  608. mindspore/compression/__init__.py +0 -19
  609. mindspore/compression/common/constant.py +0 -124
  610. mindspore/compression/export/__init__.py +0 -19
  611. mindspore/compression/export/quant_export.py +0 -515
  612. mindspore/compression/quant/__init__.py +0 -28
  613. mindspore/compression/quant/qat.py +0 -634
  614. mindspore/compression/quant/quant_utils.py +0 -462
  615. mindspore/compression/quant/quantizer.py +0 -68
  616. mindspore/nn/layer/quant.py +0 -1868
  617. mindspore/nn/layer/rnn_utils.py +0 -90
  618. mindspore/nn/probability/dpn/__init__.py +0 -22
  619. mindspore/nn/probability/dpn/vae/__init__.py +0 -25
  620. mindspore/nn/probability/dpn/vae/cvae.py +0 -140
  621. mindspore/nn/probability/dpn/vae/vae.py +0 -124
  622. mindspore/nn/probability/infer/__init__.py +0 -22
  623. mindspore/nn/probability/infer/variational/elbo.py +0 -70
  624. mindspore/nn/probability/infer/variational/svi.py +0 -84
  625. mindspore/nn/probability/toolbox/__init__.py +0 -22
  626. mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
  627. mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -364
  628. mindspore/nn/probability/transforms/__init__.py +0 -22
  629. mindspore/nn/probability/transforms/transform_bnn.py +0 -262
  630. mindspore/nn/probability/zhusuan/__init__.py +0 -18
  631. mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
  632. mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
  633. mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
  634. mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
  635. mindspore/ops/_op_impl/aicpu/parallel_concat.py +0 -42
  636. mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
  637. mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -19
  638. mindspore/ops/bprop_mindir/Cast_bprop.mindir +0 -19
  639. mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -19
  640. mindspore/ops/bprop_mindir/MatMul_bprop.mindir +0 -0
  641. mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -17
  642. mindspore/ops/bprop_mindir/Transpose_bprop.mindir +0 -0
  643. mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -15
  644. mindspore/ops/composite/array_ops.py +0 -241
  645. mindspore/ops/composite/clip_ops.py +0 -134
  646. mindspore/ops/composite/random_ops.py +0 -426
  647. mindspore/ops/composite/vmap_ops.py +0 -38
  648. mindspore/parallel/nn/__init__.py +0 -42
  649. mindspore/parallel/nn/loss.py +0 -22
  650. mindspore/parallel/nn/moe.py +0 -21
  651. mindspore/parallel/nn/op_parallel_config.py +0 -22
  652. mindspore/parallel/nn/transformer.py +0 -31
  653. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
  654. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
  655. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
mindspore/ops/operations/_grad_ops.py

@@ -23,10 +23,10 @@ from mindspore.ops import signature as sig
 from mindspore.ops._utils import get_concat_offset
 from mindspore.ops.primitive import Primitive, PrimitiveWithInfer, prim_attr_register
 import mindspore.context as context
-from mindspore._checkparam import Validator as validator, Rel
+from mindspore import _checkparam as validator
 from mindspore.common import dtype as mstype
 from mindspore.communication.management import GlobalComm
-from mindspore.ops._utils import is_shape_unknown, is_dim_unknown
+from mindspore.common._utils import is_shape_unknown, is_dim_unknown
 
 
 class SparseFillEmptyRowsGrad(Primitive):
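
Note: the recurring edit across this file is the validator migration — the Rel comparison enum and the Validator class are gone, and mindspore._checkparam is imported as a module that carries both the checkers and the comparison flags. A minimal before/after sketch of a call site (argument values are arbitrary; the checker names and signatures are taken from the hunks below):

    # 2.0.0a0 style:
    #   from mindspore._checkparam import Validator as validator, Rel
    #   validator.check_float_range(eps, 0, 1, Rel.INC_RIGHT, 'epsilon', name)
    # 2.0.0rc1 style: one module supplies checkers and comparison flags alike.
    from mindspore import _checkparam as validator

    eps = validator.check_float_range(1e-5, 0, 1, validator.INC_RIGHT,
                                      'epsilon', 'BatchNormGrad')
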
@@ -156,7 +156,7 @@ class BatchNormGrad(Primitive):
     @prim_attr_register
     def __init__(self, is_training=False, epsilon=1e-5, data_format='NCHW'):
         self.is_training = validator.check_value_type('is_training', is_training, (bool,), self.name)
-        self.epsilon = validator.check_float_range(epsilon, 0, 1, Rel.INC_RIGHT, 'epsilon', self.name)
+        self.epsilon = validator.check_float_range(epsilon, 0, 1, validator.INC_RIGHT, 'epsilon', self.name)
         self.data_format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.name)
 
 
@@ -166,7 +166,7 @@ class BatchNormGradGrad(Primitive):
     @prim_attr_register
     def __init__(self, is_training=False, epsilon=1e-5, data_format='NCHW'):
         self.is_training = validator.check_value_type('is_training', is_training, (bool,), self.name)
-        self.epsilon = validator.check_float_range(epsilon, 0, 1, Rel.INC_RIGHT, 'epsilon', self.name)
+        self.epsilon = validator.check_float_range(epsilon, 0, 1, validator.INC_RIGHT, 'epsilon', self.name)
         self.data_format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.name)
 
 
@@ -175,10 +175,10 @@ class SyncBatchNormGrad(Primitive):
 
     @prim_attr_register
     def __init__(self, epsilon=1e-5, group="group0", device_num=2):
-        validator.check_float_range(epsilon, 0, 1, Rel.INC_RIGHT, 'epsilon', self.name)
+        validator.check_float_range(epsilon, 0, 1, validator.INC_RIGHT, 'epsilon', self.name)
         if not isinstance(group, str):
             raise TypeError("The group attr of SyncBatchNormGrad must be str.")
-        validator.check_int(device_num, 2, Rel.GE, "device_num", self.name)
+        validator.check_int(device_num, 2, validator.GE, "device_num", self.name)
 
 
 class BiasAddGrad(Primitive):
@@ -591,7 +591,7 @@ class DropoutGrad(Primitive):
 
     @prim_attr_register
     def __init__(self, keep_prob=0.5):
-        self.keep_prob = validator.check_float_range(keep_prob, 0, 1, Rel.INC_RIGHT, "keep_prob", self.name)
+        self.keep_prob = validator.check_float_range(keep_prob, 0, 1, validator.INC_RIGHT, "keep_prob", self.name)
 
 
 class FlattenGrad(PrimitiveWithInfer):
@@ -627,7 +627,7 @@ class InstanceNormV2Grad(Primitive):
         self.init_prim_io_names(inputs=['dy', 'x', 'gamma', 'mean', 'variance', 'save_mean', 'save_variance'],
                                 outputs=['pd_x', 'pd_gamma', 'pd_beta'])
         validator.check_is_float(epsilon, 'epsilon', self.name)
-        validator.check_float_range(epsilon, 0, 1, Rel.INC_RIGHT, 'epsilon', self.name)
+        validator.check_float_range(epsilon, 0, 1, validator.INC_RIGHT, 'epsilon', self.name)
         validator.check_bool(is_training, "is_training", self.name)
 
 
@@ -695,7 +695,7 @@ class NeighborExchangeV2Grad(PrimitiveWithInfer):
 
     def __infer__(self, dy):
         dy_shape = dy['shape']
-        validator.check(f'dy_shape.size()', len(dy_shape), f'4', 4, Rel.EQ, self.name)
+        validator.check(f'dy_shape.size()', len(dy_shape), f'4', 4, validator.EQ, self.name)
         if self.send_rank_ids[5] != -1 or self.send_rank_ids[6] != -1 or self.send_rank_ids[7] != -1:
             dy_shape[3] -= self.send_lens[2]
 
@@ -885,16 +885,7 @@ class AdaptiveAvgPool2DGrad(Primitive):
     @prim_attr_register
     def __init__(self):
         """Initialize AdaptiveAvgPool2DGrad"""
-
-
-class AdaptiveAvgPool2DGradV1(Primitive):
-    """Gradients of the adaptive avg pool 2D V1 operation."""
-
-    @prim_attr_register
-    def __init__(self, orig_input_shape):
-        """Initialize AdaptiveAvgPool2DGradV1"""
-        self.init_prim_io_names(inputs=['input_grad'], outputs=['output_grad'])
-        self.add_prim_attr('orig_input_shape', self.orig_input_shape)
+        self.init_prim_io_names(inputs=['input_grad', 'orig_input_shape'], outputs=['output_grad'])
 
 
 class AdaptiveAvgPool3DGrad(Primitive):
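
Note: the hunk above folds AdaptiveAvgPool2DGradV1 into AdaptiveAvgPool2DGrad by moving orig_input_shape from a construction-time attribute to a second runtime input, which is what allows the shape to be dynamic. A hedged call-site sketch (the int32 dtype of the shape tensor and direct hand-invocation of this internal primitive are assumptions for illustration, not documented usage):

    import numpy as np
    import mindspore as ms
    from mindspore.ops.operations import _grad_ops as G

    dout = ms.Tensor(np.ones((1, 3, 2, 2)), ms.float32)

    # a0: dx = G.AdaptiveAvgPool2DGradV1(orig_input_shape=(1, 3, 4, 4))(dout)
    # rc1: the original input shape travels as data instead of as an attr.
    op = G.AdaptiveAvgPool2DGrad()
    dx = op(dout, ms.Tensor(np.array([1, 3, 4, 4]), ms.int32))  # dtype assumed
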
@@ -1048,7 +1039,7 @@ class MaxPoolGradGrad(_PoolGrad):
         ValueError: If the shapes of `origin_input` and `grad` are not equal.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
     """
 
     @prim_attr_register
@@ -1220,13 +1211,71 @@ class MaximumGradGrad(Primitive):
         self.init_prim_io_names(inputs=['x1', 'x2', 'dy1', 'dy2'], outputs=['sopd_x1', 'sopd_x2', 'sopd_grad'])
 
 
-class MaxPoolGradWithArgmax(_PoolGrad):
+class MaxPoolGradWithArgmax(Primitive):
     """Computes the gradients of MaxPoolWithArgmax."""
+    @prim_attr_register
+    def __init__(self, kernel_size=1, strides=1, pad_mode="VALID", data_format="NCHW"):
+        self.init_prim_io_names(inputs=['x_origin', 'out_origin', 'grad'], outputs=['output'])
+        validator.check_value_type('kernel_size', kernel_size, [int, tuple], self.name)
+        validator.check_value_type('strides', strides, [int, tuple], self.name)
+        validator.check_value_type('pad_mode', pad_mode, [str], self.name)
+        self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME'], 'pad_mode', self.name)
+        self.add_prim_attr("pad_mode", self.pad_mode)
+        self.format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.name)
+        if context.get_context("device_target") != "GPU" and self.format == "NHWC":
+            raise ValueError("NHWC format only support in GPU target.")
+        self.is_maxpoolgradwithargmax = (self.name == "MaxPoolGradWithArgmax")
+        if not self.is_maxpoolgradwithargmax:
+            self.add_prim_attr('data_format', self.format)
+
+        def _grad_check_int_or_tuple(arg_name, arg_val):
+            validator.check_value_type(arg_name, arg_val, (int, tuple), self.name)
+            error_msg = ValueError(f"For '{self.name}' the '{arg_name}' must be an positive int number "
+                                   f"or a tuple of two or four positive int numbers, but got {arg_val}")
+            if isinstance(arg_val, int):
+                ret = (1, arg_val, arg_val, 1)
+            elif len(arg_val) == 2:
+                ret = (1, arg_val[0], arg_val[1], 1)
+            elif len(arg_val) == 4:
+                ret = arg_val
+            else:
+                raise error_msg
+            # whether all elements of tuple are positive integers
+            for item in ret:
+                if not isinstance(item, int) or item <= 0:
+                    raise error_msg
+            return ret
+
+        kernel_size = _grad_check_int_or_tuple("kernel_size", kernel_size)
+        self.kernel_size = kernel_size
+        self.add_prim_attr("kernel_size", self.kernel_size)
+
+        strides = _grad_check_int_or_tuple("strides", strides)
+        self.strides = strides
+        self.add_prim_attr("strides", self.strides)
+
+
+class MaxPoolGradWithArgmaxV2(Primitive):
+    """Gradients of the MaxPoolWithArgmaxV2 operation."""
 
     @prim_attr_register
-    def __init__(self, kernel_size=1, strides=1, pad_mode="VALID"):
-        self.init_prim_io_names(inputs=['x', 'grad', 'argmax'], outputs=['output'])
-        super(MaxPoolGradWithArgmax, self).__init__(kernel_size, strides, pad_mode)
+    def __init__(self, kernel_size, strides=None, pads=0, dilation=(1, 1), ceil_mode=False, argmax_type=mstype.int64):
+        self.init_prim_io_names(inputs=['x', 'grad', 'argmax'], outputs=['y'])
+        self.kernel_size = _check_positive_int_or_tuple("kernel_size", kernel_size, self.name, allow_four=True,
+                                                        ret_four=True)
+        self.add_prim_attr('kernel_size', self.kernel_size)
+        if strides is None:
+            strides = kernel_size
+        self.strides = _check_positive_int_or_tuple("strides", strides, self.name, allow_four=True, ret_four=True)
+        self.add_prim_attr('strides', self.strides)
+        self.pads = _check_positive_int_or_tuple("pads", pads, self.name, allow_four=True, ret_four=True,
+                                                 strict_positive=False)
+        self.add_prim_attr('pads', self.pads)
+        validator.check_value_type('ceil_mode', ceil_mode, bool, self.name)
+        self.add_prim_attr('ceil_mode', self.ceil_mode)
+        self.dilation = _check_positive_int_or_tuple("dilation", dilation, self.name, allow_four=True, ret_four=True)
+        self.add_prim_attr('dilation', self.dilation)
+        self.add_prim_attr('argmax_type', self.argmax_type)
 
 
 class MaxPool3DGradWithArgmax(Primitive):
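
Note: MaxPoolGradWithArgmax no longer inherits its argument handling from _PoolGrad; the inlined _grad_check_int_or_tuple helper normalizes kernel_size and strides into 4-tuples. The same rule restated as a standalone function (logic copied from the hunk above, minus the primitive plumbing), so the accepted input forms are explicit:

    def normalize_pool_arg(arg_val):
        """int or 2/4-tuple of positive ints -> (1, h, w, 1)-style 4-tuple."""
        if isinstance(arg_val, int):
            ret = (1, arg_val, arg_val, 1)
        elif len(arg_val) == 2:
            ret = (1, arg_val[0], arg_val[1], 1)
        elif len(arg_val) == 4:
            ret = tuple(arg_val)
        else:
            raise ValueError("expected an int or a tuple of 2 or 4 ints")
        if any(not isinstance(i, int) or i <= 0 for i in ret):
            raise ValueError("every element must be a positive int")
        return ret

    assert normalize_pool_arg(3) == (1, 3, 3, 1)
    assert normalize_pool_arg((2, 3)) == (1, 2, 3, 1)
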
@@ -1291,7 +1340,7 @@ class MaxPoolGradGradWithArgmax(_PoolGrad):
         ValueError: If the shapes of `x` and `grad` are not equal.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
     """
 
     @prim_attr_register
@@ -1409,7 +1458,7 @@ class LayerNormGradGrad(Primitive):
         ValueError: If gamma, d_dg, d_db don't have the same shape.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
     """
 
     @prim_attr_register
@@ -1430,7 +1479,7 @@ class LogSoftmaxGrad(Primitive):
         validator.check_value_type("axis", axis, [int], self.name)
 
 
-class LSTMGradData(PrimitiveWithInfer):
+class LSTMGradData(Primitive):
     """Computes the data gradients of LSTM."""
 
     @prim_attr_register
@@ -1441,43 +1490,15 @@ class LSTMGradData(PrimitiveWithInfer):
         self.has_bias = validator.check_value_type('has_bias', has_bias, (bool,), self.name)
         self.bidirectional = validator.check_value_type('bidirectional', bidirectional, (bool,), self.name)
         self.dropout = validator.check_value_type("dropout", dropout, [float], self.name)
-        self.dropout = validator.check_float_range(dropout, 0, 1, Rel.INC_BOTH, 'dropout', self.name)
+        self.dropout = validator.check_float_range(dropout, 0, 1, validator.INC_BOTH, 'dropout', self.name)
 
         if bidirectional:
             self.num_directions = 2
         else:
             self.num_directions = 1
 
-    def infer_shape(self, y_shape, dy_shape, dhy_shape, dcy_shape, w_shape,
-                    hx_shape, cx_shape, reserve_shape, state_shape):
-        # dhy and dcy should be same shape
-        validator.check_equal_int(len(dhy_shape), 3, "h_shape", self.name)
-        validator.check_equal_int(len(dhy_shape), len(dcy_shape), "h_shape", self.name)
-        validator.check_equal_int(dhy_shape[0], dcy_shape[0], "h_shape[0]", self.name)
-        validator.check_equal_int(dhy_shape[1], dcy_shape[1], "h_shape[1]", self.name)
-        validator.check_equal_int(dhy_shape[2], dcy_shape[2], "h_shape[2]", self.name)
-
-        validator.check_int(dhy_shape[0], self.num_layers * self.num_directions, Rel.EQ, "h_shape[0]", self.name)
-        validator.check_equal_int(dhy_shape[2], self.hidden_size, "h_shape[2]", self.name)
-
-        validator.check_equal_int(len(dy_shape), 3, "dy_shape", self.name)
-        validator.check_equal_int(dy_shape[1], dhy_shape[1], "dy[1]", self.name)
-        validator.check_int(dy_shape[2], self.hidden_size * self.num_directions, Rel.EQ, "dy[2]", self.name)
-
-        dx_shape = (y_shape[0], y_shape[1], self.input_size)
-        dhx_shape = dhy_shape
-        dcx_shape = dcy_shape
-
-        return (dx_shape, dhx_shape, dcx_shape)
-
-    def infer_dtype(self, y_dtype, dy_dtype, dhy_dtype, dcy_dtype, w_dtype,
-                    hx_dtype, cx_dtype, reserve_dtype, state_dtype):
-        args = {"dy": dy_dtype, "dhy": dhy_dtype, "dcy": dcy_dtype}
-        validator.check_tensors_dtypes_same_and_valid(args, (mstype.float32, mstype.float16), self.name)
-        return (dy_dtype, dy_dtype, dy_dtype)
 
-
-class LSTMGradWeight(PrimitiveWithInfer):
+class LSTMGradWeight(Primitive):
     """Computes the weight gradients of LSTM."""
 
     @prim_attr_register
@@ -1488,31 +1509,15 @@ class LSTMGradWeight(PrimitiveWithInfer):
         self.has_bias = validator.check_value_type('has_bias', has_bias, (bool,), self.name)
         self.bidirectional = validator.check_value_type('bidirectional', bidirectional, (bool,), self.name)
         self.dropout = validator.check_value_type("dropout", dropout, [float], self.name)
-        self.dropout = validator.check_float_range(dropout, 0, 1, Rel.INC_BOTH, 'dropout', self.name)
+        self.dropout = validator.check_float_range(dropout, 0, 1, validator.INC_BOTH, 'dropout', self.name)
 
         if bidirectional:
             self.num_directions = 2
         else:
             self.num_directions = 1
 
-    def infer_shape(self, x_shape, hx_shape, y_shape, reserve_shape, state_shape):
-        weight_size = 0
-        gate_size = 4 * self.hidden_size
-        for layer in range(self.num_layers):
-            for _ in range(self.num_directions):
-                input_layer_size = self.input_size if layer == 0 else self.hidden_size * self.num_directions
-                weight_size += gate_size * input_layer_size
-                weight_size += gate_size * self.hidden_size
-                if self.has_bias:
-                    weight_size += 2 * gate_size
 
-        return (weight_size, 1, 1)
-
-    def infer_dtype(self, x_dtype, hx_dtype, y_dtype, reserve_dtype, state_dtype):
-        return hx_dtype
-
-
-class LSTMGrad(PrimitiveWithInfer):
+class LSTMGrad(Primitive):
     """Computes the data and weight gradients of LSTM."""
 
     @prim_attr_register
@@ -1523,48 +1528,13 @@ class LSTMGrad(PrimitiveWithInfer):
         self.has_bias = validator.check_value_type('has_bias', has_bias, (bool,), self.name)
         self.bidirectional = validator.check_value_type('bidirectional', bidirectional, (bool,), self.name)
         self.dropout = validator.check_value_type("dropout", dropout, [float], self.name)
-        self.dropout = validator.check_float_range(dropout, 0, 1, Rel.INC_BOTH, 'dropout', self.name)
+        self.dropout = validator.check_float_range(dropout, 0, 1, validator.INC_BOTH, 'dropout', self.name)
 
         if bidirectional:
             self.num_directions = 2
         else:
             self.num_directions = 1
 
-    def infer_shape(self, x_shape, hx_shape, cx_shape, w_shape, y_shape, hy_shape, cy_shape, dy_shape, dhy_shape,
-                    dcy_shape, reserve_shape):
-        # dhy and dcy should be same shape
-        validator.check_equal_int(len(dhy_shape), 3, "h_shape", self.name)
-        validator.check_equal_int(len(dhy_shape), len(dcy_shape), "h_shape", self.name)
-        validator.check_equal_int(dhy_shape[0], dcy_shape[0], "h_shape[0]", self.name)
-        validator.check_equal_int(dhy_shape[1], dcy_shape[1], "h_shape[1]", self.name)
-        validator.check_equal_int(dhy_shape[2], dcy_shape[2], "h_shape[2]", self.name)
-
-        validator.check_int(dhy_shape[0], self.num_layers * self.num_directions, Rel.EQ, "h_shape[0]", self.name)
-        validator.check_equal_int(dhy_shape[2], self.hidden_size, "h_shape[2]", self.name)
-
-        validator.check_equal_int(len(dy_shape), 3, "dy_shape", self.name)
-        validator.check_equal_int(dy_shape[1], dhy_shape[1], "dy[1]", self.name)
-        validator.check_int(dy_shape[2], self.hidden_size * self.num_directions, Rel.EQ, "dy[2]", self.name)
-
-        dx_shape = (y_shape[0], y_shape[1], self.input_size)
-        dhx_shape = dhy_shape
-        dcx_shape = dcy_shape
-        weight_size = 0
-        gate_size = 4 * self.hidden_size
-        for layer in range(self.num_layers):
-            for _ in range(self.num_directions):
-                input_layer_size = self.input_size if layer == 0 else self.hidden_size * self.num_directions
-                weight_size += gate_size * input_layer_size
-                weight_size += gate_size * self.hidden_size
-                if self.has_bias:
-                    weight_size += gate_size
-
-        return (dx_shape, dhx_shape, dcx_shape, (weight_size, 1, 1))
-
-    def infer_dtype(self, x_dtype, hx_dtype, cx_dtype, w_dtype, y_dtype, hy_dtype, cy_dtype, dy_dtype, dhy_dtype,
-                    dcy_dtype, reserve_dtype):
-        return (dy_dtype, dy_dtype, dy_dtype, hx_dtype)
-
 
 class DynamicRNNGrad(Primitive):
     """Computes the input gradients of DynamicRNN."""
@@ -1594,7 +1564,7 @@ class GruGradData(PrimitiveWithInfer):
1594
1564
  self.has_bias = validator.check_value_type('has_bias', has_bias, (bool,), self.name)
1595
1565
  self.bidirectional = validator.check_value_type('bidirectional', bidirectional, (bool,), self.name)
1596
1566
  self.dropout = validator.check_value_type("dropout", dropout, [float], self.name)
1597
- self.dropout = validator.check_float_range(dropout, 0, 1, Rel.INC_BOTH, 'dropout', self.name)
1567
+ self.dropout = validator.check_float_range(dropout, 0, 1, validator.INC_BOTH, 'dropout', self.name)
1598
1568
 
1599
1569
  if bidirectional:
1600
1570
  self.num_directions = 2
@@ -1606,12 +1576,12 @@ class GruGradData(PrimitiveWithInfer):
1606
1576
  # dhy and dcy should be same shape
1607
1577
  validator.check_equal_int(len(dhy_shape), 3, "h_shape", self.name)
1608
1578
 
1609
- validator.check_int(dhy_shape[0], self.num_layers * self.num_directions, Rel.EQ, "h_shape[0]", self.name)
1579
+ validator.check_int(dhy_shape[0], self.num_layers * self.num_directions, validator.EQ, "h_shape[0]", self.name)
1610
1580
  validator.check_equal_int(dhy_shape[2], self.hidden_size, "h_shape[2]", self.name)
1611
1581
 
1612
1582
  validator.check_equal_int(len(dy_shape), 3, "dy_shape", self.name)
1613
1583
  validator.check_equal_int(dy_shape[1], dhy_shape[1], "dy[1]", self.name)
1614
- validator.check_int(dy_shape[2], self.hidden_size * self.num_directions, Rel.EQ, "dy[2]", self.name)
1584
+ validator.check_int(dy_shape[2], self.hidden_size * self.num_directions, validator.EQ, "dy[2]", self.name)
1615
1585
 
1616
1586
  dx_shape = (y_shape[0], y_shape[1], self.input_size)
1617
1587
  dhx_shape = dhy_shape
@@ -1636,7 +1606,7 @@ class GruGradWeight(PrimitiveWithInfer):
  self.has_bias = validator.check_value_type('has_bias', has_bias, (bool,), self.name)
  self.bidirectional = validator.check_value_type('bidirectional', bidirectional, (bool,), self.name)
  self.dropout = validator.check_value_type("dropout", dropout, [float], self.name)
- self.dropout = validator.check_float_range(dropout, 0, 1, Rel.INC_BOTH, 'dropout', self.name)
+ self.dropout = validator.check_float_range(dropout, 0, 1, validator.INC_BOTH, 'dropout', self.name)

  if bidirectional:
  self.num_directions = 2
@@ -1660,6 +1630,25 @@ class GruGradWeight(PrimitiveWithInfer):
  return hx_dtype


+ class GRUV2Grad(Primitive):
+ """Computes the gradients of GRU."""
+
+ @prim_attr_register
+ def __init__(self, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout):
+ self.input_size = validator.check_positive_int(input_size, 'input_size', self.name)
+ self.hidden_size = validator.check_positive_int(hidden_size, 'hidden_size', self.name)
+ self.num_layers = validator.check_positive_int(num_layers, 'num_layers', self.name)
+ self.has_bias = validator.check_value_type('has_bias', has_bias, (bool,), self.name)
+ self.bidirectional = validator.check_value_type('bidirectional', bidirectional, (bool,), self.name)
+ self.dropout = validator.check_value_type("dropout", dropout, [float], self.name)
+ self.dropout = validator.check_float_range(dropout, 0, 1, validator.INC_BOTH, 'dropout', self.name)
+
+ if bidirectional:
+ self.num_directions = 2
+ else:
+ self.num_directions = 1
+
+
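The new GRUV2Grad mirrors the attribute validation of GruGradData/GruGradWeight above. A hedged construction sketch (assuming the class is reachable through mindspore.ops.operations._grad_ops like the other primitives in this file):

    from mindspore.ops.operations import _grad_ops

    # dropout must pass check_float_range(0, 1, INC_BOTH); bidirectional
    # flips num_directions from 1 to 2 exactly as in the __init__ above.
    gru_grad = _grad_ops.GRUV2Grad(input_size=32, hidden_size=16, num_layers=2,
                                   has_bias=True, bidirectional=True, dropout=0.0)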
  class DynamicGRUV2Grad(Primitive):
  r"""
  Computes the input gradients of DynamicGRUV2.
@@ -1690,7 +1679,7 @@ class DynamicGRUV2Grad(Primitive):
  - **init_h** (Tensor) - Hidden state of initial time.
  Tensor of shape :math:`(batch\_size, hidden\_size)`.
  The data type must be float16 or float32.
- - **h** (Tensor) - A Tensor of shape :math:`(num\_step, batch\_size, hidden_size)`.
+ - **h** (Tensor) - A Tensor of shape :math:`(num\_step, batch\_size, hidden\_size)`.
  The data type must be float16 or float32.
  - **dy** (Tensor) - Gradient of `y`, has the same shape and data type as `y`.
  - **dh** (Tensor) - Gradient of `h`, has the same shape and data type as `init_h`.
@@ -1712,13 +1701,13 @@ class DynamicGRUV2Grad(Primitive):
  - **dw_hidden** (Tensor) - A Tensor has the same shape as `weight_hidden`.
  Has the same type with input `x`.
  - **db_input** (Tensor) - A Tensor of shape :math:`(3 x hidden\_size)`.
- Has the same type with input `x`.
+ Has the same type with input `init_h`.
  - **db_hidden** (Tensor) - A Tensor of shape :math:`(3 x hidden\_size)`.
- Has the same type with input `x`.
+ Has the same type with input `init_h`.
  - **dx** (Tensor) - A Tensor of shape :math:`(num\_step, batch\_size, hidden\_size)`.
  Has the same type with input `x`.
  - **dh_prev** (Tensor) - A Tensor of shape :math:`(batch\_size, hidden\_size)`.
- Has the same type with input `x`.
+ Has the same type with input `init_h`.
  """

  @prim_attr_register
@@ -1770,6 +1759,44 @@ class PReLUGrad(Primitive):
  pass


+ class RandomGammaGrad(Primitive):
+ r"""
+ Computes the derivative of a random sample of Gamma with respect to alpha.
+
+ Inputs:
+ - **alpha** (Tensor) - α is the shape parameter of the RandomGamma distribution.
+ It must be greater than 0. Must be one of the following types: float32, float64.
+ - **sample** (Tensor) - The sample of random gamma tensor. Must be one of the
+ following types: float32, float64.
+
+ Outputs:
+ The dtype is the same type as alpha.
+ The output shape is derived from the input through broadcasting.
+
+ Raises:
+ TypeError: If the data type of `alpha` and `sample` is neither float32 nor float64.
+ TypeError: If the data types of `alpha` and `sample` are not the same.
+ ValueError: If the last dimension of `sample` is not equal to that of `alpha`.
+
+ Supported Platforms:
+ ``GPU``
+
+ Examples:
+ >>> alpha = Tensor(np.array([1., 0.6, 3., 26.]), mstype.float32)
+ >>> sample = Tensor(np.array([6., 7, 11., 0.5]), mstype.float32)
+ >>> randomgammagrad = ops.RandomGammaGrad()
+ >>> output = randomgammagrad(alpha, sample)
+ >>> print(output)
+ [2.5142431 3.4334087 1.8847835 0.07780622]
+ """
+
+ @prim_attr_register
+ def __init__(self):
+ """Initialize RandomGammaGrad"""
+ self.init_prim_io_names(inputs=['alpha', 'sample'], outputs=['output'])
+ self.add_prim_attr("side_effect_hidden", True)
+
+
  class ReluGrad(Primitive):
  """Performs grad of Relu operation."""
 
@@ -1779,6 +1806,15 @@ class ReluGrad(Primitive):
  self.init_prim_io_names(inputs=['y_backprop', 'x'], outputs=['output'])


+ class SiLUGrad(Primitive):
+ """Performs grad of SiLU operation."""
+
+ @prim_attr_register
+ def __init__(self):
+ """Initialize SiLUGrad"""
+ self.init_prim_io_names(inputs=['dout', 'out'], outputs=['output'])
+
+
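For reference, SiLU is silu(x) = x * sigmoid(x), so its derivative is sigmoid(x) * (1 + x * (1 - sigmoid(x))). A NumPy sketch of the quantity a kernel like SiLUGrad produces (assuming the second operand is the forward input; the reference function below is ours, not the kernel):

    import numpy as np

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    def silu_grad_reference(dout, x):
        # d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x)))
        s = sigmoid(x)
        return dout * s * (1.0 + x * (1.0 - s))

    x = np.array([-1.0, 0.0, 2.0])
    print(silu_grad_reference(np.ones_like(x), x))  # approx [0.0723 0.5 1.0908]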
  class ReLU6Grad(Primitive):
  """Performs grad of ReLU6 operation."""
 
@@ -1864,12 +1900,12 @@ class ResizeLinear1DGrad(Primitive):
  """
  Compute gradient of `ResizeLinear1D` operator.

- Note:
- This is an experimental feature and is subjected to change.
+ .. warning::
+ This is an experimental API that is subject to change.

  Args:
  coordinate_transformation_mode (string): Default is 'align_corners'. Describes how to transform the coordinate
- in the resized tensor to the coordinate in the original tensor. Other optional: 'half_pixel', 'asymmetric'.
+ in the resized tensor to the coordinate in the original tensor. Other optional: 'half_pixel'.
  """

  @prim_attr_register
@@ -1879,7 +1915,7 @@ class ResizeLinear1DGrad(Primitive):
  inputs=['grads', 'input_x'], outputs=['y'])
  validator.check_value_type(
  "coordinate_transformation_mode", coordinate_transformation_mode, [str], self.name)
- validator.check_string(coordinate_transformation_mode, ["align_corners", "half_pixel", "asymmetric"],
+ validator.check_string(coordinate_transformation_mode, ["align_corners", "half_pixel"],
  "coordinate_transformation_mode", self.name)
 
 
@@ -2081,8 +2117,9 @@ class SliceGrad(PrimitiveWithInfer):
  for i in range(dy_shape_len):
  if size_value[i] == -1:
  size_value[i] = x_shape[i] - begin_v[i]
- validator.check(f'dy_shape[{i}]', dy_shape[i], f'x_shape[{i}]', x_shape[i], Rel.LE, self.name)
- validator.check(f'dy_shape[{i}]', dy_shape[i], f'size_shape[{i}]', size_value[i], Rel.EQ, self.name)
+ validator.check(f'dy_shape[{i}]', dy_shape[i], f'x_shape[{i}]', x_shape[i], validator.LE, self.name)
+ validator.check(f'dy_shape[{i}]', dy_shape[i], f'size_shape[{i}]',
+ size_value[i], validator.EQ, self.name)
 
  return {'shape': x_shape,
  'dtype': x['dtype'],
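The -1 sentinel handled above means "take everything from begin to the end of that axis"; a small sketch of the resolution rule:

    def resolve_slice_size(x_shape, begin, size):
        """A size entry of -1 expands to x_shape[i] - begin[i]."""
        return tuple(x_shape[i] - begin[i] if size[i] == -1 else size[i]
                     for i in range(len(x_shape)))

    # slicing a (4, 6) tensor from begin (1, 2) with size (-1, 3) -> dy is (3, 3)
    assert resolve_slice_size((4, 6), (1, 2), (-1, 3)) == (3, 3)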
@@ -2105,6 +2142,7 @@ class SmoothL1LossGrad(Primitive):
 
  @prim_attr_register
  def __init__(self, beta=1.0, reduction='none'):
+ self.add_prim_attr('sigma', self.beta)
  self.reduction = validator.check_string(
  reduction, ['none', 'sum', 'mean'], 'reduction', self.name)
 
@@ -2216,7 +2254,7 @@ class PadV3Grad(Primitive):
  """Initialize Padv3Grad"""
  self.add_prim_attr("cust_aicpu", self.name)
  self.init_prim_io_names(inputs=['x', 'paddings'], outputs=['y'])
- validator.check_string(mode, ['reflect', 'edge'], 'mode', self.name)
+ validator.check_string(mode, ['reflect', 'edge', 'circular'], 'mode', self.name)
  validator.check_bool(paddings_contiguous, "paddings_contiguous", self.name)
  self.set_const_input_indexes([1])
  self.mode = mode
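The newly accepted 'circular' mode tiles the input instead of mirroring or clamping it; NumPy's pad modes make the distinction concrete (an analogy for the three modes, not the PadV3 kernel):

    import numpy as np

    x = np.array([1, 2, 3, 4])
    print(np.pad(x, 2, mode="reflect"))  # [3 2 1 2 3 4 3 2]
    print(np.pad(x, 2, mode="edge"))     # [1 1 1 2 3 4 4 4]
    print(np.pad(x, 2, mode="wrap"))     # [3 4 1 2 3 4 1 2]  <- 'circular'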
@@ -2312,20 +2350,20 @@ class BasicLSTMCellCStateGrad(PrimitiveWithInfer):
  def infer_shape(self, c_shape, dht_shape, dct_shape, it_shape, jt_shape, ft_shape, ot_shape, tanhct_shape):
  # dhy and dcy should be same shape
  validator.check_equal_int(len(c_shape), 2, "c rank", self.name)
- validator.check("dht rank", len(dht_shape), "c rank", len(c_shape), Rel.EQ, self.name)
- validator.check("dct rank", len(dct_shape), "c rank", len(c_shape), Rel.EQ, self.name)
- validator.check("it rank", len(it_shape), "c rank", len(c_shape), Rel.EQ, self.name)
- validator.check("jt rank", len(jt_shape), "c rank", len(c_shape), Rel.EQ, self.name)
- validator.check("ft rank", len(ft_shape), "c rank", len(c_shape), Rel.EQ, self.name)
- validator.check("ot rank", len(ot_shape), "c rank", len(c_shape), Rel.EQ, self.name)
- validator.check("tanhct rank", len(tanhct_shape), "c rank", len(c_shape), Rel.EQ, self.name)
- validator.check("dht shape", dht_shape, "c shape", c_shape, Rel.EQ, self.name)
- validator.check("dct shape", dct_shape, "c shape", c_shape, Rel.EQ, self.name)
- validator.check("it shape", it_shape, "c shape", c_shape, Rel.EQ, self.name)
- validator.check("jt shape", jt_shape, "c shape", c_shape, Rel.EQ, self.name)
- validator.check("ft shape", ft_shape, "c shape", c_shape, Rel.EQ, self.name)
- validator.check("ot shape", ot_shape, "c shape", c_shape, Rel.EQ, self.name)
- validator.check("tanhct shape", tanhct_shape, "c shape", c_shape, Rel.EQ, self.name)
+ validator.check("dht rank", len(dht_shape), "c rank", len(c_shape), validator.EQ, self.name)
+ validator.check("dct rank", len(dct_shape), "c rank", len(c_shape), validator.EQ, self.name)
+ validator.check("it rank", len(it_shape), "c rank", len(c_shape), validator.EQ, self.name)
+ validator.check("jt rank", len(jt_shape), "c rank", len(c_shape), validator.EQ, self.name)
+ validator.check("ft rank", len(ft_shape), "c rank", len(c_shape), validator.EQ, self.name)
+ validator.check("ot rank", len(ot_shape), "c rank", len(c_shape), validator.EQ, self.name)
+ validator.check("tanhct rank", len(tanhct_shape), "c rank", len(c_shape), validator.EQ, self.name)
+ validator.check("dht shape", dht_shape, "c shape", c_shape, validator.EQ, self.name)
+ validator.check("dct shape", dct_shape, "c shape", c_shape, validator.EQ, self.name)
+ validator.check("it shape", it_shape, "c shape", c_shape, validator.EQ, self.name)
+ validator.check("jt shape", jt_shape, "c shape", c_shape, validator.EQ, self.name)
+ validator.check("ft shape", ft_shape, "c shape", c_shape, validator.EQ, self.name)
+ validator.check("ot shape", ot_shape, "c shape", c_shape, validator.EQ, self.name)
+ validator.check("tanhct shape", tanhct_shape, "c shape", c_shape, validator.EQ, self.name)

  dgate_shape = (c_shape[0], 4 * c_shape[1])
  dct_1_shape = c_shape
@@ -2361,11 +2399,11 @@ class BasicLSTMCellWeightGrad(PrimitiveWithInfer):
 
  def infer_shape(self, x_shape, h_shape, dgate_shape):
  validator.check_equal_int(len(x_shape), 2, "x rank", self.name)
- validator.check("h rank", len(h_shape), " x rank", len(x_shape), Rel.EQ, self.name)
- validator.check("dgate rank", len(dgate_shape), "x rank", len(x_shape), Rel.EQ, self.name)
- validator.check("h_shape[0]", h_shape[0], "x_shape[0]", x_shape[0], Rel.EQ, self.name)
- validator.check("dgate_shape[0]", dgate_shape[0], "h_shape[0]", h_shape[0], Rel.EQ, self.name)
- validator.check("dgate_shape[1]", dgate_shape[1], "4*h_shape[1]", 4 * h_shape[1], Rel.EQ, self.name)
+ validator.check("h rank", len(h_shape), " x rank", len(x_shape), validator.EQ, self.name)
+ validator.check("dgate rank", len(dgate_shape), "x rank", len(x_shape), validator.EQ, self.name)
+ validator.check("h_shape[0]", h_shape[0], "x_shape[0]", x_shape[0], validator.EQ, self.name)
+ validator.check("dgate_shape[0]", dgate_shape[0], "h_shape[0]", h_shape[0], validator.EQ, self.name)
+ validator.check("dgate_shape[1]", dgate_shape[1], "4*h_shape[1]", 4 * h_shape[1], validator.EQ, self.name)
  input_size = x_shape[1]
  hidden_size = h_shape[1]
  dw_shape = (input_size + hidden_size, 4 * hidden_size)
@@ -2388,12 +2426,12 @@ class BasicLSTMCellInputGrad(PrimitiveWithInfer):
  @prim_attr_register
  def __init__(self, keep_prob):
  self.keep_prob = validator.check_value_type("keep_prob", keep_prob, [float], self.name)
- self.keep_prob = validator.check_float_range(keep_prob, 0.0, 1.0, Rel.INC_BOTH, "keep_prob", self.name)
+ self.keep_prob = validator.check_float_range(keep_prob, 0.0, 1.0, validator.INC_BOTH, "keep_prob", self.name)

  def infer_shape(self, dgate_shape, w_shape):
  validator.check_equal_int(len(dgate_shape), 2, "dgate rank", self.name)
  validator.check_equal_int(len(w_shape), 2, "w rank", self.name)
- validator.check("dgate_shape[1]", dgate_shape[1], "w_shape[1]", w_shape[1], Rel.EQ, self.name)
+ validator.check("dgate_shape[1]", dgate_shape[1], "w_shape[1]", w_shape[1], validator.EQ, self.name)
  batch_size = dgate_shape[0]
  hidden_size = dgate_shape[1] // 4
  input_size = w_shape[0] - hidden_size
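The size bookkeeping above follows from how the cell packs its tensors: dgate stacks the four gates along axis 1, and the weight rows cover the concatenated [x; h]. A sketch of the same recovery:

    def basic_lstm_cell_sizes(dgate_shape, w_shape):
        """Recovers (batch, hidden, input) exactly as the infer_shape above."""
        batch_size = dgate_shape[0]
        hidden_size = dgate_shape[1] // 4      # four gates stacked on axis 1
        input_size = w_shape[0] - hidden_size  # w rows cover [x; h]
        return batch_size, hidden_size, input_size

    assert basic_lstm_cell_sizes((8, 64), (26, 64)) == (8, 16, 10)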
@@ -2417,7 +2455,7 @@ class InvGrad(Primitive):
  self.init_prim_io_names(inputs=['x', 'grad'], outputs=['y'])


- class LRNGrad(PrimitiveWithInfer):
+ class LRNGrad(Primitive):
  """Computes gradients for LRN operation."""

  @prim_attr_register
@@ -2428,14 +2466,6 @@ class LRNGrad(PrimitiveWithInfer):
  validator.check_value_type("alpha", alpha, [float], self.name)
  validator.check_value_type("beta", beta, [float], self.name)

- def infer_dtype(self, grads, x, y):
- args = {"grads": grads, "x": x, "y": y}
- validator.check_tensors_dtypes_same_and_valid(args, (mstype.float16, mstype.float32,), self.name)
- return x
-
- def infer_shape(self, grads, x, y):
- return x
-

  class MvlgammaGrad(Primitive):
  r"""
@@ -2466,7 +2496,7 @@ class MvlgammaGrad(Primitive):
  ValueError: If all elements of `x` are not greater than (p-1)/2.

  Supported Platforms:
- ``CPU``
+ ``Ascend`` ``CPU``
  """

  @prim_attr_register
@@ -2517,7 +2547,7 @@ class SoftShrinkGrad(Primitive):
  def __init__(self, lambd=0.5):
  self.init_prim_io_names(inputs=['input_grad', 'input_x'], outputs=['output'])
  validator.check_value_type("lambd", lambd, [float], self.name)
- validator.check_number("lambd", lambd, 0, Rel.GE, self.name)
+ validator.check_number("lambd", lambd, 0, validator.GE, self.name)


  class CdistGrad(Primitive):
@@ -2554,7 +2584,7 @@ class PdistGrad(Primitive):
  ValueError: If dimension of `x` is not 2.

  Supported Platforms:
- ``GPU`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``
  """

  @prim_attr_register
@@ -2629,7 +2659,7 @@ class HShrinkGrad(Primitive):
  TypeError: If dtype of `gradients` or `features` is neither float16 nor float32.

  Supported Platforms:
- ``Ascend`` ``CPU`` ``GPU``
+ ``Ascend`` ``GPU`` ``CPU``
  """

  @prim_attr_register
@@ -2686,7 +2716,7 @@ class Dilation2DBackpropInput(Primitive):
  ValueError: If `data_format` is not the str of 'NCHW'.

  Supported Platforms:
- ``GPU`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
  (pad_mode="SAME", data_format="NCHW")
@@ -2803,7 +2833,7 @@ class Dilation2DBackpropFilter(Primitive):
 
 
  Supported Platforms:
- ``GPU`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
  (pad_mode="SAME", data_format="NCHW")
@@ -2943,14 +2973,14 @@ class MultiMarginLossGrad(Primitive):
  ValueError: If rank of `x` is not 2 or rank of 'target' is not 1.

  Supported Platforms:
- ``CPU``
+ ``Ascend`` ``CPU``
  """

  @prim_attr_register
  def __init__(self, p=1, margin=1.0, reduction="mean"):
  """Initialize MultiMarginLossGrad"""
  self.p = validator.check_value_type('p', p, [int], self.name)
- validator.check_int(p, {1, 2}, Rel.IN, 'p', self.name)
+ validator.check_int(p, {1, 2}, validator.IN, 'p', self.name)
  self.margin = validator.check_value_type('margin', margin, [float], self.name)
  self.reduction = validator.check_string(reduction, ['none', 'sum', 'mean'], 'reduction', self.name)
  self.init_prim_io_names(inputs=['y_grad', 'x', 'target', 'weight'], outputs=['x_grad'])
@@ -3003,7 +3033,7 @@ class UpsampleTrilinear3DGrad(Primitive):
  ValueError: If elements number of `input_size` is not 5.

  Supported Platforms:
- ``GPU`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``
  """
  @prim_attr_register
  def __init__(self, input_size, output_size=None, scales=None, align_corners=False):
@@ -3071,7 +3101,7 @@ class GridSampler3DGrad(Primitive):
  ValueError: If the shape of `grad` is inconsistent with the shape of the output result of forward calculation.

  Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
+ ``GPU`` ``CPU``
  """

  @prim_attr_register
@@ -3116,7 +3146,7 @@ class SparseSegmentMeanGrad(Primitive):
  ValueError: If `indices` is out of range of `output_dim0`.

  Supported Platforms:
- ``Ascend`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``
  """

  @prim_attr_register
@@ -3159,9 +3189,9 @@ class MaxUnpool2DGrad(Primitive):
  validator.check_value_type("pads", pads, [int, tuple], self.name)
  validator.check_value_type("output_shape", output_shape, [tuple], self.name)
  validator.check_string(data_format, ['NCHW', 'NHWC'], 'data_format', self.name)
- validator.check_int(len(ksize), 4, Rel.EQ, "ksize rank", self.name)
- validator.check_int(len(strides), 4, Rel.EQ, "strides rank", self.name)
- validator.check_int(len(pads), 4, Rel.EQ, "pads rank", self.name)
+ validator.check_int(len(ksize), 4, validator.EQ, "ksize rank", self.name)
+ validator.check_int(len(strides), 4, validator.EQ, "strides rank", self.name)
+ validator.check_int(len(pads), 4, validator.EQ, "pads rank", self.name)


  class MaxUnpool3DGrad(Primitive):
@@ -3178,9 +3208,9 @@ class MaxUnpool3DGrad(Primitive):
  validator.check_value_type("pads", pads, [int, tuple], self.name)
  validator.check_value_type("output_shape", output_shape, [tuple], self.name)
  validator.check_string(data_format, ['NCDHW', 'NDHWC'], 'data_format', self.name)
- validator.check_int(len(ksize), 5, Rel.EQ, "ksize rank", self.name)
- validator.check_int(len(strides), 5, Rel.EQ, "strides rank", self.name)
- validator.check_int(len(pads), 5, Rel.EQ, "pads rank", self.name)
+ validator.check_int(len(ksize), 5, validator.EQ, "ksize rank", self.name)
+ validator.check_int(len(strides), 5, validator.EQ, "strides rank", self.name)
+ validator.check_int(len(pads), 5, validator.EQ, "pads rank", self.name)


  class FractionalAvgPoolGrad(Primitive):
@@ -3253,7 +3283,7 @@ class TraceGrad(Primitive):
  ValueError: If length of shape of `x_shape` is not equal to 2.

  Supported Platforms:
- ``Ascend`` ``CPU`` ``GPU``
+ ``Ascend`` ``GPU`` ``CPU``
  """

  @prim_attr_register
@@ -3280,7 +3310,7 @@ class IgammaGradA(Primitive):
  ValueError: If `a` could not be broadcast to a tensor with shape of `x`.

  Supported Platforms:
- ``GPU`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``

  Examples:
  >>> a = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32))
@@ -3390,7 +3420,7 @@ class MedianGrad(Primitive):
  ValueError: If shape of `y_grad` is not the same as `y`.

  Supported Platforms:
- ``CPU``
+ ``Ascend`` ``CPU``
  """

  @prim_attr_register
@@ -3479,7 +3509,7 @@ class SparseSegmentSqrtNGrad(Primitive):
  ValueError: If `indices` is bigger than or equal to `output_dim0`.

  Supported Platforms:
- ``GPU`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``
  """

  @prim_attr_register
@@ -3525,7 +3555,7 @@ class GridSampler2DGrad(Primitive):
  ValueError: If the shape of `grad` is inconsistent with the shape of the output result of forward calculation.

  Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
+ ``GPU`` ``CPU``
  """

  @prim_attr_register
@@ -3566,7 +3596,7 @@ class ResizeBicubicGrad(Primitive):
  ValueError: If `size` dim is not 4.

  Supported Platforms:
- ``GPU`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``
  """
  @prim_attr_register
  def __init__(self, align_corners=False, half_pixel_centers=False):
@@ -3595,15 +3625,16 @@ class ResizeBicubicGrad(Primitive):
  validator.check_tensor_dtype_valid("original_image", original_image_dtype,
  [mstype.float32, mstype.float64], self.name)
  # check input shape rank
- validator.check("grads rank", len(grads_shape), "expected", 4, Rel.EQ, self.name)
- validator.check("original_image rank", len(original_image_shape), "expected", 4, Rel.EQ, self.name)
- validator.check("batch_size equal", grads_shape[0], "expected", original_image_shape[0], Rel.EQ, self.name)
- validator.check("channel equal", grads_shape[3], "expected", original_image_shape[3], Rel.EQ, self.name)
+ validator.check("grads rank", len(grads_shape), "expected", 4, validator.EQ, self.name)
+ validator.check("original_image rank", len(original_image_shape), "expected", 4, validator.EQ, self.name)
+ validator.check("batch_size equal", grads_shape[0], "expected",
+ original_image_shape[0], validator.EQ, self.name)
+ validator.check("channel equal", grads_shape[3], "expected", original_image_shape[3], validator.EQ, self.name)
  # check original_image_shape and grads_shape
  validator.check("original_image[0] and grads[0]", original_image_shape[0],
- "expected", grads_shape[0], Rel.EQ, self.name)
+ "expected", grads_shape[0], validator.EQ, self.name)
  validator.check("original_image[3] and grads[3]", original_image_shape[3],
- "expected", grads_shape[3], Rel.EQ, self.name)
+ "expected", grads_shape[3], validator.EQ, self.name)

  batch_size = grads_shape[0]
  height = original_image_shape[1]
@@ -3642,7 +3673,7 @@ class SparseSliceGrad(Primitive):
  ValueError: If the number of `backprop_val_grad` is not corresponding to the number of `new_indices`.
  ValueError: If the shape of `indices[1]` is not corresponding to `start[1]`.
  ValueError: If the shape of `indices[1]` is not corresponding to `new_indices[1]`.
- RunTimeError: If the `backprop_val_grad` is not all backpropagated, because `indices` or `new_indices`
+ RuntimeError: If the `backprop_val_grad` is not all backpropagated, because `indices` or `new_indices`
  is not sorted.

  Supported Platforms:
@@ -3690,7 +3721,7 @@ class FractionalMaxPoolGradWithFixedKsize(Primitive):
  ValueError: If the second dimension size of `origin_input` and `out_backprop` is not equal.

  Supported Platforms:
- ``GPU`` ``CPU``
+ ``Ascend`` ``GPU`` ``CPU``
  """

  @prim_attr_register
@@ -3700,6 +3731,42 @@ class FractionalMaxPoolGradWithFixedKsize(Primitive):
  self.init_prim_io_names(inputs=['origin_input', 'out_backprop', 'argmax'], outputs=['y'])


+ class AffineGridGrad(Primitive):
+ r"""
+ Computes gradients for AffineGrid operation.
+
+ Args:
+ align_corners (bool): if True, consider -1 and 1 to refer to the centers
+ of the corner pixels rather than the image corners. Default: False.
+
+ Inputs:
+ - **y_grad** (Tensor) - Data type must be float16 or float32.
+ - **x_size** (tuple) - Data type must be int32 or int64.
+
+ Outputs:
+ Tensor, with data type same as `y_grad`.
+
+ Supported Platforms:
+ ``CPU``
+
+ Examples:
+ >>> import mindspore.ops.operations._grad_ops as _grad_ops
+ >>> affinegridgrad = _grad_ops.AffineGridGrad()
+ >>> y_grad = Tensor(np.ones([1, 2, 2, 2]), mindspore.float32)
+ >>> x_size = (1, 2, 2, 2)
+ >>> x_grad = affinegridgrad(y_grad, x_size)
+ >>> print(x_grad)
+ [[[0. 0. 4.]
+ [0. 0. 4.]]]
+ """
+
+ @prim_attr_register
+ def __init__(self, align_corners=False):
+ """Initialize AffineGridGrad."""
+ validator.check_value_type("align_corners", align_corners, [bool], self.name)
+ self.init_prim_io_names(inputs=['y_grad', 'x_size'], outputs=['x_grad'])
+
+
  class HSigmoidGrad(Primitive):
  """Gets the gradient of HSigmoid operation."""
  @prim_attr_register
@@ -3778,3 +3845,25 @@ class MapTensorGetGrad(Primitive):
  """Initialize MapTensorGetGrad"""
  self.init_prim_io_names(inputs=['map_tensor', 'key_tensor', 'default_value', 'grad'], outputs=['output'])
  self.add_prim_attr('side_effect_mem', True)
+
+
+ class ResizeV2Grad(Primitive):
+ r"""
+ Calculates the gradient of ResizeV2 operation.
+
+ Supported Platforms:
+ ``CPU``
+ """
+
+ @prim_attr_register
+ def __init__(self, coordinate_transformation_mode="half_pixel", mode="nearest"):
+ """Initialize ResizeV2Grad."""
+ self.init_prim_io_names(inputs=["grads", "roi", "scales", "original_size"], outputs=["y"])
+ self.add_prim_attr("nearest_mode", "floor")
+ self.add_prim_attr("cubic_coeff_a", -0.75)
+ validator.check_value_type(
+ "coordinate_transformation_mode", coordinate_transformation_mode, [str], self.name)
+ validator.check_string(coordinate_transformation_mode,
+ ["align_corners", "half_pixel"], "coordinate_transformation_mode", self.name)
+ validator.check_value_type("mode", mode, [str], self.name)
+ validator.check_string(mode, ["nearest", "linear", "cubic"], "mode", self.name)
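A minimal construction sketch for the new primitive, assuming it is exercised like the other grad ops in this module; values outside the two check_string whitelists above raise ValueError:

    from mindspore.ops.operations import _grad_ops

    # 'asymmetric' would be rejected by check_string; only these values pass.
    resize_grad = _grad_ops.ResizeV2Grad(
        coordinate_transformation_mode="align_corners", mode="linear")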