mindspore 2.0.0a0__cp38-cp38-win_amd64.whl → 2.0.0rc1__cp38-cp38-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of mindspore might be problematic.

Files changed (655)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +4 -2
  3. mindspore/_c_dataengine.cp38-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp38-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp38-win_amd64.pyd +0 -0
  6. mindspore/_check_jit_forbidden_api.py +102 -0
  7. mindspore/_checkparam.py +1066 -1001
  8. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +4 -3
  9. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -48
  10. mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -4
  11. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -4
  12. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
  13. mindspore/_extends/parse/__init__.py +5 -3
  14. mindspore/_extends/parse/namespace.py +16 -1
  15. mindspore/_extends/parse/parser.py +107 -22
  16. mindspore/_extends/parse/resources.py +0 -7
  17. mindspore/_extends/parse/standard_method.py +885 -413
  18. mindspore/amp.py +52 -57
  19. mindspore/boost/boost.py +2 -2
  20. mindspore/boost/boost_cell_wrapper.py +38 -20
  21. mindspore/boost/dim_reduce.py +3 -3
  22. mindspore/boost/group_loss_scale_manager.py +1 -1
  23. mindspore/common/__init__.py +4 -6
  24. mindspore/common/_decorator.py +2 -0
  25. mindspore/common/_register_for_adapter.py +55 -0
  26. mindspore/common/_stub_tensor.py +201 -0
  27. mindspore/common/_utils.py +41 -7
  28. mindspore/common/api.py +215 -141
  29. mindspore/common/dtype.py +8 -1
  30. mindspore/common/dump.py +2 -2
  31. mindspore/common/initializer.py +4 -2
  32. mindspore/common/jit_config.py +17 -13
  33. mindspore/common/mutable.py +33 -13
  34. mindspore/common/parameter.py +23 -21
  35. mindspore/common/seed.py +8 -24
  36. mindspore/common/sparse_tensor.py +62 -41
  37. mindspore/common/tensor.py +852 -1154
  38. mindspore/communication/__init__.py +2 -2
  39. mindspore/communication/_comm_helper.py +11 -4
  40. mindspore/communication/management.py +22 -21
  41. mindspore/config/op_info.config +501 -1008
  42. mindspore/context.py +201 -23
  43. mindspore/dataset/__init__.py +6 -6
  44. mindspore/dataset/audio/__init__.py +7 -7
  45. mindspore/dataset/audio/transforms.py +670 -30
  46. mindspore/dataset/audio/utils.py +47 -4
  47. mindspore/dataset/audio/validators.py +223 -1
  48. mindspore/dataset/callback/ds_callback.py +2 -2
  49. mindspore/dataset/core/config.py +210 -14
  50. mindspore/dataset/core/validator_helpers.py +2 -2
  51. mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
  52. mindspore/dataset/debug/debug_hook.py +65 -0
  53. mindspore/dataset/debug/pre_defined_hook.py +67 -0
  54. mindspore/dataset/engine/__init__.py +7 -3
  55. mindspore/dataset/engine/cache_client.py +1 -1
  56. mindspore/dataset/engine/datasets.py +322 -66
  57. mindspore/dataset/engine/datasets_audio.py +80 -76
  58. mindspore/dataset/engine/datasets_standard_format.py +51 -38
  59. mindspore/dataset/engine/datasets_text.py +232 -118
  60. mindspore/dataset/engine/datasets_user_defined.py +41 -17
  61. mindspore/dataset/engine/datasets_vision.py +746 -225
  62. mindspore/dataset/engine/graphdata.py +75 -10
  63. mindspore/dataset/engine/iterators.py +45 -5
  64. mindspore/dataset/engine/offload.py +48 -28
  65. mindspore/dataset/engine/validators.py +117 -8
  66. mindspore/dataset/text/__init__.py +6 -5
  67. mindspore/dataset/text/transforms.py +86 -3
  68. mindspore/dataset/text/utils.py +6 -4
  69. mindspore/dataset/text/validators.py +25 -0
  70. mindspore/dataset/transforms/__init__.py +3 -2
  71. mindspore/dataset/transforms/c_transforms.py +1 -1
  72. mindspore/dataset/transforms/transforms.py +2 -2
  73. mindspore/dataset/utils/__init__.py +2 -1
  74. mindspore/dataset/utils/line_reader.py +121 -0
  75. mindspore/dataset/vision/__init__.py +2 -3
  76. mindspore/dataset/vision/c_transforms.py +9 -9
  77. mindspore/dataset/vision/py_transforms.py +5 -5
  78. mindspore/dataset/vision/py_transforms_util.py +2 -0
  79. mindspore/dataset/vision/transforms.py +160 -161
  80. mindspore/dataset/vision/utils.py +3 -3
  81. mindspore/experimental/map_parameter.py +38 -26
  82. mindspore/include/OWNERS +0 -1
  83. mindspore/include/api/callback/callback.h +9 -13
  84. mindspore/include/api/callback/ckpt_saver.h +2 -2
  85. mindspore/include/api/callback/loss_monitor.h +2 -2
  86. mindspore/include/api/callback/lr_scheduler.h +5 -5
  87. mindspore/include/api/callback/time_monitor.h +2 -2
  88. mindspore/include/api/callback/train_accuracy.h +4 -6
  89. mindspore/include/api/cfg.h +19 -6
  90. mindspore/include/api/context.h +44 -9
  91. mindspore/include/api/delegate.h +1 -1
  92. mindspore/include/api/metrics/accuracy.h +2 -2
  93. mindspore/include/api/metrics/metrics.h +4 -3
  94. mindspore/include/api/model.h +9 -4
  95. mindspore/include/api/model_parallel_runner.h +2 -2
  96. mindspore/include/api/net.h +12 -11
  97. mindspore/include/api/serialization.h +19 -3
  98. mindspore/include/api/types.h +3 -3
  99. mindspore/include/dataset/constants.h +7 -0
  100. mindspore/include/dataset/text.h +59 -0
  101. mindspore/jpeg62.dll +0 -0
  102. mindspore/log.py +1 -1
  103. mindspore/mindrecord/filereader.py +18 -0
  104. mindspore/mindrecord/filewriter.py +197 -34
  105. mindspore/mindrecord/shardreader.py +9 -0
  106. mindspore/mindrecord/shardwriter.py +1 -1
  107. mindspore/mindrecord/tools/cifar100_to_mr.py +3 -3
  108. mindspore/mindrecord/tools/cifar10_to_mr.py +3 -3
  109. mindspore/mindrecord/tools/csv_to_mr.py +3 -3
  110. mindspore/mindrecord/tools/imagenet_to_mr.py +16 -11
  111. mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
  112. mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
  113. mindspore/mindspore_backend.dll +0 -0
  114. mindspore/mindspore_common.dll +0 -0
  115. mindspore/mindspore_core.dll +0 -0
  116. mindspore/mindspore_glog.dll +0 -0
  117. mindspore/mindspore_shared_lib.dll +0 -0
  118. mindspore/nn/__init__.py +0 -4
  119. mindspore/nn/cell.py +204 -132
  120. mindspore/nn/dynamic_lr.py +1 -1
  121. mindspore/nn/grad/cell_grad.py +7 -6
  122. mindspore/nn/layer/__init__.py +5 -4
  123. mindspore/nn/layer/activation.py +40 -89
  124. mindspore/nn/layer/basic.py +255 -624
  125. mindspore/nn/layer/channel_shuffle.py +7 -6
  126. mindspore/nn/layer/combined.py +1 -1
  127. mindspore/nn/layer/container.py +41 -4
  128. mindspore/nn/layer/conv.py +64 -28
  129. mindspore/nn/layer/dense.py +9 -8
  130. mindspore/nn/layer/embedding.py +27 -25
  131. mindspore/nn/layer/image.py +53 -46
  132. mindspore/nn/layer/math.py +97 -105
  133. mindspore/nn/layer/normalization.py +117 -86
  134. mindspore/nn/layer/padding.py +185 -95
  135. mindspore/nn/layer/pooling.py +817 -414
  136. mindspore/nn/layer/rnn_cells.py +10 -15
  137. mindspore/nn/layer/rnns.py +37 -38
  138. mindspore/nn/layer/thor_layer.py +11 -12
  139. mindspore/nn/layer/timedistributed.py +5 -5
  140. mindspore/nn/layer/transformer.py +701 -0
  141. mindspore/nn/learning_rate_schedule.py +8 -8
  142. mindspore/nn/loss/__init__.py +5 -4
  143. mindspore/nn/loss/loss.py +334 -199
  144. mindspore/nn/optim/ada_grad.py +6 -6
  145. mindspore/nn/optim/adadelta.py +2 -3
  146. mindspore/nn/optim/adafactor.py +4 -5
  147. mindspore/nn/optim/adam.py +126 -62
  148. mindspore/nn/optim/adamax.py +3 -4
  149. mindspore/nn/optim/adasum.py +6 -6
  150. mindspore/nn/optim/asgd.py +2 -2
  151. mindspore/nn/optim/ftrl.py +67 -38
  152. mindspore/nn/optim/lamb.py +4 -5
  153. mindspore/nn/optim/lars.py +2 -2
  154. mindspore/nn/optim/lazyadam.py +43 -4
  155. mindspore/nn/optim/momentum.py +6 -5
  156. mindspore/nn/optim/optimizer.py +3 -1
  157. mindspore/nn/optim/proximal_ada_grad.py +2 -2
  158. mindspore/nn/optim/rmsprop.py +1 -1
  159. mindspore/nn/optim/rprop.py +8 -9
  160. mindspore/nn/optim/sgd.py +19 -13
  161. mindspore/nn/optim/thor.py +10 -15
  162. mindspore/nn/probability/__init__.py +0 -2
  163. mindspore/nn/probability/bijector/bijector.py +4 -4
  164. mindspore/nn/probability/bijector/invert.py +1 -1
  165. mindspore/nn/probability/bijector/softplus.py +2 -2
  166. mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
  167. mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
  168. mindspore/nn/probability/distribution/_utils/utils.py +9 -15
  169. mindspore/nn/probability/distribution/bernoulli.py +3 -3
  170. mindspore/nn/probability/distribution/beta.py +1 -1
  171. mindspore/nn/probability/distribution/categorical.py +5 -7
  172. mindspore/nn/probability/distribution/cauchy.py +3 -3
  173. mindspore/nn/probability/distribution/distribution.py +2 -2
  174. mindspore/nn/probability/distribution/exponential.py +2 -2
  175. mindspore/nn/probability/distribution/gamma.py +3 -3
  176. mindspore/nn/probability/distribution/geometric.py +1 -1
  177. mindspore/nn/probability/distribution/gumbel.py +3 -3
  178. mindspore/nn/probability/distribution/half_normal.py +15 -11
  179. mindspore/nn/probability/distribution/laplace.py +16 -13
  180. mindspore/nn/probability/distribution/logistic.py +2 -2
  181. mindspore/nn/probability/distribution/normal.py +1 -1
  182. mindspore/nn/probability/distribution/poisson.py +1 -1
  183. mindspore/nn/probability/distribution/student_t.py +20 -15
  184. mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
  185. mindspore/nn/probability/distribution/uniform.py +2 -2
  186. mindspore/nn/reinforcement/_tensors_queue.py +3 -3
  187. mindspore/nn/reinforcement/tensor_array.py +2 -2
  188. mindspore/nn/sparse/sparse.py +2 -2
  189. mindspore/nn/wrap/cell_wrapper.py +27 -10
  190. mindspore/nn/wrap/grad_reducer.py +2 -2
  191. mindspore/nn/wrap/loss_scale.py +40 -24
  192. mindspore/numpy/array_creations.py +33 -22
  193. mindspore/numpy/array_ops.py +35 -30
  194. mindspore/numpy/logic_ops.py +6 -27
  195. mindspore/numpy/math_ops.py +22 -19
  196. mindspore/numpy/utils.py +1 -1
  197. mindspore/numpy/utils_const.py +108 -58
  198. mindspore/opencv_core452.dll +0 -0
  199. mindspore/opencv_imgcodecs452.dll +0 -0
  200. mindspore/opencv_imgproc452.dll +0 -0
  201. mindspore/ops/_constants.py +0 -6
  202. mindspore/ops/_grad/__init__.py +2 -1
  203. mindspore/ops/_grad/grad_array_ops.py +86 -117
  204. mindspore/ops/_grad/grad_base.py +23 -1
  205. mindspore/ops/_grad/grad_clip_ops.py +2 -3
  206. mindspore/ops/_grad/grad_comm_ops.py +34 -24
  207. mindspore/ops/_grad/grad_implementations.py +9 -45
  208. mindspore/ops/_grad/grad_inner_ops.py +47 -4
  209. mindspore/ops/_grad/grad_math_ops.py +142 -117
  210. mindspore/ops/_grad/grad_nn_ops.py +71 -165
  211. mindspore/ops/_grad/grad_sequence_ops.py +296 -0
  212. mindspore/ops/_grad/grad_sparse.py +7 -6
  213. mindspore/ops/_grad_experimental/__init__.py +1 -0
  214. mindspore/ops/_grad_experimental/grad_array_ops.py +150 -15
  215. mindspore/ops/_grad_experimental/grad_image_ops.py +16 -7
  216. mindspore/ops/_grad_experimental/grad_inner_ops.py +1 -22
  217. mindspore/ops/_grad_experimental/grad_linalg_ops.py +4 -11
  218. mindspore/ops/_grad_experimental/grad_math_ops.py +210 -89
  219. mindspore/ops/_grad_experimental/grad_nn_ops.py +26 -22
  220. mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
  221. mindspore/ops/_grad_experimental/grad_sparse_ops.py +49 -8
  222. mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
  223. mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +2 -2
  224. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
  225. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
  226. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +4 -4
  227. mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
  228. mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
  229. mindspore/ops/_op_impl/_custom_op/correction_mul.py +2 -2
  230. mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
  231. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -5
  232. mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
  233. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
  234. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
  235. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
  236. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
  237. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
  238. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
  239. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
  240. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
  241. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
  242. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
  243. mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
  244. mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
  245. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
  246. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
  247. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
  248. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
  249. mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
  250. mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -4
  251. mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
  252. mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
  253. mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
  254. mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
  255. mindspore/ops/_op_impl/aicpu/__init__.py +236 -4
  256. mindspore/ops/_op_impl/aicpu/abs.py +36 -0
  257. mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_v1.py → adaptive_avg_pool_2d.py} +6 -5
  258. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
  259. mindspore/ops/_op_impl/aicpu/add.py +43 -0
  260. mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
  261. mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
  262. mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
  263. mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -43
  264. mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
  265. mindspore/{compression/common/__init__.py → ops/_op_impl/aicpu/bessel_i0.py} +15 -8
  266. mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
  267. mindspore/ops/_op_impl/aicpu/conj.py +11 -0
  268. mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +0 -3
  269. mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
  270. mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
  271. mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_grad_v1.py → digamma.py} +7 -9
  272. mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
  273. mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
  274. mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
  275. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
  276. mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
  277. mindspore/ops/_op_impl/aicpu/greater.py +41 -0
  278. mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
  279. mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
  280. mindspore/ops/_op_impl/aicpu/less.py +41 -0
  281. mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/lgamma.py} +16 -10
  282. mindspore/ops/_op_impl/aicpu/mirror_pad.py +0 -4
  283. mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
  284. mindspore/ops/_op_impl/aicpu/mul.py +3 -1
  285. mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
  286. mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
  287. mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
  288. mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
  289. mindspore/ops/_op_impl/aicpu/polar.py +32 -0
  290. mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
  291. mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
  292. mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
  293. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
  294. mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
  295. mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
  296. mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
  297. mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
  298. mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
  299. mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
  300. mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
  301. mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
  302. mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
  303. mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
  304. mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
  305. mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
  306. mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
  307. mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
  308. mindspore/ops/_op_impl/aicpu/sparse_slice.py +4 -0
  309. mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +6 -0
  310. mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
  311. mindspore/ops/_op_impl/aicpu/trans_data.py +1 -0
  312. mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
  313. mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
  314. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
  315. mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
  316. mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
  317. mindspore/ops/_op_impl/cpu/sparse_slice.py +4 -0
  318. mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +6 -0
  319. mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
  320. mindspore/ops/_op_impl/tbe/__init__.py +27 -611
  321. mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
  322. mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
  323. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
  324. mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +1 -0
  325. mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
  326. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
  327. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
  328. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
  329. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
  330. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
  331. mindspore/ops/_op_impl/tbe/cast.py +0 -2
  332. mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
  333. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
  334. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +2 -2
  335. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
  336. mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
  337. mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
  338. mindspore/ops/_op_impl/tbe/matmul_ds.py +2 -0
  339. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
  340. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
  341. mindspore/ops/_op_impl/tbe/scatter_mul.py +2 -0
  342. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -2
  343. mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
  344. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
  345. mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
  346. mindspore/ops/_register_for_op.py +1 -0
  347. mindspore/ops/_utils/__init__.py +1 -2
  348. mindspore/ops/_utils/utils.py +19 -40
  349. mindspore/ops/_vmap/vmap_array_ops.py +116 -38
  350. mindspore/ops/_vmap/vmap_base.py +16 -9
  351. mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
  352. mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
  353. mindspore/ops/_vmap/vmap_grad_nn_ops.py +7 -5
  354. mindspore/ops/_vmap/vmap_image_ops.py +12 -5
  355. mindspore/ops/_vmap/vmap_math_ops.py +46 -5
  356. mindspore/ops/_vmap/vmap_nn_ops.py +15 -21
  357. mindspore/ops/_vmap/vmap_random_ops.py +1 -1
  358. mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
  359. mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
  360. mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
  361. mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
  362. mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
  363. mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
  364. mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
  365. mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
  366. mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +220 -106
  367. mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
  368. mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
  369. mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
  370. mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
  371. mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
  372. mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
  373. mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
  374. mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
  375. mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
  376. mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
  377. mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -23
  378. mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -17
  379. mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
  380. mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
  381. mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
  382. mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
  383. mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
  384. mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
  385. mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +39 -41
  386. mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
  387. mindspore/ops/bprop_mindir/Flatten_bprop.mindir +41 -43
  388. mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +51 -57
  389. mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
  390. mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
  391. mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
  392. mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
  393. mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
  394. mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
  395. mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
  396. mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
  397. mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
  398. mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
  399. mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
  400. mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
  401. mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
  402. mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
  403. mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
  404. mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
  405. mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
  406. mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
  407. mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
  408. mindspore/ops/bprop_mindir/OneHot_bprop.mindir +24 -25
  409. mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
  410. mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
  411. mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
  412. mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
  413. mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
  414. mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
  415. mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
  416. mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +18 -19
  417. mindspore/ops/bprop_mindir/Reshape_bprop.mindir +53 -53
  418. mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
  419. mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +77 -85
  420. mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
  421. mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
  422. mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
  423. mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
  424. mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
  425. mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  426. mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
  427. mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
  428. mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  429. mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +37 -39
  430. mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +70 -72
  431. mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
  432. mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
  433. mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
  434. mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
  435. mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +17 -17
  436. mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
  437. mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
  438. mindspore/ops/bprop_mindir/generate_mindir.py +2 -0
  439. mindspore/ops/composite/__init__.py +7 -8
  440. mindspore/ops/composite/base.py +101 -47
  441. mindspore/ops/composite/math_ops.py +188 -158
  442. mindspore/ops/composite/multitype_ops/_compile_utils.py +415 -170
  443. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +142 -87
  444. mindspore/ops/composite/multitype_ops/add_impl.py +6 -1
  445. mindspore/ops/composite/multitype_ops/div_impl.py +2 -3
  446. mindspore/ops/composite/multitype_ops/getitem_impl.py +31 -3
  447. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
  448. mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
  449. mindspore/ops/composite/multitype_ops/in_impl.py +9 -0
  450. mindspore/ops/composite/multitype_ops/less_equal_impl.py +31 -0
  451. mindspore/ops/composite/multitype_ops/less_impl.py +31 -0
  452. mindspore/ops/composite/multitype_ops/mul_impl.py +21 -5
  453. mindspore/ops/composite/multitype_ops/not_in_impl.py +9 -0
  454. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
  455. mindspore/ops/composite/multitype_ops/setitem_impl.py +21 -3
  456. mindspore/ops/composite/multitype_ops/sub_impl.py +1 -1
  457. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +35 -4
  458. mindspore/ops/function/__init__.py +152 -8
  459. mindspore/ops/function/array_func.py +2555 -674
  460. mindspore/ops/function/clip_func.py +209 -13
  461. mindspore/ops/function/debug_func.py +2 -2
  462. mindspore/ops/function/grad/__init__.py +2 -1
  463. mindspore/ops/function/grad/grad_func.py +147 -62
  464. mindspore/ops/function/image_func.py +54 -38
  465. mindspore/ops/function/linalg_func.py +167 -16
  466. mindspore/ops/function/math_func.py +4849 -1492
  467. mindspore/ops/function/nn_func.py +2573 -988
  468. mindspore/ops/function/other_func.py +115 -0
  469. mindspore/ops/function/parameter_func.py +3 -3
  470. mindspore/ops/function/random_func.py +790 -73
  471. mindspore/ops/function/sparse_func.py +98 -78
  472. mindspore/ops/function/sparse_unary_func.py +54 -53
  473. mindspore/ops/function/spectral_func.py +27 -24
  474. mindspore/ops/function/vmap_func.py +22 -2
  475. mindspore/ops/functional.py +97 -37
  476. mindspore/ops/op_info_register.py +70 -28
  477. mindspore/ops/operations/__init__.py +47 -14
  478. mindspore/ops/operations/_csr_ops.py +7 -7
  479. mindspore/ops/operations/_embedding_cache_ops.py +5 -5
  480. mindspore/ops/operations/_grad_ops.py +276 -187
  481. mindspore/ops/operations/_inner_ops.py +319 -113
  482. mindspore/ops/operations/_ms_kernel.py +10 -8
  483. mindspore/ops/operations/_ocr_ops.py +9 -9
  484. mindspore/ops/operations/_opaque_predicate_registry.py +4 -0
  485. mindspore/ops/operations/_quant_ops.py +137 -102
  486. mindspore/ops/operations/_rl_inner_ops.py +121 -60
  487. mindspore/ops/operations/_scalar_ops.py +466 -0
  488. mindspore/ops/operations/_sequence_ops.py +1004 -2
  489. mindspore/ops/operations/_tensor_array.py +10 -11
  490. mindspore/ops/operations/_thor_ops.py +1 -1
  491. mindspore/ops/operations/array_ops.py +801 -466
  492. mindspore/ops/operations/comm_ops.py +51 -49
  493. mindspore/ops/operations/control_ops.py +2 -2
  494. mindspore/ops/operations/custom_ops.py +123 -44
  495. mindspore/ops/operations/debug_ops.py +24 -24
  496. mindspore/ops/operations/image_ops.py +240 -153
  497. mindspore/ops/operations/inner_ops.py +34 -50
  498. mindspore/ops/operations/linalg_ops.py +31 -9
  499. mindspore/ops/operations/math_ops.py +988 -757
  500. mindspore/ops/operations/nn_ops.py +965 -819
  501. mindspore/ops/operations/other_ops.py +51 -40
  502. mindspore/ops/operations/random_ops.py +204 -122
  503. mindspore/ops/operations/rl_ops.py +8 -9
  504. mindspore/ops/operations/sparse_ops.py +254 -93
  505. mindspore/ops/operations/spectral_ops.py +35 -3
  506. mindspore/ops/primitive.py +111 -9
  507. mindspore/parallel/_auto_parallel_context.py +189 -83
  508. mindspore/parallel/_offload_context.py +185 -0
  509. mindspore/parallel/_parallel_serialization.py +99 -7
  510. mindspore/parallel/_ps_context.py +9 -5
  511. mindspore/parallel/_recovery_context.py +1 -1
  512. mindspore/parallel/_tensor.py +7 -1
  513. mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
  514. mindspore/{nn/transformer → parallel/_transformer}/layers.py +6 -37
  515. mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
  516. mindspore/{nn/transformer → parallel/_transformer}/moe.py +20 -16
  517. mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
  518. mindspore/{nn/transformer → parallel/_transformer}/transformer.py +48 -111
  519. mindspore/parallel/_utils.py +1 -2
  520. mindspore/parallel/algo_parameter_config.py +1 -1
  521. mindspore/parallel/checkpoint_transform.py +37 -34
  522. mindspore/parallel/shard.py +17 -18
  523. mindspore/profiler/common/validator/validate_path.py +2 -2
  524. mindspore/profiler/envprofiling.py +69 -47
  525. mindspore/profiler/parser/ascend_timeline_generator.py +49 -42
  526. mindspore/profiler/parser/base_timeline_generator.py +49 -56
  527. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +98 -78
  528. mindspore/profiler/parser/hwts_log_parser.py +1 -1
  529. mindspore/profiler/parser/integrator.py +15 -14
  530. mindspore/profiler/parser/minddata_analyzer.py +2 -2
  531. mindspore/profiler/parser/msadvisor_analyzer.py +12 -25
  532. mindspore/profiler/parser/msadvisor_parser.py +2 -4
  533. mindspore/profiler/parser/optime_parser.py +17 -18
  534. mindspore/profiler/parser/profiler_info.py +2 -1
  535. mindspore/profiler/profiling.py +218 -186
  536. mindspore/rewrite/__init__.py +3 -1
  537. mindspore/rewrite/api/node.py +1 -114
  538. mindspore/rewrite/api/node_type.py +3 -0
  539. mindspore/rewrite/api/pattern_engine.py +31 -1
  540. mindspore/rewrite/api/scoped_value.py +4 -4
  541. mindspore/rewrite/api/symbol_tree.py +3 -78
  542. mindspore/rewrite/api/tree_node_helper.py +1 -1
  543. mindspore/rewrite/ast_creator_register.py +1 -0
  544. mindspore/rewrite/ast_helpers/__init__.py +2 -2
  545. mindspore/rewrite/ast_helpers/ast_creator.py +1 -2
  546. mindspore/rewrite/ast_helpers/ast_finder.py +65 -0
  547. mindspore/rewrite/ast_helpers/ast_modifier.py +11 -3
  548. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +18 -2
  549. mindspore/rewrite/namespace.py +0 -2
  550. mindspore/rewrite/node.py +157 -11
  551. mindspore/rewrite/parsers/assign_parser.py +231 -53
  552. mindspore/rewrite/parsers/class_def_parser.py +187 -109
  553. mindspore/rewrite/parsers/for_parser.py +24 -14
  554. mindspore/rewrite/parsers/function_def_parser.py +21 -4
  555. mindspore/rewrite/parsers/if_parser.py +6 -2
  556. mindspore/rewrite/sparsify/__init__.py +0 -0
  557. mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
  558. mindspore/rewrite/sparsify/sparsify.py +109 -0
  559. mindspore/rewrite/sparsify/utils.py +173 -0
  560. mindspore/rewrite/symbol_tree.py +256 -133
  561. mindspore/rewrite/symbol_tree_builder.py +38 -1
  562. mindspore/run_check/_check_version.py +69 -63
  563. mindspore/run_check/run_check.py +2 -1
  564. mindspore/tinyxml2.dll +0 -0
  565. mindspore/train/__init__.py +1 -1
  566. mindspore/train/_utils.py +28 -5
  567. mindspore/train/amp.py +273 -102
  568. mindspore/train/callback/_backup_and_restore.py +5 -5
  569. mindspore/train/callback/_callback.py +2 -2
  570. mindspore/train/callback/_checkpoint.py +3 -3
  571. mindspore/train/callback/_early_stop.py +3 -3
  572. mindspore/train/callback/_lambda_callback.py +2 -2
  573. mindspore/train/callback/_landscape.py +29 -31
  574. mindspore/train/callback/_loss_monitor.py +3 -3
  575. mindspore/train/callback/_on_request_exit.py +3 -3
  576. mindspore/train/callback/_reduce_lr_on_plateau.py +4 -4
  577. mindspore/train/callback/_summary_collector.py +23 -16
  578. mindspore/train/callback/_time_monitor.py +3 -3
  579. mindspore/train/checkpoint_pb2.py +68 -8
  580. mindspore/train/data_sink.py +15 -3
  581. mindspore/train/dataset_helper.py +10 -15
  582. mindspore/train/loss_scale_manager.py +8 -11
  583. mindspore/train/metrics/__init__.py +1 -1
  584. mindspore/train/metrics/bleu_score.py +1 -1
  585. mindspore/train/metrics/confusion_matrix.py +1 -1
  586. mindspore/train/metrics/cosine_similarity.py +1 -1
  587. mindspore/train/metrics/dice.py +2 -2
  588. mindspore/train/metrics/fbeta.py +1 -1
  589. mindspore/train/metrics/hausdorff_distance.py +4 -3
  590. mindspore/train/metrics/mean_surface_distance.py +2 -2
  591. mindspore/train/metrics/occlusion_sensitivity.py +1 -1
  592. mindspore/train/metrics/perplexity.py +1 -1
  593. mindspore/train/metrics/precision.py +1 -1
  594. mindspore/train/metrics/recall.py +1 -1
  595. mindspore/train/metrics/roc.py +2 -2
  596. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  597. mindspore/train/mind_ir_pb2.py +116 -37
  598. mindspore/train/model.py +45 -28
  599. mindspore/train/serialization.py +295 -188
  600. mindspore/train/summary/_summary_adapter.py +1 -1
  601. mindspore/train/summary/summary_record.py +43 -13
  602. mindspore/train/train_thor/convert_utils.py +2 -2
  603. mindspore/train/train_thor/dataset_helper.py +3 -3
  604. mindspore/turbojpeg.dll +0 -0
  605. mindspore/version.py +1 -1
  606. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +3 -2
  607. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +610 -541
  608. mindspore/compression/__init__.py +0 -19
  609. mindspore/compression/common/constant.py +0 -124
  610. mindspore/compression/export/__init__.py +0 -19
  611. mindspore/compression/export/quant_export.py +0 -515
  612. mindspore/compression/quant/__init__.py +0 -28
  613. mindspore/compression/quant/qat.py +0 -634
  614. mindspore/compression/quant/quant_utils.py +0 -462
  615. mindspore/compression/quant/quantizer.py +0 -68
  616. mindspore/nn/layer/quant.py +0 -1868
  617. mindspore/nn/layer/rnn_utils.py +0 -90
  618. mindspore/nn/probability/dpn/__init__.py +0 -22
  619. mindspore/nn/probability/dpn/vae/__init__.py +0 -25
  620. mindspore/nn/probability/dpn/vae/cvae.py +0 -140
  621. mindspore/nn/probability/dpn/vae/vae.py +0 -124
  622. mindspore/nn/probability/infer/__init__.py +0 -22
  623. mindspore/nn/probability/infer/variational/elbo.py +0 -70
  624. mindspore/nn/probability/infer/variational/svi.py +0 -84
  625. mindspore/nn/probability/toolbox/__init__.py +0 -22
  626. mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
  627. mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -364
  628. mindspore/nn/probability/transforms/__init__.py +0 -22
  629. mindspore/nn/probability/transforms/transform_bnn.py +0 -262
  630. mindspore/nn/probability/zhusuan/__init__.py +0 -18
  631. mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
  632. mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
  633. mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
  634. mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
  635. mindspore/ops/_op_impl/aicpu/parallel_concat.py +0 -42
  636. mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
  637. mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -19
  638. mindspore/ops/bprop_mindir/Cast_bprop.mindir +0 -19
  639. mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -19
  640. mindspore/ops/bprop_mindir/MatMul_bprop.mindir +0 -0
  641. mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -17
  642. mindspore/ops/bprop_mindir/Transpose_bprop.mindir +0 -0
  643. mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -15
  644. mindspore/ops/composite/array_ops.py +0 -241
  645. mindspore/ops/composite/clip_ops.py +0 -134
  646. mindspore/ops/composite/random_ops.py +0 -426
  647. mindspore/ops/composite/vmap_ops.py +0 -38
  648. mindspore/parallel/nn/__init__.py +0 -42
  649. mindspore/parallel/nn/loss.py +0 -22
  650. mindspore/parallel/nn/moe.py +0 -21
  651. mindspore/parallel/nn/op_parallel_config.py +0 -22
  652. mindspore/parallel/nn/transformer.py +0 -31
  653. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
  654. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
  655. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0

mindspore/train/callback/_landscape.py

@@ -282,7 +282,7 @@ class SummaryLandscape:
  For example, in image dataset, You can set num_samples is 2048,
  which means that 2048 images are used to create loss landscape.
  Default: 2048.
- - intervals (List[List[int]): Specifies the interval
+ - intervals (List[List[int]]): Specifies the interval
  in which the loss landscape. For example: If the user wants to
  create loss landscape of two training processes, they are 1-5 epoch
  and 6-10 epoch respectively. They can set [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]].
@@ -294,6 +294,12 @@ class SummaryLandscape:
  Default: None. The default save path is the same as the summary file.
  """

+ executor = None
+ if len(device_ids) > 1:
+ executor = ProcessPoolExecutor(len(device_ids))
+ futures = [executor.submit(self._set_context, i) for i in device_ids]
+ wait(futures, return_when=ALL_COMPLETED)
+
  output_path = os.path.realpath(output) if output is not None else self._summary_dir
  summary_record = SummaryRecord(output_path)
  self._check_device_ids(device_ids)
@@ -321,13 +327,14 @@ class SummaryLandscape:
  json.dump(data, file)
  os.chmod(json_path, stat.S_IRUSR)

- for interval, landscape in self._list_landscapes(callback_fn=callback_fn, device_ids=device_ids):
+ for interval, landscape in self._list_landscapes(callback_fn=callback_fn, executor=executor,
+ device_ids=device_ids):
  summary_record.add_value(PluginEnum.LANDSCAPE.value, f'landscape_{str(interval)}', landscape)
  summary_record.record(0)
  summary_record.flush()
  summary_record.close()

- def _list_landscapes(self, callback_fn, device_ids=None):
+ def _list_landscapes(self, callback_fn, executor=None, device_ids=None):
  """Create landscape with single device and list all landscape."""

  if not os.path.exists(os.path.join(self._ckpt_dir, 'train_metadata.json')):
@@ -343,39 +350,30 @@ class SummaryLandscape:
  kwargs = dict(proz=0.2, landscape_size=data['landscape_size'], device_ids=device_ids, callback_fn=callback_fn)

  start = time.time()
- with ProcessPoolExecutor(max_workers=len(device_ids)) as executor:
- if len(device_ids) > 1:
- futures = []
- for device_id in device_ids:
- future = executor.submit(self._set_context, device_id)
- futures.append(future)
- wait(futures, return_when=ALL_COMPLETED)
-
- kwargs['executor'] = executor if len(device_ids) > 1 else None
-
- if data['create_landscape']['train']:
- for i, epochs in enumerate(self._epoch_group.values()):
- self._log_message(data['create_landscape'], index=i, interval=epochs)
- kwargs['epochs'] = epochs
- mid_time = time.time()
- landscape_data = self._create_landscape_by_pca(**kwargs)
- logger.info("Create landscape end, use time: %s s." % (round(time.time() - mid_time, 6)))
- landscape_data.unit = data['unit']
- landscape_data.step_per_epoch = data['step_per_epoch']
- landscape_data.num_samples = data['num_samples']
- yield [epochs[0], epochs[-1]], landscape_data.transform_to_loss_landscape_msg(landscape_data)
-
- if data['create_landscape']['result']:
- final_epochs = [list(self._epoch_group.values())[-1][-1]]
- self._log_message(data['create_landscape'], final_epochs=final_epochs)
- kwargs['epochs'] = final_epochs
+ kwargs['executor'] = executor
+ if data['create_landscape']['train']:
+ for i, epochs in enumerate(self._epoch_group.values()):
+ self._log_message(data['create_landscape'], index=i, interval=epochs)
+ kwargs['epochs'] = epochs
  mid_time = time.time()
- landscape_data = self._create_landscape_by_random(**kwargs)
+ landscape_data = self._create_landscape_by_pca(**kwargs)
  logger.info("Create landscape end, use time: %s s." % (round(time.time() - mid_time, 6)))
  landscape_data.unit = data['unit']
  landscape_data.step_per_epoch = data['step_per_epoch']
  landscape_data.num_samples = data['num_samples']
- yield final_epochs, landscape_data.transform_to_loss_landscape_msg(landscape_data)
+ yield [epochs[0], epochs[-1]], landscape_data.transform_to_loss_landscape_msg(landscape_data)
+
+ if data['create_landscape']['result']:
+ final_epochs = [list(self._epoch_group.values())[-1][-1]]
+ self._log_message(data['create_landscape'], final_epochs=final_epochs)
+ kwargs['epochs'] = final_epochs
+ mid_time = time.time()
+ landscape_data = self._create_landscape_by_random(**kwargs)
+ logger.info("Create landscape end, use time: %s s." % (round(time.time() - mid_time, 6)))
+ landscape_data.unit = data['unit']
+ landscape_data.step_per_epoch = data['step_per_epoch']
+ landscape_data.num_samples = data['num_samples']
+ yield final_epochs, landscape_data.transform_to_loss_landscape_msg(landscape_data)
  logger.info("Total use time: %s s." % (round(time.time() - start, 6)))

  def _log_message(self, create_landscape, index=None, interval=None, final_epochs=None):
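
The hunks above hoist the process pool out of `_list_landscapes`: the pool is now created once in the public method, every worker is warmed up by submitting a per-device `_set_context` task, and the same executor is threaded through the new `executor` parameter instead of being rebuilt inside the generator. A minimal standalone sketch of that create-once, warm-up, reuse pattern; `set_device_context` and `make_landscape` are hypothetical stand-ins for the MindSpore internals:

```python
from concurrent.futures import ProcessPoolExecutor, wait, ALL_COMPLETED


def set_device_context(device_id):
    # Stand-in for per-worker initialization (e.g. binding the process to a device).
    return device_id


def make_landscape(epochs):
    # Stand-in for the expensive per-interval computation.
    return f"landscape for epochs {epochs}"


def list_landscapes(device_ids, epoch_groups):
    # Create the pool once and warm every worker before any real work is submitted.
    executor = None
    if len(device_ids) > 1:
        executor = ProcessPoolExecutor(max_workers=len(device_ids))
        warmup = [executor.submit(set_device_context, d) for d in device_ids]
        wait(warmup, return_when=ALL_COMPLETED)
    try:
        for epochs in epoch_groups:
            if executor is None:
                yield make_landscape(epochs)  # single device: run inline
            else:
                yield executor.submit(make_landscape, epochs).result()
    finally:
        if executor is not None:
            executor.shutdown()


if __name__ == "__main__":
    print(list(list_landscapes([0], [[1, 2, 3], [4, 5]])))
```
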

mindspore/train/callback/_loss_monitor.py

@@ -17,7 +17,7 @@ from __future__ import absolute_import

  import numpy as np

- from mindspore._checkparam import Validator
+ from mindspore import _checkparam as Validator
  from mindspore.train.callback._callback import Callback, _handle_loss

@@ -41,8 +41,8 @@ class LossMonitor(Callback):
  .. note::
  Before running the following example, you need to customize the network LeNet5 and
  dataset preparation function create_dataset. Refer to
- `Building a Network <https://www.mindspore.cn/tutorials/en/r2.0.0-alpha/beginner/model.html>`_
- and `Dataset <https://www.mindspore.cn/tutorials/en/r2.0.0-alpha/beginner/dataset.html>`_ .
+ `Building a Network <https://www.mindspore.cn/tutorials/en/r2.0/beginner/model.html>`_
+ and `Dataset <https://www.mindspore.cn/tutorials/en/r2.0/beginner/dataset.html>`_ .

  >>> from mindspore import nn
  >>> from mindspore.train import Model, LossMonitor

mindspore/train/callback/_on_request_exit.py

@@ -19,7 +19,7 @@ import os
  import signal

  from mindspore import log
- from mindspore._checkparam import Validator
+ from mindspore import _checkparam as Validator
  from mindspore.train.serialization import load_checkpoint, save_checkpoint, export
  from mindspore.train.callback._callback import Callback

@@ -95,8 +95,8 @@ class OnRequestExit(Callback):
  self.train_file_path = os.path.abspath(os.path.join(directory, f"{file_name}_train"))
  self.eval_file_path = os.path.abspath(os.path.join(directory, f"{file_name}_eval"))
  self.sig = Validator.check_isinstance('sig', sig, int)
- if self.sig == signal.SIGKILL or self.sig == signal.SIGINT:
- raise ValueError("Not support send exit request by signal SIGKILL or SIGINT.")
+ if hasattr(signal, "SIGKILL") and self.sig == signal.SIGKILL:
+ raise ValueError("Not support send exit request by signal SIGKILL.")
  self.exit = False

  def on_train_begin(self, run_context):
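
The replacement check above drops the old `SIGINT` restriction and rejects only `SIGKILL`, guarding the lookup with `hasattr` because the `signal` module does not define `SIGKILL` on Windows, the platform this wheel targets. A small sketch of the same portability pattern in isolation; the function name is illustrative:

```python
import signal


def validate_exit_signal(sig):
    """Reject signals that cannot be handled; SIGKILL may be absent (e.g. on Windows)."""
    if not isinstance(sig, int):
        raise TypeError(f"sig should be an int, but got {type(sig)}")
    if hasattr(signal, "SIGKILL") and sig == signal.SIGKILL:
        raise ValueError("Not support send exit request by signal SIGKILL.")
    return sig


if __name__ == "__main__":
    # SIGTERM is defined on all supported platforms, so this passes everywhere.
    print(validate_exit_signal(signal.SIGTERM))
```
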

mindspore/train/callback/_reduce_lr_on_plateau.py

@@ -20,7 +20,7 @@ import numpy as np

  from mindspore.common.tensor import Tensor
  from mindspore.common.parameter import Parameter
- from mindspore._checkparam import Validator, Rel
+ from mindspore import _checkparam as Validator
  from mindspore import log as logger
  from mindspore.ops import functional as F, ReduceOp
  from mindspore import nn, ops
@@ -84,8 +84,8 @@ class ReduceLROnPlateau(Callback):
  .. note::
  Before running the following example, you need to customize the network LeNet5 and
  dataset preparation function create_dataset. Refer to
- `Building a Network <https://www.mindspore.cn/tutorials/en/r2.0.0-alpha/beginner/model.html>`_
- and `Dataset <https://www.mindspore.cn/tutorials/en/r2.0.0-alpha/beginner/dataset.html>`_ .
+ `Building a Network <https://www.mindspore.cn/tutorials/en/r2.0/beginner/model.html>`_
+ and `Dataset <https://www.mindspore.cn/tutorials/en/r2.0/beginner/dataset.html>`_ .

  >>> from mindspore import nn
  >>> from mindspore.train import Model, ReduceLROnPlateau
@@ -102,7 +102,7 @@ class ReduceLROnPlateau(Callback):
  mode='auto', min_delta=1e-4, cooldown=0, min_lr=0):
  super(ReduceLROnPlateau, self).__init__()
  self.monitor = Validator.check_value_type('monitor', monitor, str)
- self.factor = Validator.check_float_range(factor, 0.0, 1.0, Rel.INC_NEITHER)
+ self.factor = Validator.check_float_range(factor, 0.0, 1.0, Validator.INC_NEITHER)
  self.patience = Validator.check_non_negative_int(patience)
  self.verbose = Validator.check_bool(verbose)
  self.mode = Validator.check_value_type('mode', mode, str)
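
Together with the import hunk above it, this change reflects the release-wide move away from `mindspore._checkparam.Validator` and `Rel`: the `_checkparam` module itself is now imported under the `Validator` alias, and the former `Rel` comparison constants are looked up on that module. A hedged before/after sketch of the call sites that appear in these hunks:

```python
# 2.0.0a0 style (removed in this release):
#     from mindspore._checkparam import Validator, Rel
#     factor = Validator.check_float_range(factor, 0.0, 1.0, Rel.INC_NEITHER)

# 2.0.0rc1 style, as used throughout these hunks:
from mindspore import _checkparam as Validator

factor = Validator.check_float_range(0.5, 0.0, 1.0, Validator.INC_NEITHER)
patience = Validator.check_non_negative_int(10)
verbose = Validator.check_bool(False)
```
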

mindspore/train/callback/_summary_collector.py

@@ -103,7 +103,7 @@ class SummaryCollector(Callback):
  training computational graph is collected. Default: True.
  - collect_train_lineage (bool): Whether to collect lineage data for the training phase,
  this field will be displayed on the `lineage page \
- <https://www.mindspore.cn/mindinsight/docs/en/r2.0.0-alpha/lineage_and_scalars_comparison.html>`_
+ <https://www.mindspore.cn/mindinsight/docs/en/r2.0/lineage_and_scalars_comparison.html>`_
  of MindInsight. Default: True.
  - collect_eval_lineage (bool): Whether to collect lineage data for the evaluation phase,
  this field will be displayed on the lineage page of MindInsight. Default: True.
@@ -488,7 +488,26 @@ class SummaryCollector(Callback):
  self._record.add_value('custom_lineage_data', 'custom_lineage_data', packaged_custom_data)
  self._has_saved_custom_data = True
  self._record.record(cb_params.cur_step_num)
+ if not self._dataset_sink_mode:
+ self._collect_tensor_data(cb_params)

+ collect_landscape = self._collect_specified_data.get('collect_landscape')
+ if collect_landscape is not None:
+ intervals = collect_landscape.get('intervals')
+ collect_interval = False
+ for interval in intervals:
+ if "cur_step_num" in cb_params:
+ if cb_params.cur_step_num in interval:
+ collect_interval = True
+ break
+
+ if collect_landscape and collect_landscape.get('unit', 'step') == 'step' and collect_interval:
+ self._save_model_params_for_landscape(cb_params)
+
+ def _collect_tensor_data(self, cb_params):
+ """Collect tensor summary data."""
+ if cb_params.mode != ModeEnum.TRAIN.value:
+ return
  if self._first_step:
  self._tensor_collect_range = self._get_tensor_collect_range(cb_params, self._dataset_sink_mode)
  self._collect_at_step_end(cb_params, plugin_filter=None)
@@ -503,19 +522,6 @@ class SummaryCollector(Callback):
  elif current % self._collect_freq == 0:
  self._collect_at_step_end(cb_params, lambda plugin: plugin != PluginEnum.TENSOR.value)

- collect_landscape = self._collect_specified_data.get('collect_landscape')
- if collect_landscape is not None:
- intervals = collect_landscape.get('intervals')
- collect_interval = False
- for interval in intervals:
- if "cur_step_num" in cb_params:
- if cb_params.cur_step_num in interval:
- collect_interval = True
- break
-
- if collect_landscape and collect_landscape.get('unit', 'step') == 'step' and collect_interval:
- self._save_model_params_for_landscape(cb_params)
-
  def _get_tensor_collect_range(self, cb_params, dataset_sink_mode):
  """Get tensor collect range."""
  total_step = cb_params.epoch_num
@@ -542,6 +548,7 @@ class SummaryCollector(Callback):

  def epoch_end(self, run_context):
  cb_params = run_context.original_args()
+ self._collect_tensor_data(cb_params)
  collect_landscape = self._collect_specified_data.get('collect_landscape')
  if collect_landscape is not None:
  intervals = collect_landscape.get('intervals')
@@ -863,14 +870,14 @@ class SummaryCollector(Callback):
  if regular is not None:
  for parameter in parameters:
  if re.match(regular, parameter.name):
- self._record.add_value(PluginEnum.HISTOGRAM.value, parameter.name+'/auto', parameter.data)
+ self._record.add_value(PluginEnum.HISTOGRAM.value, parameter.name + '/auto', parameter.data)
  return

  # Note: If `histogram_regular` in `self._collect_specified_data` and the value is None,
  # we will collect the first five parameters.
  default_parameter_count = 5
  for parameter in parameters[:default_parameter_count]:
- self._record.add_value(PluginEnum.HISTOGRAM.value, parameter.name+'/auto', parameter.data)
+ self._record.add_value(PluginEnum.HISTOGRAM.value, parameter.name + '/auto', parameter.data)

  @staticmethod
  def _get_learning_rate(optimizer):

mindspore/train/callback/_time_monitor.py

@@ -17,7 +17,7 @@ from __future__ import absolute_import

  import time

- from mindspore._checkparam import Validator
+ from mindspore import _checkparam as Validator
  from mindspore.train.callback._callback import Callback

@@ -37,8 +37,8 @@ class TimeMonitor(Callback):
  .. note::
  Before running the following example, you need to customize the network LeNet5 and
  dataset preparation function create_dataset. Refer to
- `Building a Network <https://www.mindspore.cn/tutorials/en/r2.0.0-alpha/beginner/model.html>`_
- and `Dataset <https://www.mindspore.cn/tutorials/en/r2.0.0-alpha/beginner/dataset.html>`_ .
+ `Building a Network <https://www.mindspore.cn/tutorials/en/r2.0/beginner/model.html>`_
+ and `Dataset <https://www.mindspore.cn/tutorials/en/r2.0/beginner/dataset.html>`_ .

  >>> from mindspore import nn
  >>> from mindspore.train import Model, TimeMonitor

mindspore/train/checkpoint_pb2.py

@@ -19,7 +19,7 @@ DESCRIPTOR = _descriptor.FileDescriptor(
  syntax='proto2',
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
- serialized_pb=b'\n\x10\x63heckpoint.proto\"b\n\nCheckpoint\x12 \n\x05value\x18\x01 \x03(\x0b\x32\x11.Checkpoint.Value\x1a\x32\n\x05Value\x12\x0b\n\x03tag\x18\x01 \x02(\t\x12\x1c\n\x06tensor\x18\x02 \x02(\x0b\x32\x0c.TensorProto\"H\n\x0bTensorProto\x12\x0c\n\x04\x64ims\x18\x01 \x03(\x03\x12\x13\n\x0btensor_type\x18\x02 \x02(\t\x12\x16\n\x0etensor_content\x18\x03 \x02(\x0c'
+ serialized_pb=b'\n\x10\x63heckpoint.proto\"\x93\x01\n\nCheckpoint\x12 \n\x05value\x18\x01 \x03(\x0b\x32\x11.Checkpoint.Value\x1a\x63\n\x05Value\x12\x0b\n\x03tag\x18\x01 \x02(\t\x12\x1e\n\x06tensor\x18\x02 \x01(\x0b\x32\x0c.TensorProtoH\x00\x12$\n\tmaptensor\x18\x03 \x01(\x0b\x32\x0f.MapTensorProtoH\x00\x42\x07\n\x05value\"H\n\x0bTensorProto\x12\x0c\n\x04\x64ims\x18\x01 \x03(\x03\x12\x13\n\x0btensor_type\x18\x02 \x02(\t\x12\x16\n\x0etensor_content\x18\x03 \x02(\x0c\".\n\x0eMapTensorProto\x12\x1c\n\x06tensor\x18\x01 \x03(\x0b\x32\x0c.TensorProto'
  )

@@ -42,7 +42,14 @@ _CHECKPOINT_VALUE = _descriptor.Descriptor(
  serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  _descriptor.FieldDescriptor(
  name='tensor', full_name='Checkpoint.Value.tensor', index=1,
- number=2, type=11, cpp_type=10, label=2,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='maptensor', full_name='Checkpoint.Value.maptensor', index=2,
+ number=3, type=11, cpp_type=10, label=1,
  has_default_value=False, default_value=None,
  message_type=None, enum_type=None, containing_type=None,
  is_extension=False, extension_scope=None,
@@ -58,9 +65,14 @@ _CHECKPOINT_VALUE = _descriptor.Descriptor(
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
+ _descriptor.OneofDescriptor(
+ name='value', full_name='Checkpoint.Value.value',
+ index=0, containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[]),
  ],
- serialized_start=68,
- serialized_end=118,
+ serialized_start=69,
+ serialized_end=168,
  )

  _CHECKPOINT = _descriptor.Descriptor(
@@ -90,8 +102,8 @@ _CHECKPOINT = _descriptor.Descriptor(
  extension_ranges=[],
  oneofs=[
  ],
- serialized_start=20,
- serialized_end=118,
+ serialized_start=21,
+ serialized_end=168,
  )

@@ -136,15 +148,56 @@ _TENSORPROTO = _descriptor.Descriptor(
  extension_ranges=[],
  oneofs=[
  ],
- serialized_start=120,
- serialized_end=192,
+ serialized_start=170,
+ serialized_end=242,
+ )
+
+
+ _MAPTENSORPROTO = _descriptor.Descriptor(
+ name='MapTensorProto',
+ full_name='MapTensorProto',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='tensor', full_name='MapTensorProto.tensor', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=244,
+ serialized_end=290,
  )

  _CHECKPOINT_VALUE.fields_by_name['tensor'].message_type = _TENSORPROTO
+ _CHECKPOINT_VALUE.fields_by_name['maptensor'].message_type = _MAPTENSORPROTO
  _CHECKPOINT_VALUE.containing_type = _CHECKPOINT
+ _CHECKPOINT_VALUE.oneofs_by_name['value'].fields.append(
+ _CHECKPOINT_VALUE.fields_by_name['tensor'])
+ _CHECKPOINT_VALUE.fields_by_name['tensor'].containing_oneof = _CHECKPOINT_VALUE.oneofs_by_name['value']
+ _CHECKPOINT_VALUE.oneofs_by_name['value'].fields.append(
+ _CHECKPOINT_VALUE.fields_by_name['maptensor'])
+ _CHECKPOINT_VALUE.fields_by_name['maptensor'].containing_oneof = _CHECKPOINT_VALUE.oneofs_by_name['value']
  _CHECKPOINT.fields_by_name['value'].message_type = _CHECKPOINT_VALUE
+ _MAPTENSORPROTO.fields_by_name['tensor'].message_type = _TENSORPROTO
  DESCRIPTOR.message_types_by_name['Checkpoint'] = _CHECKPOINT
  DESCRIPTOR.message_types_by_name['TensorProto'] = _TENSORPROTO
+ DESCRIPTOR.message_types_by_name['MapTensorProto'] = _MAPTENSORPROTO
  _sym_db.RegisterFileDescriptor(DESCRIPTOR)

  Checkpoint = _reflection.GeneratedProtocolMessageType('Checkpoint', (_message.Message,), {
@@ -169,5 +222,12 @@ TensorProto = _reflection.GeneratedProtocolMessageType('TensorProto', (_message.
169
222
  })
170
223
  _sym_db.RegisterMessage(TensorProto)
171
224
 
225
+ MapTensorProto = _reflection.GeneratedProtocolMessageType('MapTensorProto', (_message.Message,), {
226
+ 'DESCRIPTOR' : _MAPTENSORPROTO,
227
+ '__module__' : 'checkpoint_pb2'
228
+ # @@protoc_insertion_point(class_scope:MapTensorProto)
229
+ })
230
+ _sym_db.RegisterMessage(MapTensorProto)
231
+
172
232
 
173
233
  # @@protoc_insertion_point(module_scope)
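
The descriptor changes above amount to a schema change in the checkpoint proto: `Checkpoint.Value.tensor` becomes optional instead of required, a new `maptensor` field of type `MapTensorProto` (a repeated list of `TensorProto`) is added, and the two fields are grouped into a `value` oneof, so each checkpoint entry now carries either a plain tensor or a map tensor. A minimal usage sketch follows; only the message, field, and oneof names come from the diff, while the import path and the assumption that `Checkpoint.value` is a repeated field are carried over from earlier versions of the generated module.

    # Sketch only: exercising the new 'value' oneof on Checkpoint.Value.
    from mindspore.train import checkpoint_pb2  # assumed location of the generated module

    ckpt = checkpoint_pb2.Checkpoint()
    entry = ckpt.value.add()            # Checkpoint.value assumed to be repeated, as in 2.0.0a0

    entry.tensor.SetInParent()          # select the plain TensorProto branch of the oneof
    assert entry.WhichOneof('value') == 'tensor'

    entry.maptensor.tensor.add()        # MapTensorProto.tensor is a repeated TensorProto
    assert entry.WhichOneof('value') == 'maptensor'   # switching branches clears 'tensor'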
@@ -23,7 +23,7 @@ from mindspore.train.dataset_helper import _has_dynamic_shape, _check_inputs
  import mindspore.dataset as ds
  from mindspore._c_expression import _set_dataset_mode_config
  from mindspore.parallel._utils import _get_device_num, _need_to_full, _to_full_shapes, _get_pipeline_stages
- from mindspore._checkparam import Validator
+ from mindspore import _checkparam as Validator


  def _init_sink_dataset(dataset, sink_size, input_signature, create_info):
@@ -52,7 +52,7 @@ def _init_sink_dataset(dataset, sink_size, input_signature, create_info):
  _check_inputs(input_signature, dataset_shapes, dataset_types)

  queue_name = transfer_dataset.queue_name
- if _need_to_full():
+ if _need_to_full() and context.get_context('mode') == context.GRAPH_MODE:
  device_num = _get_device_num() // _get_pipeline_stages()
  dataset_shapes = _to_full_shapes(dataset_shapes, device_num)
  next_op = ops.GetNext(dataset_types, dataset_shapes, len(dataset_types), queue_name)
@@ -90,7 +90,7 @@ def _get_next_op(dataset, ori_next_op, is_info_queue):
  if key in dataset.__sink_aux__.next_ops:
  next_op = dataset.__sink_aux__.next_ops[key]
  else:
- if _need_to_full():
+ if _need_to_full() and context.get_context('mode') == context.GRAPH_MODE:
  device_num = _get_device_num() // _get_pipeline_stages()
  dataset_shapes = _to_full_shapes(dataset_shapes, device_num)
  next_op = ops.GetNext(dataset_types, dataset_shapes, len(dataset_types), queue_name)
@@ -129,6 +129,18 @@ def data_sink(fn, dataset, sink_size=1, jit_config=None, input_signature=None):
  """
  A wrapper function to generate a function for the input function.

+ Note:
+ When using data sinking, the dataset will automatically cycle. At this time,
+ only the total number of training steps (total_step) and the number of steps for each sinking (sink_size)
+ need to be considered. When switching from training by rounds (epochs) to data sinking,
+ the formula is as follows:
+
+ total_step = epochs * dataset_size
+
+ train_sink_step = total_step / sink_size
+
+ After transforming from `mindspore.data_sink`, you need to execute `train_sink_step` step for training.
+
  Args:
  fn (Function): The Python function that will be run with dataset.
  dataset (Dataset): The dataset iterator. The dataset can be generated by dataset generator API in
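
The Note added in this hunk gives the rule for converting epoch-based training into data sinking. Below is a small sketch of that arithmetic in use; `train_step` and `dataset` are placeholders for a user-defined step function and a user-created dataset, not names from the diff.

    # Sketch: turning epochs into a number of data_sink calls, per the Note's formula.
    import mindspore as ms

    epochs, sink_size = 10, 100
    dataset_size = dataset.get_dataset_size()     # size of the user-created dataset
    total_step = epochs * dataset_size
    train_sink_step = total_step // sink_size     # integer count of sinking iterations

    sinked_fn = ms.data_sink(train_step, dataset, sink_size=sink_size)
    for _ in range(train_sink_step):
        out = sinked_fn()                         # each call consumes sink_size steps of data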
@@ -17,7 +17,7 @@ from __future__ import absolute_import

  import math

- from mindspore._checkparam import Validator
+ from mindspore import _checkparam as Validator
  from mindspore.common.dtype import pytype_to_dtype
  from mindspore.common.api import _cell_graph_executor
  from mindspore.common._utils import is_shape_unknown
@@ -27,8 +27,7 @@ from mindspore import context, nn
  from mindspore.train._utils import _exec_datagraph, _get_types_and_shapes, _construct_tensor_list
  from mindspore.parallel._utils import _get_device_num, _get_global_rank, _need_to_full, \
  _to_full_shapes, _get_pipeline_stages
- from mindspore.parallel._ps_context import _is_role_worker, _is_role_sched, _is_ps_mode, \
- _enable_distributed_mindrt
+ from mindspore.parallel._ps_context import _is_role_sched
  from mindspore.ops import operations as P


@@ -64,8 +63,7 @@ def _dynamic_sink_exception_scenario(dataset_iter):
  """The exception scenario for dynamic data is not applicable."""
  _, dataset_shapes = dataset_iter.types_shapes()

- if _has_dynamic_shape(dataset_shapes) or (_is_role_worker() and _is_ps_mode()) or \
- context.get_context("mode") != context.GRAPH_MODE:
+ if _has_dynamic_shape(dataset_shapes) or context.get_context("mode") != context.GRAPH_MODE:
  return True
  return False

@@ -95,6 +93,8 @@ class _DataWrapper(nn.Cell):
  self.get_next = P.GetNext(
  dataset_types, dataset_shapes, len(dataset_types), queue_name)
  self.network = network
+ if isinstance(network, nn.Cell) and network.jit_config_dict:
+ self._jit_config_dict = network.jit_config_dict

  def construct(self):
  outputs = self.get_next()
@@ -133,6 +133,8 @@ def _check_inputs(network_shapes, dataset_shapes, dataset_types):
  Check if set inputs are correct.
  """
  for tensor_index, ele_dataset_shape in enumerate(dataset_shapes):
+ if network_shapes[tensor_index] is None:
+ continue
  set_inputs_shape = list(network_shapes[tensor_index].shape)
  inputs_shape = list(ele_dataset_shape)
  if dataset_types[tensor_index] != network_shapes[tensor_index].dtype:
@@ -145,8 +147,6 @@ def _check_inputs(network_shapes, dataset_shapes, dataset_types):
  raise ValueError(
  f"The {tensor_index + 1}th input dims of 'set_inputs' must be the same as network's input, "
  f"but got 'set_inputs': {len(set_inputs_shape)} and network's input: {len(inputs_shape)}.")
- if network_shapes[tensor_index] is None:
- break
  for index, ele_shape in enumerate(ele_dataset_shape):
  if network_shapes[tensor_index].shape[index] != -1:
  if set_inputs_shape[index] != ele_shape:
@@ -396,7 +396,7 @@ class DatasetHelper:

  def _reset(self, step, epoch):
  """Reset the dataset to the provided step and epoch."""
- self.iter._reset(step, epoch) # pylint: disable=W0212
+ self.iter._reset(step, epoch) # pylint: disable=protected-access

  def get_data_info(self):
  """
@@ -420,9 +420,7 @@ class _DatasetIter:
  ds.config.set_dynamic_shape(True)
  if not hasattr(dataset, '__transfer_dataset__'):
  if hasattr(dataset, '__loop_size__'):
- # PS mode does not support loop sink and need get the real sink size.
- if not (_is_role_worker() and _is_ps_mode()) or _enable_distributed_mindrt():
- self.sink_size = dataset.__loop_size__
+ self.sink_size = dataset.__loop_size__
  create_data_info_queue = (sink_size == 1 and self.sink_count == 1 and dataset.get_dataset_size() != 1
  and not self.dynamic_shape)
  dataset.__transfer_dataset__ = _exec_datagraph(dataset, self.sink_size,
@@ -442,7 +440,7 @@ class _DatasetIter:
  self.continue_send = dataset.__transfer_dataset__.continue_send
  self.get_data_info = dataset.__transfer_dataset__.get_data_info
  if hasattr(dataset.__transfer_dataset__, "_reset"):
- self._reset = dataset.__transfer_dataset__._reset # pylint: disable=W0212
+ self._reset = dataset.__transfer_dataset__._reset # pylint: disable=protected-access

  def __iter__(self):
  self.index = 0
@@ -476,9 +474,6 @@ class _DatasetIter:
  sink_size = 1
  if hasattr(self.dataset, '__loop_size__'):
  sink_size = self.dataset.__loop_size__
- elif _is_role_worker() and _is_ps_mode() and not _enable_distributed_mindrt():
- # PS mode does not support loop sink.
- sink_size = 1
  else:
  if context.get_context("enable_ge") or context.get_context("device_target") == "Ascend" \
  or context.get_context("device_target") == "GPU":
@@ -15,7 +15,7 @@
  """Loss scale manager abstract class."""
  from __future__ import absolute_import

- from mindspore._checkparam import Validator as validator
+ from mindspore import _checkparam as validator
  from mindspore import nn


@@ -58,17 +58,11 @@ class FixedLossScaleManager(LossScaleManager):

  Examples:
  >>> import mindspore as ms
- >>> from mindspore import nn
+ >>> from mindspore import amp, nn
  >>>
  >>> net = Net()
- >>> #1) Drop the parameter update if there is an overflow
- >>> loss_scale_manager = ms.FixedLossScaleManager()
- >>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
- >>> model = ms.Model(net, loss_scale_manager=loss_scale_manager, optimizer=optim)
- >>>
- >>> #2) Execute parameter update even if overflow occurs
  >>> loss_scale = 1024.0
- >>> loss_scale_manager = ms.FixedLossScaleManager(loss_scale, False)
+ >>> loss_scale_manager = amp.FixedLossScaleManager(loss_scale, False)
  >>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9, loss_scale=loss_scale)
  >>> model = ms.Model(net, loss_scale_manager=loss_scale_manager, optimizer=optim)
  """
@@ -131,12 +125,15 @@ class DynamicLossScaleManager(LossScaleManager):
  scale_factor (int): Coefficient of increase and decrease. Default: 2.
  scale_window (int): Maximum continuous normal steps when there is no overflow. Default: 2000.

+ Supported Platforms:
+ ``Ascend`` ``GPU``
+
  Examples:
  >>> import mindspore as ms
- >>> from mindspore import nn
+ >>> from mindspore import amp, nn
  >>>
  >>> net = Net()
- >>> loss_scale_manager = ms.DynamicLossScaleManager()
+ >>> loss_scale_manager = amp.DynamicLossScaleManager()
  >>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
  >>> model = ms.Model(net, loss_scale_manager=loss_scale_manager, optimizer=optim)
  """
@@ -113,7 +113,7 @@ def get_metric_fn(name, *args, **kwargs):
  Gets the metric method based on the input name.

  Args:
- name (str): The name of metric method. Names can be obtained by `mindspore.train.names` .
+ name (str): The name of metric method. Names can be obtained by :func:`mindspore.train.names` .
  object for the currently supported metrics.
  args: Arguments for the metric function.
  kwargs: Keyword arguments for the metric function.
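
A short usage sketch for the API documented in this hunk. It assumes that `get_metric_fn` and `names` are both re-exported at the `mindspore.train` level and that 'accuracy' is among the registered metric names; check `mindspore.train.names()` on your install.

    # Sketch: discover the registered metric names, then build one by name.
    from mindspore import train

    print(train.names())                        # registered metric names
    metric = train.get_metric_fn('accuracy')    # 'accuracy' assumed to be registered
    metric.clear()                              # Metric objects expose clear/update/eval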
@@ -18,7 +18,7 @@ from __future__ import absolute_import
18
18
  from collections import Counter
19
19
  import numpy as np
20
20
 
21
- from mindspore._checkparam import Validator as validator
21
+ from mindspore import _checkparam as validator
22
22
  from mindspore.train.metrics.metric import Metric, rearrange_inputs
23
23
 
24
24
 
@@ -17,7 +17,7 @@ from __future__ import absolute_import
17
17
 
18
18
  import numpy as np
19
19
 
20
- from mindspore._checkparam import Validator as validator
20
+ from mindspore import _checkparam as validator
21
21
  from mindspore.train.metrics.metric import Metric, rearrange_inputs
22
22
 
23
23
 
@@ -17,7 +17,7 @@ from __future__ import absolute_import
17
17
 
18
18
  import numpy as np
19
19
 
20
- from mindspore._checkparam import Validator as validator
20
+ from mindspore import _checkparam as validator
21
21
  from mindspore.train.metrics.metric import Metric, rearrange_inputs
22
22
 
23
23
 
@@ -17,7 +17,7 @@ from __future__ import absolute_import
17
17
 
18
18
  import numpy as np
19
19
 
20
- from mindspore._checkparam import Validator as validator
20
+ from mindspore import _checkparam as validator
21
21
  from mindspore.train.metrics.metric import Metric, rearrange_inputs
22
22
 
23
23
 
@@ -69,7 +69,7 @@ class Dice(Metric):
69
69
  @rearrange_inputs
70
70
  def update(self, *inputs):
71
71
  r"""
72
- Updates the internal evaluation result :math:`y\_pred` and :math:`y`.
72
+ Updates the internal evaluation result `y_pred` and `y`.
73
73
 
74
74
  Args:
75
75
  inputs (tuple): Input `y_pred` and `y`. `y_pred` and `y` are Tensor, list or numpy.ndarray. `y_pred` is the
@@ -18,7 +18,7 @@ from __future__ import absolute_import
18
18
  import sys
19
19
  import numpy as np
20
20
 
21
- from mindspore._checkparam import Validator as validator
21
+ from mindspore import _checkparam as validator
22
22
  from mindspore.train.metrics.metric import Metric, rearrange_inputs, _check_onehot_data
23
23
 
24
24