mindspore 2.0.0a0__cp37-cp37m-win_amd64.whl → 2.0.0rc1__cp37-cp37m-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of mindspore has been flagged as potentially problematic.

Files changed (655)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +4 -2
  3. mindspore/_c_dataengine.cp37-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp37-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp37-win_amd64.pyd +0 -0
  6. mindspore/_check_jit_forbidden_api.py +102 -0
  7. mindspore/_checkparam.py +1066 -1001
  8. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +4 -3
  9. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -48
  10. mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -4
  11. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -4
  12. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
  13. mindspore/_extends/parse/__init__.py +5 -3
  14. mindspore/_extends/parse/namespace.py +16 -1
  15. mindspore/_extends/parse/parser.py +107 -22
  16. mindspore/_extends/parse/resources.py +0 -7
  17. mindspore/_extends/parse/standard_method.py +885 -413
  18. mindspore/amp.py +52 -57
  19. mindspore/boost/boost.py +2 -2
  20. mindspore/boost/boost_cell_wrapper.py +38 -20
  21. mindspore/boost/dim_reduce.py +3 -3
  22. mindspore/boost/group_loss_scale_manager.py +1 -1
  23. mindspore/common/__init__.py +4 -6
  24. mindspore/common/_decorator.py +2 -0
  25. mindspore/common/_register_for_adapter.py +55 -0
  26. mindspore/common/_stub_tensor.py +201 -0
  27. mindspore/common/_utils.py +41 -7
  28. mindspore/common/api.py +215 -141
  29. mindspore/common/dtype.py +8 -1
  30. mindspore/common/dump.py +2 -2
  31. mindspore/common/initializer.py +4 -2
  32. mindspore/common/jit_config.py +17 -13
  33. mindspore/common/mutable.py +33 -13
  34. mindspore/common/parameter.py +23 -21
  35. mindspore/common/seed.py +8 -24
  36. mindspore/common/sparse_tensor.py +62 -41
  37. mindspore/common/tensor.py +852 -1154
  38. mindspore/communication/__init__.py +2 -2
  39. mindspore/communication/_comm_helper.py +11 -4
  40. mindspore/communication/management.py +22 -21
  41. mindspore/config/op_info.config +501 -1008
  42. mindspore/context.py +201 -23
  43. mindspore/dataset/__init__.py +6 -6
  44. mindspore/dataset/audio/__init__.py +7 -7
  45. mindspore/dataset/audio/transforms.py +670 -30
  46. mindspore/dataset/audio/utils.py +47 -4
  47. mindspore/dataset/audio/validators.py +223 -1
  48. mindspore/dataset/callback/ds_callback.py +2 -2
  49. mindspore/dataset/core/config.py +210 -14
  50. mindspore/dataset/core/validator_helpers.py +2 -2
  51. mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
  52. mindspore/dataset/debug/debug_hook.py +65 -0
  53. mindspore/dataset/debug/pre_defined_hook.py +67 -0
  54. mindspore/dataset/engine/__init__.py +7 -3
  55. mindspore/dataset/engine/cache_client.py +1 -1
  56. mindspore/dataset/engine/datasets.py +322 -66
  57. mindspore/dataset/engine/datasets_audio.py +80 -76
  58. mindspore/dataset/engine/datasets_standard_format.py +51 -38
  59. mindspore/dataset/engine/datasets_text.py +232 -118
  60. mindspore/dataset/engine/datasets_user_defined.py +41 -17
  61. mindspore/dataset/engine/datasets_vision.py +746 -225
  62. mindspore/dataset/engine/graphdata.py +75 -10
  63. mindspore/dataset/engine/iterators.py +45 -5
  64. mindspore/dataset/engine/offload.py +48 -28
  65. mindspore/dataset/engine/validators.py +117 -8
  66. mindspore/dataset/text/__init__.py +6 -5
  67. mindspore/dataset/text/transforms.py +86 -3
  68. mindspore/dataset/text/utils.py +6 -4
  69. mindspore/dataset/text/validators.py +25 -0
  70. mindspore/dataset/transforms/__init__.py +3 -2
  71. mindspore/dataset/transforms/c_transforms.py +1 -1
  72. mindspore/dataset/transforms/transforms.py +2 -2
  73. mindspore/dataset/utils/__init__.py +2 -1
  74. mindspore/dataset/utils/line_reader.py +121 -0
  75. mindspore/dataset/vision/__init__.py +2 -3
  76. mindspore/dataset/vision/c_transforms.py +9 -9
  77. mindspore/dataset/vision/py_transforms.py +5 -5
  78. mindspore/dataset/vision/py_transforms_util.py +2 -0
  79. mindspore/dataset/vision/transforms.py +160 -161
  80. mindspore/dataset/vision/utils.py +3 -3
  81. mindspore/experimental/map_parameter.py +38 -26
  82. mindspore/include/OWNERS +0 -1
  83. mindspore/include/api/callback/callback.h +9 -13
  84. mindspore/include/api/callback/ckpt_saver.h +2 -2
  85. mindspore/include/api/callback/loss_monitor.h +2 -2
  86. mindspore/include/api/callback/lr_scheduler.h +5 -5
  87. mindspore/include/api/callback/time_monitor.h +2 -2
  88. mindspore/include/api/callback/train_accuracy.h +4 -6
  89. mindspore/include/api/cfg.h +19 -6
  90. mindspore/include/api/context.h +44 -9
  91. mindspore/include/api/delegate.h +1 -1
  92. mindspore/include/api/metrics/accuracy.h +2 -2
  93. mindspore/include/api/metrics/metrics.h +4 -3
  94. mindspore/include/api/model.h +9 -4
  95. mindspore/include/api/model_parallel_runner.h +2 -2
  96. mindspore/include/api/net.h +12 -11
  97. mindspore/include/api/serialization.h +19 -3
  98. mindspore/include/api/types.h +3 -3
  99. mindspore/include/dataset/constants.h +7 -0
  100. mindspore/include/dataset/text.h +59 -0
  101. mindspore/jpeg62.dll +0 -0
  102. mindspore/log.py +1 -1
  103. mindspore/mindrecord/filereader.py +18 -0
  104. mindspore/mindrecord/filewriter.py +197 -34
  105. mindspore/mindrecord/shardreader.py +9 -0
  106. mindspore/mindrecord/shardwriter.py +1 -1
  107. mindspore/mindrecord/tools/cifar100_to_mr.py +3 -3
  108. mindspore/mindrecord/tools/cifar10_to_mr.py +3 -3
  109. mindspore/mindrecord/tools/csv_to_mr.py +3 -3
  110. mindspore/mindrecord/tools/imagenet_to_mr.py +16 -11
  111. mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
  112. mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
  113. mindspore/mindspore_backend.dll +0 -0
  114. mindspore/mindspore_common.dll +0 -0
  115. mindspore/mindspore_core.dll +0 -0
  116. mindspore/mindspore_glog.dll +0 -0
  117. mindspore/mindspore_shared_lib.dll +0 -0
  118. mindspore/nn/__init__.py +0 -4
  119. mindspore/nn/cell.py +204 -132
  120. mindspore/nn/dynamic_lr.py +1 -1
  121. mindspore/nn/grad/cell_grad.py +7 -6
  122. mindspore/nn/layer/__init__.py +5 -4
  123. mindspore/nn/layer/activation.py +40 -89
  124. mindspore/nn/layer/basic.py +255 -624
  125. mindspore/nn/layer/channel_shuffle.py +7 -6
  126. mindspore/nn/layer/combined.py +1 -1
  127. mindspore/nn/layer/container.py +41 -4
  128. mindspore/nn/layer/conv.py +64 -28
  129. mindspore/nn/layer/dense.py +9 -8
  130. mindspore/nn/layer/embedding.py +27 -25
  131. mindspore/nn/layer/image.py +53 -46
  132. mindspore/nn/layer/math.py +97 -105
  133. mindspore/nn/layer/normalization.py +117 -86
  134. mindspore/nn/layer/padding.py +185 -95
  135. mindspore/nn/layer/pooling.py +817 -414
  136. mindspore/nn/layer/rnn_cells.py +10 -15
  137. mindspore/nn/layer/rnns.py +37 -38
  138. mindspore/nn/layer/thor_layer.py +11 -12
  139. mindspore/nn/layer/timedistributed.py +5 -5
  140. mindspore/nn/layer/transformer.py +701 -0
  141. mindspore/nn/learning_rate_schedule.py +8 -8
  142. mindspore/nn/loss/__init__.py +5 -4
  143. mindspore/nn/loss/loss.py +334 -199
  144. mindspore/nn/optim/ada_grad.py +6 -6
  145. mindspore/nn/optim/adadelta.py +2 -3
  146. mindspore/nn/optim/adafactor.py +4 -5
  147. mindspore/nn/optim/adam.py +126 -62
  148. mindspore/nn/optim/adamax.py +3 -4
  149. mindspore/nn/optim/adasum.py +6 -6
  150. mindspore/nn/optim/asgd.py +2 -2
  151. mindspore/nn/optim/ftrl.py +67 -38
  152. mindspore/nn/optim/lamb.py +4 -5
  153. mindspore/nn/optim/lars.py +2 -2
  154. mindspore/nn/optim/lazyadam.py +43 -4
  155. mindspore/nn/optim/momentum.py +6 -5
  156. mindspore/nn/optim/optimizer.py +3 -1
  157. mindspore/nn/optim/proximal_ada_grad.py +2 -2
  158. mindspore/nn/optim/rmsprop.py +1 -1
  159. mindspore/nn/optim/rprop.py +8 -9
  160. mindspore/nn/optim/sgd.py +19 -13
  161. mindspore/nn/optim/thor.py +10 -15
  162. mindspore/nn/probability/__init__.py +0 -2
  163. mindspore/nn/probability/bijector/bijector.py +4 -4
  164. mindspore/nn/probability/bijector/invert.py +1 -1
  165. mindspore/nn/probability/bijector/softplus.py +2 -2
  166. mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
  167. mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
  168. mindspore/nn/probability/distribution/_utils/utils.py +9 -15
  169. mindspore/nn/probability/distribution/bernoulli.py +3 -3
  170. mindspore/nn/probability/distribution/beta.py +1 -1
  171. mindspore/nn/probability/distribution/categorical.py +5 -7
  172. mindspore/nn/probability/distribution/cauchy.py +3 -3
  173. mindspore/nn/probability/distribution/distribution.py +2 -2
  174. mindspore/nn/probability/distribution/exponential.py +2 -2
  175. mindspore/nn/probability/distribution/gamma.py +3 -3
  176. mindspore/nn/probability/distribution/geometric.py +1 -1
  177. mindspore/nn/probability/distribution/gumbel.py +3 -3
  178. mindspore/nn/probability/distribution/half_normal.py +15 -11
  179. mindspore/nn/probability/distribution/laplace.py +16 -13
  180. mindspore/nn/probability/distribution/logistic.py +2 -2
  181. mindspore/nn/probability/distribution/normal.py +1 -1
  182. mindspore/nn/probability/distribution/poisson.py +1 -1
  183. mindspore/nn/probability/distribution/student_t.py +20 -15
  184. mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
  185. mindspore/nn/probability/distribution/uniform.py +2 -2
  186. mindspore/nn/reinforcement/_tensors_queue.py +3 -3
  187. mindspore/nn/reinforcement/tensor_array.py +2 -2
  188. mindspore/nn/sparse/sparse.py +2 -2
  189. mindspore/nn/wrap/cell_wrapper.py +27 -10
  190. mindspore/nn/wrap/grad_reducer.py +2 -2
  191. mindspore/nn/wrap/loss_scale.py +40 -24
  192. mindspore/numpy/array_creations.py +33 -22
  193. mindspore/numpy/array_ops.py +35 -30
  194. mindspore/numpy/logic_ops.py +6 -27
  195. mindspore/numpy/math_ops.py +22 -19
  196. mindspore/numpy/utils.py +1 -1
  197. mindspore/numpy/utils_const.py +108 -58
  198. mindspore/opencv_core452.dll +0 -0
  199. mindspore/opencv_imgcodecs452.dll +0 -0
  200. mindspore/opencv_imgproc452.dll +0 -0
  201. mindspore/ops/_constants.py +0 -6
  202. mindspore/ops/_grad/__init__.py +2 -1
  203. mindspore/ops/_grad/grad_array_ops.py +86 -117
  204. mindspore/ops/_grad/grad_base.py +23 -1
  205. mindspore/ops/_grad/grad_clip_ops.py +2 -3
  206. mindspore/ops/_grad/grad_comm_ops.py +34 -24
  207. mindspore/ops/_grad/grad_implementations.py +9 -45
  208. mindspore/ops/_grad/grad_inner_ops.py +47 -4
  209. mindspore/ops/_grad/grad_math_ops.py +142 -117
  210. mindspore/ops/_grad/grad_nn_ops.py +71 -165
  211. mindspore/ops/_grad/grad_sequence_ops.py +296 -0
  212. mindspore/ops/_grad/grad_sparse.py +7 -6
  213. mindspore/ops/_grad_experimental/__init__.py +1 -0
  214. mindspore/ops/_grad_experimental/grad_array_ops.py +150 -15
  215. mindspore/ops/_grad_experimental/grad_image_ops.py +16 -7
  216. mindspore/ops/_grad_experimental/grad_inner_ops.py +1 -22
  217. mindspore/ops/_grad_experimental/grad_linalg_ops.py +4 -11
  218. mindspore/ops/_grad_experimental/grad_math_ops.py +210 -89
  219. mindspore/ops/_grad_experimental/grad_nn_ops.py +26 -22
  220. mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
  221. mindspore/ops/_grad_experimental/grad_sparse_ops.py +49 -8
  222. mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
  223. mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +2 -2
  224. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
  225. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
  226. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +4 -4
  227. mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
  228. mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
  229. mindspore/ops/_op_impl/_custom_op/correction_mul.py +2 -2
  230. mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
  231. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -5
  232. mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
  233. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
  234. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
  235. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
  236. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
  237. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
  238. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
  239. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
  240. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
  241. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
  242. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
  243. mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
  244. mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
  245. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
  246. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
  247. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
  248. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
  249. mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
  250. mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -4
  251. mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
  252. mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
  253. mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
  254. mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
  255. mindspore/ops/_op_impl/aicpu/__init__.py +236 -4
  256. mindspore/ops/_op_impl/aicpu/abs.py +36 -0
  257. mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_v1.py → adaptive_avg_pool_2d.py} +6 -5
  258. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
  259. mindspore/ops/_op_impl/aicpu/add.py +43 -0
  260. mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
  261. mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
  262. mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
  263. mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -43
  264. mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
  265. mindspore/{compression/common/__init__.py → ops/_op_impl/aicpu/bessel_i0.py} +15 -8
  266. mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
  267. mindspore/ops/_op_impl/aicpu/conj.py +11 -0
  268. mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +0 -3
  269. mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
  270. mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
  271. mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_grad_v1.py → digamma.py} +7 -9
  272. mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
  273. mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
  274. mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
  275. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
  276. mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
  277. mindspore/ops/_op_impl/aicpu/greater.py +41 -0
  278. mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
  279. mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
  280. mindspore/ops/_op_impl/aicpu/less.py +41 -0
  281. mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/lgamma.py} +16 -10
  282. mindspore/ops/_op_impl/aicpu/mirror_pad.py +0 -4
  283. mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
  284. mindspore/ops/_op_impl/aicpu/mul.py +3 -1
  285. mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
  286. mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
  287. mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
  288. mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
  289. mindspore/ops/_op_impl/aicpu/polar.py +32 -0
  290. mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
  291. mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
  292. mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
  293. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
  294. mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
  295. mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
  296. mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
  297. mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
  298. mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
  299. mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
  300. mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
  301. mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
  302. mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
  303. mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
  304. mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
  305. mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
  306. mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
  307. mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
  308. mindspore/ops/_op_impl/aicpu/sparse_slice.py +4 -0
  309. mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +6 -0
  310. mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
  311. mindspore/ops/_op_impl/aicpu/trans_data.py +1 -0
  312. mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
  313. mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
  314. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
  315. mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
  316. mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
  317. mindspore/ops/_op_impl/cpu/sparse_slice.py +4 -0
  318. mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +6 -0
  319. mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
  320. mindspore/ops/_op_impl/tbe/__init__.py +27 -611
  321. mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
  322. mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
  323. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
  324. mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +1 -0
  325. mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
  326. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
  327. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
  328. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
  329. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
  330. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
  331. mindspore/ops/_op_impl/tbe/cast.py +0 -2
  332. mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
  333. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
  334. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +2 -2
  335. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
  336. mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
  337. mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
  338. mindspore/ops/_op_impl/tbe/matmul_ds.py +2 -0
  339. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
  340. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
  341. mindspore/ops/_op_impl/tbe/scatter_mul.py +2 -0
  342. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -2
  343. mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
  344. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
  345. mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
  346. mindspore/ops/_register_for_op.py +1 -0
  347. mindspore/ops/_utils/__init__.py +1 -2
  348. mindspore/ops/_utils/utils.py +19 -40
  349. mindspore/ops/_vmap/vmap_array_ops.py +116 -38
  350. mindspore/ops/_vmap/vmap_base.py +16 -9
  351. mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
  352. mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
  353. mindspore/ops/_vmap/vmap_grad_nn_ops.py +7 -5
  354. mindspore/ops/_vmap/vmap_image_ops.py +12 -5
  355. mindspore/ops/_vmap/vmap_math_ops.py +46 -5
  356. mindspore/ops/_vmap/vmap_nn_ops.py +15 -21
  357. mindspore/ops/_vmap/vmap_random_ops.py +1 -1
  358. mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
  359. mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
  360. mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
  361. mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
  362. mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
  363. mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
  364. mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
  365. mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
  366. mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +220 -106
  367. mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
  368. mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
  369. mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
  370. mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
  371. mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
  372. mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
  373. mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
  374. mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
  375. mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
  376. mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
  377. mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -23
  378. mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -17
  379. mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
  380. mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
  381. mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
  382. mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
  383. mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
  384. mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
  385. mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +39 -41
  386. mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
  387. mindspore/ops/bprop_mindir/Flatten_bprop.mindir +41 -43
  388. mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +51 -57
  389. mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
  390. mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
  391. mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
  392. mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
  393. mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
  394. mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
  395. mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
  396. mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
  397. mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
  398. mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
  399. mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
  400. mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
  401. mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
  402. mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
  403. mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
  404. mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
  405. mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
  406. mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
  407. mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
  408. mindspore/ops/bprop_mindir/OneHot_bprop.mindir +24 -25
  409. mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
  410. mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
  411. mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
  412. mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
  413. mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
  414. mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
  415. mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
  416. mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +18 -19
  417. mindspore/ops/bprop_mindir/Reshape_bprop.mindir +53 -53
  418. mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
  419. mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +77 -85
  420. mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
  421. mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
  422. mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
  423. mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
  424. mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
  425. mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  426. mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
  427. mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
  428. mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  429. mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +37 -39
  430. mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +70 -72
  431. mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
  432. mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
  433. mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
  434. mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
  435. mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +17 -17
  436. mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
  437. mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
  438. mindspore/ops/bprop_mindir/generate_mindir.py +2 -0
  439. mindspore/ops/composite/__init__.py +7 -8
  440. mindspore/ops/composite/base.py +101 -47
  441. mindspore/ops/composite/math_ops.py +188 -158
  442. mindspore/ops/composite/multitype_ops/_compile_utils.py +415 -170
  443. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +142 -87
  444. mindspore/ops/composite/multitype_ops/add_impl.py +6 -1
  445. mindspore/ops/composite/multitype_ops/div_impl.py +2 -3
  446. mindspore/ops/composite/multitype_ops/getitem_impl.py +31 -3
  447. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
  448. mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
  449. mindspore/ops/composite/multitype_ops/in_impl.py +9 -0
  450. mindspore/ops/composite/multitype_ops/less_equal_impl.py +31 -0
  451. mindspore/ops/composite/multitype_ops/less_impl.py +31 -0
  452. mindspore/ops/composite/multitype_ops/mul_impl.py +21 -5
  453. mindspore/ops/composite/multitype_ops/not_in_impl.py +9 -0
  454. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
  455. mindspore/ops/composite/multitype_ops/setitem_impl.py +21 -3
  456. mindspore/ops/composite/multitype_ops/sub_impl.py +1 -1
  457. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +35 -4
  458. mindspore/ops/function/__init__.py +152 -8
  459. mindspore/ops/function/array_func.py +2555 -674
  460. mindspore/ops/function/clip_func.py +209 -13
  461. mindspore/ops/function/debug_func.py +2 -2
  462. mindspore/ops/function/grad/__init__.py +2 -1
  463. mindspore/ops/function/grad/grad_func.py +147 -62
  464. mindspore/ops/function/image_func.py +54 -38
  465. mindspore/ops/function/linalg_func.py +167 -16
  466. mindspore/ops/function/math_func.py +4849 -1492
  467. mindspore/ops/function/nn_func.py +2573 -988
  468. mindspore/ops/function/other_func.py +115 -0
  469. mindspore/ops/function/parameter_func.py +3 -3
  470. mindspore/ops/function/random_func.py +790 -73
  471. mindspore/ops/function/sparse_func.py +98 -78
  472. mindspore/ops/function/sparse_unary_func.py +54 -53
  473. mindspore/ops/function/spectral_func.py +27 -24
  474. mindspore/ops/function/vmap_func.py +22 -2
  475. mindspore/ops/functional.py +97 -37
  476. mindspore/ops/op_info_register.py +70 -28
  477. mindspore/ops/operations/__init__.py +47 -14
  478. mindspore/ops/operations/_csr_ops.py +7 -7
  479. mindspore/ops/operations/_embedding_cache_ops.py +5 -5
  480. mindspore/ops/operations/_grad_ops.py +276 -187
  481. mindspore/ops/operations/_inner_ops.py +319 -113
  482. mindspore/ops/operations/_ms_kernel.py +10 -8
  483. mindspore/ops/operations/_ocr_ops.py +9 -9
  484. mindspore/ops/operations/_opaque_predicate_registry.py +4 -0
  485. mindspore/ops/operations/_quant_ops.py +137 -102
  486. mindspore/ops/operations/_rl_inner_ops.py +121 -60
  487. mindspore/ops/operations/_scalar_ops.py +466 -0
  488. mindspore/ops/operations/_sequence_ops.py +1004 -2
  489. mindspore/ops/operations/_tensor_array.py +10 -11
  490. mindspore/ops/operations/_thor_ops.py +1 -1
  491. mindspore/ops/operations/array_ops.py +801 -466
  492. mindspore/ops/operations/comm_ops.py +51 -49
  493. mindspore/ops/operations/control_ops.py +2 -2
  494. mindspore/ops/operations/custom_ops.py +123 -44
  495. mindspore/ops/operations/debug_ops.py +24 -24
  496. mindspore/ops/operations/image_ops.py +240 -153
  497. mindspore/ops/operations/inner_ops.py +34 -50
  498. mindspore/ops/operations/linalg_ops.py +31 -9
  499. mindspore/ops/operations/math_ops.py +988 -757
  500. mindspore/ops/operations/nn_ops.py +965 -819
  501. mindspore/ops/operations/other_ops.py +51 -40
  502. mindspore/ops/operations/random_ops.py +204 -122
  503. mindspore/ops/operations/rl_ops.py +8 -9
  504. mindspore/ops/operations/sparse_ops.py +254 -93
  505. mindspore/ops/operations/spectral_ops.py +35 -3
  506. mindspore/ops/primitive.py +111 -9
  507. mindspore/parallel/_auto_parallel_context.py +189 -83
  508. mindspore/parallel/_offload_context.py +185 -0
  509. mindspore/parallel/_parallel_serialization.py +99 -7
  510. mindspore/parallel/_ps_context.py +9 -5
  511. mindspore/parallel/_recovery_context.py +1 -1
  512. mindspore/parallel/_tensor.py +7 -1
  513. mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
  514. mindspore/{nn/transformer → parallel/_transformer}/layers.py +6 -37
  515. mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
  516. mindspore/{nn/transformer → parallel/_transformer}/moe.py +20 -16
  517. mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
  518. mindspore/{nn/transformer → parallel/_transformer}/transformer.py +48 -111
  519. mindspore/parallel/_utils.py +1 -2
  520. mindspore/parallel/algo_parameter_config.py +1 -1
  521. mindspore/parallel/checkpoint_transform.py +37 -34
  522. mindspore/parallel/shard.py +17 -18
  523. mindspore/profiler/common/validator/validate_path.py +2 -2
  524. mindspore/profiler/envprofiling.py +69 -47
  525. mindspore/profiler/parser/ascend_timeline_generator.py +49 -42
  526. mindspore/profiler/parser/base_timeline_generator.py +49 -56
  527. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +98 -78
  528. mindspore/profiler/parser/hwts_log_parser.py +1 -1
  529. mindspore/profiler/parser/integrator.py +15 -14
  530. mindspore/profiler/parser/minddata_analyzer.py +2 -2
  531. mindspore/profiler/parser/msadvisor_analyzer.py +12 -25
  532. mindspore/profiler/parser/msadvisor_parser.py +2 -4
  533. mindspore/profiler/parser/optime_parser.py +17 -18
  534. mindspore/profiler/parser/profiler_info.py +2 -1
  535. mindspore/profiler/profiling.py +218 -186
  536. mindspore/rewrite/__init__.py +3 -1
  537. mindspore/rewrite/api/node.py +1 -114
  538. mindspore/rewrite/api/node_type.py +3 -0
  539. mindspore/rewrite/api/pattern_engine.py +31 -1
  540. mindspore/rewrite/api/scoped_value.py +4 -4
  541. mindspore/rewrite/api/symbol_tree.py +3 -78
  542. mindspore/rewrite/api/tree_node_helper.py +1 -1
  543. mindspore/rewrite/ast_creator_register.py +1 -0
  544. mindspore/rewrite/ast_helpers/__init__.py +2 -2
  545. mindspore/rewrite/ast_helpers/ast_creator.py +1 -2
  546. mindspore/rewrite/ast_helpers/ast_finder.py +65 -0
  547. mindspore/rewrite/ast_helpers/ast_modifier.py +11 -3
  548. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +18 -2
  549. mindspore/rewrite/namespace.py +0 -2
  550. mindspore/rewrite/node.py +157 -11
  551. mindspore/rewrite/parsers/assign_parser.py +231 -53
  552. mindspore/rewrite/parsers/class_def_parser.py +187 -109
  553. mindspore/rewrite/parsers/for_parser.py +24 -14
  554. mindspore/rewrite/parsers/function_def_parser.py +21 -4
  555. mindspore/rewrite/parsers/if_parser.py +6 -2
  556. mindspore/rewrite/sparsify/__init__.py +0 -0
  557. mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
  558. mindspore/rewrite/sparsify/sparsify.py +109 -0
  559. mindspore/rewrite/sparsify/utils.py +173 -0
  560. mindspore/rewrite/symbol_tree.py +256 -133
  561. mindspore/rewrite/symbol_tree_builder.py +38 -1
  562. mindspore/run_check/_check_version.py +69 -63
  563. mindspore/run_check/run_check.py +2 -1
  564. mindspore/tinyxml2.dll +0 -0
  565. mindspore/train/__init__.py +1 -1
  566. mindspore/train/_utils.py +28 -5
  567. mindspore/train/amp.py +273 -102
  568. mindspore/train/callback/_backup_and_restore.py +5 -5
  569. mindspore/train/callback/_callback.py +2 -2
  570. mindspore/train/callback/_checkpoint.py +3 -3
  571. mindspore/train/callback/_early_stop.py +3 -3
  572. mindspore/train/callback/_lambda_callback.py +2 -2
  573. mindspore/train/callback/_landscape.py +29 -31
  574. mindspore/train/callback/_loss_monitor.py +3 -3
  575. mindspore/train/callback/_on_request_exit.py +3 -3
  576. mindspore/train/callback/_reduce_lr_on_plateau.py +4 -4
  577. mindspore/train/callback/_summary_collector.py +23 -16
  578. mindspore/train/callback/_time_monitor.py +3 -3
  579. mindspore/train/checkpoint_pb2.py +68 -8
  580. mindspore/train/data_sink.py +15 -3
  581. mindspore/train/dataset_helper.py +10 -15
  582. mindspore/train/loss_scale_manager.py +8 -11
  583. mindspore/train/metrics/__init__.py +1 -1
  584. mindspore/train/metrics/bleu_score.py +1 -1
  585. mindspore/train/metrics/confusion_matrix.py +1 -1
  586. mindspore/train/metrics/cosine_similarity.py +1 -1
  587. mindspore/train/metrics/dice.py +2 -2
  588. mindspore/train/metrics/fbeta.py +1 -1
  589. mindspore/train/metrics/hausdorff_distance.py +4 -3
  590. mindspore/train/metrics/mean_surface_distance.py +2 -2
  591. mindspore/train/metrics/occlusion_sensitivity.py +1 -1
  592. mindspore/train/metrics/perplexity.py +1 -1
  593. mindspore/train/metrics/precision.py +1 -1
  594. mindspore/train/metrics/recall.py +1 -1
  595. mindspore/train/metrics/roc.py +2 -2
  596. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  597. mindspore/train/mind_ir_pb2.py +116 -37
  598. mindspore/train/model.py +45 -28
  599. mindspore/train/serialization.py +295 -188
  600. mindspore/train/summary/_summary_adapter.py +1 -1
  601. mindspore/train/summary/summary_record.py +43 -13
  602. mindspore/train/train_thor/convert_utils.py +2 -2
  603. mindspore/train/train_thor/dataset_helper.py +3 -3
  604. mindspore/turbojpeg.dll +0 -0
  605. mindspore/version.py +1 -1
  606. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +3 -2
  607. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +610 -541
  608. mindspore/compression/__init__.py +0 -19
  609. mindspore/compression/common/constant.py +0 -124
  610. mindspore/compression/export/__init__.py +0 -19
  611. mindspore/compression/export/quant_export.py +0 -515
  612. mindspore/compression/quant/__init__.py +0 -28
  613. mindspore/compression/quant/qat.py +0 -634
  614. mindspore/compression/quant/quant_utils.py +0 -462
  615. mindspore/compression/quant/quantizer.py +0 -68
  616. mindspore/nn/layer/quant.py +0 -1868
  617. mindspore/nn/layer/rnn_utils.py +0 -90
  618. mindspore/nn/probability/dpn/__init__.py +0 -22
  619. mindspore/nn/probability/dpn/vae/__init__.py +0 -25
  620. mindspore/nn/probability/dpn/vae/cvae.py +0 -140
  621. mindspore/nn/probability/dpn/vae/vae.py +0 -124
  622. mindspore/nn/probability/infer/__init__.py +0 -22
  623. mindspore/nn/probability/infer/variational/elbo.py +0 -70
  624. mindspore/nn/probability/infer/variational/svi.py +0 -84
  625. mindspore/nn/probability/toolbox/__init__.py +0 -22
  626. mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
  627. mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -364
  628. mindspore/nn/probability/transforms/__init__.py +0 -22
  629. mindspore/nn/probability/transforms/transform_bnn.py +0 -262
  630. mindspore/nn/probability/zhusuan/__init__.py +0 -18
  631. mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
  632. mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
  633. mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
  634. mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
  635. mindspore/ops/_op_impl/aicpu/parallel_concat.py +0 -42
  636. mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
  637. mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -19
  638. mindspore/ops/bprop_mindir/Cast_bprop.mindir +0 -19
  639. mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -19
  640. mindspore/ops/bprop_mindir/MatMul_bprop.mindir +0 -0
  641. mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -17
  642. mindspore/ops/bprop_mindir/Transpose_bprop.mindir +0 -0
  643. mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -15
  644. mindspore/ops/composite/array_ops.py +0 -241
  645. mindspore/ops/composite/clip_ops.py +0 -134
  646. mindspore/ops/composite/random_ops.py +0 -426
  647. mindspore/ops/composite/vmap_ops.py +0 -38
  648. mindspore/parallel/nn/__init__.py +0 -42
  649. mindspore/parallel/nn/loss.py +0 -22
  650. mindspore/parallel/nn/moe.py +0 -21
  651. mindspore/parallel/nn/op_parallel_config.py +0 -22
  652. mindspore/parallel/nn/transformer.py +0 -31
  653. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
  654. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
  655. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
mindspore/context.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright 2020-2022 Huawei Technologies Co., Ltd
+ # Copyright 2020-2022 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -27,15 +27,17 @@ from types import FunctionType

  from mindspore import log as logger
  from mindspore._c_expression import MSContext, ms_ctx_param
- from mindspore._checkparam import args_type_check, Validator, args_unreset_check
+ from mindspore import _checkparam as Validator
+ from mindspore._checkparam import args_type_check
  from mindspore.parallel._auto_parallel_context import _set_auto_parallel_context, _get_auto_parallel_context, \
  _reset_auto_parallel_context
  from mindspore.parallel._ps_context import _set_ps_context, _get_ps_context, _reset_ps_context, \
  _need_reset_device_target_for_ps
+ from mindspore.parallel._offload_context import _set_offload_context, _get_offload_context

  __all__ = ['GRAPH_MODE', 'PYNATIVE_MODE', 'set_context', 'get_context', 'set_auto_parallel_context',
  'get_auto_parallel_context', 'reset_auto_parallel_context', 'ParallelMode', 'set_ps_context',
- 'get_ps_context', 'reset_ps_context']
+ 'get_ps_context', 'reset_ps_context', 'set_offload_context', 'get_offload_context']

  GRAPH_MODE = 0
  PYNATIVE_MODE = 1
@@ -231,6 +233,51 @@ class _Context:
  else:
  self.set_param(ms_ctx_param.memory_offload, False)

+ def set_deterministic(self, deterministic):
+ """
+ Enable model run in deterministic, and support the values "ON" and "OFF".
+
+ Args:
+ deterministic (str): "ON", "OFF"
+ """
+ deterministic_options = ["ON", "OFF"]
+ if deterministic not in deterministic_options:
+ raise ValueError(f"For 'context.set_context', the argument 'deterministic' must be one of "
+ f"{deterministic_options}, but got {deterministic}.")
+ self.set_param(ms_ctx_param.deterministic, deterministic)
+
+ def set_ascend_config(self, ascend_config):
+ """
+ Enable ascend config.
+
+ Args:
+ ascend_config (dict): 'precision_mode'
+ - precision_mode (str): "force_fp16", "allow_fp32_to_fp16", "allow_mix_precision",
+ "must_keep_origin_dtype", "force_fp32", "force_lowerprecision", "allow_fp32_to_bf16",
+ "allow_fp32_to_lowprecision", "allow_mix_precision_fp16" and "allow_mix_precision_bf16".
+ """
+
+ ascend_cfgs = {'precision_mode': ["force_fp16", "allow_fp32_to_fp16", "allow_mix_precision",
+ "must_keep_origin_dtype", "force_fp32", "force_lowerprecision",
+ "allow_fp32_to_bf16", "allow_fp32_to_lowprecision",
+ "allow_mix_precision_fp16", "allow_mix_precision_bf16"],
+ 'jit_compile': [True, False]}
+ for ascend_key in ascend_config:
+ if ascend_key not in ascend_cfgs:
+ raise ValueError(f"For 'context.set_context', the key of argument 'ascend_config' must be one of "
+ f"{ascend_cfgs}, but got {ascend_key}.")
+ supported_modes = ascend_cfgs.get(ascend_key)
+ if ascend_config[ascend_key] not in supported_modes:
+ raise ValueError(f"For 'ascend_config', the value of argument {ascend_key} must be one of "
+ f"{supported_modes}, but got {ascend_config[ascend_key]}.")
+ if ascend_key == 'precision_mode':
+ self.set_param(ms_ctx_param.precision_mode, ascend_config[ascend_key])
+ if ascend_key == 'jit_compile':
+ if ascend_config[ascend_key] is True:
+ self.set_param(ms_ctx_param.jit_compile, "1")
+ else:
+ self.set_param(ms_ctx_param.jit_compile, "0")
+
  def set_backend_policy(self, policy):
  success = self._context_handle.set_backend_policy(policy)
  if not success:
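
The two setters added in this hunk are reached through the public `mindspore.set_context` entry point, which later hunks register under the 'deterministic' and 'ascend_config' keys. A minimal usage sketch, assuming an Ascend device target (illustrative, not taken from the diff):

    import mindspore as ms

    # Sketch only: set_context() dispatches these keys to the internal
    # set_deterministic / set_ascend_config methods shown above.
    ms.set_context(device_target="Ascend")
    ms.set_context(deterministic="ON")  # accepted values: "ON", "OFF"
    ms.set_context(ascend_config={"precision_mode": "allow_mix_precision",
                                  "jit_compile": True})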
@@ -368,8 +415,8 @@ class _Context:
  json.load(f)
  except (TypeError, ValueError) as exo:
  raise ValueError(str(exo) + "\nFor 'context.set_context', open or load the 'env_config_path' file {} "
- "failed, please check whether 'env_config_path' is json file and correct, or may not "
- "have permission to read it.".format(env_config_path))
+ "failed, please check whether 'env_config_path' is json file and correct, "
+ "or may not have permission to read it.".format(env_config_path))
  self.set_param(ms_ctx_param.env_config_path, env_config_path)

  def set_runtime_num_threads(self, runtime_num_threads):
@@ -383,6 +430,7 @@ class _Context:
  if op_timeout < 0:
  raise ValueError("The num of op exe timeout must bigger than or equal to 0.")
  self.set_param(ms_ctx_param.op_timeout, op_timeout)
+
  def set_inter_op_parallel_num(self, inter_op_parallel_num):
  """Check and set inter_op_parallel_num."""
  if inter_op_parallel_num < 0:
@@ -406,7 +454,9 @@ class _Context:
  'runtime_num_threads': set_runtime_num_threads,
  'memory_optimize_level': set_memory_optimize_level,
  'op_timeout': set_op_timeout,
- 'memory_offload': set_memory_offload
+ 'memory_offload': set_memory_offload,
+ 'deterministic': set_deterministic,
+ 'ascend_config': set_ascend_config
  }

  @property
@@ -447,7 +497,6 @@ class _Context:
  self._support_binary = support


-
  def _context():
  """
  Get the global _context, if context is not created, create a new one.
@@ -476,7 +525,7 @@ def _context():
  auto_parallel_search_mode=str, search_mode=str, parameter_broadcast=bool, strategy_ckpt_load_file=str,
  strategy_ckpt_save_file=str, full_batch=bool, enable_parallel_optimizer=bool, enable_alltoall=bool,
  all_reduce_fusion_config=list, pipeline_stages=int, grad_accumulation_step=int,
- parallel_optimizer_config=dict, comm_fusion=dict)
+ parallel_optimizer_config=dict, comm_fusion=dict, strategy_ckpt_config=dict)
  def set_auto_parallel_context(**kwargs):
  r"""
  Set auto parallel context, only data parallel supported on CPU.
@@ -504,6 +553,7 @@ def set_auto_parallel_context(**kwargs):
  enable_alltoall grad_accumulation_step
  \ auto_parallel_search_mode
  \ comm_fusion
+ \ strategy_ckpt_config
  =========================== ===========================

  Args:
@@ -542,15 +592,18 @@ def set_auto_parallel_context(**kwargs):
  data_parallel mode, all parameters are broadcast except for the parameter whose attribute
  layerwise_parallel is True. Hybrid_parallel, semi_auto_parallel and auto_parallel mode, the
  segmented parameters do not participate in broadcasting. Default: False.
- strategy_ckpt_load_file (str): The path to load parallel strategy checkpoint. Default: ''
- strategy_ckpt_save_file (str): The path to save parallel strategy checkpoint. Default: ''
+ strategy_ckpt_load_file (str): The path to load parallel strategy checkpoint. The parameter is not to be
+ recommended currently, it is better using 'strategy_ckpt_config' to replace it. Default: ''
+ strategy_ckpt_save_file (str): The path to save parallel strategy checkpoint. The parameter is not to be
+ recommended currently, it is better using 'strategy_ckpt_config' to replace it. Default: ''
  full_batch (bool): If you load whole batch datasets in auto_parallel mode, this parameter
  should be set as True. Default: False. The interface is not to be recommended currently,
  it is better using 'dataset_strategy' to replace it.
  dataset_strategy (Union[str, tuple]): Dataset sharding strategy. Default: "data_parallel".
  dataset_strategy="data_parallel" is equal to full_batch=False, dataset_strategy="full_batch" is
- equal to full_batch=True. For dataset load into net by model parallel strategy likes
- ds_stra ((1, 8), (1, 8)), it requires using set_auto_parallel_context(dataset_strategy=ds_stra).
+ equal to full_batch=True. For execution mode is 'GRAPH_MODE' and dataset load into net by model
+ parallel strategy likes ds_stra ((1, 8), (1, 8)), it requires using
+ set_auto_parallel_context(dataset_strategy=ds_stra).
  enable_parallel_optimizer (bool): This is a developing feature, which shards the weight update computation for
  data parallel training in the benefit of time and memory saving. Currently, auto and semi auto
  parallel mode support all optimizers in both Ascend and GPU. Data parallel mode only supports
@@ -591,6 +644,9 @@ def set_auto_parallel_context(**kwargs):
  communication fusion config has two keys: "mode" and "config".
  It supports following communication fusion types and configurations:

+ - openstate: Whether turn on the communication fusion or not. If `openstate` is `True`, turn on
+ the communication fusion, otherwise, turn off the communication fusion. Default: `True`.
+
  - allreduce: If communication fusion type is `allreduce`. The `mode` contains: `auto`, `size`
  and `index`. In `auto` mode, AllReduce fusion is configured by gradients size and the default
  fusion threshold is `64` MB. In 'size' mode, AllReduce fusion is configured by gradients size
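
As a hedged illustration of the new `openstate` switch documented in the hunk above, combined with the per-type settings already shown in the docstring's examples (values here are illustrative):

    import mindspore as ms

    # Sketch: "openstate" toggles communication fusion as a whole; the
    # allreduce entry keeps the existing mode/config structure.
    comm_config = {"openstate": True,
                   "allreduce": {"mode": "size", "config": 32}}
    ms.set_auto_parallel_context(comm_fusion=comm_config)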
@@ -605,6 +661,24 @@ def set_auto_parallel_context(**kwargs):
  - reducescatter: If communication fusion type is `reducescatter`. The `mode` contains: `auto`
  and `size`. Config is same as `allgather`.

+ strategy_ckpt_config (dict): A dict contains the configurations for setting the parallel strategy file. This
+ interface contains the functions of parameter `strategy_ckpt_load_file` and
+ `strategy_ckpt_save_file`, it is recommonded to use this parameter to replace those two
+ parameters.
+ It contains following configurations:
+
+ - load_file (str): The path to load parallel strategy checkpoint. If the file name extension is
+ `.json`, the file is loaded in JSON format. Otherwise, the file is loaded in ProtoBuf
+ format.
+ Default: ''
+
+ - save_file (str): The path to save parallel strategy checkpoint. If the file name extension is
+ `.json`, the file is saved in JSON format. Otherwise, the file is saved in ProtoBuf format.
+ Default: ''
+
+ - only_trainable_params (bool): Only save/load the strategy information for trainable parameter.
+ Default: True.
+
  Raises:
  ValueError: If input key is not attribute in auto parallel context.

@@ -629,6 +703,8 @@ def set_auto_parallel_context(**kwargs):
  >>> ms.set_auto_parallel_context(parallel_optimizer_config=parallel_config, enable_parallel_optimizer=True)
  >>> config = {"allreduce": {"mode": "size", "config": 32}, "allgather": {"mode": "size", "config": 32}}
  >>> ms.set_auto_parallel_context(comm_fusion=config)
+ >>> stra_ckpt_dict = {"load_file": "./stra0.ckpt", "save_file": "./stra1.ckpt", "only_trainable_params": False}
+ >>> ms.set_auto_parallel_context(strategy_ckpt_config=stra_ckpt_dict)
  """
  _set_auto_parallel_context(**kwargs)
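
Since `load_file` and `save_file` switch to JSON serialization when the extension is `.json`, a hedged variant of the example above (file names are illustrative) would be:

    >>> stra_json_dict = {"load_file": "./stra0.json", "save_file": "./stra1.json", "only_trainable_params": True}
    >>> ms.set_auto_parallel_context(strategy_ckpt_config=stra_json_dict)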

@@ -677,6 +753,46 @@ def reset_auto_parallel_context():
  _reset_auto_parallel_context()


+ @args_type_check(offload_config=dict)
+ def set_offload_context(offload_config):
+ r"""
+ Set offload context.
+ Some configurations are offload specific, see the below table for details:
+
+ Args:
+ offload_config (dict): A dict contains the keys and values for setting the offload context
+ configure.It supports the following keys.
+ enable_offload (bool): The flag of whether enabling offload. Default: False.
+ offload_param (str): The param for offload destination, cpu or disk.
+ offload_path (str): The path of offload.
+ offload_checkpoint (str): The checkpoint for offload destination, cpu or disk.
+ offload_ddr_size (int): The ddr size for offload.
+ offload_disk_size (int): The disk size for offload.
+ enable_aio (bool): The flag of whether enabling aio. Default: True.
+ aio_block_size (int): The size of aio block.
+ aio_queue_depth (int): The depth of aio queue.
+ enable_pinned_mem (bool): The flag of whether enabling pinned memory.
+
+ Raises:
+ ValueError: If input key is not attribute in auto parallel context.
+
+ Examples:
+ >>> from mindspore import context
+ >>> context.set_offload_context(offload_config={"offload_param"="cpu"})
+ """
+ _set_offload_context(offload_config)
+
+
+ def get_offload_context():
+ """
+ Get offload context.
+ Examples:
+ >>> from mindspore import context
+ >>> offload_config = context.get_offload_context()
+ """
+ return _get_offload_context()
+
+
  def _check_target_specific_cfgs(device, arg_key):
  """Checking whether a config is suitable for a specified device"""
  device_cfgs = {
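
A runnable sketch of the offload API introduced in this hunk; note that a Python dict literal uses `key: value` pairs rather than the `{"offload_param"="cpu"}` form in the docstring example above, and the keys follow the documented offload_config table:

    from mindspore import context

    # Sketch: enable offload and send parameters to host (cpu) memory.
    context.set_offload_context(offload_config={"enable_offload": True,
                                                "offload_param": "cpu"})
    print(context.get_offload_context())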
@@ -688,7 +804,8 @@ def _check_target_specific_cfgs(device, arg_key):
  'auto_tune_mode': ['Ascend'],
  'max_device_memory': ['Ascend', 'GPU'],
  'mempool_block_size': ['GPU', 'Ascend'],
- 'disable_format_transform': ['GPU']
+ 'disable_format_transform': ['GPU'],
+ 'ascend_config': ['Ascend']
  }
  # configs not in map device_cfgs are supposed to be suitable for all devices
  if arg_key not in device_cfgs:
@@ -702,8 +819,7 @@ def _check_target_specific_cfgs(device, arg_key):
  return False


- @args_unreset_check(device_id=int, variable_memory_max_size=str, max_device_memory=str, mempool_block_size=str)
- @args_type_check(mode=int, precompile_only=bool, device_target=str, device_id=int, save_graphs=bool,
+ @args_type_check(mode=int, precompile_only=bool, device_target=str, device_id=int, save_graphs=(bool, int),
  save_graphs_path=str, enable_dump=bool, auto_tune_mode=str,
  save_dump_path=str, enable_reduce_precision=bool, variable_memory_max_size=str,
  enable_auto_mixed_precision=bool, inter_op_parallel_num=int,
@@ -711,7 +827,7 @@ def _check_target_specific_cfgs(device, arg_key):
  max_device_memory=str, print_file_path=str, max_call_depth=int, env_config_path=str,
  graph_kernel_flags=str, save_compile_cache=bool, runtime_num_threads=int, load_compile_cache=bool,
  grad_for_scalar=bool, pynative_synchronize=bool, mempool_block_size=str, disable_format_transform=bool,
- op_timeout=int, save_graph_dot=bool)
+ op_timeout=int, deterministic=str, ascend_config=dict)
  def set_context(**kwargs):
  """
  Set context for running environment.
@@ -749,6 +865,8 @@ def set_context(**kwargs):
  | +------------------------------+----------------------------+
  | | save_dump_path | Ascend |
  | +------------------------------+----------------------------+
+ | | deterministic | Ascend |
+ | +------------------------------+----------------------------+
  | | print_file_path | Ascend |
  | +------------------------------+----------------------------+
  | | env_config_path | CPU/GPU/Ascend |
@@ -790,6 +908,8 @@ def set_context(**kwargs):
  | | memory_optimize_level | CPU/GPU/Ascend |
  | +------------------------------+----------------------------+
  | | memory_offload | GPU/Ascend |
+ | +------------------------------+----------------------------+
+ | | ascend_config | Ascend |
  +-------------------------+------------------------------+----------------------------+

  Args:
@@ -806,14 +926,31 @@ def set_context(**kwargs):
  of the available memory of the device and mempool_block_size.
  op_timeout (int): Set the maximum duration of executing an operator in seconds.
  If the execution time exceeds this value, system will terminate the task. 0 means endless wait.
- Default: 0.
- save_graphs (bool): Whether to save graphs. Default: False.
- When the `save_graphs` attribute is set as True, attribute of `save_graphs_path` is used to set the
- intermediate compilation graph storage path. By default, the graphs are saved in the current directory.
+ Default: 1900.
+ save_graphs (bool or int): Whether to save intermediate compilation graphs. Default: 0.
+ Available values are:
+
+ - False or 0: disable saving of intermediate compilation graphs.
+ - 1: some intermediate files will be generated during graph compliation.
+ - True or 2: Generate more ir files related to backend process.
+ - 3: Generate visualization computing graphs and detailed frontend ir graphs.
+
+ When the `save_graphs` attribute is set as True, 1, 2 or 3, attribute of `save_graphs_path` is used
+ to set the intermediate compilation graph storage path. By default, the graphs are saved in the current
+ directory.
  save_graphs_path (str): Path to save graphs. Default: ".".
  If the specified directory does not exist, the system will automatically create the directory.
  During distributed training, graphs will be saved to the directory of
  `save_graphs_path/rank_${rank_id}/`. `rank_id` is the ID of the current device in the cluster.
+ deterministic (str): Whether to enable op run in deterministic mode. The value must be in the
+ range of ['ON', 'OFF'], and the default value is 'OFF'.
+
+ - "ON": Enable operator deterministic running mode.
+ - "OFF": Disable operator deterministic running mode.
+
+ When deterministic mode is on, model ops will be deterministic in Ascend. This means that if op run multiple
+ times with the same inputs on the same hardware, it will have the exact same outputs each time. This is
+ useful for debugging models.
  enable_dump (bool): This parameters is deprecated, and will be deleted in the next version.
  save_dump_path (str): This parameters is deprecated, and will be deleted in the next version.
  print_file_path (str): The path of saving print data. If this parameter is set, print data is saved to
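
A short sketch of the reworked `save_graphs` switch documented in the hunk above (now bool or int, levels 0-3) together with `save_graphs_path`; the path here is illustrative:

    import mindspore as ms

    # Sketch: level 2 behaves like True and also emits backend IR files;
    # during distributed runs graphs land under save_graphs_path/rank_${rank_id}/.
    ms.set_context(save_graphs=2, save_graphs_path="./ir_dump")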
@@ -864,7 +1001,7 @@ def set_context(**kwargs):
  If enable_graph_kernel is set to True, acceleration can be enabled.
  For details of graph kernel fusion, please check
  `Enabling Graph Kernel Fusion
- <https://www.mindspore.cn/tutorials/experts/en/r2.0.0-alpha/debug/graph_fusion_engine.html>`_.
+ <https://www.mindspore.cn/tutorials/experts/en/r2.0/debug/graph_fusion_engine.html>`_.
  graph_kernel_flags (str):
  Optimization options of graph kernel fusion, which take priority when they conflict
  with enable_graph_kernel. Only for experienced users.
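A hedged sketch of enabling the two knobs above; the flag string is only an example of the option format and should be checked against the graph kernel documentation:

>>> import mindspore as ms
>>> ms.set_context(enable_graph_kernel=True)
>>> ms.set_context(graph_kernel_flags="--opt_level=2")  # overrides enable_graph_kernel on conflict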
@@ -899,7 +1036,7 @@ def set_context(**kwargs):

  For more information about enabling the operator tuning tool, please check
  `Enable the operator optimization tool
- <https://www.mindspore.cn/tutorials/experts/en/r2.0.0-alpha/debug/auto_tune.html>`_.
+ <https://www.mindspore.cn/tutorials/experts/en/r2.0/debug/auto_tune.html>`_.
  check_bprop (bool): Whether to check back propagation nodes. The checking ensures that the shape and dtype
  of back propagation node outputs are the same as the input parameters. Default: False.
  max_call_depth (int): Specify the maximum depth of function call. Must be a positive integer. Default: 1000.
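If graph compilation of a deeply nested network fails because of the call-depth limit, raising it is a one-line change (3000 here is an arbitrary illustrative value):

>>> import mindspore as ms
>>> ms.set_context(max_call_depth=3000)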
@@ -917,7 +1054,7 @@ def set_context(**kwargs):
  the compile cache is loaded. Note that only limited automatic detection of changes to the Python
  scripts is supported by now, which means that there is a correctness risk. Default: False.
  This is an experimental prototype that is subject to change and/or deletion.
- compile_cache_path (str): Path to save the cache of the graph compiled by front-end. Default: ".".
+ compile_cache_path (str): Path to save the compile cache. Default: ".".
  If the specified directory does not exist, the system will automatically create the directory.
  The cache will be saved to the directory of `compile_cache_path/rank_${rank_id}/`. The `rank_id` is
  the ID of the current device in the cluster.
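A minimal sketch of reusing the frontend compile cache across runs, assuming the companion switch `enable_compile_cache` described in the surrounding docstring (the path is illustrative):

>>> import mindspore as ms
>>> ms.set_context(enable_compile_cache=True, compile_cache_path="./compile_cache")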
@@ -946,6 +1083,36 @@ def set_context(**kwargs):
  when the environment variable "GRAPH_OP_RUN=1" is not set. This parameter does not take effect when
  memory_optimize_level is set to 'O1'.
  - OFF: Turn off the memory offload function.
+ ascend_config (dict): Set the parameters specific to the Ascend hardware platform. It is not set by default.
+ Currently, only `precision_mode` and `jit_compile` can be configured, and only on the Ascend910B hardware
+ platform. The default values of `precision_mode` and `jit_compile` are experimental and may change
+ in the future.
+
+ - precision_mode (str): Mixed precision mode setting. On the Ascend910B hardware platform, the default
+ value for training networks is taken from CANN, and the default value for inference networks
+ is force_fp16. The value range is as follows:
+
+ - force_fp16: When the operator supports both float16 and float32, select float16 directly.
+ - allow_fp32_to_fp16: When the operator does not support the float32 data type, directly reduce
+ the precision to float16.
+ - allow_mix_precision: Automatic mixed precision. For operators of the whole network, automatically
+ reduces the precision of some operators to float16 or bfloat16 according to the built-in
+ optimization strategy.
+ - must_keep_origin_dtype: Keep the precision of the original graph.
+ - force_fp32: When the input of a matrix calculation operator is float16 and the output supports both
+ float16 and float32, the output is forced to float32.
+ - force_lowerprecision: When the operator supports float16 or bfloat16 as well as float32, select
+ float16 or bfloat16 directly.
+ - allow_fp32_to_bf16: When the operator does not support the float32 data type, directly reduce
+ the precision to bfloat16.
+ - allow_fp32_to_lowprecision: When the operator does not support the float32 data type, directly
+ reduce the precision to float16 or bfloat16.
+ - allow_mix_precision_fp16: Automatic mixed precision. For operators of the whole network, automatically
+ reduces the precision of some operators to float16 according to the built-in optimization strategy.
+ - allow_mix_precision_bf16: Automatic mixed precision. For operators of the whole network, automatically
+ reduces the precision of some operators to bfloat16 according to the built-in optimization strategy.
+
+ - jit_compile (bool): Whether to select online compilation. The default value is taken from CANN.
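Besides the force_fp16 example shown later in this docstring, a training-oriented sketch might only request automatic mixed precision and leave `jit_compile` at its CANN default (illustrative, Ascend910B only):

>>> import mindspore as ms
>>> ms.set_context(device_target="Ascend")
>>> ms.set_context(ascend_config={"precision_mode": "allow_mix_precision"})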
 
  Raises:
  ValueError: If input key is not an attribute in context.
@@ -977,6 +1144,8 @@ def set_context(**kwargs):
  >>> ms.set_context(disable_format_transform=True)
  >>> ms.set_context(memory_optimize_level='O0')
  >>> ms.set_context(memory_offload='ON')
+ >>> ms.set_context(deterministic='ON')
+ >>> ms.set_context(ascend_config={"precision_mode": "force_fp16", "jit_compile": True})
  """
  ctx = _context()
  # set device target first
@@ -992,6 +1161,15 @@ def set_context(**kwargs):
              logger.warning(f"For 'context.set_context', '{key}' parameter is deprecated. "
                             "For details, please see the interface parameter API comments")
              continue
+         if key in ('precision_mode', 'jit_compile'):
+             raise ValueError(f"Please set '{key}' through parameter ascend_config")
+         if key == 'save_graphs':
+             if value is True:
+                 value = 2
+             if value is False:
+                 value = 0
+             if value > 3:
+                 raise ValueError(f"value for save_graphs should be 0-3 but got '{value}'")
          if not _check_target_specific_cfgs(device, key):
              continue
          if hasattr(ctx, key):
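The net effect of the checks added in this hunk, sketched in terms of user-facing calls:

>>> import mindspore as ms
>>> ms.set_context(save_graphs=True)   # normalized to level 2 by the code above
>>> ms.set_context(save_graphs=False)  # normalized to level 0
>>> # ms.set_context(save_graphs=4) would raise ValueError (only 0-3 are accepted), and
>>> # ms.set_context(precision_mode="force_fp16") now directs you to ascend_config instead.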
@@ -21,7 +21,7 @@ Besides, this module provides APIs to sample data while loading.

  We can enable cache in most of the datasets with their keyword argument 'cache'. Please notice that cache is not supported
  on Windows platform yet. Do not use it while loading and processing data on Windows. More introductions and limitations
- can be found in `Single-Node Tensor Cache <https://www.mindspore.cn/tutorials/experts/en/r2.0.0-alpha/dataset/cache.html>`_ .
+ can be found in `Single-Node Tensor Cache <https://www.mindspore.cn/tutorials/experts/en/r2.0/dataset/cache.html>`_ .
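A loosely sketched illustration of the 'cache' argument; it assumes a cache server is already running and a session was created with the `cache_admin` tool, so the session id and dataset path below are placeholders:

>>> import mindspore.dataset as ds
>>> some_cache = ds.DatasetCache(session_id=1234, size=0)  # size=0 means no explicit memory cap
>>> dataset = ds.ImageFolderDataset("/path/to/imagefolder_directory", cache=some_cache)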
  Common imported modules in corresponding API examples are as follows:

@@ -55,11 +55,11 @@ The specific steps are as follows:
  - Dataset operation: The user uses the dataset object method `.shuffle` / `.filter` / `.skip` / `.split` /
  `.take` / ... to further shuffle, filter, skip, and obtain the maximum number of samples of datasets;
  - Dataset sample transform operation: The user can add data transform operations
- ( `vision transform <https://mindspore.cn/docs/en/r2.0.0-alpha/api_python/mindspore.\
+ ( `vision transform <https://mindspore.cn/docs/en/r2.0/api_python/mindspore.\
  dataset.transforms.html#module-mindspore.dataset.vision>`_ ,
- `NLP transform <https://mindspore.cn/docs/en/r2.0.0-alpha/api_python/mindspore.\
+ `NLP transform <https://mindspore.cn/docs/en/r2.0/api_python/mindspore.\
  dataset.transforms.html#module-mindspore.dataset.text>`_ ,
- `audio transform <https://mindspore.cn/docs/en/r2.0.0-alpha/api_python/mindspore.\
+ `audio transform <https://mindspore.cn/docs/en/r2.0/api_python/mindspore.\
  dataset.transforms.html#module-mindspore.dataset.audio>`_ ) to the map
  operation to perform transformations. During data preprocessing, multiple map operations can be defined to
  perform different transform operations to different fields. The data transform operation can also be a
@@ -70,8 +70,7 @@ The specific steps are as follows:
  iterator, which can output the preprocessed data cyclically.

  The data processing pipeline example is as follows. Please refer to
- `datasets_example.py <https://gitee.com/mindspore/mindspore/tree/r2.0.0-alpha/docs/api/api_python_en
- /datasets_example.py>`_
+ `datasets_example.py <https://gitee.com/mindspore/mindspore/tree/r2.0/docs/api/api_python_en/datasets_example.py>`_
  for complete example.

  .. code-block::
@@ -122,6 +121,7 @@ from .engine.datasets import *
  from .engine.graphdata import GraphData, SamplingStrategy, OutputFormat
  from .engine.samplers import *
  from .engine.serializer_deserializer import compare, deserialize, serialize, show
+ from .utils.line_reader import LineReader

  __all__ = []
  __all__.extend(engine.__all__)
@@ -40,7 +40,7 @@ Descriptions of common data processing terms are as follows:
  The data transform operation can be executed in the data processing pipeline or in the eager mode:

  - Pipeline mode is generally used to process datasets. For examples, please refer to
- `introduction to data processing pipeline <https://www.mindspore.cn/docs/en/r2.0.0-alpha/api_python/
+ `introduction to data processing pipeline <https://www.mindspore.cn/docs/en/r2.0/api_python/
  mindspore.dataset.html#introduction-to-data-processing-pipeline>`_ .
  - Eager mode is generally used for scattered samples. Examples of audio preprocessing are as follows:
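The eager-mode example referenced above sits outside this hunk; a minimal sketch of the idea, assuming a mono waveform held in a NumPy array, could look like:

>>> import numpy as np
>>> import mindspore.dataset.audio as audio
>>> waveform = np.random.randn(16000).astype(np.float32)   # hypothetical 1-second clip at 16 kHz
>>> louder = audio.Gain(gain_db=6.0)(waveform)              # transforms are directly callable in eager mode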
 
@@ -65,12 +65,12 @@ from __future__ import absolute_import
  from mindspore.dataset.audio import transforms
  from mindspore.dataset.audio import utils
  from mindspore.dataset.audio.transforms import AllpassBiquad, AmplitudeToDB, Angle, BandBiquad, \
-     BandpassBiquad, BandrejectBiquad, BassBiquad, Biquad, \
-     ComplexNorm, ComputeDeltas, Contrast, DBToAmplitude, DCShift, DeemphBiquad, DetectPitchFrequency, Dither, \
-     EqualizerBiquad, Fade, Flanger, FrequencyMasking, Gain, GriffinLim, HighpassBiquad, InverseMelScale, LFilter, \
-     LowpassBiquad, Magphase, MaskAlongAxis, MaskAlongAxisIID, MelScale, MuLawDecoding, MuLawEncoding, Overdrive, \
-     Phaser, PhaseVocoder, Resample, RiaaBiquad, SlidingWindowCmn, SpectralCentroid, Spectrogram, TimeMasking, \
-     TimeStretch, TrebleBiquad, Vad, Vol
+     BandpassBiquad, BandrejectBiquad, BassBiquad, Biquad, ComplexNorm, ComputeDeltas, Contrast, DBToAmplitude, \
+     DCShift, DeemphBiquad, DetectPitchFrequency, Dither, EqualizerBiquad, Fade, Filtfilt, Flanger, FrequencyMasking, \
+     Gain, GriffinLim, HighpassBiquad, InverseMelScale, InverseSpectrogram, LFCC, LFilter, LowpassBiquad, Magphase, \
+     MaskAlongAxis, MaskAlongAxisIID, MelScale, MelSpectrogram, MFCC, MuLawDecoding, MuLawEncoding, Overdrive, \
+     Phaser, PhaseVocoder, PitchShift, Resample, RiaaBiquad, SlidingWindowCmn, SpectralCentroid, Spectrogram, \
+     TimeMasking, TimeStretch, TrebleBiquad, Vad, Vol
  from mindspore.dataset.audio.utils import BorderType, DensityFunction, FadeShape, GainType, Interpolation, \
      MelType, Modulation, NormMode, NormType, ResampleMethod, ScaleType, WindowType, create_dct, linear_fbanks, \
      melscale_fbanks