mindspore 2.1.0-cp38-none-any.whl → 2.2.0-cp38-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (539)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +4 -1
  3. mindspore/_akg/akg/build_module.py +5 -6
  4. mindspore/_akg/akg/composite/build_module.py +49 -16
  5. mindspore/_akg/akg/composite/split_stitch.py +10 -11
  6. mindspore/_akg/akg/ms/info_version_adapt.py +67 -1
  7. mindspore/_akg/akg/tvm/api.py +4 -3
  8. mindspore/_akg/akg/tvm/autotvm/__init__.py +1 -2
  9. mindspore/_akg/akg/tvm/autotvm/graph_tuner/base_graph_tuner.py +1 -5
  10. mindspore/_akg/akg/tvm/autotvm/measure/__init__.py +1 -1
  11. mindspore/_akg/akg/tvm/autotvm/measure/measure.py +1 -10
  12. mindspore/_akg/akg/tvm/autotvm/measure/measure_methods.py +1 -372
  13. mindspore/_akg/akg/tvm/build_module.py +16 -1
  14. mindspore/_akg/akg/tvm/contrib/graph_runtime.py +0 -53
  15. mindspore/_akg/akg/tvm/hybrid/parser.py +7 -6
  16. mindspore/_akg/akg/tvm/ir_builder.py +1 -1
  17. mindspore/_akg/akg/tvm/module.py +1 -2
  18. mindspore/_akg/akg/tvm/stmt.py +2 -2
  19. mindspore/_akg/akg/utils/composite_op_helper.py +9 -10
  20. mindspore/_akg/akg/utils/kernel_exec.py +58 -260
  21. mindspore/_akg/akg/utils/result_analysis.py +4 -24
  22. mindspore/_akg/akg/utils/tbe_codegen_utils.py +198 -0
  23. mindspore/_c_dataengine.cpython-38-aarch64-linux-gnu.so +0 -0
  24. mindspore/_c_expression.cpython-38-aarch64-linux-gnu.so +0 -0
  25. mindspore/_c_mindrecord.cpython-38-aarch64-linux-gnu.so +0 -0
  26. mindspore/_check_jit_forbidden_api.py +3 -1
  27. mindspore/_checkparam.py +26 -32
  28. mindspore/_extends/graph_kernel/__init__.py +0 -1
  29. mindspore/_extends/graph_kernel/model/model_builder.py +9 -50
  30. mindspore/_extends/graph_kernel/splitter.py +1 -9
  31. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +122 -15
  32. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +2 -2
  33. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +4 -2
  34. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +2 -2
  35. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +4 -4
  36. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +1 -1
  37. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -1
  38. mindspore/_extends/parse/__init__.py +12 -15
  39. mindspore/_extends/parse/namespace.py +7 -33
  40. mindspore/_extends/parse/parser.py +61 -71
  41. mindspore/_extends/parse/resources.py +1 -1
  42. mindspore/_extends/parse/standard_method.py +72 -95
  43. mindspore/_extends/parse/trope.py +1 -1
  44. mindspore/_extends/remote/kernel_build_server.py +24 -7
  45. mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
  46. mindspore/_install_custom.py +43 -0
  47. mindspore/_mindspore_offline_debug.cpython-38-aarch64-linux-gnu.so +0 -0
  48. mindspore/amp.py +47 -11
  49. mindspore/bin/cache_admin +0 -0
  50. mindspore/bin/cache_server +0 -0
  51. mindspore/boost/boost.py +1 -8
  52. mindspore/boost/boost_cell_wrapper.py +3 -2
  53. mindspore/boost/grad_accumulation.py +1 -1
  54. mindspore/boost/group_loss_scale_manager.py +8 -7
  55. mindspore/common/__init__.py +5 -3
  56. mindspore/common/_jit_fallback_utils.py +6 -0
  57. mindspore/common/_register_for_adapter.py +2 -0
  58. mindspore/common/_register_for_tensor.py +2 -2
  59. mindspore/common/_stub_tensor.py +13 -0
  60. mindspore/common/_utils.py +13 -0
  61. mindspore/common/api.py +173 -258
  62. mindspore/common/auto_dynamic_shape.py +498 -0
  63. mindspore/common/dtype.py +18 -11
  64. mindspore/common/dump.py +6 -4
  65. mindspore/common/initializer.py +14 -14
  66. mindspore/common/jit_config.py +33 -15
  67. mindspore/common/lazy_inline.py +126 -7
  68. mindspore/common/mindir_util.py +101 -0
  69. mindspore/common/parameter.py +51 -41
  70. mindspore/common/seed.py +4 -4
  71. mindspore/common/sparse_tensor.py +13 -14
  72. mindspore/common/tensor.py +240 -145
  73. mindspore/communication/__init__.py +7 -4
  74. mindspore/communication/_comm_helper.py +83 -4
  75. mindspore/communication/management.py +152 -84
  76. mindspore/config/op_info.config +13 -2
  77. mindspore/config/super_bar_config.json +4 -2
  78. mindspore/context.py +143 -59
  79. mindspore/dataset/__init__.py +5 -5
  80. mindspore/dataset/audio/__init__.py +2 -2
  81. mindspore/dataset/audio/transforms.py +52 -52
  82. mindspore/dataset/callback/ds_callback.py +16 -2
  83. mindspore/dataset/core/config.py +68 -51
  84. mindspore/dataset/engine/cache_client.py +28 -5
  85. mindspore/dataset/engine/datasets.py +250 -112
  86. mindspore/dataset/engine/datasets_audio.py +43 -211
  87. mindspore/dataset/engine/datasets_standard_format.py +11 -35
  88. mindspore/dataset/engine/datasets_text.py +43 -67
  89. mindspore/dataset/engine/datasets_user_defined.py +86 -100
  90. mindspore/dataset/engine/datasets_vision.py +219 -1029
  91. mindspore/dataset/engine/iterators.py +11 -4
  92. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +4 -0
  93. mindspore/dataset/engine/obs/util.py +3 -0
  94. mindspore/dataset/engine/samplers.py +1 -1
  95. mindspore/dataset/engine/validators.py +19 -5
  96. mindspore/dataset/text/__init__.py +3 -3
  97. mindspore/dataset/text/transforms.py +101 -127
  98. mindspore/dataset/text/utils.py +205 -138
  99. mindspore/dataset/transforms/__init__.py +1 -1
  100. mindspore/dataset/transforms/py_transforms_util.py +40 -12
  101. mindspore/dataset/transforms/transforms.py +95 -40
  102. mindspore/dataset/utils/browse_dataset.py +8 -2
  103. mindspore/dataset/utils/line_reader.py +17 -19
  104. mindspore/dataset/vision/__init__.py +3 -3
  105. mindspore/dataset/vision/c_transforms.py +6 -3
  106. mindspore/dataset/vision/transforms.py +409 -287
  107. mindspore/dataset/vision/utils.py +13 -14
  108. mindspore/dataset/vision/validators.py +11 -1
  109. mindspore/experimental/map_parameter.py +14 -0
  110. mindspore/{nn/optim_ex → experimental/optim}/__init__.py +30 -29
  111. mindspore/{nn/optim_ex → experimental/optim}/adam.py +59 -66
  112. mindspore/{nn/optim_ex → experimental/optim}/adamw.py +181 -203
  113. mindspore/experimental/optim/lr_scheduler.py +1427 -0
  114. mindspore/{nn/optim_ex → experimental/optim}/optimizer.py +252 -259
  115. mindspore/{nn/optim_ex → experimental/optim}/sgd.py +147 -152
  116. mindspore/gen_ops.py +273 -0
  117. mindspore/include/OWNERS +0 -1
  118. mindspore/include/api/data_type.h +2 -1
  119. mindspore/include/api/graph.h +0 -15
  120. mindspore/include/api/kernel.h +2 -0
  121. mindspore/include/api/kernel_api.h +37 -12
  122. mindspore/include/api/model.h +0 -14
  123. mindspore/include/api/types.h +37 -4
  124. mindspore/include/c_api/ms/abstract.h +67 -0
  125. mindspore/include/c_api/ms/attribute.h +197 -0
  126. mindspore/include/c_api/ms/base/handle_types.h +43 -0
  127. mindspore/include/c_api/ms/base/macros.h +32 -0
  128. mindspore/include/c_api/ms/base/status.h +33 -0
  129. mindspore/include/c_api/ms/base/types.h +282 -0
  130. mindspore/include/c_api/ms/context.h +102 -0
  131. mindspore/include/c_api/ms/graph.h +160 -0
  132. mindspore/include/c_api/ms/node.h +606 -0
  133. mindspore/include/c_api/ms/tensor.h +161 -0
  134. mindspore/include/c_api/ms/value.h +84 -0
  135. mindspore/include/dataset/constants.h +6 -5
  136. mindspore/include/dataset/execute.h +23 -13
  137. mindspore/include/dataset/text.h +26 -26
  138. mindspore/include/dataset/transforms.h +13 -13
  139. mindspore/include/dataset/vision.h +60 -60
  140. mindspore/include/dataset/vision_ascend.h +5 -6
  141. mindspore/include/dataset/vision_lite.h +17 -17
  142. mindspore/include/mindapi/base/type_id.h +1 -0
  143. mindspore/include/mindapi/base/types.h +1 -0
  144. mindspore/lib/libdnnl.so.2 +0 -0
  145. mindspore/lib/libjemalloc.so.2 +0 -0
  146. mindspore/lib/libmindspore.so +0 -0
  147. mindspore/lib/libmindspore_backend.so +0 -0
  148. mindspore/lib/libmindspore_common.so +0 -0
  149. mindspore/lib/libmindspore_core.so +0 -0
  150. mindspore/lib/libmindspore_glog.so.0 +0 -0
  151. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  152. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  153. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  154. mindspore/lib/libmindspore_shared_lib.so +0 -0
  155. mindspore/lib/libnnacl.so +0 -0
  156. mindspore/lib/libopencv_core.so.4.5 +0 -0
  157. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  158. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  159. mindspore/lib/libps_cache.so +0 -0
  160. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
  161. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  162. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +9000 -0
  163. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  164. mindspore/lib/plugin/ascend/libakg.so +0 -0
  165. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  166. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  167. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  168. mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
  169. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  170. mindspore/lib/plugin/cpu/libakg.so +0 -0
  171. mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
  172. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  173. mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
  174. mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
  175. mindspore/nn/__init__.py +0 -2
  176. mindspore/nn/cell.py +316 -74
  177. mindspore/nn/dynamic_lr.py +21 -21
  178. mindspore/nn/layer/activation.py +21 -28
  179. mindspore/nn/layer/basic.py +15 -13
  180. mindspore/nn/layer/channel_shuffle.py +1 -1
  181. mindspore/nn/layer/container.py +271 -9
  182. mindspore/nn/layer/conv.py +310 -207
  183. mindspore/nn/layer/dense.py +8 -5
  184. mindspore/nn/layer/embedding.py +33 -27
  185. mindspore/nn/layer/flash_attention.py +82 -41
  186. mindspore/nn/layer/image.py +8 -6
  187. mindspore/nn/layer/math.py +13 -18
  188. mindspore/nn/layer/normalization.py +107 -66
  189. mindspore/nn/layer/padding.py +1 -1
  190. mindspore/nn/layer/pooling.py +131 -109
  191. mindspore/nn/layer/rnn_cells.py +22 -17
  192. mindspore/nn/layer/rnns.py +13 -16
  193. mindspore/nn/layer/thor_layer.py +1 -1
  194. mindspore/nn/layer/transformer.py +221 -154
  195. mindspore/nn/learning_rate_schedule.py +9 -1
  196. mindspore/nn/loss/loss.py +235 -174
  197. mindspore/nn/optim/ada_grad.py +2 -1
  198. mindspore/nn/optim/adadelta.py +1 -0
  199. mindspore/nn/optim/adafactor.py +2 -1
  200. mindspore/nn/optim/adam.py +7 -4
  201. mindspore/nn/optim/adamax.py +3 -2
  202. mindspore/nn/optim/adasum.py +2 -2
  203. mindspore/nn/optim/asgd.py +2 -3
  204. mindspore/nn/optim/ftrl.py +6 -5
  205. mindspore/nn/optim/lamb.py +7 -4
  206. mindspore/nn/optim/lars.py +1 -1
  207. mindspore/nn/optim/lazyadam.py +5 -3
  208. mindspore/nn/optim/momentum.py +2 -1
  209. mindspore/nn/optim/optimizer.py +53 -4
  210. mindspore/nn/optim/proximal_ada_grad.py +3 -4
  211. mindspore/nn/optim/rmsprop.py +4 -3
  212. mindspore/nn/optim/rprop.py +23 -12
  213. mindspore/nn/optim/sgd.py +26 -11
  214. mindspore/nn/optim/thor.py +9 -7
  215. mindspore/nn/probability/bijector/bijector.py +5 -5
  216. mindspore/nn/probability/bijector/power_transform.py +27 -27
  217. mindspore/nn/probability/bijector/softplus.py +3 -3
  218. mindspore/nn/probability/distribution/_utils/custom_ops.py +3 -3
  219. mindspore/nn/probability/distribution/bernoulli.py +5 -5
  220. mindspore/nn/probability/distribution/beta.py +3 -3
  221. mindspore/nn/probability/distribution/categorical.py +7 -7
  222. mindspore/nn/probability/distribution/cauchy.py +0 -1
  223. mindspore/nn/probability/distribution/distribution.py +3 -3
  224. mindspore/nn/probability/distribution/gamma.py +3 -3
  225. mindspore/nn/probability/distribution/geometric.py +4 -4
  226. mindspore/nn/probability/distribution/gumbel.py +4 -4
  227. mindspore/nn/probability/distribution/log_normal.py +2 -2
  228. mindspore/nn/probability/distribution/logistic.py +2 -2
  229. mindspore/nn/probability/distribution/poisson.py +4 -4
  230. mindspore/nn/probability/distribution/transformed_distribution.py +3 -3
  231. mindspore/nn/probability/distribution/uniform.py +6 -6
  232. mindspore/nn/wrap/cell_wrapper.py +78 -34
  233. mindspore/nn/wrap/grad_reducer.py +8 -5
  234. mindspore/nn/wrap/loss_scale.py +105 -42
  235. mindspore/numpy/array_creations.py +1 -2
  236. mindspore/numpy/array_ops.py +3 -2
  237. mindspore/offline_debug/convert_async.py +2 -2
  238. mindspore/ops/_grad_experimental/__init__.py +0 -5
  239. mindspore/ops/_grad_experimental/grad_array_ops.py +1 -2
  240. mindspore/ops/_grad_experimental/grad_comm_ops.py +15 -2
  241. mindspore/ops/_grad_experimental/grad_debug_ops.py +0 -37
  242. mindspore/ops/_grad_experimental/grad_implementations.py +10 -0
  243. mindspore/ops/_grad_experimental/grad_inner_ops.py +2 -216
  244. mindspore/ops/_grad_experimental/grad_math_ops.py +0 -181
  245. mindspore/ops/_grad_experimental/grad_sparse.py +15 -0
  246. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -1
  247. mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +165 -109
  248. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +144 -86
  249. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +172 -187
  250. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +51 -57
  251. mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +6 -17
  252. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +1 -1
  253. mindspore/ops/_op_impl/aicpu/__init__.py +14 -2
  254. mindspore/ops/_op_impl/aicpu/bias_add_grad.py +0 -1
  255. mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
  256. mindspore/ops/_op_impl/aicpu/eps.py +32 -0
  257. mindspore/ops/_op_impl/aicpu/gamma.py +2 -2
  258. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +6 -3
  259. mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +0 -1
  260. mindspore/ops/_op_impl/aicpu/multinomial.py +3 -3
  261. mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +15 -7
  262. mindspore/ops/_op_impl/aicpu/random_categorical.py +39 -19
  263. mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +5 -2
  264. mindspore/ops/_op_impl/aicpu/random_poisson.py +103 -52
  265. mindspore/ops/_op_impl/aicpu/random_shuffle.py +17 -15
  266. mindspore/ops/_op_impl/aicpu/{sparseaddmm.py → sparse_addmm.py} +2 -2
  267. mindspore/ops/_op_impl/aicpu/{sparsesparsemaximum.py → sparse_sparse_maximum.py} +4 -4
  268. mindspore/ops/_op_impl/aicpu/standard_laplace.py +5 -5
  269. mindspore/ops/_op_impl/aicpu/standard_normal.py +5 -5
  270. mindspore/ops/_op_impl/aicpu/truncated_normal.py +9 -7
  271. mindspore/ops/_op_impl/aicpu/uniform.py +5 -3
  272. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +8 -4
  273. mindspore/ops/_op_impl/aicpu/uniform_int.py +5 -5
  274. mindspore/ops/_op_impl/aicpu/uniform_real.py +4 -4
  275. mindspore/ops/_op_impl/tbe/__init__.py +4 -4
  276. mindspore/ops/_op_impl/tbe/inplace_index_add.py +7 -3
  277. mindspore/ops/_op_impl/tbe/trans_data_ds.py +2 -0
  278. mindspore/ops/_primitive_cache.py +1 -1
  279. mindspore/ops/_tracefunc.py +45 -13
  280. mindspore/ops/_utils/utils.py +4 -1
  281. mindspore/ops/_vmap/vmap_array_ops.py +3 -3
  282. mindspore/ops/_vmap/vmap_base.py +3 -3
  283. mindspore/ops/_vmap/vmap_convolution_ops.py +1 -1
  284. mindspore/ops/_vmap/vmap_grad_math_ops.py +6 -4
  285. mindspore/ops/_vmap/vmap_math_ops.py +5 -2
  286. mindspore/ops/_vmap/vmap_nn_ops.py +61 -7
  287. mindspore/ops/arg_dtype_cast.py +54 -0
  288. mindspore/ops/composite/base.py +37 -10
  289. mindspore/ops/composite/math_ops.py +5 -4
  290. mindspore/ops/composite/multitype_ops/_compile_utils.py +273 -72
  291. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +16 -9
  292. mindspore/ops/composite/multitype_ops/add_impl.py +43 -4
  293. mindspore/ops/composite/multitype_ops/getitem_impl.py +40 -2
  294. mindspore/ops/composite/multitype_ops/ones_like_impl.py +6 -0
  295. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  296. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +9 -0
  297. mindspore/ops/deprecated.py +304 -0
  298. mindspore/ops/function/__init__.py +4 -1
  299. mindspore/ops/function/array_func.py +167 -189
  300. mindspore/ops/function/clip_func.py +81 -13
  301. mindspore/ops/function/debug_func.py +1 -1
  302. mindspore/ops/function/grad/grad_func.py +18 -8
  303. mindspore/ops/function/image_func.py +10 -4
  304. mindspore/ops/function/linalg_func.py +5 -5
  305. mindspore/ops/function/math_func.py +575 -386
  306. mindspore/ops/function/nn_func.py +470 -251
  307. mindspore/ops/function/random_func.py +86 -56
  308. mindspore/ops/function/sparse_func.py +1 -1
  309. mindspore/ops/function/sparse_unary_func.py +14 -12
  310. mindspore/ops/function/vmap_func.py +6 -5
  311. mindspore/ops/functional.py +15 -10
  312. mindspore/ops/op_info_register.py +235 -19
  313. mindspore/ops/operations/__init__.py +25 -17
  314. mindspore/ops/operations/_grad_ops.py +52 -7
  315. mindspore/ops/operations/_inner_ops.py +213 -12
  316. mindspore/ops/operations/_quant_ops.py +4 -8
  317. mindspore/ops/operations/_sequence_ops.py +42 -0
  318. mindspore/ops/operations/array_ops.py +64 -280
  319. mindspore/ops/operations/comm_ops.py +105 -57
  320. mindspore/ops/operations/custom_ops.py +10 -3
  321. mindspore/ops/operations/debug_ops.py +8 -4
  322. mindspore/ops/operations/image_ops.py +18 -12
  323. mindspore/ops/operations/math_ops.py +185 -138
  324. mindspore/ops/operations/nn_ops.py +716 -492
  325. mindspore/ops/operations/other_ops.py +0 -22
  326. mindspore/ops/operations/random_ops.py +53 -111
  327. mindspore/ops/operations/sparse_ops.py +3 -1
  328. mindspore/ops/primitive.py +24 -18
  329. mindspore/parallel/_auto_parallel_context.py +68 -8
  330. mindspore/parallel/_cost_model_context.py +2 -2
  331. mindspore/parallel/_offload_context.py +17 -3
  332. mindspore/parallel/_parallel_serialization.py +2 -2
  333. mindspore/parallel/_ps_context.py +12 -0
  334. mindspore/parallel/_tensor.py +14 -12
  335. mindspore/parallel/_transformer/layers.py +5 -3
  336. mindspore/parallel/_transformer/loss.py +1 -0
  337. mindspore/parallel/_transformer/moe.py +2 -2
  338. mindspore/parallel/_transformer/op_parallel_config.py +12 -1
  339. mindspore/parallel/_transformer/transformer.py +23 -3
  340. mindspore/parallel/_utils.py +11 -7
  341. mindspore/parallel/algo_parameter_config.py +85 -5
  342. mindspore/parallel/checkpoint_transform.py +6 -10
  343. mindspore/parallel/shard.py +4 -4
  344. mindspore/profiler/common/struct_type.py +3 -3
  345. mindspore/profiler/common/util.py +3 -2
  346. mindspore/profiler/envprofiling.py +1 -1
  347. mindspore/profiler/parser/aicpu_data_parser.py +5 -3
  348. mindspore/profiler/parser/ascend_flops_generator.py +2 -2
  349. mindspore/profiler/parser/ascend_fpbp_generator.py +1 -1
  350. mindspore/profiler/parser/ascend_hccl_generator.py +17 -12
  351. mindspore/profiler/parser/ascend_msprof_exporter.py +104 -252
  352. mindspore/profiler/parser/ascend_msprof_generator.py +8 -8
  353. mindspore/profiler/parser/ascend_op_generator.py +5 -5
  354. mindspore/profiler/parser/ascend_steptrace_generator.py +6 -4
  355. mindspore/profiler/parser/ascend_timeline_generator.py +9 -6
  356. mindspore/profiler/parser/base_timeline_generator.py +9 -7
  357. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +14 -10
  358. mindspore/profiler/parser/flops_parser.py +15 -11
  359. mindspore/profiler/parser/framework_parser.py +37 -21
  360. mindspore/profiler/parser/hccl_parser.py +16 -12
  361. mindspore/profiler/parser/integrator.py +22 -11
  362. mindspore/profiler/parser/memory_usage_parser.py +2 -2
  363. mindspore/profiler/parser/minddata_analyzer.py +12 -14
  364. mindspore/profiler/parser/minddata_pipeline_parser.py +1 -1
  365. mindspore/profiler/parser/msadvisor_parser.py +8 -4
  366. mindspore/profiler/parser/op_intermediate_parser.py +5 -2
  367. mindspore/profiler/parser/optime_parser.py +1 -1
  368. mindspore/profiler/parser/profiler_info.py +2 -2
  369. mindspore/profiler/parser/step_trace_parser.py +11 -14
  370. mindspore/profiler/profiling.py +139 -71
  371. mindspore/rewrite/api/node.py +102 -19
  372. mindspore/rewrite/api/node_type.py +5 -1
  373. mindspore/rewrite/api/scoped_value.py +9 -17
  374. mindspore/rewrite/api/symbol_tree.py +131 -47
  375. mindspore/rewrite/ast_helpers/__init__.py +2 -1
  376. mindspore/rewrite/ast_helpers/ast_finder.py +129 -0
  377. mindspore/rewrite/ast_helpers/ast_modifier.py +116 -104
  378. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +93 -46
  379. mindspore/rewrite/common/rewrite_elog.py +5 -1
  380. mindspore/rewrite/namer.py +33 -24
  381. mindspore/rewrite/namespace.py +14 -5
  382. mindspore/{_extends/graph_kernel/expanders/complex → rewrite/node}/__init__.py +9 -9
  383. mindspore/rewrite/node/call_function.py +79 -0
  384. mindspore/rewrite/node/cell_container.py +135 -0
  385. mindspore/rewrite/node/control_flow.py +88 -0
  386. mindspore/rewrite/{node.py → node/node.py} +273 -234
  387. mindspore/rewrite/node/node_manager.py +254 -0
  388. mindspore/rewrite/{topological_manager.py → node/node_topological_manager.py} +13 -46
  389. mindspore/rewrite/parsers/arguments_parser.py +22 -21
  390. mindspore/rewrite/parsers/assign_parser.py +216 -221
  391. mindspore/rewrite/parsers/attribute_parser.py +9 -7
  392. mindspore/rewrite/parsers/class_def_parser.py +174 -113
  393. mindspore/rewrite/parsers/constant_parser.py +9 -6
  394. mindspore/rewrite/parsers/container_parser.py +9 -7
  395. mindspore/rewrite/parsers/for_parser.py +36 -15
  396. mindspore/rewrite/parsers/function_def_parser.py +24 -16
  397. mindspore/rewrite/parsers/if_parser.py +28 -24
  398. mindspore/rewrite/parsers/module_parser.py +196 -25
  399. mindspore/rewrite/{parser.py → parsers/parser.py} +4 -2
  400. mindspore/rewrite/{parser_register.py → parsers/parser_register.py} +1 -1
  401. mindspore/rewrite/parsers/return_parser.py +6 -6
  402. mindspore/rewrite/sparsify/sparse_transformer.py +12 -3
  403. mindspore/rewrite/sparsify/utils.py +1 -1
  404. mindspore/rewrite/symbol_tree.py +525 -577
  405. mindspore/rewrite/symbol_tree_builder.py +9 -193
  406. mindspore/rewrite/symbol_tree_dumper.py +2 -2
  407. mindspore/run_check/_check_version.py +2 -2
  408. mindspore/{ops/bprop_mindir → safeguard}/__init__.py +4 -3
  409. mindspore/safeguard/rewrite_obfuscation.py +517 -0
  410. mindspore/scipy/linalg.py +1 -1
  411. mindspore/scipy/optimize/minimize.py +7 -3
  412. mindspore/train/_utils.py +7 -3
  413. mindspore/train/amp.py +323 -123
  414. mindspore/train/anf_ir_pb2.py +14 -2
  415. mindspore/train/callback/_backup_and_restore.py +2 -12
  416. mindspore/train/callback/_callback.py +29 -4
  417. mindspore/train/callback/_checkpoint.py +23 -8
  418. mindspore/train/callback/_early_stop.py +2 -2
  419. mindspore/train/callback/_landscape.py +4 -4
  420. mindspore/train/callback/_loss_monitor.py +2 -2
  421. mindspore/train/callback/_on_request_exit.py +2 -2
  422. mindspore/train/callback/_reduce_lr_on_plateau.py +3 -4
  423. mindspore/train/callback/_summary_collector.py +14 -7
  424. mindspore/train/callback/_time_monitor.py +58 -5
  425. mindspore/train/data_sink.py +5 -11
  426. mindspore/train/dataset_helper.py +83 -57
  427. mindspore/train/loss_scale_manager.py +2 -2
  428. mindspore/train/metrics/__init__.py +3 -3
  429. mindspore/train/metrics/cosine_similarity.py +1 -1
  430. mindspore/train/metrics/hausdorff_distance.py +3 -2
  431. mindspore/train/metrics/mean_surface_distance.py +3 -2
  432. mindspore/train/metrics/metric.py +39 -19
  433. mindspore/train/metrics/roc.py +2 -2
  434. mindspore/train/metrics/root_mean_square_surface_distance.py +4 -3
  435. mindspore/train/mind_ir_pb2.py +85 -36
  436. mindspore/train/model.py +185 -45
  437. mindspore/train/serialization.py +390 -150
  438. mindspore/train/summary/_writer_pool.py +3 -2
  439. mindspore/train/summary/summary_record.py +14 -10
  440. mindspore/train/train_thor/convert_utils.py +3 -3
  441. mindspore/train/train_thor/dataset_helper.py +1 -1
  442. mindspore/version.py +1 -1
  443. {mindspore-2.1.0.dist-info → mindspore-2.2.0.dist-info}/METADATA +6 -7
  444. {mindspore-2.1.0.dist-info → mindspore-2.2.0.dist-info}/RECORD +447 -507
  445. {mindspore-2.1.0.dist-info → mindspore-2.2.0.dist-info}/entry_points.txt +0 -1
  446. mindspore/_akg/akg/tvm/contrib/debugger/__init__.py +0 -16
  447. mindspore/_akg/akg/tvm/contrib/debugger/debug_result.py +0 -274
  448. mindspore/_akg/akg/tvm/contrib/debugger/debug_runtime.py +0 -259
  449. mindspore/_akg/akg/tvm/contrib/peak.py +0 -341
  450. mindspore/_akg/akg/tvm/contrib/rpc.py +0 -25
  451. mindspore/_akg/akg/tvm/contrib/xcode.py +0 -257
  452. mindspore/_akg/akg/tvm/exec/__init__.py +0 -17
  453. mindspore/_akg/akg/tvm/exec/autotvm_log_editor.py +0 -60
  454. mindspore/_akg/akg/tvm/exec/measure_peak.py +0 -48
  455. mindspore/_akg/akg/tvm/exec/query_rpc_tracker.py +0 -48
  456. mindspore/_akg/akg/tvm/exec/rpc_proxy.py +0 -98
  457. mindspore/_akg/akg/tvm/exec/rpc_server.py +0 -88
  458. mindspore/_akg/akg/tvm/exec/rpc_tracker.py +0 -62
  459. mindspore/_akg/akg/tvm/rpc/__init__.py +0 -29
  460. mindspore/_akg/akg/tvm/rpc/base.py +0 -182
  461. mindspore/_akg/akg/tvm/rpc/client.py +0 -436
  462. mindspore/_akg/akg/tvm/rpc/proxy.py +0 -595
  463. mindspore/_akg/akg/tvm/rpc/server.py +0 -413
  464. mindspore/_akg/akg/tvm/rpc/tornado_util.py +0 -121
  465. mindspore/_akg/akg/tvm/rpc/tracker.py +0 -431
  466. mindspore/_extends/graph_kernel/expander.py +0 -80
  467. mindspore/_extends/graph_kernel/expanders/__init__.py +0 -54
  468. mindspore/_extends/graph_kernel/expanders/_utils.py +0 -269
  469. mindspore/_extends/graph_kernel/expanders/addn.py +0 -33
  470. mindspore/_extends/graph_kernel/expanders/batchnorm.py +0 -152
  471. mindspore/_extends/graph_kernel/expanders/batchnorm_grad.py +0 -105
  472. mindspore/_extends/graph_kernel/expanders/clip_by_norm_no_div_sum.py +0 -33
  473. mindspore/_extends/graph_kernel/expanders/complex/abs.py +0 -30
  474. mindspore/_extends/graph_kernel/expanders/complex/add.py +0 -44
  475. mindspore/_extends/graph_kernel/expanders/complex/div.py +0 -62
  476. mindspore/_extends/graph_kernel/expanders/complex/mul.py +0 -52
  477. mindspore/_extends/graph_kernel/expanders/complex/real_div.py +0 -62
  478. mindspore/_extends/graph_kernel/expanders/complex/sub.py +0 -45
  479. mindspore/_extends/graph_kernel/expanders/conv2d.py +0 -200
  480. mindspore/_extends/graph_kernel/expanders/dropout_grad.py +0 -30
  481. mindspore/_extends/graph_kernel/expanders/equal_count.py +0 -50
  482. mindspore/_extends/graph_kernel/expanders/erfc.py +0 -35
  483. mindspore/_extends/graph_kernel/expanders/expand_dims.py +0 -50
  484. mindspore/_extends/graph_kernel/expanders/fused_adam.py +0 -44
  485. mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py +0 -47
  486. mindspore/_extends/graph_kernel/expanders/fused_mul_add.py +0 -28
  487. mindspore/_extends/graph_kernel/expanders/gelu_grad.py +0 -70
  488. mindspore/_extends/graph_kernel/expanders/gkdropout.py +0 -40
  489. mindspore/_extends/graph_kernel/expanders/identity.py +0 -25
  490. mindspore/_extends/graph_kernel/expanders/layernorm.py +0 -93
  491. mindspore/_extends/graph_kernel/expanders/layernorm_grad.py +0 -113
  492. mindspore/_extends/graph_kernel/expanders/logsoftmax.py +0 -46
  493. mindspore/_extends/graph_kernel/expanders/logsoftmax_grad.py +0 -36
  494. mindspore/_extends/graph_kernel/expanders/matmul.py +0 -80
  495. mindspore/_extends/graph_kernel/expanders/maximum_grad.py +0 -59
  496. mindspore/_extends/graph_kernel/expanders/minimum_grad.py +0 -80
  497. mindspore/_extends/graph_kernel/expanders/oneslike.py +0 -26
  498. mindspore/_extends/graph_kernel/expanders/reduce_mean.py +0 -43
  499. mindspore/_extends/graph_kernel/expanders/relu_grad.py +0 -32
  500. mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits.py +0 -41
  501. mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits_grad.py +0 -35
  502. mindspore/_extends/graph_kernel/expanders/sigmoid_grad.py +0 -31
  503. mindspore/_extends/graph_kernel/expanders/slice.py +0 -35
  504. mindspore/_extends/graph_kernel/expanders/softmax_cross_entropy_with_logits.py +0 -42
  505. mindspore/_extends/graph_kernel/expanders/softmax_grad_ext.py +0 -41
  506. mindspore/_extends/graph_kernel/expanders/softsign.py +0 -28
  507. mindspore/_extends/graph_kernel/expanders/sqrt_grad.py +0 -29
  508. mindspore/_extends/graph_kernel/expanders/square_sum_all.py +0 -44
  509. mindspore/_extends/graph_kernel/expanders/square_sum_v1.py +0 -37
  510. mindspore/_extends/graph_kernel/expanders/squared_difference.py +0 -43
  511. mindspore/_extends/graph_kernel/expanders/tanh_grad.py +0 -31
  512. mindspore/_extends/graph_kernel/model/op_infer.py +0 -506
  513. mindspore/dataset/datapreprocess/__init__.py +0 -20
  514. mindspore/dataset/datapreprocess/preprocess_imagenet_validate_dataset.py +0 -54
  515. mindspore/include/api/net.h +0 -142
  516. mindspore/nn/lr_scheduler.py +0 -262
  517. mindspore/ops/_grad_experimental/grad_image_ops.py +0 -248
  518. mindspore/ops/_grad_experimental/grad_linalg_ops.py +0 -181
  519. mindspore/ops/_grad_experimental/grad_other_ops.py +0 -72
  520. mindspore/ops/_grad_experimental/grad_scalar_ops.py +0 -112
  521. mindspore/ops/_grad_experimental/grad_sequence_ops.py +0 -351
  522. mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +0 -0
  523. mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +0 -0
  524. mindspore/ops/bprop_mindir/Depend_bprop.mindir +0 -0
  525. mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +0 -138
  526. mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
  527. mindspore/ops/bprop_mindir/Load_bprop.mindir +0 -0
  528. mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +0 -0
  529. mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
  530. mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  531. mindspore/ops/bprop_mindir/Switch_bprop.mindir +0 -0
  532. mindspore/ops/bprop_mindir/TransShape_bprop.mindir +0 -0
  533. mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +0 -0
  534. mindspore/ops/bprop_mindir/Unique_bprop.mindir +0 -0
  535. mindspore/ops/bprop_mindir/Unstack_bprop.mindir +0 -0
  536. mindspore/ops/bprop_mindir/generate_mindir.py +0 -114
  537. mindspore/rewrite/node_visitor.py +0 -44
  538. {mindspore-2.1.0.dist-info → mindspore-2.2.0.dist-info}/WHEEL +0 -0
  539. {mindspore-2.1.0.dist-info → mindspore-2.2.0.dist-info}/top_level.txt +0 -0
@@ -488,7 +488,8 @@
  "BNTrainingReduceGrad": "dynamic impl failed",
  "BNTrainingReduce": "dynamic impl failed",
  "BNTrainingUpdateGrad": "dynamic impl failed",
- "BNTrainingUpdate": "dynamic impl failed"
+ "BNTrainingUpdate": "dynamic impl failed",
+ "NonZero": "Implementation errors with float and bool types in AiCore, use AiCPU instead."
  },
  "SkipNodes": [
  "Im2col",
@@ -531,7 +532,8 @@
  "AvgPool",
  "GatherNd",
  "Eye",
- "BNInfer"
+ "BNInfer",
+ "NonZero"
  ],
  "FallbackOps": {
  "DeformableOffsets": [
mindspore/context.py CHANGED
@@ -302,8 +302,8 @@ class _Context:
  raise ValueError(f"For 'ascend_config', the value of argument {ascend_key} must be one of "
  f"{supported_modes}, but got {ascend_value}.")
  if isinstance(supported_modes, tuple) and not isinstance(ascend_value, supported_modes):
- raise ValueError(f"For 'ascend_config', the type of argument {ascend_key} must be one of "
- f"{supported_modes}, but got {type(ascend_value)}.")
+ raise TypeError(f"For 'ascend_config', the type of argument {ascend_key} must be one of "
+ f"{supported_modes}, but got {type(ascend_value)}.")
  cfg_setter = ascend_cfg_setters.get(ascend_key)
  cfg_setter(ascend_value)

@@ -317,6 +317,8 @@ class _Context:
  - conv_fprop_algo (str): "normal", "performance" or user specifies conv forward algorithm directly.
  - conv_dgrad_algo (str): "normal", "performance" or user specifies conv data grad algorithm directly.
  - conv_wgrad_algo (str): "normal", "performance" or user specifies conv weight grad algorithm directly.
+ - conv_allow_tf32 (bool): ``False`` and ``True``.
+ - matmul_allow_tf32 (bool): ``False`` and ``True``.
  """

  gpu_cfgs = {'conv_fprop_algo': ["normal", "performance", "implicit_gemm", "precomp_gemm", "gemm", "direct",
@@ -324,7 +326,9 @@
  'conv_dgrad_algo': ["normal", "performance", "algo_0", "algo_1", "fft", "fft_tiling", "winograd",
  "winograd_nonfused"],
  'conv_wgrad_algo': ["normal", "performance", "algo_0", "algo_1", "fft", "algo_3", "fft_tiling",
- "winograd_nonfused"]}
+ "winograd_nonfused"],
+ 'conv_allow_tf32': [True, False],
+ 'matmul_allow_tf32': [True, False]}
  for gpu_key in gpu_config:
  if gpu_key not in gpu_cfgs:
  raise ValueError(f"For 'context.set_context', the key of argument 'gpu_config' must be one of "
@@ -339,6 +343,10 @@
  self.set_param(ms_ctx_param.conv_dgrad_algo, gpu_config[gpu_key])
  if gpu_key == 'conv_wgrad_algo':
  self.set_param(ms_ctx_param.conv_wgrad_algo, gpu_config[gpu_key])
+ if gpu_key == 'conv_allow_tf32':
+ self.set_param(ms_ctx_param.conv_allow_tf32, gpu_config[gpu_key])
+ if gpu_key == 'matmul_allow_tf32':
+ self.set_param(ms_ctx_param.matmul_allow_tf32, gpu_config[gpu_key])

  def set_backend_policy(self, policy):
  success = self._context_handle.set_backend_policy(policy)
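For reference, the two new GPU flags are consumed by the setter above in the same way as the existing convolution algorithm keys. A minimal usage sketch, mirroring the docstring example added later in this diff (values are illustrative, not recommendations):

    import mindspore as ms

    # Allow Tensor Core TF32 math for cuDNN convolutions and cuBLAS matmuls on GPU.
    ms.set_context(device_target="GPU",
                   gpu_config={"conv_fprop_algo": "performance",
                               "conv_allow_tf32": True,
                               "matmul_allow_tf32": True})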
@@ -388,6 +396,29 @@
  raise ValueError(f"For 'context.set_context', the argument 'aoe_tune_mode' must be in "
  f"['online', 'offline'], but got {tune_mode}.")

+ def set_aoe_config(self, aoe_config):
+ """
+ Enable aoe config.
+
+ Args:
+ aoe_config (dict):
+ - job_type (str): ``"1"``, ``"2"``. Default: ``"2"`` .
+ - ``"1"``: subgraph tuning.
+ - ``"2"``: operator tuning.
+ """
+
+ aoe_cfgs = {'job_type': ["1", "2"]}
+ for aoe_config_key in aoe_config:
+ if aoe_config_key not in aoe_cfgs:
+ raise ValueError(f"For 'context.set_context', the key of argument 'aoe_config' must be one of "
+ f"{aoe_cfgs}, but got {aoe_config_key}.")
+ supported_value = aoe_cfgs.get(aoe_config_key)
+ if aoe_config[aoe_config_key] not in supported_value:
+ raise ValueError(f"For 'aoe_config', the value of argument {aoe_config_key} must be one of "
+ f"{supported_value}, but got {aoe_config[aoe_config_key]}.")
+ if aoe_config_key == 'job_type':
+ self.set_param(ms_ctx_param.aoe_job_type, aoe_config[aoe_config_key])
+
  def set_device_id(self, device_id):
  if device_id < 0 or device_id > 4095:
  raise ValueError(f"For 'context.set_context', the argument 'device_id' must be in range [0, 4095], "
@@ -484,7 +515,7 @@
  except (TypeError, ValueError) as exo:
  raise ValueError(str(exo) + "\nFor 'context.set_context', open or load the 'env_config_path' file {} "
  "failed, please check whether 'env_config_path' is json file and correct, "
- "or may not have permission to read it.".format(env_config_path))
+ "or may not have permission to read it.".format(env_config_path)) from exo
  self.set_param(ms_ctx_param.env_config_path, env_config_path)

  def set_runtime_num_threads(self, runtime_num_threads):
@@ -527,6 +558,7 @@
  'ascend_config': set_ascend_config,
  'jit_syntax_level': set_jit_syntax_level,
  'gpu_config': set_gpu_config,
+ 'aoe_config': set_aoe_config,
  }

  @property
@@ -595,6 +627,7 @@
  valid_option = {"recompute_comm_overlap": ms_ctx_param.recompute_comm_overlap,
  "matmul_grad_comm_overlap": ms_ctx_param.matmul_grad_comm_overlap,
  "enable_task_opt": ms_ctx_param.enable_task_opt,
+ "enable_grad_comm_opt": ms_ctx_param.enable_grad_comm_opt,
  "interleaved_matmul_comm": ms_ctx_param.interleaved_matmul_comm,
  "interleaved_layernorm_comm": ms_ctx_param.interleaved_layernorm_comm}
  with open(speedup_config_real_path, 'r') as f:
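The keys registered in valid_option above are the switches read from the parallel speed-up JSON file passed through ascend_config={"parallel_speed_up_json_path": ...}. A hedged sketch of producing such a file in Python (the key set is taken from valid_option; all values below are illustrative defaults, not recommendations):

    import json
    import mindspore as ms

    speed_up = {
        "recompute_comm_overlap": False,
        "matmul_grad_comm_overlap": False,
        "enable_task_opt": False,
        "enable_grad_comm_opt": False,   # new option added in this release
        "interleaved_matmul_comm": False,
        "interleaved_layernorm_comm": False,
    }
    with open("parallel_speed_up.json", "w") as f:
        json.dump(speed_up, f, indent=4)

    ms.set_context(ascend_config={"parallel_speed_up_json_path": "./parallel_speed_up.json"})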
@@ -611,7 +644,8 @@
  raise ValueError(str(exo) + "\nFor 'context.set_context', "
  "open or load the 'speedup_config_path' file {} "
  "failed, please check whether 'speedup_config_path' is json file and correct, "
- "or may not have permission to read it.".format(speedup_config_real_path))
+ "or may not have permission to read it.".format(speedup_config_real_path)) \
+ from exo


  def _context():
@@ -641,8 +675,9 @@ def _context():
  @args_type_check(device_num=int, global_rank=int, gradients_mean=bool, gradient_fp32_sync=bool, parallel_mode=str,
  auto_parallel_search_mode=str, search_mode=str, parameter_broadcast=bool, strategy_ckpt_load_file=str,
  strategy_ckpt_save_file=str, full_batch=bool, enable_parallel_optimizer=bool, enable_alltoall=bool,
- all_reduce_fusion_config=list, pipeline_stages=int, grad_accumulation_step=int,
- parallel_optimizer_config=dict, comm_fusion=dict, strategy_ckpt_config=dict)
+ all_reduce_fusion_config=list, pipeline_stages=int, pipeline_segments=int,
+ parallel_optimizer_config=dict,
+ comm_fusion=dict, strategy_ckpt_config=dict)
  def set_auto_parallel_context(**kwargs):
  r"""
  Set auto parallel context, only data parallel supported on CPU.
@@ -663,11 +698,11 @@ def set_auto_parallel_context(**kwargs):
  device_num gradient_fp32_sync
  global_rank loss_repeated_mean
  gradients_mean search_mode
- parallel_mode strategy_ckpt_load_file
- all_reduce_fusion_config strategy_ckpt_save_file
- enable_parallel_optimizer dataset_strategy
- parallel_optimizer_config pipeline_stages
- enable_alltoall grad_accumulation_step
+ parallel_mode parameter_broadcast
+ all_reduce_fusion_config strategy_ckpt_load_file
+ enable_parallel_optimizer strategy_ckpt_save_file
+ parallel_optimizer_config dataset_strategy
+ enable_alltoall pipeline_stages
  \ auto_parallel_search_mode
  \ comm_fusion
  \ strategy_ckpt_config
@@ -694,7 +729,7 @@ def set_auto_parallel_context(**kwargs):

  - auto_parallel: Achieving parallelism automatically.
  search_mode (str): There are three kinds of shard strategy search modes: ``"recursive_programming"`` ,
- ``"dynamic_programming"`` and ``"sharding_propagation"`` . Default: ``"dynamic_programming"`` .
+ ``"dynamic_programming"`` and ``"sharding_propagation"`` . Default: ``"recursive_programming"`` .

  - recursive_programming: Recursive programming search mode. In order to obtain optimal performance,
  it is recommended that users set the batch size to be greater than or equal to the product of
@@ -737,12 +772,9 @@ def set_auto_parallel_context(**kwargs):
  distributed alone in the pipeline. The total devices will be divided into 'pipeline_stags'
  stages.
  Default: ``1`` .
- grad_accumulation_step (int): Set the accumulation steps of gradients in auto and semi auto parallel mode.
- This should be a positive int. Default: ``1`` .
  parallel_optimizer_config (dict): A dict contains the keys and values for setting the parallel optimizer
  configure. The configure provides more detailed behavior control about parallel training
- when parallel optimizer is enabled. Currently it supports the key `gradient_accumulation_shard`.
- The configure will be effective when we use
+ when parallel optimizer is enabled. The configure will be effective when we use
  mindspore.set_auto_parallel_context(enable_parallel_optimizer=True).
  It supports the following keys.

@@ -760,6 +792,14 @@ def set_auto_parallel_context(**kwargs):
  across the devices. Parameter size = shape[0] \* ... \* shape[n] \* size(dtype). Non-negative.
  Unit: KB. Default: ``64`` .

+ - optimizer_weight_shard_size(int): Set the optimizer weight shard group size, if you want to
+ specific the maximum group size across devices when the parallel optimizer is enabled.
+ The numerical range can be (0, device_num]. If pipeline parallel is enabled, the numerical
+ range is (0, device_num/stage]. If the size of data parallel communication domain
+ of the parameter cannot be divided by `optimizer_weight_shard_size`, then the specified
+ communication group size will not take effect. Default value is ``-1`` , which means the
+ optimizer weight shard group size will be the size of data parallel group of each parameter.
+
  comm_fusion (dict): A dict contains the types and configurations for setting the communication fusion. each
  communication fusion config has two keys: "mode" and "config".
  It supports following communication fusion types and configurations:
@@ -820,7 +860,8 @@ def set_auto_parallel_context(**kwargs):
  >>> ms.set_auto_parallel_context(enable_alltoall=False)
  >>> ms.set_auto_parallel_context(all_reduce_fusion_config=[8, 160])
  >>> ms.set_auto_parallel_context(pipeline_stages=2)
- >>> parallel_config = {"gradient_accumulation_shard": True, "parallel_optimizer_threshold": 24}
+ >>> parallel_config = {"gradient_accumulation_shard": True, "parallel_optimizer_threshold": 24,
+ ... "optimizer_weight_shard_size": 2}
  >>> ms.set_auto_parallel_context(parallel_optimizer_config=parallel_config, enable_parallel_optimizer=True)
  >>> config = {"allreduce": {"mode": "size", "config": 32}, "allgather": {"mode": "size", "config": 32}}
  >>> ms.set_auto_parallel_context(comm_fusion=config)
@@ -860,8 +901,8 @@ def reset_auto_parallel_context():
  - gradients_mean: False.
  - gradient_fp32_sync: True.
  - parallel_mode: 'stand_alone'.
- - search_mode: 'dynamic_programming'.
- - auto_parallel_search_mode: 'dynamic_programming'.
+ - search_mode: 'recursive_programming'.
+ - auto_parallel_search_mode: 'recursive_programming'.
  - parameter_broadcast: False.
  - strategy_ckpt_load_file: ''.
  - strategy_ckpt_save_file: ''.
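Since the documented default of search_mode moves from 'dynamic_programming' to 'recursive_programming', the change can be observed after a reset. A small sketch, assuming the standard get_auto_parallel_context read accessor from the public API:

    import mindspore as ms

    ms.reset_auto_parallel_context()
    # In 2.2.0 this is expected to report the new default, "recursive_programming".
    print(ms.get_auto_parallel_context("search_mode"))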
@@ -881,24 +922,31 @@
  @args_type_check(offload_config=dict)
  def set_offload_context(offload_config):
  r"""
- Set offload context.
- Some configurations are offload specific, see the below table for details:
+ Configure heterogeneous training detailed parameters to adjust the offload strategy.
+
+ Note:
+ The offload configuration is only used if the memory offload feature is enabled
+ via mindspore.set_context(memory_offload="ON").

  Args:
  offload_config (dict): A dict contains the keys and values for setting the offload context
  configure.It supports the following keys.

- - offload_param (str): The param for offload destination, cpu or disk.
- - offload_path (str): The path of offload.
+ - offload_path (str): The path of offload, relative paths are supported. Default: ``"./offload"``.
  - offload_cpu_size (str): The cpu memory size for offload. The format is "xxGB".
  - offload_disk_size (str): The disk size for offload. The format is "xxGB"
- - hbm_ratio (float): The ratio that can be used based on the maximum device memory. The range is (0,1].
- - cpu_ratio (float): The ratio that can be used based on the maximum host memory. The range is (0,1].
+ - hbm_ratio (float): The ratio that can be used based on the maximum device memory.
+ The range is (0,1], Default: ``1.0``.
+ - cpu_ratio (float): The ratio that can be used based on the maximum host memory.
+ The range is (0,1], Default: ``1.0``.
+ - enable_pinned_mem (bool): The flag of whether enabling Pinned Memory. Default: ``True``.
  - enable_aio (bool): The flag of whether enabling aio. Default: ``True``.
- - aio_block_size (str): The size of aio block. The format is "xxGB"
+ - aio_block_size (str): The size of aio block. The format is "xxGB".
  - aio_queue_depth (int): The depth of aio queue.
- - enable_pinned_mem (bool): The flag of whether enabling pinned memory.
- - auto_offload (bool): The flag of whether auto offload.
+ - offload_param (str): The param for offload destination, cpu or disk, Default: ``""``.
+ - offload_checkpoint (str): The checkpoint for offload destination, only valid if recompute is turned on,
+ cpu or disk, Default: ``""``.
+ - auto_offload (bool): The flag of whether auto offload. Default: ``True``.
  - host_mem_block_size (str): The memory block size of host memory pool. The format is "xxGB"

  Raises:
@@ -906,14 +954,19 @@ def set_offload_context(offload_config):

  Examples:
  >>> from mindspore import context
- >>> context.set_offload_context(offload_config={"offload_param"="cpu"})
+ >>> context.set_offload_context(offload_config={"offload_param":"cpu"})
  """
  _set_offload_context(offload_config)


  def get_offload_context():
  """
- Get offload context.
+ Gets the offload configuration parameters. Configure through interface mindspore.set_offload_context().
+ If the user is not set, the default configuration is obtained.
+
+ Returns:
+ Dict, heterogeneous training offload detailed configuration parameters.
+
  Examples:
  >>> from mindspore import context
  >>> offload_config = context.get_offload_context()
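Putting the reworked set_offload_context/get_offload_context docstrings together, a hedged usage sketch (key names come from the docstring above; the sizes and ratios are placeholders, not recommendations):

    import mindspore as ms
    from mindspore import context

    # Offload settings only apply when the memory offload feature is enabled first.
    ms.set_context(memory_offload="ON")
    context.set_offload_context(offload_config={"offload_param": "cpu",
                                                "offload_path": "./offload",
                                                "offload_cpu_size": "512GB",
                                                "hbm_ratio": 0.9,
                                                "enable_pinned_mem": True})
    print(context.get_offload_context())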
@@ -948,7 +1001,7 @@ def _check_target_specific_cfgs(device, arg_key):


  @args_type_check(mode=int, precompile_only=bool, device_target=str, device_id=int, save_graphs=(bool, int),
- save_graphs_path=str, enable_dump=bool, aoe_tune_mode=str,
+ save_graphs_path=str, enable_dump=bool, aoe_tune_mode=str, aoe_config=dict,
  save_dump_path=str, enable_reduce_precision=bool, variable_memory_max_size=str,
  enable_auto_mixed_precision=bool, inter_op_parallel_num=int,
  enable_graph_kernel=bool, reserve_class_name_in_scope=bool, check_bprop=bool,
@@ -1004,7 +1057,7 @@ def set_context(**kwargs):
  | +------------------------------+----------------------------+
  | | reserve_class_name_in_scope | CPU/GPU/Ascend |
  | +------------------------------+----------------------------+
- | | pynative_synchronize | GPU/Ascend |
+ | | pynative_synchronize | CPU/GPU/Ascend |
  +-------------------------+------------------------------+----------------------------+
  | Executive Control | mode | CPU/GPU/Ascend |
  | +------------------------------+----------------------------+
@@ -1014,6 +1067,10 @@ def set_context(**kwargs):
  | +------------------------------+----------------------------+
  | | enable_reduce_precision | Ascend |
  | +------------------------------+----------------------------+
+ | | aoe_tune_mode | Ascend |
+ | +------------------------------+----------------------------+
+ | | aoe_config | Ascend |
+ | +------------------------------+----------------------------+
  | | check_bprop | CPU/GPU/Ascend |
  | +------------------------------+----------------------------+
  | | max_call_depth | CPU/GPU/Ascend |
@@ -1050,7 +1107,7 @@ def set_context(**kwargs):
  If device target is not set, the version of MindSpore package is used.
  max_device_memory (str): Set the maximum memory available for devices. The format is "xxGB".
  Default: ``" 1024GB"`` . The actual used memory size is the minimum of the available memory of the device
- and max_device_memory.
+ and max_device_memory. 'max_device_memory' should be set before the program runs.
  variable_memory_max_size (str): This parameter is deprecated, and will be removed in a future version.
  Please use parameter 'max_device_memory' instead.
  mempool_block_size (str): Set the size of the memory pool block in PyNative mode for devices.
@@ -1063,7 +1120,7 @@ def set_context(**kwargs):
  Available values are:

  - False or 0: disable saving of intermediate compilation graphs.
- - 1: some intermediate files will be generated during graph compliation.
+ - 1: some intermediate files will be generated during graph compilation.
  - True or 2: Generate more ir files related to backend process.
  - 3: Generate visualization computing graphs and detailed frontend ir graphs.

@@ -1134,11 +1191,17 @@ def set_context(**kwargs):
  If enable_graph_kernel is set to ``True`` , acceleration can be enabled.
  For details of graph kernel fusion, please check
  `Enabling Graph Kernel Fusion
- <https://www.mindspore.cn/tutorials/experts/en/r2.1/optimize/graph_fusion_engine.html>`_.
+ <https://www.mindspore.cn/tutorials/experts/en/r2.2/optimize/graph_fusion_engine.html>`_.
  graph_kernel_flags (str):
  Optimization options of graph kernel fusion, and the priority is higher when it conflicts
  with enable_graph_kernel. Only for experienced users.
- For example, mindspore.set_context(graph_kernel_flags="--opt_level=2 --dump_as_text"). Some general options:
+ For example,
+
+ .. code-block::
+
+ mindspore.set_context(graph_kernel_flags="--opt_level=2 --dump_as_text")
+
+ Some general options:

  - opt_level: Set the optimization level.
  Default: ``2`` . Graph kernel fusion can be enabled equivalently by setting opt_level greater than 0.
@@ -1154,10 +1217,19 @@

  - dump_as_text: dumps detail info as text files. Default: ``False`` .

- More options can refer to the implementation code.
  enable_reduce_precision (bool): Whether to enable precision reduction.
  If the operator does not support the user-specified precision, the precision will
  be changed automatically. Default: ``True`` .
+ aoe_tune_mode (str): AOE tuning mode setting, which is not set by default.
+ When set to ``"online"`` , the tuning in online function is turned on.
+ When set to ``"offline"`` , ge graph will be save for offline tuning.
+ aoe_config (dict): Set the parameters specific to Ascend Optimization Engine. It is not set by default.
+
+ - job_type (str): Mode type setting, default value is ``"2"``.
+
+ - ``"1"``: subgraph tuning;
+ - ``"2"``: operator tuning.
+
  check_bprop (bool): Whether to check back propagation nodes. The checking ensures that the shape and dtype
  of back propagation node outputs is the same as input parameters. Default: ``False`` .
  max_call_depth (int): Specify the maximum depth of function call. Must be positive integer. Default: ``1000`` .
@@ -1205,8 +1277,8 @@
  memory_optimize_level is set 'O1'.
  - OFF: Turn off the memory Offload function.
  ascend_config (dict): Set the parameters specific to Ascend hardware platform. It is not set by default.
- Currently, configurations except `parallel_speed_up_json_path` are currently only supported on Ascend910B
- hardware platform. The default value of `precision_mode`, `jit_compile` and
+ Currently, configurations except `parallel_speed_up_json_path` and `precision_mode.force_fp32` are only
+ supported on Ascend910B hardware platform. The default value of `precision_mode`, `jit_compile` and
  `atomic_clean_policy` are experimental parameters, may change in the future.

  - precision_mode (str): Mixed precision mode setting, on Ascend910B hardware platform, the default
@@ -1238,35 +1310,40 @@
  When the memory of the network exceeds the limit, you may try this cleaning policy, but it may cause
  performance loss.
  - matmul_allow_hf32 (bool): Whether to convert FP32 to HF32 for Matmul operators. Default value: ``False``.
+ This is an experimental prototype that is subject to change and/or deletion.
  For detailed information, please refer to `Ascend community <https://www.hiascend.com/>`_ .
  - conv_allow_hf32 (bool): Whether to convert FP32 to HF32 for Conv operators. Default value: ``True``.
+ This is an experimental prototype that is subject to change and/or deletion.
  For detailed information, please refer to `Ascend community <https://www.hiascend.com/>`_ .
  - op_precision_mode (str): Path to config file of op precision mode. For detailed information, please refer
  to `Ascend community <https://www.hiascend.com/>`_ .
  - parallel_speed_up_json_path(Union[str, None]): The path to the parallel speed up json file, configuration
  can refer to `parallel_speed_up.json
- <https://gitee.com/mindspore/mindspore/blob/r2.1/config/parallel_speed_up.json>`_ .
+ <https://gitee.com/mindspore/mindspore/blob/r2.2/config/parallel_speed_up.json>`_ .
  If its value is None or '', it does not take effect. Default None.

  - recompute_comm_overlap (bool): Enable overlap between recompute ops and communication ops if True.
  Default: False.
  - matmul_grad_comm_overlap (bool): Enable overlap between grad ops and communication ops if True.
  Default: False.
- - enable_task_opt (bool): Enable the optimizaton of the number of tasks for each communication if True.
+ - enable_task_opt (bool): Enable the optimization of the number of tasks for each communication if True.
  Default: False.
  - interleaved_matmul_comm (bool): Enable interleaved optimization of Matmul-Comm if True. Default: False.
  - interleaved_layernorm_comm (bool): Enable interleaved optimization of LayerNorm-Comm if True.
  Default: False.
+
  jit_syntax_level (int): Set JIT syntax level for graph compiling, triggered by GRAPH_MODE and @jit decorator.
- The value must be in [STRICT, LAX]. Default: LAX. All levels
- support all backends.
+ The value must be ``STRICT`` or ``LAX`` . Default: ``LAX`` . All levels support all backends.
+
+ - ``STRICT`` : Only basic syntax is supported, and execution performance is optimal. Can be used for MindIR
+ load and export.
+ - ``LAX`` : Compatible with all Python syntax as much as possible. However, execution performance may be
+ affected and not optimal. Cannot be used for MindIR load and export due to some syntax that may not be
+ able to be exported.

- - STRICT: Only basic syntax is supported, and execution performance is optimal.
- - LAX: Compatible with all Python syntax as much as possible. However, execution performance may be
- affected and not optimal.
  gpu_config (dict): Set the parameters specific to gpu hardware platform. It is not set by default.
- Currently, only setting `conv_fprop_algo` and `conv_dgrad_algo` and `conv_wgrad_algo` are supported on GPU
- hardware platform.
+ Currently, only setting `conv_fprop_algo` and `conv_dgrad_algo` and `conv_wgrad_algo` and `conv_allow_tf32`
+ and `matmul_allow_tf32` are supported on GPU hardware platform.

  - conv_fprop_algo (str): Specifies convolution forward algorithm and the default value is 'normal',
  The value range is as follows:
@@ -1330,6 +1407,10 @@ def set_context(**kwargs):
  - fft_tiling: This algorithm uses the Fast-Fourier Transform approach but splits the inputs into tiles.
  A significant memory workspace is needed to store intermediate results but less than fft for large size
  images. The results are deterministic.
+ - conv_allow_tf32 (bool): The flag below controls to allow Tensor core TF32 computation on CUDNN and the
+ default value is ``True``.
+ - matmul_allow_tf32 (bool): The flag below controls to allow Tensor core TF32 computation on CUBLAS and the
+ default value is ``False``.

  Raises:
  ValueError: If input key is not an attribute in context.
@@ -1346,6 +1427,8 @@ def set_context(**kwargs):
  >>> ms.set_context(graph_kernel_flags="--opt_level=2 --dump_as_text")
  >>> ms.set_context(reserve_class_name_in_scope=True)
  >>> ms.set_context(variable_memory_max_size="6GB")
+ >>> ms.set_context(aoe_tune_mode="online")
+ >>> ms.set_context(aoe_config={"job_type": "2"})
  >>> ms.set_context(check_bprop=True)
  >>> ms.set_context(max_device_memory="3.5GB")
  >>> ms.set_context(mempool_block_size="1GB")
@@ -1364,7 +1447,8 @@ def set_context(**kwargs):
  >>> ms.set_context(ascend_config={"precision_mode": "force_fp16", "jit_compile": True,
  ... "atomic_clean_policy": 1, "op_precision_mode": "./op_precision_config_file"})
  >>> ms.set_context(jit_syntax_level=ms.STRICT)
- >>> ms.set_context(gpu_config={"conv_fprop_algo": "performance"})
+ >>> ms.set_context(gpu_config={"conv_fprop_algo": "performance", "conv_allow_tf32": True,
+ ... "matmul_allow_tf32": True})
  """
  ctx = _context()
  # set device target first
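A minimal sketch of how the keys documented in the hunks above might be combined, assuming the matching `device_target` has already been set for each call and using a placeholder path for the speed-up json file:

>>> import mindspore as ms
>>> # Ascend-only keys (assumes device_target="Ascend"); the json path below is a placeholder.
>>> ms.set_context(ascend_config={"matmul_allow_hf32": False, "conv_allow_hf32": True,
...                               "parallel_speed_up_json_path": "./parallel_speed_up.json"})
>>> # GPU-only keys (assumes device_target="GPU"); the TF32 switches are documented above.
>>> ms.set_context(gpu_config={"conv_fprop_algo": "normal", "conv_allow_tf32": True,
...                            "matmul_allow_tf32": False})
>>> # Graph compilation syntax level; STRICT is the level usable for MindIR load and export.
>>> ms.set_context(jit_syntax_level=ms.STRICT)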
@@ -1390,7 +1474,7 @@ def set_context(**kwargs):
  value = 0
  if value > 3:
  raise ValueError(f"value for save_graphs should be 0-3 but got '{value}'")
- if key == 'jit_syntax_level' and value != STRICT and value != COMPATIBLE and value != LAX:
+ if key == 'jit_syntax_level' and value not in (STRICT, COMPATIBLE, LAX):
  raise ValueError(f"For 'jit_syntax_level', the value should be context.STRICT"
  f" or context.LAX, but got {value}.")
  if not _check_target_specific_cfgs(device, key):
@@ -1454,16 +1538,16 @@ class ParallelMode:
  """
  Parallel mode options.

- There are five kinds of parallel modes, "STAND_ALONE", "DATA_PARALLEL",
- "HYBRID_PARALLEL", "SEMI_AUTO_PARALLEL" and "AUTO_PARALLEL". Default: "STAND_ALONE".
+ There are five kinds of parallel modes, ``STAND_ALONE``, ``DATA_PARALLEL``,
+ ``HYBRID_PARALLEL``, ``SEMI_AUTO_PARALLEL`` and ``AUTO_PARALLEL``. Default: ``STAND_ALONE``.

- - STAND_ALONE: Only one processor is working.
- - DATA_PARALLEL: Distributes the data across different processors.
- - HYBRID_PARALLEL: Achieves data parallelism and model parallelism manually.
- - SEMI_AUTO_PARALLEL: Achieves data parallelism and model parallelism by setting parallel strategies.
- - AUTO_PARALLEL: Achieves parallelism automatically.
+ - ``STAND_ALONE``: Only one processor is working.
+ - ``DATA_PARALLEL``: Distributes the data across different processors.
+ - ``HYBRID_PARALLEL``: Achieves data parallelism and model parallelism manually.
+ - ``SEMI_AUTO_PARALLEL``: Achieves data parallelism and model parallelism by setting parallel strategies.
+ - ``AUTO_PARALLEL``: Achieves parallelism automatically.

- MODE_LIST: The list of all supported parallel modes.
+ ``MODE_LIST``: The list of all supported parallel modes.
  """

  STAND_ALONE = "stand_alone"
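A minimal sketch of selecting one of the modes listed above through `set_auto_parallel_context`, assuming distributed communication and device setup are handled elsewhere:

>>> import mindspore as ms
>>> from mindspore.context import ParallelMode
>>> # Choose one of the five modes; init() and network definition are omitted here.
>>> ms.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)
>>> ms.get_auto_parallel_context("parallel_mode")
'data_parallel'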
@@ -21,7 +21,7 @@ Besides, this module provides APIs to sample data while loading.

  We can enable cache in most of the dataset with its key arguments 'cache'. Please notice that cache is not supported
  on Windows platform yet. Do not use it while loading and processing data on Windows. More introductions and limitations
- can refer `Single-Node Tensor Cache <https://www.mindspore.cn/tutorials/experts/en/r2.1/dataset/cache.html>`_ .
+ can refer `Single-Node Tensor Cache <https://www.mindspore.cn/tutorials/experts/en/r2.2/dataset/cache.html>`_ .
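A minimal sketch of the `cache` argument in use, assuming a cache session has already been created with the `cache_admin` tool; the session id and dataset directory below are placeholders:

>>> import mindspore.dataset as ds
>>> # session_id=1 is a placeholder; a real id comes from cache_admin, and the path is hypothetical.
>>> some_cache = ds.DatasetCache(session_id=1, size=0)
>>> dataset = ds.ImageFolderDataset(dataset_dir="/path/to/image_folder", cache=some_cache)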

  Common imported modules in corresponding API examples are as follows:

@@ -55,11 +55,11 @@ The specific steps are as follows:
  - Dataset operation: The user uses the dataset object method `.shuffle` / `.filter` / `.skip` / `.split` /
  `.take` / ... to further shuffle, filter, skip, and obtain the maximum number of samples of datasets;
  - Dataset sample transform operation: The user can add data transform operations
- ( `vision transform <https://mindspore.cn/docs/en/r2.1/api_python/mindspore.\
+ ( `vision transform <https://mindspore.cn/docs/en/r2.2/api_python/mindspore.\
  dataset.transforms.html#module-mindspore.dataset.vision>`_ ,
- `NLP transform <https://mindspore.cn/docs/en/r2.1/api_python/mindspore.\
+ `NLP transform <https://mindspore.cn/docs/en/r2.2/api_python/mindspore.\
  dataset.transforms.html#module-mindspore.dataset.text>`_ ,
- `audio transform <https://mindspore.cn/docs/en/r2.1/api_python/mindspore.\
+ `audio transform <https://mindspore.cn/docs/en/r2.2/api_python/mindspore.\
  dataset.transforms.html#module-mindspore.dataset.audio>`_ ) to the map
  operation to perform transformations. During data preprocessing, multiple map operations can be defined to
  perform different transform operations to different fields. The data transform operation can also be a
@@ -73,7 +73,7 @@ Quick start of Dataset Pipeline
  -------------------------------

  For a quick start of using Dataset Pipeline, download `Load & Process Data With Dataset Pipeline
- <https://www.mindspore.cn/docs/en/r2.1/api_python/samples/dataset/dataset_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/dataset_gallery.html>`_
  to local and run in sequence.

  """
@@ -40,10 +40,10 @@ Descriptions of common data processing terms are as follows:
  The data transform operation can be executed in the data processing pipeline or in the eager mode:

  - Pipeline mode is generally used to process big datasets. Examples refer to
- `introduction to data processing pipeline <https://www.mindspore.cn/docs/en/r2.1/api_python/
+ `introduction to data processing pipeline <https://www.mindspore.cn/docs/en/r2.2/api_python/
  mindspore.dataset.html#introduction-to-data-processing-pipeline>`_ .
  - Eager mode is more like a function call to process data. Examples refer to
- `Lightweight Data Processing <https://www.mindspore.cn/tutorials/en/r2.1/advanced/dataset/eager.html>`_ .
+ `Lightweight Data Processing <https://www.mindspore.cn/tutorials/en/r2.2/advanced/dataset/eager.html>`_ .
  """
  from __future__ import absolute_import
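A minimal sketch of the eager mode mentioned above, assuming a single in-memory image rather than a dataset pipeline:

>>> import numpy as np
>>> import mindspore.dataset.vision as vision
>>> # Eager mode: call the transform directly on a NumPy image, outside any pipeline.
>>> img = np.random.randint(0, 255, size=(28, 28, 3), dtype=np.uint8)
>>> vision.Resize((32, 32))(img).shape
(32, 32, 3)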