mindspore 2.4.10__cp310-cp310-win_amd64.whl → 2.6.0rc1__cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (602)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +13 -6
  5. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  8. mindspore/_check_jit_forbidden_api.py +3 -0
  9. mindspore/_checkparam.py +3 -38
  10. mindspore/_deprecated/__init__.py +17 -0
  11. mindspore/_deprecated/jit.py +198 -0
  12. mindspore/_extends/builtin_operations.py +1 -1
  13. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  14. mindspore/_extends/parse/__init__.py +6 -7
  15. mindspore/_extends/parse/compile_config.py +83 -0
  16. mindspore/_extends/parse/deprecated/__init__.py +0 -0
  17. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +394 -0
  18. mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
  19. mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
  20. mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
  21. mindspore/_extends/parse/parser.py +46 -197
  22. mindspore/_extends/parse/resources.py +1 -5
  23. mindspore/_extends/parse/standard_method.py +217 -98
  24. mindspore/_extends/pijit/__init__.py +2 -2
  25. mindspore/_extends/pijit/pijit_func_white_list.py +17 -12
  26. mindspore/_extends/pijit/tensor_func_list.py +27 -0
  27. mindspore/_extends/utils.py +1 -1
  28. mindspore/amp.py +11 -5
  29. mindspore/atlprov.dll +0 -0
  30. mindspore/avcodec-59.dll +0 -0
  31. mindspore/avdevice-59.dll +0 -0
  32. mindspore/avfilter-8.dll +0 -0
  33. mindspore/avformat-59.dll +0 -0
  34. mindspore/avutil-57.dll +0 -0
  35. mindspore/boost/__init__.py +2 -2
  36. mindspore/boost/base.py +3 -7
  37. mindspore/boost/boost_cell_wrapper.py +138 -43
  38. mindspore/c1.dll +0 -0
  39. mindspore/c1xx.dll +0 -0
  40. mindspore/c2.dll +0 -0
  41. mindspore/common/__init__.py +6 -3
  42. mindspore/common/_grad_function.py +56 -0
  43. mindspore/common/_pijit_context.py +14 -5
  44. mindspore/common/_register_for_tensor.py +1 -2
  45. mindspore/common/_stub_tensor.py +30 -14
  46. mindspore/common/_tensor_cpp_method.py +17 -0
  47. mindspore/common/_tensor_docs.py +4760 -0
  48. mindspore/common/api.py +435 -371
  49. mindspore/common/auto_dynamic_shape.py +41 -44
  50. mindspore/common/dtype.py +39 -36
  51. mindspore/common/dump.py +9 -6
  52. mindspore/common/file_system.py +9 -1
  53. mindspore/common/generator.py +2 -0
  54. mindspore/common/hook_handle.py +6 -2
  55. mindspore/common/initializer.py +13 -10
  56. mindspore/common/jit_begin_end.py +94 -0
  57. mindspore/common/jit_config.py +6 -1
  58. mindspore/common/jit_context.py +76 -0
  59. mindspore/common/jit_trace.py +378 -0
  60. mindspore/common/lazy_inline.py +9 -3
  61. mindspore/common/mindir_util.py +10 -2
  62. mindspore/common/mutable.py +5 -4
  63. mindspore/common/parameter.py +135 -52
  64. mindspore/common/seed.py +2 -2
  65. mindspore/common/sparse_tensor.py +23 -17
  66. mindspore/common/tensor.py +951 -1992
  67. mindspore/communication/__init__.py +7 -5
  68. mindspore/communication/_comm_helper.py +52 -2
  69. mindspore/communication/comm_func.py +240 -181
  70. mindspore/communication/management.py +95 -26
  71. mindspore/context.py +314 -566
  72. mindspore/dataset/__init__.py +65 -37
  73. mindspore/dataset/audio/__init__.py +2 -8
  74. mindspore/dataset/audio/transforms.py +3 -17
  75. mindspore/dataset/callback/ds_callback.py +2 -1
  76. mindspore/dataset/core/config.py +87 -6
  77. mindspore/dataset/engine/cache_admin.py +3 -3
  78. mindspore/dataset/engine/cache_client.py +6 -5
  79. mindspore/dataset/engine/datasets.py +292 -267
  80. mindspore/dataset/engine/datasets_audio.py +22 -8
  81. mindspore/dataset/engine/datasets_standard_format.py +46 -27
  82. mindspore/dataset/engine/datasets_text.py +78 -48
  83. mindspore/dataset/engine/datasets_user_defined.py +182 -116
  84. mindspore/dataset/engine/datasets_vision.py +120 -44
  85. mindspore/dataset/engine/iterators.py +283 -63
  86. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
  87. mindspore/dataset/engine/obs/util.py +8 -0
  88. mindspore/dataset/engine/queue.py +40 -0
  89. mindspore/dataset/engine/samplers.py +289 -43
  90. mindspore/dataset/engine/serializer_deserializer.py +3 -2
  91. mindspore/dataset/engine/validators.py +53 -11
  92. mindspore/dataset/text/__init__.py +7 -6
  93. mindspore/dataset/text/transforms.py +6 -5
  94. mindspore/dataset/text/utils.py +3 -3
  95. mindspore/dataset/transforms/__init__.py +0 -9
  96. mindspore/dataset/transforms/py_transforms_util.py +17 -0
  97. mindspore/dataset/transforms/transforms.py +31 -14
  98. mindspore/dataset/utils/browse_dataset.py +1 -1
  99. mindspore/dataset/vision/__init__.py +2 -9
  100. mindspore/dataset/vision/transforms.py +202 -158
  101. mindspore/dataset/vision/utils.py +7 -5
  102. mindspore/dataset/vision/validators.py +1 -2
  103. mindspore/device_context/__init__.py +21 -0
  104. mindspore/device_context/ascend/__init__.py +25 -0
  105. mindspore/device_context/ascend/device.py +72 -0
  106. mindspore/device_context/ascend/op_debug.py +153 -0
  107. mindspore/device_context/ascend/op_precision.py +193 -0
  108. mindspore/device_context/ascend/op_tuning.py +123 -0
  109. mindspore/{ops_generate/gen_constants.py → device_context/cpu/__init__.py} +6 -17
  110. mindspore/device_context/cpu/device.py +62 -0
  111. mindspore/device_context/cpu/op_tuning.py +43 -0
  112. mindspore/device_context/gpu/__init__.py +21 -0
  113. mindspore/device_context/gpu/device.py +70 -0
  114. mindspore/device_context/gpu/op_precision.py +67 -0
  115. mindspore/device_context/gpu/op_tuning.py +175 -0
  116. mindspore/device_manager.py +170 -0
  117. mindspore/dnnl.dll +0 -0
  118. mindspore/dpcmi.dll +0 -0
  119. mindspore/experimental/es/embedding_service.py +35 -27
  120. mindspore/experimental/llm_boost/__init__.py +1 -0
  121. mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
  122. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
  123. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
  124. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  125. mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
  126. mindspore/experimental/llm_boost/register.py +1 -0
  127. mindspore/experimental/map_parameter.py +4 -4
  128. mindspore/experimental/optim/adadelta.py +6 -6
  129. mindspore/experimental/optim/adagrad.py +4 -4
  130. mindspore/experimental/optim/adam.py +7 -0
  131. mindspore/experimental/optim/adamax.py +4 -4
  132. mindspore/experimental/optim/adamw.py +4 -0
  133. mindspore/experimental/optim/asgd.py +1 -1
  134. mindspore/experimental/optim/lr_scheduler.py +73 -46
  135. mindspore/experimental/optim/radam.py +34 -31
  136. mindspore/experimental/optim/rprop.py +1 -1
  137. mindspore/experimental/optim/sgd.py +1 -1
  138. mindspore/hal/contiguous_tensors_handle.py +6 -10
  139. mindspore/hal/device.py +55 -53
  140. mindspore/hal/event.py +52 -52
  141. mindspore/hal/memory.py +157 -117
  142. mindspore/hal/stream.py +150 -109
  143. mindspore/include/api/context.h +0 -1
  144. mindspore/include/dataset/constants.h +7 -4
  145. mindspore/include/dataset/execute.h +2 -2
  146. mindspore/jpeg62.dll +0 -0
  147. mindspore/log.py +50 -0
  148. mindspore/mindrecord/__init__.py +21 -8
  149. mindspore/mindrecord/config.py +17 -316
  150. mindspore/mindrecord/filereader.py +1 -9
  151. mindspore/mindrecord/filewriter.py +5 -15
  152. mindspore/mindrecord/mindpage.py +1 -9
  153. mindspore/mindspore_backend_common.dll +0 -0
  154. mindspore/mindspore_backend_manager.dll +0 -0
  155. mindspore/mindspore_common.dll +0 -0
  156. mindspore/mindspore_core.dll +0 -0
  157. mindspore/mindspore_dump.dll +0 -0
  158. mindspore/mindspore_frontend.dll +0 -0
  159. mindspore/mindspore_glog.dll +0 -0
  160. mindspore/mindspore_memory_pool.dll +0 -0
  161. mindspore/mindspore_ms_backend.dll +0 -0
  162. mindspore/mindspore_ops.dll +0 -0
  163. mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
  164. mindspore/mindspore_ops_kernel_common.dll +0 -0
  165. mindspore/mindspore_profiler.dll +0 -0
  166. mindspore/mindspore_pyboost.dll +0 -0
  167. mindspore/mindspore_pynative.dll +0 -0
  168. mindspore/mindspore_res_manager.dll +0 -0
  169. mindspore/mindspore_runtime_pipeline.dll +0 -0
  170. mindspore/mint/__init__.py +796 -759
  171. mindspore/mint/distributed/__init__.py +70 -4
  172. mindspore/mint/distributed/distributed.py +2679 -44
  173. mindspore/mint/linalg/__init__.py +8 -0
  174. mindspore/mint/nn/__init__.py +743 -22
  175. mindspore/mint/nn/functional.py +716 -23
  176. mindspore/mint/nn/layer/__init__.py +21 -4
  177. mindspore/mint/nn/layer/_functions.py +334 -0
  178. mindspore/mint/nn/layer/activation.py +276 -1
  179. mindspore/mint/nn/layer/basic.py +123 -0
  180. mindspore/mint/nn/layer/conv.py +921 -0
  181. mindspore/mint/nn/layer/normalization.py +223 -28
  182. mindspore/mint/nn/layer/padding.py +797 -0
  183. mindspore/mint/nn/layer/pooling.py +235 -0
  184. mindspore/mint/optim/__init__.py +3 -1
  185. mindspore/mint/optim/adam.py +223 -0
  186. mindspore/mint/optim/adamw.py +26 -19
  187. mindspore/mint/optim/sgd.py +171 -0
  188. mindspore/mint/special/__init__.py +2 -1
  189. mindspore/msobj140.dll +0 -0
  190. mindspore/mspdb140.dll +0 -0
  191. mindspore/mspdbcore.dll +0 -0
  192. mindspore/mspdbst.dll +0 -0
  193. mindspore/mspft140.dll +0 -0
  194. mindspore/msvcdis140.dll +0 -0
  195. mindspore/msvcp140_1.dll +0 -0
  196. mindspore/msvcp140_2.dll +0 -0
  197. mindspore/msvcp140_atomic_wait.dll +0 -0
  198. mindspore/msvcp140_codecvt_ids.dll +0 -0
  199. mindspore/multiprocessing/__init__.py +5 -0
  200. mindspore/nn/__init__.py +4 -1
  201. mindspore/nn/cell.py +1370 -189
  202. mindspore/nn/dynamic_lr.py +2 -1
  203. mindspore/nn/layer/activation.py +29 -27
  204. mindspore/nn/layer/basic.py +51 -35
  205. mindspore/nn/layer/channel_shuffle.py +3 -3
  206. mindspore/nn/layer/container.py +1 -1
  207. mindspore/nn/layer/conv.py +22 -17
  208. mindspore/nn/layer/embedding.py +12 -11
  209. mindspore/nn/layer/normalization.py +56 -49
  210. mindspore/nn/layer/padding.py +4 -3
  211. mindspore/nn/layer/pooling.py +120 -42
  212. mindspore/nn/layer/rnn_cells.py +1 -1
  213. mindspore/nn/layer/rnns.py +2 -1
  214. mindspore/nn/layer/timedistributed.py +5 -5
  215. mindspore/nn/layer/transformer.py +59 -36
  216. mindspore/nn/learning_rate_schedule.py +8 -4
  217. mindspore/nn/loss/loss.py +58 -55
  218. mindspore/nn/optim/ada_grad.py +7 -5
  219. mindspore/nn/optim/adadelta.py +11 -9
  220. mindspore/nn/optim/adafactor.py +1 -1
  221. mindspore/nn/optim/adam.py +17 -13
  222. mindspore/nn/optim/adamax.py +8 -7
  223. mindspore/nn/optim/adasum.py +5 -5
  224. mindspore/nn/optim/asgd.py +1 -1
  225. mindspore/nn/optim/ftrl.py +11 -9
  226. mindspore/nn/optim/lamb.py +1 -1
  227. mindspore/nn/optim/lars.py +1 -4
  228. mindspore/nn/optim/lazyadam.py +12 -10
  229. mindspore/nn/optim/momentum.py +7 -6
  230. mindspore/nn/optim/optimizer.py +3 -3
  231. mindspore/nn/optim/proximal_ada_grad.py +12 -10
  232. mindspore/nn/optim/rmsprop.py +13 -12
  233. mindspore/nn/optim/rprop.py +11 -9
  234. mindspore/nn/optim/sgd.py +9 -6
  235. mindspore/nn/optim/tft_wrapper.py +5 -2
  236. mindspore/nn/optim/thor.py +2 -1
  237. mindspore/nn/probability/bijector/bijector.py +17 -11
  238. mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
  239. mindspore/nn/probability/bijector/invert.py +2 -2
  240. mindspore/nn/probability/bijector/scalar_affine.py +3 -3
  241. mindspore/nn/probability/bijector/softplus.py +3 -2
  242. mindspore/nn/probability/distribution/beta.py +3 -3
  243. mindspore/nn/probability/distribution/categorical.py +1 -1
  244. mindspore/nn/probability/distribution/cauchy.py +4 -2
  245. mindspore/nn/probability/distribution/exponential.py +6 -7
  246. mindspore/nn/probability/distribution/gamma.py +2 -2
  247. mindspore/nn/probability/distribution/gumbel.py +2 -2
  248. mindspore/nn/probability/distribution/half_normal.py +5 -3
  249. mindspore/nn/probability/distribution/logistic.py +5 -3
  250. mindspore/nn/probability/distribution/poisson.py +1 -1
  251. mindspore/nn/probability/distribution/uniform.py +5 -3
  252. mindspore/nn/reinforcement/_tensors_queue.py +1 -1
  253. mindspore/nn/reinforcement/tensor_array.py +1 -1
  254. mindspore/nn/utils/init.py +13 -11
  255. mindspore/nn/wrap/__init__.py +6 -6
  256. mindspore/nn/wrap/cell_wrapper.py +181 -122
  257. mindspore/nn/wrap/grad_reducer.py +45 -36
  258. mindspore/nn/wrap/loss_scale.py +6 -7
  259. mindspore/numpy/array_creations.py +63 -65
  260. mindspore/numpy/array_ops.py +149 -144
  261. mindspore/numpy/logic_ops.py +41 -42
  262. mindspore/numpy/math_ops.py +365 -363
  263. mindspore/numpy/utils.py +17 -18
  264. mindspore/numpy/utils_const.py +5 -6
  265. mindspore/opencv_core452.dll +0 -0
  266. mindspore/opencv_imgcodecs452.dll +0 -0
  267. mindspore/opencv_imgproc452.dll +0 -0
  268. mindspore/ops/__init__.py +5 -3
  269. mindspore/ops/_grad_experimental/grad_comm_ops.py +112 -16
  270. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -2
  271. mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
  272. mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
  273. mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
  274. mindspore/ops/_op_impl/cpu/__init__.py +1 -0
  275. mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
  276. mindspore/ops/_register_for_op.py +0 -11
  277. mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
  278. mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -65
  279. mindspore/ops/_vmap/vmap_array_ops.py +27 -25
  280. mindspore/ops/_vmap/vmap_base.py +0 -2
  281. mindspore/ops/_vmap/vmap_grad_nn_ops.py +21 -14
  282. mindspore/ops/_vmap/vmap_math_ops.py +15 -16
  283. mindspore/ops/_vmap/vmap_nn_ops.py +29 -42
  284. mindspore/ops/auto_generate/__init__.py +4 -3
  285. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +236 -46
  286. mindspore/ops/auto_generate/gen_extend_func.py +764 -124
  287. mindspore/ops/auto_generate/gen_ops_def.py +4018 -2264
  288. mindspore/ops/auto_generate/gen_ops_prim.py +15463 -5037
  289. mindspore/ops/auto_generate/pyboost_inner_prim.py +221 -87
  290. mindspore/ops/composite/__init__.py +2 -1
  291. mindspore/ops/composite/base.py +20 -25
  292. mindspore/ops/composite/math_ops.py +6 -16
  293. mindspore/ops/composite/multitype_ops/__init__.py +5 -2
  294. mindspore/ops/composite/multitype_ops/_compile_utils.py +228 -30
  295. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
  296. mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
  297. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
  298. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
  299. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
  300. mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
  301. mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
  302. mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
  303. mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
  304. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
  305. mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
  306. mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
  307. mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
  308. mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
  309. mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
  310. mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
  311. mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
  312. mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
  313. mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
  314. mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
  315. mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
  316. mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
  317. mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
  318. mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
  319. mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
  320. mindspore/ops/composite/multitype_ops/pow_impl.py +2 -30
  321. mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
  322. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  323. mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
  324. mindspore/ops/function/__init__.py +40 -2
  325. mindspore/ops/function/_add_attr_func.py +58 -0
  326. mindspore/ops/function/array_func.py +2089 -2403
  327. mindspore/ops/function/clip_func.py +80 -23
  328. mindspore/ops/function/debug_func.py +57 -57
  329. mindspore/ops/function/grad/__init__.py +1 -0
  330. mindspore/ops/function/grad/grad_func.py +104 -71
  331. mindspore/ops/function/image_func.py +2 -2
  332. mindspore/ops/function/linalg_func.py +47 -78
  333. mindspore/ops/function/math_func.py +4501 -3802
  334. mindspore/ops/function/nn_func.py +1726 -620
  335. mindspore/ops/function/other_func.py +159 -1
  336. mindspore/ops/function/parameter_func.py +18 -84
  337. mindspore/ops/function/random_func.py +440 -387
  338. mindspore/ops/function/reshard_func.py +4 -70
  339. mindspore/ops/function/sparse_func.py +3 -3
  340. mindspore/ops/function/sparse_unary_func.py +6 -6
  341. mindspore/ops/function/spectral_func.py +25 -58
  342. mindspore/ops/function/vmap_func.py +24 -17
  343. mindspore/ops/functional.py +22 -7
  344. mindspore/ops/functional_overload.py +1440 -0
  345. mindspore/ops/op_info_register.py +32 -244
  346. mindspore/ops/operations/__init__.py +13 -7
  347. mindspore/ops/operations/_custom_ops_utils.py +247 -0
  348. mindspore/ops/operations/_embedding_cache_ops.py +4 -4
  349. mindspore/ops/operations/_grad_ops.py +2 -43
  350. mindspore/ops/operations/_infer_ops.py +2 -1
  351. mindspore/ops/operations/_inner_ops.py +43 -84
  352. mindspore/ops/operations/_ms_kernel.py +4 -10
  353. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  354. mindspore/ops/operations/_scalar_ops.py +3 -2
  355. mindspore/ops/operations/_sequence_ops.py +1 -1
  356. mindspore/ops/operations/_tensor_array.py +1 -1
  357. mindspore/ops/operations/array_ops.py +81 -324
  358. mindspore/ops/operations/comm_ops.py +154 -108
  359. mindspore/ops/operations/custom_ops.py +232 -78
  360. mindspore/ops/operations/debug_ops.py +153 -59
  361. mindspore/ops/operations/inner_ops.py +7 -5
  362. mindspore/ops/operations/linalg_ops.py +1 -57
  363. mindspore/ops/operations/manually_defined/_inner.py +1 -1
  364. mindspore/ops/operations/manually_defined/ops_def.py +928 -180
  365. mindspore/ops/operations/math_ops.py +32 -234
  366. mindspore/ops/operations/nn_ops.py +210 -498
  367. mindspore/ops/operations/other_ops.py +62 -9
  368. mindspore/ops/operations/random_ops.py +13 -7
  369. mindspore/ops/operations/reshard_ops.py +1 -1
  370. mindspore/ops/operations/sparse_ops.py +2 -2
  371. mindspore/ops/primitive.py +66 -53
  372. mindspore/ops/tensor_method.py +1888 -0
  373. mindspore/ops_generate/__init__.py +0 -5
  374. mindspore/ops_generate/aclnn/__init__.py +0 -0
  375. mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +135 -0
  376. mindspore/ops_generate/aclnn/gen_aclnn_implement.py +257 -0
  377. mindspore/ops_generate/api/__init__.py +0 -0
  378. mindspore/ops_generate/api/add_tensor_docs_generator.py +56 -0
  379. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +105 -0
  380. mindspore/ops_generate/api/functional_map_cpp_generator.py +504 -0
  381. mindspore/ops_generate/api/functional_overload_py_generator.py +112 -0
  382. mindspore/ops_generate/api/functions_cc_generator.py +237 -0
  383. mindspore/ops_generate/api/gen_api.py +103 -0
  384. mindspore/ops_generate/api/op_api_proto.py +235 -0
  385. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +461 -0
  386. mindspore/ops_generate/common/__init__.py +0 -0
  387. mindspore/ops_generate/common/base_generator.py +11 -0
  388. mindspore/ops_generate/common/gen_constants.py +91 -0
  389. mindspore/ops_generate/common/gen_utils.py +348 -0
  390. mindspore/ops_generate/common/op_proto.py +473 -0
  391. mindspore/ops_generate/common/template.py +523 -0
  392. mindspore/ops_generate/gen_ops.py +22 -1069
  393. mindspore/ops_generate/op_def/__init__.py +0 -0
  394. mindspore/ops_generate/op_def/gen_op_def.py +90 -0
  395. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +191 -0
  396. mindspore/ops_generate/op_def/ops_def_cc_generator.py +299 -0
  397. mindspore/ops_generate/op_def/ops_def_h_generator.py +74 -0
  398. mindspore/ops_generate/op_def/ops_name_h_generator.py +83 -0
  399. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
  400. mindspore/ops_generate/op_def_py/__init__.py +0 -0
  401. mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
  402. mindspore/ops_generate/op_def_py/op_def_py_generator.py +132 -0
  403. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +489 -0
  404. mindspore/ops_generate/pyboost/__init__.py +0 -0
  405. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +139 -0
  406. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +93 -0
  407. mindspore/ops_generate/pyboost/gen_pyboost_func.py +175 -0
  408. mindspore/ops_generate/pyboost/op_template_parser.py +517 -0
  409. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +407 -0
  410. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +100 -0
  411. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +148 -0
  412. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +155 -0
  413. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +132 -0
  414. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +272 -0
  415. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +938 -0
  416. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +357 -0
  417. mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +179 -36
  418. mindspore/ops_generate/resources/__init__.py +0 -0
  419. mindspore/ops_generate/resources/resource_list.py +30 -0
  420. mindspore/ops_generate/resources/resource_loader.py +36 -0
  421. mindspore/ops_generate/resources/resource_manager.py +64 -0
  422. mindspore/ops_generate/resources/yaml_loader.py +88 -0
  423. mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
  424. mindspore/parallel/__init__.py +7 -3
  425. mindspore/parallel/_auto_parallel_context.py +152 -34
  426. mindspore/parallel/_cell_wrapper.py +130 -15
  427. mindspore/parallel/_parallel_serialization.py +107 -5
  428. mindspore/parallel/_ps_context.py +1 -1
  429. mindspore/parallel/_recovery_context.py +7 -2
  430. mindspore/parallel/_tensor.py +142 -18
  431. mindspore/parallel/_utils.py +199 -23
  432. mindspore/parallel/algo_parameter_config.py +4 -4
  433. mindspore/parallel/auto_parallel.py +732 -0
  434. mindspore/parallel/checkpoint_convert.py +159 -0
  435. mindspore/parallel/checkpoint_transform.py +698 -35
  436. mindspore/parallel/cluster/process_entity/_api.py +276 -50
  437. mindspore/parallel/cluster/process_entity/_utils.py +41 -6
  438. mindspore/parallel/cluster/run.py +21 -4
  439. mindspore/parallel/function/__init__.py +24 -0
  440. mindspore/parallel/function/reshard_func.py +259 -0
  441. mindspore/parallel/nn/__init__.py +25 -0
  442. mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
  443. mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
  444. mindspore/parallel/parameter_broadcast.py +25 -14
  445. mindspore/parallel/shard.py +137 -58
  446. mindspore/parallel/transform_safetensors.py +363 -305
  447. mindspore/pgodb140.dll +0 -0
  448. mindspore/pgort140.dll +0 -0
  449. mindspore/profiler/__init__.py +22 -5
  450. mindspore/profiler/analysis/__init__.py +0 -0
  451. mindspore/profiler/analysis/parser/__init__.py +0 -0
  452. mindspore/profiler/analysis/parser/ascend_cann_parser.py +170 -0
  453. mindspore/profiler/analysis/parser/base_parser.py +158 -0
  454. mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
  455. mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
  456. mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
  457. mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
  458. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +264 -0
  459. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
  460. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +106 -0
  461. mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
  462. mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
  463. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
  464. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
  465. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
  466. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
  467. mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
  468. mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
  469. mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
  470. mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
  471. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +415 -0
  472. mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
  473. mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
  474. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
  475. mindspore/profiler/analysis/task_manager.py +131 -0
  476. mindspore/profiler/analysis/time_converter.py +84 -0
  477. mindspore/profiler/analysis/viewer/__init__.py +0 -0
  478. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +372 -0
  479. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
  480. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +250 -0
  481. mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +320 -0
  482. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +327 -0
  483. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +376 -0
  484. mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
  485. mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
  486. mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +96 -0
  487. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
  488. mindspore/profiler/analysis/work_flow.py +73 -0
  489. mindspore/profiler/common/ascend_msprof_exporter.py +139 -0
  490. mindspore/profiler/common/command_executor.py +90 -0
  491. mindspore/profiler/common/constant.py +186 -3
  492. mindspore/profiler/common/file_manager.py +208 -0
  493. mindspore/profiler/common/log.py +130 -0
  494. mindspore/profiler/common/msprof_cmd_tool.py +221 -0
  495. mindspore/profiler/common/path_manager.py +395 -0
  496. mindspore/profiler/common/process_bar.py +168 -0
  497. mindspore/profiler/common/process_pool.py +9 -3
  498. mindspore/profiler/common/profiler_context.py +500 -0
  499. mindspore/profiler/common/profiler_info.py +304 -0
  500. mindspore/profiler/common/profiler_meta_data.py +74 -0
  501. mindspore/profiler/common/profiler_output_path.py +284 -0
  502. mindspore/profiler/common/profiler_parameters.py +251 -0
  503. mindspore/profiler/common/profiler_path_manager.py +179 -0
  504. mindspore/profiler/common/record_function.py +76 -0
  505. mindspore/profiler/common/tlv_decoder.py +76 -0
  506. mindspore/profiler/common/util.py +75 -2
  507. mindspore/profiler/dynamic_profiler.py +341 -75
  508. mindspore/profiler/envprofiler.py +163 -0
  509. mindspore/profiler/experimental_config.py +197 -0
  510. mindspore/profiler/mstx.py +242 -0
  511. mindspore/profiler/platform/__init__.py +21 -0
  512. mindspore/profiler/platform/base_profiler.py +40 -0
  513. mindspore/profiler/platform/cpu_profiler.py +124 -0
  514. mindspore/profiler/platform/gpu_profiler.py +74 -0
  515. mindspore/profiler/platform/npu_profiler.py +335 -0
  516. mindspore/profiler/profiler.py +1073 -90
  517. mindspore/profiler/profiler_action_controller.py +187 -0
  518. mindspore/profiler/profiler_interface.py +118 -0
  519. mindspore/profiler/schedule.py +243 -0
  520. mindspore/rewrite/api/node.py +15 -13
  521. mindspore/rewrite/api/symbol_tree.py +2 -3
  522. mindspore/run_check/_check_version.py +27 -20
  523. mindspore/run_check/run_check.py +1 -1
  524. mindspore/runtime/__init__.py +37 -0
  525. mindspore/runtime/device.py +27 -0
  526. mindspore/runtime/event.py +209 -0
  527. mindspore/runtime/executor.py +177 -0
  528. mindspore/runtime/memory.py +409 -0
  529. mindspore/runtime/stream.py +460 -0
  530. mindspore/runtime/thread_bind_core.py +401 -0
  531. mindspore/safeguard/rewrite_obfuscation.py +12 -9
  532. mindspore/swresample-4.dll +0 -0
  533. mindspore/swscale-6.dll +0 -0
  534. mindspore/tbbmalloc.dll +0 -0
  535. mindspore/tinyxml2.dll +0 -0
  536. mindspore/train/__init__.py +8 -8
  537. mindspore/train/_utils.py +88 -25
  538. mindspore/train/amp.py +9 -5
  539. mindspore/train/callback/__init__.py +2 -2
  540. mindspore/train/callback/_callback.py +2 -16
  541. mindspore/train/callback/_checkpoint.py +53 -55
  542. mindspore/train/callback/_cluster_monitor.py +14 -18
  543. mindspore/train/callback/_early_stop.py +1 -1
  544. mindspore/train/callback/_flops_collector.py +103 -68
  545. mindspore/train/callback/_history.py +8 -5
  546. mindspore/train/callback/_lambda_callback.py +2 -2
  547. mindspore/train/callback/_landscape.py +0 -3
  548. mindspore/train/callback/_loss_monitor.py +2 -1
  549. mindspore/train/callback/_on_request_exit.py +6 -5
  550. mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
  551. mindspore/train/callback/_summary_collector.py +52 -19
  552. mindspore/train/callback/_time_monitor.py +2 -1
  553. mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +204 -107
  554. mindspore/train/data_sink.py +25 -2
  555. mindspore/train/dataset_helper.py +15 -16
  556. mindspore/train/loss_scale_manager.py +8 -7
  557. mindspore/train/metrics/accuracy.py +3 -3
  558. mindspore/train/metrics/confusion_matrix.py +9 -9
  559. mindspore/train/metrics/error.py +3 -3
  560. mindspore/train/metrics/hausdorff_distance.py +4 -4
  561. mindspore/train/metrics/mean_surface_distance.py +3 -3
  562. mindspore/train/metrics/metric.py +0 -12
  563. mindspore/train/metrics/occlusion_sensitivity.py +4 -2
  564. mindspore/train/metrics/precision.py +11 -10
  565. mindspore/train/metrics/recall.py +9 -9
  566. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  567. mindspore/train/mind_ir_pb2.py +174 -46
  568. mindspore/train/model.py +184 -113
  569. mindspore/train/serialization.py +622 -978
  570. mindspore/train/summary/_summary_adapter.py +2 -2
  571. mindspore/train/summary/summary_record.py +2 -3
  572. mindspore/train/train_thor/model_thor.py +1 -1
  573. mindspore/turbojpeg.dll +0 -0
  574. mindspore/utils/__init__.py +6 -3
  575. mindspore/utils/dryrun.py +140 -0
  576. mindspore/utils/hooks.py +81 -0
  577. mindspore/utils/runtime_execution_order_check.py +550 -0
  578. mindspore/utils/utils.py +138 -4
  579. mindspore/vcmeta.dll +0 -0
  580. mindspore/vcruntime140.dll +0 -0
  581. mindspore/vcruntime140_1.dll +0 -0
  582. mindspore/version.py +1 -1
  583. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +3 -3
  584. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +587 -418
  585. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +1 -1
  586. mindspore/_install_custom.py +0 -43
  587. mindspore/common/_register_for_adapter.py +0 -74
  588. mindspore/common/_tensor_overload.py +0 -139
  589. mindspore/mindspore_np_dtype.dll +0 -0
  590. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
  591. mindspore/ops/auto_generate/gen_arg_handler.py +0 -197
  592. mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
  593. mindspore/ops_generate/gen_aclnn_implement.py +0 -263
  594. mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
  595. mindspore/ops_generate/gen_pyboost_func.py +0 -1052
  596. mindspore/ops_generate/gen_utils.py +0 -209
  597. mindspore/ops_generate/op_proto.py +0 -145
  598. mindspore/ops_generate/template.py +0 -261
  599. mindspore/profiler/envprofiling.py +0 -254
  600. mindspore/profiler/profiling.py +0 -1926
  601. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
  602. {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0
mindspore/dataset/engine/datasets_text.py

@@ -25,7 +25,8 @@ After declaring the dataset object, you can further apply dataset operations
  """
  import mindspore._c_dataengine as cde

- from .datasets import TextBaseDataset, SourceDataset, MappableDataset, Shuffle
+ from .samplers import Shuffle
+ from .datasets import TextBaseDataset, SourceDataset, MappableDataset
  from .validators import check_imdb_dataset, check_iwslt2016_dataset, check_iwslt2017_dataset, \
  check_penn_treebank_dataset, check_ag_news_dataset, check_amazon_review_dataset, check_udpos_dataset, \
  check_wiki_text_dataset, check_conll2000_dataset, check_cluedataset, \
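The hunk above only moves the `Shuffle` enum from the internal `.datasets` module to `.samplers`; the public name `mindspore.dataset.Shuffle`, which the docstrings below keep referencing, is still the documented entry point. A minimal sketch of the user-facing import (illustrative, not part of the diff):

import mindspore.dataset as ds

# The enum stays reachable at the package level regardless of which
# internal module defines it.
shuffle_mode = ds.Shuffle.GLOBAL   # or ds.Shuffle.FILES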
@@ -61,16 +62,17 @@ class AGNewsDataset(SourceDataset, TextBaseDataset):
  Set the mode of data shuffling by passing in enumeration variables:

  - ``Shuffle.GLOBAL``: Shuffle both the files and samples.
-
  - ``Shuffle.FILES``: Shuffle files only.

  num_shards (int, optional): Number of shards that the dataset will be divided into.
  Default: ``None``. When this argument is specified, `num_samples` reflects the
  max sample number of per shard.
+ Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+ parallel/data_parallel.html#loading-datasets>`_ .
  shard_id (int, optional): The shard ID within `num_shards` . This
  argument can only be specified when `num_shards` is also specified. Default: ``None``.
  cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
- `Single-Node Data Cache <https://www.mindspore.cn/docs/en/master/model_train/dataset/cache.html>`_ .
+ `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
  Default: ``None``, which means no cache is used.

  Raises:
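The `num_shards`, `shard_id`, `shuffle` and `cache` descriptions above recur for every text dataset in this file: `num_shards`/`shard_id` split the data across data parallel workers, and the `Shuffle` enum selects file-level or global shuffling. A minimal sharded-loading sketch with AGNewsDataset, assuming a local copy of the dataset and two workers (the path is a placeholder):

import mindspore.dataset as ds

ag_news_dir = "/path/to/ag_news_dataset_file"  # placeholder path

# Worker 0 of 2 reads only its own shard; Shuffle.GLOBAL shuffles both
# the input files and the samples inside them.
dataset = ds.AGNewsDataset(dataset_dir=ag_news_dir,
                           usage="train",
                           shuffle=ds.Shuffle.GLOBAL,
                           num_shards=2,
                           shard_id=0)

print(dataset.get_dataset_size())  # number of samples visible to this shard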
@@ -88,9 +90,9 @@ class AGNewsDataset(SourceDataset, TextBaseDataset):
  >>> ag_news_dataset_dir = "/path/to/ag_news_dataset_file"
  >>> dataset = ds.AGNewsDataset(dataset_dir=ag_news_dataset_dir, usage='all')

- About AGNews dataset:
+ About AG News dataset:

- AG is a collection of over 1 million news articles. The news articles were collected
+ AG News is a collection of over 1 million news articles. The news articles were collected
  by ComeToMyHead from over 2,000 news sources in over 1 year of activity. ComeToMyHead
  is an academic news search engine that has been in operation since July 2004.
  The dataset is provided by academics for research purposes such as data mining
@@ -170,15 +172,16 @@ class AmazonReviewDataset(SourceDataset, TextBaseDataset):
  Set the mode of data shuffling by passing in enumeration variables:

  - ``Shuffle.GLOBAL`` : Shuffle both the files and samples.
-
  - ``Shuffle.FILES`` : Shuffle files only.

  num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
  When this argument is specified, `num_samples` reflects the max sample number of per shard.
+ Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+ parallel/data_parallel.html#loading-datasets>`_ .
  shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
  argument can only be specified when `num_shards` is also specified.
  cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
- `Single-Node Data Cache <https://www.mindspore.cn/docs/en/master/model_train/dataset/cache.html>`_ .
+ `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
  Default: ``None`` , which means no cache is used.

  Raises:
@@ -268,15 +271,16 @@ class CLUEDataset(SourceDataset, TextBaseDataset):
  There are three levels of shuffling, desired shuffle enum defined by :class:`mindspore.dataset.Shuffle` .

  - ``Shuffle.GLOBAL`` : Shuffle both the files and samples, same as setting `shuffle` to ``True``.
-
  - ``Shuffle.FILES`` : Shuffle files only.

  num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
  When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
+ Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+ parallel/data_parallel.html#loading-datasets>`_ .
  shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
  argument can only be specified when `num_shards` is also specified.
  cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
- `Single-Node Data Cache <https://www.mindspore.cn/docs/en/master/model_train/dataset/cache.html>`_ .
+ `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
  Default: ``None`` , which means no cache is used.

  The generated dataset with different task setting has different output columns:
@@ -515,14 +519,15 @@ class CoNLL2000Dataset(SourceDataset, TextBaseDataset):

  num_shards (int, optional): Number of shards that the dataset will be divided into.
  When this argument is specified, `num_samples` reflects the max sample number of per shard.
- Default: ``None`` .
+ Default: ``None`` . Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+ parallel/data_parallel.html#loading-datasets>`_ .
  shard_id (int, optional): The shard ID within `num_shards` . This
  argument can only be specified when `num_shards` is also specified. Default: ``None`` .
  num_parallel_workers (int, optional): Number of worker threads to read the data.
  Default: ``None`` , will use global default workers(8), it can be set
  by :func:`mindspore.dataset.config.set_num_parallel_workers` .
  cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
- `Single-Node Data Cache <https://www.mindspore.cn/docs/en/master/model_train/dataset/cache.html>`_ .
+ `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
  Default: ``None`` , which means no cache is used.

  Raises:
@@ -613,15 +618,16 @@ class DBpediaDataset(SourceDataset, TextBaseDataset):
  Set the mode of data shuffling by passing in enumeration variables:

  - ``Shuffle.GLOBAL`` : Shuffle both the files and samples.
-
  - ``Shuffle.FILES`` : Shuffle files only.

  num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
  When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
+ Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+ parallel/data_parallel.html#loading-datasets>`_ .
  shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
  argument can only be specified when `num_shards` is also specified.
  cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
- `Single-Node Data Cache <https://www.mindspore.cn/docs/en/master/model_train/dataset/cache.html>`_ .
+ `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
  Default: ``None`` , which means no cache is used.

  Raises:
@@ -712,15 +718,16 @@ class EnWik9Dataset(SourceDataset, TextBaseDataset):
  Set the mode of data shuffling by passing in enumeration variables:

  - ``Shuffle.GLOBAL`` : Shuffle both the files and samples.
-
  - ``Shuffle.FILES`` : Shuffle files only.

  num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
  When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
+ Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+ parallel/data_parallel.html#loading-datasets>`_ .
  shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
  argument can only be specified when `num_shards` is also specified.
  cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
- `Single-Node Data Cache <https://www.mindspore.cn/docs/en/master/model_train/dataset/cache.html>`_ .
+ `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
  Default: ``None`` , which means no cache is used.

  Raises:
@@ -805,10 +812,12 @@ class IMDBDataset(MappableDataset, TextBaseDataset):
  num_shards (int, optional): Number of shards that the dataset will be divided
  into. Default: ``None`` . When this argument is specified, `num_samples` reflects
  the maximum sample number of per shard.
+ Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+ parallel/data_parallel.html#loading-datasets>`_ .
  shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
  argument can only be specified when `num_shards` is also specified.
  cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
- `Single-Node Data Cache <https://www.mindspore.cn/docs/en/master/model_train/dataset/cache.html>`_ .
+ `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
  Default: ``None`` , which means no cache is used.

  Raises:
@@ -936,18 +945,19 @@ class IWSLT2016Dataset(SourceDataset, TextBaseDataset):
  Set the mode of data shuffling by passing in enumeration variables:

  - ``Shuffle.GLOBAL`` : Shuffle both the files and samples.
-
  - ``Shuffle.FILES`` : Shuffle files only.

  num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
  When this argument is specified, `num_samples` reflects the max sample number of per shard.
+ Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+ parallel/data_parallel.html#loading-datasets>`_ .
  shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
  argument can only be specified when `num_shards` is also specified.
  num_parallel_workers (int, optional): Number of worker threads to read the data.
  Default: ``None`` , will use global default workers(8), it can be set
  by :func:`mindspore.dataset.config.set_num_parallel_workers` .
  cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
- `Single-Node Data Cache <https://www.mindspore.cn/docs/en/master/model_train/dataset/cache.html>`_ .
+ `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
  Default: ``None`` , which means no cache is used.

  Raises:
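Loaders such as CoNLL2000, IWSLT2016/2017, SogouNews, UDPOS and YelpReview also expose `num_parallel_workers`, which falls back to the global default of 8 managed through `mindspore.dataset.config`. A short sketch of the two ways to control it (the directory is a placeholder):

import mindspore.dataset as ds

# Change the global default used when num_parallel_workers is left unset.
ds.config.set_num_parallel_workers(4)

# A per-dataset value still overrides the global setting.
dataset = ds.IWSLT2016Dataset(dataset_dir="/path/to/iwslt2016_dataset_dir",
                              usage="train",
                              num_parallel_workers=2)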
@@ -1068,18 +1078,19 @@ class IWSLT2017Dataset(SourceDataset, TextBaseDataset):
  Set the mode of data shuffling by passing in enumeration variables:

  - ``Shuffle.GLOBAL`` : Shuffle both the files and samples.
-
  - ``Shuffle.FILES`` : Shuffle files only.

  num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
  When this argument is specified, `num_samples` reflects the max sample number of per shard.
+ Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+ parallel/data_parallel.html#loading-datasets>`_ .
  shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
  argument can only be specified when `num_shards` is also specified.
  num_parallel_workers (int, optional): Number of worker threads to read the data.
  Default: ``None`` , will use global default workers(8), it can be set
  by :func:`mindspore.dataset.config.set_num_parallel_workers` .
  cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
- `Single-Node Data Cache <https://www.mindspore.cn/docs/en/master/model_train/dataset/cache.html>`_ .
+ `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
  Default: ``None`` , which means no cache is used.

  Raises:
@@ -1181,10 +1192,12 @@ class Multi30kDataset(SourceDataset, TextBaseDataset):
  num_shards (int, optional): Number of shards that the dataset will be divided
  into. Default: ``None`` . When this argument is specified, `num_samples` reflects
  the max sample number of per shard.
+ Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+ parallel/data_parallel.html#loading-datasets>`_ .
  shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
  argument can only be specified when `num_shards` is also specified.
  cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
- `Single-Node Data Cache <https://www.mindspore.cn/docs/en/master/model_train/dataset/cache.html>`_ .
+ `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
  Default: ``None`` , which means no cache is used.

  Raises:
@@ -1210,7 +1223,7 @@ class Multi30kDataset(SourceDataset, TextBaseDataset):

  Multi30K is a multilingual dataset that features approximately 31,000 standardized images
  described in multiple languages. The images are sourced from Flickr and each image comes
- with sentence descripitions in both English and German, as well as descriptions in other
+ with sentence descriptions in both English and German, as well as descriptions in other
  languages. Multi30k is used primarily for training and testing in tasks such as image
  captioning, machine translation, and visual question answering.

@@ -1285,15 +1298,16 @@ class PennTreebankDataset(SourceDataset, TextBaseDataset):
  Set the mode of data shuffling by passing in enumeration variables:

  - ``Shuffle.GLOBAL`` : Shuffle both the files and samples.
-
  - ``Shuffle.FILES`` : Shuffle files only.

  num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
  When this argument is specified, `num_samples` reflects the max sample number of per shard.
+ Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+ parallel/data_parallel.html#loading-datasets>`_ .
  shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
  argument can only be specified when `num_shards` is also specified.
  cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
- `Single-Node Data Cache <https://www.mindspore.cn/docs/en/master/model_train/dataset/cache.html>`_ .
+ `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
  Default: ``None`` , which means no cache is used.

  Raises:
@@ -1314,7 +1328,7 @@ class PennTreebankDataset(SourceDataset, TextBaseDataset):
  About PennTreebank dataset:

  Penn Treebank (PTB) dataset, is widely used in machine learning for NLP (Natural Language Processing)
- research. Word-level PTB does not contain capital letters, numbers, and punctuations, and the vocabulary
+ research. Word-level PTB does not contain capital letters, numbers, and punctuation, and the vocabulary
  is capped at 10k unique words, which is relatively small in comparison to most modern datasets which
  can result in a larger number of out of vocabulary tokens.

@@ -1385,17 +1399,19 @@ class SogouNewsDataset(SourceDataset, TextBaseDataset):
  Set the mode of data shuffling by passing in enumeration variables:

  - ``Shuffle.GLOBAL`` : Shuffle both the files and samples, same as setting shuffle to True.
-
  - ``Shuffle.FILES`` : Shuffle files only.
+
  num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
  When this argument is specified, `num_samples` reflects the max sample number of per shard.
+ Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+ parallel/data_parallel.html#loading-datasets>`_ .
  shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
  argument can only be specified when `num_shards` is also specified.
  num_parallel_workers (int, optional): Number of worker threads to read the data.
  Default: ``None`` , will use global default workers(8), it can be set
  by :func:`mindspore.dataset.config.set_num_parallel_workers` .
  cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
- `Single-Node Data Cache <https://www.mindspore.cn/docs/en/master/model_train/dataset/cache.html>`_ .
+ `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
  Default: ``None`` , which means no cache is used.

  Raises:
@@ -1413,11 +1429,11 @@ class SogouNewsDataset(SourceDataset, TextBaseDataset):
  >>> sogou_news_dataset_dir = "/path/to/sogou_news_dataset_dir"
  >>> dataset = ds.SogouNewsDataset(dataset_dir=sogou_news_dataset_dir, usage='all')

- About SogouNews Dataset:
+ About Sogou News Dataset:

- SogouNews dataset includes 3 columns, corresponding to class index (1 to 5), title and content. The title and
+ Sogou News dataset includes 3 columns, corresponding to class index (1 to 5), title and content. The title and
  content are escaped using double quotes ("), and any internal double quote is escaped by 2 double quotes ("").
- New lines are escaped by a backslash followed with an "n" character, that is "\n".
+ New lines are escaped by a backslash followed with an "n" character, that is "\\n".

  You can unzip the dataset files into the following structure and read by MindSpore's API:

@@ -1490,10 +1506,12 @@ class SQuADDataset(SourceDataset, TextBaseDataset):

  num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
  When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
+ Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+ parallel/data_parallel.html#loading-datasets>`_ .
  shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
  argument can only be specified when `num_shards` is also specified.
  cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
- `Single-Node Data Cache <https://www.mindspore.cn/docs/en/master/model_train/dataset/cache.html>`_ .
+ `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
  Default: ``None`` , which means no cache is used.

  Raises:
@@ -1604,14 +1622,17 @@ class SST2Dataset(SourceDataset, TextBaseDataset):
  If `shuffle` is ``True`` , the behavior is the same as setting shuffle to be Shuffle.GLOBAL
  Set the mode of data shuffling by passing in enumeration variables:

- - ``Shuffle.GLOBAL`` : Shuffle the samples.
+ - ``Shuffle.GLOBAL`` : Shuffle both the files and samples.
+ - ``Shuffle.FILES`` : Shuffle files only.

  num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
  When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
+ Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+ parallel/data_parallel.html#loading-datasets>`_ .
  shard_id (int, optional): The shard ID within `num_shards`. This argument can only be specified when
  `num_shards` is also specified. Default: ``None`` .
  cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
- `Single-Node Data Cache <https://www.mindspore.cn/docs/en/master/model_train/dataset/cache.html>`_ .
+ `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
  Default: ``None`` , which means no cache is used.

  Raises:
@@ -1706,15 +1727,16 @@ class TextFileDataset(SourceDataset, TextBaseDataset):
  There are three levels of shuffling, desired shuffle enum defined by :class:`mindspore.dataset.Shuffle` .

  - ``Shuffle.GLOBAL`` : Shuffle both the files and samples, same as setting shuffle to True.
-
  - ``Shuffle.FILES`` : Shuffle files only.

  num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
  When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
+ Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+ parallel/data_parallel.html#loading-datasets>`_ .
  shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
  argument can only be specified when `num_shards` is also specified.
  cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
- `Single-Node Data Cache <https://www.mindspore.cn/docs/en/master/model_train/dataset/cache.html>`_ .
+ `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
  Default: ``None`` , which means no cache is used.

  Raises:
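Every `cache` argument in these signatures takes a `DatasetCache` backed by the single-node cache service described at the updated link. A hedged sketch with TextFileDataset, assuming the cache server is already running and a session has been generated with the `cache_admin` tool (the session id and path below are placeholders):

import mindspore.dataset as ds

# session_id comes from the cache_admin tool (`cache_admin -g`); size=0 means no memory cap.
some_cache = ds.DatasetCache(session_id=1456416665, size=0)

# Rows read from the text files are kept by the cache server, so repeated
# epochs and pipelines sharing the session skip the file I/O.
dataset = ds.TextFileDataset(dataset_files="/path/to/text_file_dataset_file",
                             shuffle=ds.Shuffle.FILES,
                             cache=some_cache)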
@@ -1770,18 +1792,19 @@ class UDPOSDataset(SourceDataset, TextBaseDataset):
  Set the mode of data shuffling by passing in enumeration variables:

  - ``Shuffle.GLOBAL`` : Shuffle both the files and samples.
-
  - ``Shuffle.FILES`` : Shuffle files only.

  num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
  When this argument is specified, `num_samples` reflects the max sample number of per shard.
+ Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+ parallel/data_parallel.html#loading-datasets>`_ .
  shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
  argument can only be specified when `num_shards` is also specified.
  num_parallel_workers (int, optional): Number of worker threads to read the data.
  Default: ``None`` , will use global default workers(8), it can be set
  by :func:`mindspore.dataset.config.set_num_parallel_workers` .
  cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
- `Single-Node Data Cache <https://www.mindspore.cn/docs/en/master/model_train/dataset/cache.html>`_ .
+ `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
  Default: ``None`` , which means no cache is used.

  Raises:
@@ -1856,15 +1879,16 @@ class WikiTextDataset(SourceDataset, TextBaseDataset):
  Set the mode of data shuffling by passing in enumeration variables:

  - ``Shuffle.GLOBAL`` : Shuffle both the files and samples.
-
  - ``Shuffle.FILES`` : Shuffle files only.

  num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
  When this argument is specified, `num_samples` reflects the max sample number of per shard.
+ Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+ parallel/data_parallel.html#loading-datasets>`_ .
  shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
  argument can only be specified when `num_shards` is also specified.
  cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
- `Single-Node Data Cache <https://www.mindspore.cn/docs/en/master/model_train/dataset/cache.html>`_ .
+ `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
  Default: ``None`` , which means no cache is used.

  Raises:
@@ -1953,15 +1977,16 @@ class YahooAnswersDataset(SourceDataset, TextBaseDataset):
  Set the mode of data shuffling by passing in enumeration variables:

  - ``Shuffle.GLOBAL`` : Shuffle both the files and samples.
-
  - ``Shuffle.FILES`` : Shuffle files only.

  num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
  When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
+ Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+ parallel/data_parallel.html#loading-datasets>`_ .
  shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
  argument can only be specified when `num_shards` is also specified.
  cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
- `Single-Node Data Cache <https://www.mindspore.cn/docs/en/master/model_train/dataset/cache.html>`_ .
+ `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
  Default: ``None`` , which means no cache is used.

  Raises:
@@ -2039,11 +2064,14 @@ class YelpReviewDataset(SourceDataset, TextBaseDataset):
  Args:
  dataset_dir (str): Path to the root directory that contains the dataset.
  usage (str, optional): Usage of this dataset, can be ``'train'`` , ``'test'`` or ``'all'`` .
- For Polarity, ``'train'`` will read from 560,000 train samples,
- ``'test'`` will read from 38,000 test samples,
- ``'all'`` will read from all 598,000 samples.
- For Full, ``'train'`` will read from 650,000 train samples, ``'test'`` will read from 50,000 test samples,
- ``'all'`` will read from all 700,000 samples. Default: ``None`` , all samples.
+ Default: ``None`` , all samples.
+
+ - For Polarity, ``'train'`` will read from 560,000 train samples,
+ ``'test'`` will read from 38,000 test samples,
+ ``'all'`` will read from all 598,000 samples.
+ - For Full, ``'train'`` will read from 650,000 train samples, ``'test'`` will read from 50,000 test samples,
+ ``'all'`` will read from all 700,000 samples.
+
  num_samples (int, optional): Number of samples (rows) to read. Default: ``None`` , reads all samples.
  shuffle (Union[bool, Shuffle], optional): Perform reshuffling of the data every epoch.
  Bool type and Shuffle enum are both supported to pass in.
@@ -2054,17 +2082,19 @@ class YelpReviewDataset(SourceDataset, TextBaseDataset):
  Set the mode of data shuffling by passing in enumeration variables:

  - ``Shuffle.GLOBAL`` : Shuffle both the files and samples.
-
  - ``Shuffle.FILES`` : Shuffle files only.
+
  num_shards (int, optional): Number of shards that the dataset will be divided into. Default: ``None`` .
  When this argument is specified, `num_samples` reflects the max sample number of per shard.
+ Used in `data parallel training <https://www.mindspore.cn/tutorials/en/master/
+ parallel/data_parallel.html#loading-datasets>`_ .
  shard_id (int, optional): The shard ID within `num_shards` . Default: ``None`` . This
  argument can only be specified when `num_shards` is also specified.
  num_parallel_workers (int, optional): Number of worker threads to read the data.
  Default: ``None`` , will use global default workers(8), it can be set
  by :func:`mindspore.dataset.config.set_num_parallel_workers` .
  cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing. More details:
- `Single-Node Data Cache <https://www.mindspore.cn/docs/en/master/model_train/dataset/cache.html>`_ .
+ `Single-Node Data Cache <https://www.mindspore.cn/tutorials/en/master/dataset/cache.html>`_ .
  Default: ``None`` , which means no cache is used.

  Raises: