mindspore-2.6.0rc1-cp310-cp310-win_amd64.whl → mindspore-2.7.0rc1-cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.
Files changed (407)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +1 -1
  5. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +40 -9
  9. mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
  10. mindspore/_extends/optimize/cell_utils.py +96 -0
  11. mindspore/_extends/parse/__init__.py +2 -2
  12. mindspore/_extends/parse/compile_config.py +44 -22
  13. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -1
  14. mindspore/_extends/parse/parser.py +37 -62
  15. mindspore/_extends/parse/resources.py +39 -0
  16. mindspore/_extends/parse/standard_method.py +43 -13
  17. mindspore/_extends/parse/trope.py +8 -1
  18. mindspore/_extends/pijit/__init__.py +1 -2
  19. mindspore/amp.py +4 -4
  20. mindspore/atlprov.dll +0 -0
  21. mindspore/avcodec-59.dll +0 -0
  22. mindspore/avdevice-59.dll +0 -0
  23. mindspore/avfilter-8.dll +0 -0
  24. mindspore/avformat-59.dll +0 -0
  25. mindspore/avutil-57.dll +0 -0
  26. mindspore/boost/adasum.py +1 -1
  27. mindspore/boost/boost_cell_wrapper.py +4 -4
  28. mindspore/c1.dll +0 -0
  29. mindspore/c1xx.dll +0 -0
  30. mindspore/c2.dll +0 -0
  31. mindspore/common/__init__.py +27 -2
  32. mindspore/common/_grad_function.py +2 -1
  33. mindspore/common/_pijit_context.py +28 -7
  34. mindspore/common/_stub_tensor.py +1 -209
  35. mindspore/common/_tensor_cpp_method.py +1 -1
  36. mindspore/common/_tensor_docs.py +77 -16
  37. mindspore/common/api.py +238 -113
  38. mindspore/common/dtype.py +21 -11
  39. mindspore/common/dump.py +10 -15
  40. mindspore/common/generator.py +5 -3
  41. mindspore/common/hook_handle.py +11 -2
  42. mindspore/common/jit_config.py +1 -1
  43. mindspore/common/jit_trace.py +84 -105
  44. mindspore/common/parameter.py +26 -12
  45. mindspore/common/recompute.py +3 -3
  46. mindspore/common/sparse_tensor.py +0 -3
  47. mindspore/common/symbol.py +0 -1
  48. mindspore/common/tensor.py +81 -81
  49. mindspore/communication/_comm_helper.py +46 -4
  50. mindspore/communication/management.py +79 -7
  51. mindspore/context.py +58 -40
  52. mindspore/dataset/core/config.py +3 -3
  53. mindspore/dataset/engine/datasets.py +20 -7
  54. mindspore/dataset/engine/datasets_user_defined.py +33 -3
  55. mindspore/dataset/engine/iterators.py +2 -2
  56. mindspore/dataset/engine/obs/config_loader.py +2 -2
  57. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
  58. mindspore/dataset/transforms/py_transforms.py +7 -3
  59. mindspore/dataset/transforms/transforms.py +7 -3
  60. mindspore/dataset/vision/validators.py +1 -0
  61. mindspore/device_context/ascend/device.py +1 -1
  62. mindspore/device_context/gpu/__init__.py +2 -2
  63. mindspore/device_context/gpu/device.py +1 -1
  64. mindspore/device_context/gpu/op_precision.py +4 -2
  65. mindspore/device_context/gpu/op_tuning.py +6 -3
  66. mindspore/device_manager.py +16 -9
  67. mindspore/dnnl.dll +0 -0
  68. mindspore/dpcmi.dll +0 -0
  69. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -7
  70. mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
  71. mindspore/experimental/optim/adadelta.py +13 -20
  72. mindspore/experimental/optim/adagrad.py +15 -22
  73. mindspore/experimental/optim/adam.py +17 -24
  74. mindspore/experimental/optim/adamax.py +14 -22
  75. mindspore/experimental/optim/adamw.py +28 -34
  76. mindspore/experimental/optim/asgd.py +15 -25
  77. mindspore/experimental/optim/lr_scheduler.py +27 -45
  78. mindspore/experimental/optim/nadam.py +14 -24
  79. mindspore/experimental/optim/optimizer.py +13 -23
  80. mindspore/experimental/optim/radam.py +18 -24
  81. mindspore/experimental/optim/rmsprop.py +14 -25
  82. mindspore/experimental/optim/rprop.py +15 -26
  83. mindspore/experimental/optim/sgd.py +9 -19
  84. mindspore/hal/__init__.py +4 -4
  85. mindspore/hal/contiguous_tensors_handle.py +2 -2
  86. mindspore/hal/memory.py +27 -7
  87. mindspore/include/api/cell.h +37 -1
  88. mindspore/include/api/delegate.h +10 -0
  89. mindspore/include/api/model.h +3 -0
  90. mindspore/include/api/types.h +2 -2
  91. mindspore/include/c_api/model_c.h +0 -58
  92. mindspore/include/c_api/tensor_c.h +0 -26
  93. mindspore/include/dataset/vision_ascend.h +1 -1
  94. mindspore/jpeg62.dll +0 -0
  95. mindspore/mindrecord/tools/cifar10.py +60 -11
  96. mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
  97. mindspore/mindspore_backend_common.dll +0 -0
  98. mindspore/mindspore_backend_manager.dll +0 -0
  99. mindspore/mindspore_common.dll +0 -0
  100. mindspore/mindspore_core.dll +0 -0
  101. mindspore/mindspore_cpu_res_manager.dll +0 -0
  102. mindspore/mindspore_dump.dll +0 -0
  103. mindspore/mindspore_frontend.dll +0 -0
  104. mindspore/mindspore_glog.dll +0 -0
  105. mindspore/mindspore_memory_pool.dll +0 -0
  106. mindspore/mindspore_ms_backend.dll +0 -0
  107. mindspore/mindspore_ops.dll +0 -0
  108. mindspore/mindspore_ops_host.dll +0 -0
  109. mindspore/mindspore_ops_kernel_common.dll +0 -0
  110. mindspore/mindspore_profiler.dll +0 -0
  111. mindspore/mindspore_pyboost.dll +0 -0
  112. mindspore/mindspore_pynative.dll +0 -0
  113. mindspore/mindspore_res_manager.dll +0 -0
  114. mindspore/mindspore_runtime_pipeline.dll +0 -0
  115. mindspore/mint/__init__.py +6 -46
  116. mindspore/mint/distributed/__init__.py +1 -0
  117. mindspore/mint/distributed/distributed.py +212 -9
  118. mindspore/mint/nn/__init__.py +1 -1
  119. mindspore/mint/nn/functional.py +53 -6
  120. mindspore/mint/nn/layer/_functions.py +164 -294
  121. mindspore/mint/nn/layer/activation.py +8 -6
  122. mindspore/mint/nn/layer/conv.py +137 -101
  123. mindspore/mint/nn/layer/normalization.py +8 -22
  124. mindspore/mint/optim/adam.py +19 -18
  125. mindspore/mint/optim/adamw.py +14 -8
  126. mindspore/mint/optim/sgd.py +5 -5
  127. mindspore/msobj140.dll +0 -0
  128. mindspore/mspdb140.dll +0 -0
  129. mindspore/mspdbcore.dll +0 -0
  130. mindspore/mspdbst.dll +0 -0
  131. mindspore/mspft140.dll +0 -0
  132. mindspore/msvcdis140.dll +0 -0
  133. mindspore/msvcp140_1.dll +0 -0
  134. mindspore/msvcp140_2.dll +0 -0
  135. mindspore/msvcp140_atomic_wait.dll +0 -0
  136. mindspore/msvcp140_codecvt_ids.dll +0 -0
  137. mindspore/nn/cell.py +328 -502
  138. mindspore/nn/grad/cell_grad.py +11 -12
  139. mindspore/nn/layer/activation.py +32 -34
  140. mindspore/nn/layer/basic.py +67 -64
  141. mindspore/nn/layer/channel_shuffle.py +4 -4
  142. mindspore/nn/layer/combined.py +4 -2
  143. mindspore/nn/layer/conv.py +117 -110
  144. mindspore/nn/layer/dense.py +9 -7
  145. mindspore/nn/layer/embedding.py +50 -52
  146. mindspore/nn/layer/image.py +37 -39
  147. mindspore/nn/layer/math.py +111 -112
  148. mindspore/nn/layer/normalization.py +56 -44
  149. mindspore/nn/layer/pooling.py +58 -63
  150. mindspore/nn/layer/rnn_cells.py +33 -33
  151. mindspore/nn/layer/rnns.py +56 -56
  152. mindspore/nn/layer/thor_layer.py +74 -73
  153. mindspore/nn/layer/transformer.py +11 -1
  154. mindspore/nn/learning_rate_schedule.py +20 -20
  155. mindspore/nn/loss/loss.py +79 -81
  156. mindspore/nn/optim/adam.py +3 -3
  157. mindspore/nn/optim/adasum.py +2 -2
  158. mindspore/nn/optim/asgd.py +2 -0
  159. mindspore/nn/optim/optimizer.py +1 -1
  160. mindspore/nn/optim/thor.py +2 -2
  161. mindspore/nn/probability/distribution/exponential.py +2 -1
  162. mindspore/nn/probability/distribution/poisson.py +2 -1
  163. mindspore/nn/sparse/sparse.py +3 -3
  164. mindspore/nn/wrap/cell_wrapper.py +34 -37
  165. mindspore/nn/wrap/grad_reducer.py +37 -37
  166. mindspore/nn/wrap/loss_scale.py +72 -74
  167. mindspore/numpy/array_creations.py +5 -5
  168. mindspore/numpy/fft.py +1 -1
  169. mindspore/numpy/math_ops.py +5 -5
  170. mindspore/opencv_core452.dll +0 -0
  171. mindspore/opencv_imgcodecs452.dll +0 -0
  172. mindspore/opencv_imgproc452.dll +0 -0
  173. mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
  174. mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
  175. mindspore/ops/_vmap/vmap_array_ops.py +31 -13
  176. mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
  177. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +42 -11
  178. mindspore/ops/auto_generate/gen_extend_func.py +23 -141
  179. mindspore/ops/auto_generate/gen_ops_def.py +727 -321
  180. mindspore/ops/auto_generate/gen_ops_prim.py +1721 -984
  181. mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
  182. mindspore/ops/composite/__init__.py +10 -0
  183. mindspore/ops/composite/base.py +8 -4
  184. mindspore/ops/composite/multitype_ops/__init__.py +12 -1
  185. mindspore/ops/composite/multitype_ops/_compile_utils.py +133 -109
  186. mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
  187. mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
  188. mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
  189. mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
  190. mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
  191. mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
  192. mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
  193. mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
  194. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
  195. mindspore/ops/function/__init__.py +3 -1
  196. mindspore/ops/function/_add_attr_func.py +11 -6
  197. mindspore/ops/function/array_func.py +9 -96
  198. mindspore/ops/function/debug_func.py +4 -3
  199. mindspore/ops/function/grad/grad_func.py +1 -1
  200. mindspore/ops/function/math_func.py +33 -540
  201. mindspore/ops/function/nn_func.py +28 -74
  202. mindspore/ops/function/other_func.py +4 -1
  203. mindspore/ops/function/random_func.py +44 -5
  204. mindspore/ops/function/vmap_func.py +2 -1
  205. mindspore/ops/functional.py +2 -3
  206. mindspore/ops/functional_overload.py +571 -6
  207. mindspore/ops/op_info_register.py +21 -0
  208. mindspore/ops/operations/__init__.py +16 -11
  209. mindspore/ops/operations/_custom_ops_utils.py +689 -34
  210. mindspore/ops/operations/_inner_ops.py +3 -6
  211. mindspore/ops/operations/_sequence_ops.py +1 -1
  212. mindspore/ops/operations/array_ops.py +2 -2
  213. mindspore/ops/operations/comm_ops.py +185 -26
  214. mindspore/ops/operations/custom_ops.py +294 -174
  215. mindspore/ops/operations/debug_ops.py +59 -4
  216. mindspore/ops/operations/image_ops.py +13 -13
  217. mindspore/ops/operations/manually_defined/ops_def.py +15 -16
  218. mindspore/ops/operations/math_ops.py +3 -4
  219. mindspore/ops/operations/nn_ops.py +7 -39
  220. mindspore/ops/primitive.py +6 -10
  221. mindspore/ops/tensor_method.py +47 -8
  222. mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
  223. mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
  224. mindspore/ops_generate/api/functions_cc_generator.py +58 -10
  225. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
  226. mindspore/ops_generate/common/base_generator.py +14 -0
  227. mindspore/ops_generate/common/gen_constants.py +8 -3
  228. mindspore/ops_generate/common/gen_utils.py +0 -19
  229. mindspore/ops_generate/common/op_proto.py +11 -4
  230. mindspore/ops_generate/common/template.py +88 -11
  231. mindspore/ops_generate/gen_ops.py +1 -1
  232. mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
  233. mindspore/ops_generate/op_def/ops_def_cc_generator.py +0 -3
  234. mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
  235. mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
  236. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
  237. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
  238. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
  239. mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -0
  240. mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
  241. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
  242. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
  243. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
  244. mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
  245. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
  246. mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
  247. mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
  248. mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
  249. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
  250. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
  251. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
  252. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
  253. mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
  254. mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
  255. mindspore/parallel/_auto_parallel_context.py +11 -8
  256. mindspore/parallel/_cell_wrapper.py +113 -45
  257. mindspore/parallel/_parallel_serialization.py +1 -1
  258. mindspore/parallel/_ps_context.py +4 -6
  259. mindspore/parallel/_tensor.py +167 -12
  260. mindspore/parallel/_transformer/moe.py +1 -1
  261. mindspore/parallel/_transformer/transformer.py +13 -8
  262. mindspore/parallel/auto_parallel.py +14 -7
  263. mindspore/parallel/checkpoint_convert.py +3 -3
  264. mindspore/parallel/checkpoint_transform.py +11 -7
  265. mindspore/parallel/cluster/process_entity/_api.py +84 -48
  266. mindspore/parallel/cluster/process_entity/_utils.py +95 -7
  267. mindspore/parallel/cluster/run.py +43 -4
  268. mindspore/parallel/function/__init__.py +8 -1
  269. mindspore/parallel/function/reshard_func.py +6 -7
  270. mindspore/parallel/nn/__init__.py +15 -2
  271. mindspore/parallel/nn/parallel_cell_wrapper.py +9 -10
  272. mindspore/parallel/nn/parallel_grad_reducer.py +7 -6
  273. mindspore/parallel/shard.py +3 -4
  274. mindspore/parallel/transform_safetensors.py +463 -174
  275. mindspore/pgodb140.dll +0 -0
  276. mindspore/pgort140.dll +0 -0
  277. mindspore/profiler/__init__.py +2 -1
  278. mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
  279. mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
  280. mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +12 -6
  281. mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
  282. mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
  283. mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
  284. mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
  285. mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
  286. mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
  287. mindspore/profiler/analysis/task_manager.py +1 -1
  288. mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
  289. mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
  290. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +42 -22
  291. mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
  292. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
  293. mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
  294. mindspore/profiler/common/constant.py +16 -0
  295. mindspore/profiler/common/profiler_context.py +25 -27
  296. mindspore/profiler/common/profiler_info.py +0 -16
  297. mindspore/profiler/common/profiler_op_analyse.py +235 -0
  298. mindspore/profiler/common/profiler_output_path.py +23 -8
  299. mindspore/profiler/common/profiler_parameters.py +128 -35
  300. mindspore/profiler/dynamic_profile/__init__.py +0 -0
  301. mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
  302. mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
  303. mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
  304. mindspore/profiler/dynamic_profiler.py +305 -314
  305. mindspore/profiler/envprofiler.py +12 -7
  306. mindspore/profiler/experimental_config.py +96 -6
  307. mindspore/profiler/mstx.py +33 -12
  308. mindspore/profiler/platform/__init__.py +2 -3
  309. mindspore/profiler/platform/npu_profiler.py +29 -19
  310. mindspore/profiler/profiler.py +35 -19
  311. mindspore/profiler/profiler_action_controller.py +64 -76
  312. mindspore/profiler/schedule.py +10 -4
  313. mindspore/rewrite/common/config.py +1 -0
  314. mindspore/rewrite/common/namer.py +1 -0
  315. mindspore/rewrite/common/namespace.py +1 -0
  316. mindspore/rewrite/node/node.py +31 -11
  317. mindspore/rewrite/parsers/assign_parser.py +1 -1
  318. mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
  319. mindspore/run_check/_check_version.py +7 -10
  320. mindspore/runtime/__init__.py +5 -5
  321. mindspore/runtime/event.py +10 -4
  322. mindspore/runtime/executor.py +60 -45
  323. mindspore/runtime/memory.py +30 -32
  324. mindspore/runtime/thread_bind_core.py +298 -164
  325. mindspore/safeguard/rewrite_obfuscation.py +12 -13
  326. mindspore/swresample-4.dll +0 -0
  327. mindspore/swscale-6.dll +0 -0
  328. mindspore/tbbmalloc.dll +0 -0
  329. mindspore/tinyxml2.dll +0 -0
  330. mindspore/train/_utils.py +14 -4
  331. mindspore/train/amp.py +43 -20
  332. mindspore/train/callback/__init__.py +5 -5
  333. mindspore/train/callback/_checkpoint.py +3 -6
  334. mindspore/train/callback/_flops_collector.py +1 -1
  335. mindspore/train/callback/_landscape.py +0 -1
  336. mindspore/train/callback/_train_fault_tolerance.py +97 -16
  337. mindspore/train/data_sink.py +11 -2
  338. mindspore/train/dataset_helper.py +9 -0
  339. mindspore/train/model.py +135 -55
  340. mindspore/train/serialization.py +133 -111
  341. mindspore/train/summary/summary_record.py +13 -2
  342. mindspore/turbojpeg.dll +0 -0
  343. mindspore/utils/__init__.py +3 -2
  344. mindspore/utils/dryrun.py +0 -6
  345. mindspore/utils/runtime_execution_order_check.py +163 -77
  346. mindspore/utils/sdc_detect.py +68 -0
  347. mindspore/utils/utils.py +6 -9
  348. mindspore/vcmeta.dll +0 -0
  349. mindspore/vcruntime140.dll +0 -0
  350. mindspore/vcruntime140_1.dll +0 -0
  351. mindspore/version.py +1 -1
  352. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +5 -4
  353. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +356 -394
  354. mindspore/_deprecated/jit.py +0 -198
  355. mindspore/experimental/es/__init__.py +0 -22
  356. mindspore/experimental/es/embedding_service.py +0 -891
  357. mindspore/experimental/es/embedding_service_layer.py +0 -581
  358. mindspore/profiler/parser/__init__.py +0 -14
  359. mindspore/profiler/parser/aicpu_data_parser.py +0 -272
  360. mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
  361. mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
  362. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
  363. mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
  364. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
  365. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
  366. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
  367. mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
  368. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
  369. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
  370. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
  371. mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
  372. mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
  373. mindspore/profiler/parser/ascend_flops_generator.py +0 -116
  374. mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
  375. mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
  376. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  377. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  378. mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
  379. mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
  380. mindspore/profiler/parser/ascend_op_generator.py +0 -334
  381. mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
  382. mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
  383. mindspore/profiler/parser/base_timeline_generator.py +0 -483
  384. mindspore/profiler/parser/container.py +0 -229
  385. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
  386. mindspore/profiler/parser/flops_parser.py +0 -531
  387. mindspore/profiler/parser/framework_enum.py +0 -111
  388. mindspore/profiler/parser/framework_parser.py +0 -464
  389. mindspore/profiler/parser/framework_struct.py +0 -61
  390. mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
  391. mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
  392. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
  393. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
  394. mindspore/profiler/parser/hccl_parser.py +0 -573
  395. mindspore/profiler/parser/hwts_log_parser.py +0 -122
  396. mindspore/profiler/parser/integrator.py +0 -526
  397. mindspore/profiler/parser/memory_usage_parser.py +0 -277
  398. mindspore/profiler/parser/minddata_analyzer.py +0 -800
  399. mindspore/profiler/parser/minddata_parser.py +0 -186
  400. mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
  401. mindspore/profiler/parser/op_intermediate_parser.py +0 -149
  402. mindspore/profiler/parser/optime_parser.py +0 -250
  403. mindspore/profiler/parser/profiler_info.py +0 -213
  404. mindspore/profiler/parser/step_trace_parser.py +0 -666
  405. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
  406. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
  407. {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
mindspore/context.py CHANGED
@@ -207,7 +207,7 @@ class _Context:
         parallel_mode = _get_auto_parallel_context("parallel_mode")
         if parallel_mode not in (ParallelMode.DATA_PARALLEL, ParallelMode.STAND_ALONE, ParallelMode.AUTO_PARALLEL):
             raise ValueError(f"Got {parallel_mode}, when the user enabled SEMI_AUTO_PARALELL, "
-                             f"pynative mode dose not support, you should set either "
+                             f"pynative mode does not support, you should set either "
                              f"context.set_auto_parallel_context(parallel_mode='data_parallel'), "
                              f"context.set_auto_parallel_context(parallel_mode='stand_alone') "
                              f"or context.set_auto_parallel_context(parallel_mode='auto_parallel').")
@@ -290,6 +290,8 @@ class _Context:
             raise ValueError(f"For 'context.set_context', the argument 'deterministic' must be one of "
                              f"{deterministic_options}, but got {deterministic}.")
 
+        logger.info(f"Set deterministic setting to '{deterministic}'.")
+
         # Must wait for all async created groups to be initialized so that
         # deterministic feature could be consistent between all processes.
         CollectiveManager.get_instance().wait_all_comm_init()
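For reference, deterministic mode is driven through the public `set_context` entry point, which is where the new INFO log fires. A minimal sketch of toggling it (standard API, values ``'ON'``/``'OFF'``):

```python
import mindspore as ms

# With this change, flipping the switch also logs
# "Set deterministic setting to 'ON'", which makes reproducibility
# settings auditable across distributed processes.
ms.set_context(deterministic="ON")
```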
@@ -838,6 +840,25 @@ class _Context:
             raise TypeError(f"For step num, the value type should be int, but got {type(step)}, {step}")
         self.set_param(ms_ctx_param.last_triggered_step, step)
 
+    @staticmethod
+    def _check_speedup_config_str_value(key, value):
+        """check speedup config str value"""
+        if key in ["pp_1f1b_overlap", "recompute_comm_overlap", "recomputation_communication_overlap"]:
+            if isinstance(value, str):
+                values = value.split(",")
+                for v in values:
+                    if v not in ['AlltoAll', 'AlltoAllV', 'MorphAllGather', 'AllReduce',
+                                 'AllGather', 'ReduceScatter', 'MorphReduceScatter', '']:
+                        raise ValueError("{} 's value should be subset of ['AlltoAll', 'AlltoAllV',"
+                                         " 'MorphAllGather', 'AllGather', 'ReduceScatter',"
+                                         " 'MorphReduceScatter', 'AllReduce'].".format(key))
+                return value
+            if value:
+                return "AlltoAll,AlltoAllV,AllGather,ReduceScatter,AllReduce"
+            return ""
+
+        return value
+
     def _set_speedup_config_path(self, speedup_config_path):
         """"Check and set speedup config for auto parallel."""
         if speedup_config_path is None or speedup_config_path == "":
@@ -848,8 +869,8 @@ class _Context:
                              f"{speedup_config_real_path} does not exist, please check whether the "
                              f"'parallel_speed_up_json_path' is correct.")
         try:
-            valid_option = {"recompute_comm_overlap": (ms_ctx_param.recompute_comm_overlap, bool),
-                            "recomputation_communication_overlap": (ms_ctx_param.recompute_comm_overlap, bool),
+            valid_option = {"recompute_comm_overlap": (ms_ctx_param.recompute_comm_overlap, str),
+                            "recomputation_communication_overlap": (ms_ctx_param.recompute_comm_overlap, str),
                             "matmul_grad_comm_overlap": (ms_ctx_param.matmul_grad_comm_overlap, bool),
                             "grad_matmul_communication_overlap": (ms_ctx_param.matmul_grad_comm_overlap, bool),
                             "enable_task_opt": (ms_ctx_param.enable_task_opt, bool),
@@ -908,17 +929,12 @@ class _Context:
                                     f"Please use '{name_replace.get(key)}' instead.")
                 set_func, valid_type = valid_option.get(key)
                 if not isinstance(value, valid_type):
-                    raise TypeError(f"The value type of {key} must be {valid_type}, "
-                                    f"but got value is {value} and type is {type(value)}.")
-                if key == "pp_1f1b_overlap":
-                    values = value.split(",")
-                    for v in values:
-                        if v not in ['AlltoAll', 'AlltoAllV', 'MorphAllGather',
-                                     'AllGather', 'ReduceScatter', 'MorphReduceScatter']:
-                            raise ValueError("{} 's value should be subset of ['AlltoAll', 'AlltoAllV',"
-                                             " 'MorphAllGather', 'AllGather', 'ReduceScatter',"
-                                             " 'MorphReduceScatter'].".format(key))
-                self.set_param(set_func, value)
+                    if not ((key == "recompute_comm_overlap" or key == "recomputation_communication_overlap")
+                            and isinstance(value, bool)):
+                        raise TypeError(f"The value type of {key} must be {valid_type}, "
+                                        f"but got value is {value} and type is {type(value)}.")
+                value_new = self._check_speedup_config_str_value(key, value)
+                self.set_param(set_func, value_new)
         except (TypeError, ValueError) as exo:
             raise ValueError(str(exo) + "\nFor 'context.set_context', "
                              "open or load the 'speedup_config_path' file {} "
@@ -960,11 +976,11 @@ def _context():
                 comm_fusion=dict, strategy_ckpt_config=dict, force_fp32_communication=bool)
 def set_auto_parallel_context(**kwargs):
     r"""
-    Set auto parallel context, only data parallel supported on CPU.
+    Set auto parallel context, this api will be deprecated and removed in future versions, please use the api
+    :class:`mindspore.parallel.auto_parallel.AutoParallel` instead.
 
     Note:
-        Global parallel configuration. This interface will be deprecated in future versions, please use
-        the api :class:`mindspore.parallel.auto_parallel.AutoParallel` instead.
+        CPU only support data parallel.
 
     Some configurations are parallel mode specific, see the below table for details:
 
@@ -1201,10 +1217,8 @@ def set_auto_parallel_context(**kwargs):
 
 def get_auto_parallel_context(attr_key):
     """
-    Get auto parallel context attribute value according to the key.
-
-    Note:
-        This interface will be deprecated in future versions.
+    Get auto parallel context attribute value according to the key, this api will be deprecated and removed in future
+    versions.
 
     Args:
         attr_key (str): The key of the attribute.
@@ -1225,7 +1239,7 @@ def get_auto_parallel_context(attr_key):
 
 def reset_auto_parallel_context():
     """
-    Reset auto parallel context attributes to the default values. This interface will be deprecated in future
+    Reset auto parallel context attributes to the default values, this api will be deprecated and removed in future
     versions, please use the api :class:`mindspore.parallel.auto_parallel.AutoParallel` instead.
 
     - device_num: 1.
@@ -1261,8 +1275,8 @@ def reset_auto_parallel_context():
 @args_type_check(offload_config=dict)
 def set_offload_context(offload_config):
     r"""
-    Configure heterogeneous training detailed parameters to adjust the offload strategy. This function is deprecated and
-    will be removed in future versions.
+    Configure heterogeneous training detailed parameters to adjust the offload strategy, this api will be deprecated
+    and removed in future versions.
 
     Note:
         The offload configuration is only used if the memory offload feature is enabled
@@ -1302,9 +1316,10 @@ def set_offload_context(offload_config):
 
 def get_offload_context():
     """
-    Gets the offload configuration parameters. Configure through interface mindspore.set_offload_context().
-    If the user is not set, the default configuration is obtained. This function is deprecated and will be removed in
-    future versions.
+    Gets the offload configuration parameters, this api will be deprecated and removed in future versions.
+
+    Configure through interface mindspore.set_offload_context(). If the user is not set, the default configuration is
+    obtained.
 
     Returns:
         Dict, heterogeneous training offload detailed configuration parameters.
@@ -1558,7 +1573,8 @@ def set_context(**kwargs):
         check_bprop (bool): This parameter will be deprecated and removed in future versions.
         enable_reduce_precision (bool): This parameter will be deprecated and removed in a future versions.
         grad_for_scalar (bool): This parameter will be deprecated and removed in future versions.
-        support_binary (bool): Whether to support run .pyc or .so in graph mode.
+        support_binary (bool): Whether to support run .pyc or .so in graph mode. This parameter will be deprecated and
+            removed in a future version. Please use the environment variable `MS_SUPPORT_BINARY` instead.
 
     Examples:
         >>> import mindspore as ms
@@ -1668,7 +1684,9 @@ def set_context(**kwargs):
 def get_context(attr_key):
 
     """
-    Get context attribute value according to the input key.
+    Get context attribute value according to the input key, this api will be deprecated and removed in future versions,
+    please use :func:`mindspore.get_current_device` instead.
+
     If some attributes are not set, they will be automatically obtained.
 
     Args:
@@ -1745,7 +1763,7 @@ class ParallelMode:
 @args_type_check(enable_ps=bool)
 def set_ps_context(**kwargs):
     """
-    Set parameter server training mode context.
+    Set parameter server training mode context, this api will be deprecated and removed in future versions.
 
     Note:
         Parameter server mode is only supported in graph mode.
@@ -1768,8 +1786,9 @@ def set_ps_context(**kwargs):
             Default: ``False`` .
         config_file_path (str): Configuration file path used by recovery, parameter server training mode only
             supports Server disaster recovery currently. Default: ``''`` .
-        scheduler_manage_port (int): Scheduler manage port used to scale out/in. Default: ``11202`` .
-        enable_ssl (bool): Set PS SSL mode enabled or disabled. Default: ``False`` .
+        enable_ssl (bool): Set PS SSL mode enabled or disabled. Default: ``False``.
+            Turning it off by default may be a security risk,
+            and users need to ensure the security of the network environment.
         client_password (str): Password to decrypt the secret key stored in the client certificate. Default: ``''`` .
         server_password (str): Password to decrypt the secret key stored in the server certificate. Default: ``''`` .
 
@@ -1778,14 +1797,15 @@ def set_ps_context(**kwargs):
 
     Examples:
         >>> import mindspore as ms
-        >>> ms.set_ps_context(enable_ps=True, enable_ssl=True, client_password='123456', server_password='123456')
+        >>> ms.set_ps_context(enable_ps=True, enable_ssl=True, client_password='', server_password='')
     """
     _set_ps_context(**kwargs)
 
 
 def get_ps_context(attr_key):
     """
-    Get parameter server training mode context attribute value according to the key.
+    Get parameter server training mode context attribute value according to the key, this api will be deprecated and
+    removed in future versions.
 
     Args:
         attr_key (str): The key of the attribute:
@@ -1794,12 +1814,9 @@ def get_ps_context(attr_key):
         - config_file_path (str, optional): Configuration file path used by recovery,
           parameter server training mode only
          supports Server disaster recovery currently. Default: ``''`` .
-        - scheduler_manage_port (int, optional): Scheduler manage port used to scale out/in. Default: ``11202`` .
         - enable_ssl (bool, optional): Set PS SSL mode enabled or disabled. Default: ``False`` .
-        - client_password (str, optional): Password to decrypt the secret key stored in the client certificate.
-          Default: ``''`` .
-        - server_password (str, optional): Password to decrypt the secret key stored in the server certificate.
-          Default: ``''`` .
+          Turning it off by default may be a security risk,
+          and users need to ensure the security of the network environment.
 
     Returns:
         Returns attribute value according to the key.
@@ -1816,7 +1833,8 @@ def get_ps_context(attr_key):
 
 def reset_ps_context():
     """
-    Reset parameter server training mode context attributes to the default values.
+    Reset parameter server training mode context attributes to the default values, this api will be deprecated and
+    removed in future versions.
 
     Meaning of each field and its default value refer to :func:`mindspore.set_ps_context`.
 
mindspore/dataset/core/config.py CHANGED
@@ -1097,12 +1097,12 @@ def get_error_samples_mode():
     return _CDE_TO_PYTHON_ERROR_SAMPLES_MODE.get(_config.get_error_samples_mode())
 
 
-def set_iterator_mode(do_copy=True, parallel_convert=False):
+def set_iterator_mode(do_copy=False, parallel_convert=False):
     """
     Select dataset iterator optimization strategy.
 
     Args:
-        do_copy (bool): Whether dataset iterator creates a Tensor from numpy.ndarray without copy. Default: "True".
+        do_copy (bool): Whether dataset iterator creates a Tensor from numpy.ndarray without copy. Default: "False".
         parallel_convert (bool): Whether dataset iterator starts a thread to organize Tensors to output.
             Default: "False".
 
@@ -1122,7 +1122,7 @@ def set_iterator_mode(do_copy=True, parallel_convert=False):
 def get_iterator_mode():
     """
     Get dataset iterator mode indicate iterator optimization strategy.
-    If `set_iterator_mode` is never called before, `do_copy` default to "True", `parallel_convert` default to "False".
+    If `set_iterator_mode` is never called before, `do_copy` default to "False", `parallel_convert` default to "False".
 
     Returns:
         dict, iterator mode dictionary contains the value of `do_copy` and `parallel_convert`.
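Behavior change worth flagging: iterators now default to `do_copy=False`, i.e. output Tensors may share the pipeline's numpy buffer. A minimal sketch of restoring the 2.6 copying behavior globally (the module path is assumed to be the usual `mindspore.dataset.config` re-export of these functions):

```python
import mindspore.dataset as ds

# Opt back into copying Tensors out of the pipeline buffer (the 2.6 default).
ds.config.set_iterator_mode(do_copy=True, parallel_convert=False)
print(ds.config.get_iterator_mode())
# e.g. {'do_copy': True, 'parallel_convert': False}
```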
mindspore/dataset/engine/datasets.py CHANGED
@@ -575,6 +575,12 @@ class Dataset:
               create shared memory, and represents ``output_columns`` use the second element as the
               unit to create shared memory.
 
+        .. warning::
+            `batch` uses `dill` module implicitly in multiprocessing `spawn` mode to serialize/deserialize
+            `per_batch_map`, which is known to be insecure. It is possible to construct malicious pickle data
+            which will execute arbitrary code during unpickling. Never load data that could have come from
+            untrusted sources, or has been tampered with.
+
         Returns:
             Dataset, a new dataset with the above operation applied.
 
@@ -886,6 +892,12 @@ class Dataset:
 
             - offload (bool, optional): Flag to indicate whether offload is used. Default: ``None``.
 
+        .. warning::
+            `map` uses `dill` module implicitly in multiprocessing `spawn` mode to serialize/deserialize `operations`,
+            which is known to be insecure. It is possible to construct malicious pickle data which will
+            execute arbitrary code during unpickling. Never load data that could have come from untrusted sources,
+            or has been tampered with.
+
         Note:
             - Input `operations` accepts TensorOperations defined in mindspore.dataset part, plus user-defined
               Python functions (PyFuncs).
@@ -1557,7 +1569,7 @@ class Dataset:
         del api_tree
 
     @check_tuple_iterator
-    def create_tuple_iterator(self, columns=None, num_epochs=-1, output_numpy=False, do_copy=True):
+    def create_tuple_iterator(self, columns=None, num_epochs=-1, output_numpy=False, do_copy=False):
         """
         Create an iterator over the dataset that yields samples of type list, whose elements are
         the data for each column.
@@ -1571,7 +1583,7 @@ class Dataset:
                 convert it to Tensor. Default: ``False`` .
             do_copy (bool, optional): Whether to copy the data when converting output to Tensor,
                 or reuse the buffer for better performance, only works when `output_numpy` is ``False`` .
-                Default: ``True`` .
+                Default: ``False`` .
 
         Returns:
             Iterator, a dataset iterator that yields samples of type list.
@@ -1598,7 +1610,7 @@ class Dataset:
         return TupleIterator(self, columns, num_epochs, output_numpy, do_copy)
 
     @check_dict_iterator
-    def create_dict_iterator(self, num_epochs=-1, output_numpy=False, do_copy=True):
+    def create_dict_iterator(self, num_epochs=-1, output_numpy=False, do_copy=False):
         """
         Create an iterator over the dataset that yields samples of type dict,
         while the key is the column name and the value is the data.
@@ -1610,7 +1622,7 @@ class Dataset:
                 convert it to Tensor. Default: ``False`` .
             do_copy (bool, optional): Whether to copy the data when converting output to Tensor,
                 or reuse the buffer for better performance, only works when `output_numpy` is ``False`` .
-                Default: ``True`` .
+                Default: ``False`` .
 
         Returns:
             Iterator, a dataset iterator that yields samples of type dict.
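The same default flows through the per-iterator API, so callers that mutate yielded Tensors or hold them across steps should now request a copy explicitly. A short sketch (the source dataset is illustrative):

```python
import numpy as np
import mindspore.dataset as ds

data = ds.NumpySlicesDataset({"x": np.arange(8, dtype=np.float32)}, shuffle=False)
# do_copy defaults to False as of this release; pass True when downstream
# code keeps or mutates the yielded Tensors.
for sample in data.create_dict_iterator(num_epochs=1, do_copy=True):
    print(sample["x"])
```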
@@ -3632,9 +3644,10 @@ class _PythonMultiprocessing(cde.PythonMultiprocessingRuntime):
                        "process(es): {}".format(self.cleaning_process.pid, self.get_pids()))
 
         if get_enable_watchdog():
-            worker_ids = [worker.pid for worker in self.workers]
+            worker_ids = [os.getpid()]
+            worker_ids.extend([worker.pid for worker in self.workers])
             worker_ids.append(self.cleaning_process.pid)
-            cde.register_worker_pids(id(self), set(worker_ids))
+            cde.register_worker_pids(id(self), worker_ids)
 
     def _abort_monitor(self):
         """Deregister workers monitored by the watch dog and join clean process."""
@@ -4385,7 +4398,7 @@ class TransferDataset(Dataset):
     def create_dict_iterator(self, num_epochs=-1, output_numpy=False):
         raise RuntimeError("TransferDataset is not iterable.")
 
-    def create_tuple_iterator(self, columns=None, num_epochs=-1, output_numpy=False, do_copy=True):
+    def create_tuple_iterator(self, columns=None, num_epochs=-1, output_numpy=False, do_copy=False):
         raise RuntimeError("TransferDataset is not iterable.")
 
     def __iter__(self):
mindspore/dataset/engine/datasets_user_defined.py CHANGED
@@ -220,6 +220,7 @@ class SamplerFn(cde.PythonMultiprocessingRuntime):
 
         self.ppid = os.getpid()
         self.pids = []
+        self.thread_ids = []
         self.check_interval = get_multiprocessing_timeout_interval()  # the interval of check queue's size
 
         if self.multi_process is True:
@@ -277,12 +278,32 @@ class SamplerFn(cde.PythonMultiprocessingRuntime):
                 worker = _GeneratorWorkerMt(self.dataset, self.eof, worker_id)
                 worker.daemon = True
                 self.need_join = True
+                worker.start()
+                self.thread_ids.append(worker.ident)
                 self.workers.append(worker)
 
         # Register a termination function using weakref to avoid the object from unable to properly destruct.
         atexit.register(lambda cleanup: cleanup()() if cleanup() is not None else None,
                         weakref.WeakMethod(self.terminate))
 
+    def get_worker_ids(self):
+        """
+        Get dict of worker's ids
+
+        Returns:
+            dict of strings
+        """
+        if not self.is_mp_enabled():
+            return {}
+        worker_ids = {}
+        if self.multi_process is True:
+            worker_ids["is_thread"] = False
+            worker_ids["worker_id"] = self.pids
+        else:
+            worker_ids["is_thread"] = True
+            worker_ids["worker_id"] = self.thread_ids
+        return worker_ids
+
     def terminate(self):
         self._stop_subprocess()
 
@@ -345,12 +366,12 @@ class SamplerFn(cde.PythonMultiprocessingRuntime):
             start_time = int(time.time())
             wait_count = 1
             while self.workers[i % self.num_worker].res_queue.empty():
+                time.sleep(0.1)
                 if self.eof.is_set():
                     logger.warning("Generator receives a termination signal, stop waiting for data "
                                    "from subprocess.")
                     self._stop_subprocess()
                     return
-                time.sleep(0.1)
                 wait_count = self._interval_log(i, start_time, wait_count)
             result = self.workers[i % self.num_worker].get()
             if isinstance(result, ExceptionHandler):
@@ -421,9 +442,10 @@ class SamplerFn(cde.PythonMultiprocessingRuntime):
                        "process(es): {}".format(self.cleaning_process.pid, [worker.pid for worker in self.workers]))
 
         if get_enable_watchdog():
-            worker_ids = [worker.pid for worker in self.workers]
+            worker_ids = [os.getpid()]
+            worker_ids.extend([worker.pid for worker in self.workers])
             worker_ids.append(self.cleaning_process.pid)
-            cde.register_worker_pids(id(self), set(worker_ids))
+            cde.register_worker_pids(id(self), worker_ids)
 
     def _release_fd(self):
         """Release the file descriptor by subprocess"""
@@ -480,6 +502,8 @@ class SamplerFn(cde.PythonMultiprocessingRuntime):
                 except Exception:  # pylint: disable=W0703
                     # Block all errors when join
                     continue
+            elif not self.multi_process:
+                w.join(timeout=5)
 
         if self.multi_process is True:
             self._release_fd()
@@ -795,6 +819,12 @@ class GeneratorDataset(MappableDataset, UnionBaseDataset):
         - `Load & Process Data With Dataset Pipeline
           <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/dataset_gallery.html>`_
 
+    .. warning::
+        `GeneratorDataset` uses `dill` module implicitly in multiprocessing `spawn` mode to serialize/deserialize
+        `source`, which is known to be insecure. It is possible to construct malicious pickle data which will
+        execute arbitrary code during unpickling. Never load data that could have come from untrusted sources,
+        or has been tampered with.
+
     Note:
         - If you configure `python_multiprocessing=True` (Default: ``True`` ) and `num_parallel_workers>1`
           (default: ``1`` ) indicates that the multiprocessing mode is started for data load acceleration.
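The warning concerns multiprocessing `spawn` mode, where the user-supplied `source` is dill-serialized into each worker. A minimal sketch of the pattern it describes (the generator itself is illustrative):

```python
import numpy as np
import mindspore.dataset as ds

def trusted_source():
    # Only hand GeneratorDataset code you wrote or audited: in spawn mode the
    # source is serialized with dill and unpickled inside each worker process.
    for i in range(4):
        yield (np.array([i], dtype=np.int32),)

data = ds.GeneratorDataset(trusted_source, column_names=["idx"],
                           num_parallel_workers=2, python_multiprocessing=True)
```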
mindspore/dataset/engine/iterators.py CHANGED
@@ -220,7 +220,7 @@ class Iterator:
         dataset: Dataset to be iterated over
     """
 
-    def __init__(self, dataset, num_epochs=-1, output_numpy=False, do_copy=True):
+    def __init__(self, dataset, num_epochs=-1, output_numpy=False, do_copy=False):
         self._col_names = None
 
         # create a copy of tree and work on it.
@@ -493,7 +493,7 @@ class TupleIterator(Iterator):
     The derived class of Iterator with list type.
     """
 
-    def __init__(self, dataset, columns=None, num_epochs=-1, output_numpy=False, do_copy=True):
+    def __init__(self, dataset, columns=None, num_epochs=-1, output_numpy=False, do_copy=False):
         if columns is not None:
             if not isinstance(columns, list):
                 columns = [columns]
mindspore/dataset/engine/obs/config_loader.py CHANGED
@@ -57,12 +57,12 @@ class _Config:
 
     def _convert_type(self, key):
         if key not in self.config:
-            return os.environ[key]
+            return os.environ[key]
         if isinstance(self.config[key], int):
             return int(os.environ[key])
         if isinstance(self.config[key], float):
             return float(os.environ[key])
-        return os.environ[key]
+        return os.environ[key]
 
 
 config = _Config()
mindspore/dataset/engine/obs/obs_mindrecord_dataset.py CHANGED
@@ -506,3 +506,11 @@ class MindRecordFromOBS:
             path = os.path.join(self._local_path, target_dataset)
             _iteration = MindDataset(dataset_files=[path], shuffle=False)
             return _iteration.get_col_names()
+
+    def close(self):
+        if self._pool:
+            self._pool.terminate()
+            self._pool = None
+
+    def __del__(self):
+        self.close()
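The new `close`/`__del__` pair makes the worker-pool teardown explicit and idempotent. The same pattern in isolation, as a sketch (the pool here is a stand-in for the dataset's internal `_pool`):

```python
from multiprocessing import Pool

class PoolOwner:
    """Mirror of the idempotent close() + __del__ teardown added above."""

    def __init__(self):
        self._pool = Pool(processes=2)

    def close(self):
        if self._pool:              # safe to call more than once
            self._pool.terminate()
            self._pool = None

    def __del__(self):
        self.close()                # best-effort cleanup if close() was never called
```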
mindspore/dataset/transforms/py_transforms.py CHANGED
@@ -51,10 +51,14 @@ class PyTensorOperation:
         if "transforms" in json_obj.keys():
             # operations which have transforms as input, need to call _from_json() for each transform to deseriallize
             transforms = []
+            valid_module = ['mindspore.dataset.vision', 'mindspore.dataset.text',
+                            'mindspore.dataset.audio', 'mindspore.dataset.transforms']
             for json_op in json_obj["transforms"]:
-                transforms.append(getattr(
-                    sys.modules.get(json_op.get("python_module")),
-                    json_op.get("tensor_op_name")).from_json(json.dumps(json_op.get("tensor_op_params"))))
+                py_module = sys.modules.get(json_op.get("python_module"))
+                if py_module.__package__ not in valid_module:
+                    raise RuntimeError('Invalid json content, try to serialzie dataset again.')
+                transforms.append(getattr(py_module, json_op.get("tensor_op_name")).from_json(
+                    json.dumps(json_op.get("tensor_op_params"))))
             new_op.transforms = transforms
         if "output_type" in json_obj.keys():
             output_type = np.dtype(json_obj["output_type"])
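This closes a deserialization gadget: previously a crafted `python_module` field could name any imported module and resolve arbitrary attributes from it. The allowlist check, reduced to a standalone sketch (names follow the diff; the JSON structure is as produced by serialization, and the extra `None` guard is an addition of this sketch):

```python
import sys

VALID_MODULES = ['mindspore.dataset.vision', 'mindspore.dataset.text',
                 'mindspore.dataset.audio', 'mindspore.dataset.transforms']

def resolve_op(json_op):
    # Only resolve operation classes from the dataset package family;
    # any other module named in the serialized JSON is treated as tampered input.
    py_module = sys.modules.get(json_op.get("python_module"))
    if py_module is None or py_module.__package__ not in VALID_MODULES:
        raise RuntimeError('Invalid json content, try to serialize dataset again.')
    return getattr(py_module, json_op.get("tensor_op_name"))
```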
mindspore/dataset/transforms/transforms.py CHANGED
@@ -165,10 +165,14 @@ class PyTensorOperation:
         if "transforms" in json_obj.keys():
             # operations which have transforms as input, need to call _from_json() for each transform to deseriallize
             transforms = []
+            valid_module = ['mindspore.dataset.vision', 'mindspore.dataset.text',
+                            'mindspore.dataset.audio', 'mindspore.dataset.transforms']
             for json_op in json_obj["transforms"]:
-                transforms.append(getattr(
-                    sys.modules.get(json_op.get("python_module")),
-                    json_op["tensor_op_name"]).from_json(json.dumps(json_op["tensor_op_params"])))
+                py_module = sys.modules.get(json_op.get("python_module"))
+                if py_module.__package__ not in valid_module:
+                    raise RuntimeError('Invalid json content, try to serialzie dataset again.')
+                transforms.append(getattr(py_module, json_op["tensor_op_name"]).from_json(
+                    json.dumps(json_op["tensor_op_params"])))
             new_op.transforms = transforms
         if "output_type" in json_obj.keys():
             output_type = np.dtype(json_obj["output_type"])
mindspore/dataset/vision/validators.py CHANGED
@@ -351,6 +351,7 @@ def check_resize_interpolation(method):
 
     return new_method
 
+
 def check_device_target(method):
     """A wrapper that wraps a parameter checker"""
 
mindspore/device_context/ascend/device.py CHANGED
@@ -66,7 +66,7 @@ def is_available():
 
 def _is_supported():
     device_target = ms.context.get_context("device_target")
-    if device_target == 'CPU' or device_target == 'GPU':
+    if device_target in ['CPU', 'GPU']:
         logger.error(f"{device_target} device is not supported. Please use correct device")
         return False
     return True
mindspore/device_context/gpu/__init__.py CHANGED
@@ -14,8 +14,8 @@
 # ============================================================================
 """init file for GPU device context"""
 
+__all__ = ['device_count', 'is_available']
+
 from .device import device_count, is_available
 from .op_precision import *
 from .op_tuning import *
-
-__all__ = ["device_count", "is_available"]
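A brief usage sketch for the re-exported helpers (import path follows this package layout; a GPU build is assumed):

```python
from mindspore.device_context.gpu import device_count, is_available

# is_available() logs an error and returns False on CPU/Ascend targets,
# so gate GPU-specific configuration on it.
if is_available():
    print(f"visible GPUs: {device_count()}")
```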
mindspore/device_context/gpu/device.py CHANGED
@@ -64,7 +64,7 @@ def is_available():
 
 def _is_supported():
     device_target = ms.context.get_context("device_target")
-    if device_target == 'CPU' or device_target == 'Ascend':
+    if device_target in ['CPU', 'Ascend']:
         logger.error(f"{device_target} device is not supported. Please use correct device")
         return False
     return True
mindspore/device_context/gpu/op_precision.py CHANGED
@@ -29,7 +29,8 @@ function_status = {'matmul_allow_tf32': False, 'conv_allow_tf32': False}
 def matmul_allow_tf32(value):
     """
     Whether to convert FP32 to TF32 for Matmul operators.
-    For detailed information, please refer to `CUBLAS_COMPUTE_32F_FAST_TF32 <https://docs.nvidia.com/cuda/cublas/index.html>`_.
+    For detailed information, please refer to `CUBLAS_COMPUTE_32F_FAST_TF32
+    <https://docs.nvidia.com/cuda/cublas/index.html>`_.
 
     Args:
         value (bool): Whether to convert FP32 to TF32 for Matmul operators. If not configured, the framework
@@ -50,7 +51,8 @@ def matmul_allow_tf32(value):
 def conv_allow_tf32(value):
     """
     Whether to convert FP32 to TF32 for Conv operators.
-    For detailed information, please refer to `CUBLAS_COMPUTE_32F_FAST_TF32 <https://docs.nvidia.com/cuda/cublas/index.html>`_.
+    For detailed information, please refer to `CUBLAS_COMPUTE_32F_FAST_TF32
+    <https://docs.nvidia.com/cuda/cublas/index.html>`_.
 
     Args:
         value (bool): Whether to convert FP32 to HF32 for Conv operators. If not configured, the framework defaults
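The edits above only re-wrap over-long reference links, but since these are the public TF32 switches, a short usage sketch may help (GPU target assumed):

```python
from mindspore.device_context.gpu import op_precision

# Trade FP32 accuracy for TF32 throughput on MatMul kernels,
# while keeping convolutions in full FP32.
op_precision.matmul_allow_tf32(True)
op_precision.conv_allow_tf32(False)
```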
mindspore/device_context/gpu/op_tuning.py CHANGED
@@ -27,7 +27,8 @@ function_status = {'conv_fprop_algo': False, 'conv_wgrad_algo': False, 'conv_dgrad_algo': False}
 def conv_fprop_algo(mode):
     """
     Specifies convolution forward algorithm.
-    For detailed information, please refer to `NVIDA cuDNN about cudnnConvolutionForward <https://docs.nvidia.com/deeplearning/cudnn/latest/api/cudnn-cnn-library.html>`_.
+    For detailed information, please refer to `NVIDA cuDNN about cudnnConvolutionForward
+    <https://docs.nvidia.com/deeplearning/cudnn/latest/api/cudnn-cnn-library.html>`_.
 
     Args:
         mode (str): convolution forward algorithm. If not configured, the framework defaults to 'normal'.
@@ -80,7 +81,8 @@ def conv_fprop_algo(mode):
 def conv_wgrad_algo(mode):
     """
     Specifies convolution filter grad algorithm.
-    For detailed information, please refer to `NVIDA cuDNN <https://docs.nvidia.com/deeplearning/cudnn/latest/api/cudnn-cnn-library.html>`_.
+    For detailed information, please refer to `NVIDA cuDNN
+    <https://docs.nvidia.com/deeplearning/cudnn/latest/api/cudnn-cnn-library.html>`_.
 
     Args:
         mode (str): convolution filter grad algorithm. If not configured, the framework defaults to 'normal'.
@@ -129,7 +131,8 @@ def conv_wgrad_algo(mode):
 def conv_dgrad_algo(mode):
     """
     Specifies convolution data grad algorithm.
-    For detailed information, please refer to `NVIDA cuDNN <https://docs.nvidia.com/deeplearning/cudnn/latest/api/cudnn-cnn-library.html>`_.
+    For detailed information, please refer to `NVIDA cuDNN
+    <https://docs.nvidia.com/deeplearning/cudnn/latest/api/cudnn-cnn-library.html>`_.
 
     Args:
         mode (str): convolution data grad algorithm. If not configured, the framework defaults to 'normal'.
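Likewise for the cuDNN algorithm selectors touched above; a short usage sketch (mode strings per the docstrings, ``'normal'`` being the default):

```python
from mindspore.device_context.gpu import op_tuning

# Pin the cuDNN convolution algorithms instead of the framework's heuristic choice.
op_tuning.conv_fprop_algo("normal")
op_tuning.conv_wgrad_algo("normal")
op_tuning.conv_dgrad_algo("normal")
```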