mindspore-2.7.0rc1-cp310-cp310-win_amd64.whl → mindspore-2.7.1-cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (370)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +5 -2
  3. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  6. mindspore/_checkparam.py +2 -2
  7. mindspore/_extends/builtin_operations.py +3 -3
  8. mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
  9. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  10. mindspore/_extends/parse/__init__.py +3 -3
  11. mindspore/_extends/parse/compile_config.py +24 -1
  12. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +6 -3
  13. mindspore/_extends/parse/parser.py +28 -22
  14. mindspore/_extends/parse/resources.py +1 -1
  15. mindspore/_extends/parse/standard_method.py +23 -2
  16. mindspore/_extends/parse/trope.py +2 -1
  17. mindspore/_extends/pijit/pijit_func_white_list.py +9 -27
  18. mindspore/amp.py +0 -18
  19. mindspore/avcodec-59.dll +0 -0
  20. mindspore/avdevice-59.dll +0 -0
  21. mindspore/avfilter-8.dll +0 -0
  22. mindspore/avformat-59.dll +0 -0
  23. mindspore/avutil-57.dll +0 -0
  24. mindspore/boost/base.py +29 -2
  25. mindspore/common/__init__.py +18 -12
  26. mindspore/common/_decorator.py +3 -2
  27. mindspore/common/_grad_function.py +3 -1
  28. mindspore/common/_tensor_cpp_method.py +1 -1
  29. mindspore/common/_tensor_docs.py +371 -96
  30. mindspore/common/_utils.py +7 -43
  31. mindspore/common/api.py +434 -135
  32. mindspore/common/dtype.py +98 -57
  33. mindspore/common/dump.py +7 -108
  34. mindspore/common/dynamic_shape/__init__.py +0 -0
  35. mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +15 -23
  36. mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
  37. mindspore/common/file_system.py +59 -9
  38. mindspore/common/hook_handle.py +82 -3
  39. mindspore/common/jit_config.py +5 -1
  40. mindspore/common/jit_trace.py +27 -12
  41. mindspore/common/lazy_inline.py +5 -3
  42. mindspore/common/np_dtype.py +3 -3
  43. mindspore/common/parameter.py +17 -127
  44. mindspore/common/recompute.py +4 -13
  45. mindspore/common/tensor.py +50 -217
  46. mindspore/communication/_comm_helper.py +11 -1
  47. mindspore/communication/comm_func.py +138 -4
  48. mindspore/communication/management.py +85 -1
  49. mindspore/config/op_info.config +0 -15
  50. mindspore/context.py +20 -106
  51. mindspore/dataset/__init__.py +1 -1
  52. mindspore/dataset/audio/transforms.py +1 -1
  53. mindspore/dataset/core/config.py +35 -1
  54. mindspore/dataset/engine/datasets.py +338 -319
  55. mindspore/dataset/engine/datasets_user_defined.py +38 -22
  56. mindspore/dataset/engine/datasets_vision.py +1 -1
  57. mindspore/dataset/engine/validators.py +1 -15
  58. mindspore/dataset/transforms/c_transforms.py +2 -2
  59. mindspore/dataset/transforms/transforms.py +3 -3
  60. mindspore/dataset/vision/__init__.py +1 -1
  61. mindspore/dataset/vision/py_transforms.py +8 -8
  62. mindspore/dataset/vision/transforms.py +17 -5
  63. mindspore/dataset/vision/utils.py +632 -21
  64. mindspore/device_context/ascend/op_tuning.py +35 -1
  65. mindspore/dnnl.dll +0 -0
  66. mindspore/{profiler/common/validator → graph}/__init__.py +9 -1
  67. mindspore/graph/custom_pass.py +55 -0
  68. mindspore/include/api/cell.h +28 -4
  69. mindspore/include/api/cfg.h +24 -7
  70. mindspore/include/api/context.h +1 -0
  71. mindspore/include/api/delegate.h +0 -2
  72. mindspore/include/api/dual_abi_helper.h +100 -19
  73. mindspore/include/api/graph.h +14 -1
  74. mindspore/include/api/kernel.h +16 -3
  75. mindspore/include/api/kernel_api.h +9 -1
  76. mindspore/include/api/metrics/accuracy.h +9 -0
  77. mindspore/include/api/model.h +5 -1
  78. mindspore/include/api/model_group.h +4 -0
  79. mindspore/include/api/model_parallel_runner.h +2 -0
  80. mindspore/include/api/status.h +48 -10
  81. mindspore/include/api/types.h +6 -1
  82. mindspore/include/dataset/constants.h +9 -0
  83. mindspore/include/dataset/execute.h +2 -2
  84. mindspore/jpeg62.dll +0 -0
  85. mindspore/mindrecord/__init__.py +3 -3
  86. mindspore/mindrecord/common/exceptions.py +1 -0
  87. mindspore/mindrecord/config.py +1 -1
  88. mindspore/{parallel/mpi → mindrecord/core}/__init__.py +4 -1
  89. mindspore/mindrecord/{shardheader.py → core/shardheader.py} +2 -1
  90. mindspore/mindrecord/{shardindexgenerator.py → core/shardindexgenerator.py} +1 -1
  91. mindspore/mindrecord/{shardreader.py → core/shardreader.py} +2 -1
  92. mindspore/mindrecord/{shardsegment.py → core/shardsegment.py} +2 -2
  93. mindspore/mindrecord/{shardutils.py → core/shardutils.py} +1 -1
  94. mindspore/mindrecord/{shardwriter.py → core/shardwriter.py} +1 -1
  95. mindspore/mindrecord/filereader.py +4 -4
  96. mindspore/mindrecord/filewriter.py +5 -5
  97. mindspore/mindrecord/mindpage.py +2 -2
  98. mindspore/mindrecord/tools/cifar10.py +4 -3
  99. mindspore/mindrecord/tools/cifar100.py +1 -1
  100. mindspore/mindrecord/tools/cifar100_to_mr.py +1 -1
  101. mindspore/mindrecord/tools/cifar10_to_mr.py +6 -6
  102. mindspore/mindrecord/tools/csv_to_mr.py +1 -1
  103. mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
  104. mindspore/mindrecord/tools/mnist_to_mr.py +1 -1
  105. mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -1
  106. mindspore/mindspore_backend_common.dll +0 -0
  107. mindspore/mindspore_backend_manager.dll +0 -0
  108. mindspore/mindspore_cluster.dll +0 -0
  109. mindspore/mindspore_common.dll +0 -0
  110. mindspore/mindspore_core.dll +0 -0
  111. mindspore/mindspore_cpu.dll +0 -0
  112. mindspore/mindspore_dump.dll +0 -0
  113. mindspore/mindspore_frontend.dll +0 -0
  114. mindspore/mindspore_glog.dll +0 -0
  115. mindspore/mindspore_hardware_abstract.dll +0 -0
  116. mindspore/mindspore_memory_pool.dll +0 -0
  117. mindspore/mindspore_ms_backend.dll +0 -0
  118. mindspore/mindspore_ops.dll +0 -0
  119. mindspore/{mindspore_ops_host.dll → mindspore_ops_cpu.dll} +0 -0
  120. mindspore/mindspore_profiler.dll +0 -0
  121. mindspore/mindspore_pyboost.dll +0 -0
  122. mindspore/mindspore_pynative.dll +0 -0
  123. mindspore/mindspore_runtime_pipeline.dll +0 -0
  124. mindspore/mindspore_runtime_utils.dll +0 -0
  125. mindspore/mindspore_tools.dll +0 -0
  126. mindspore/mint/__init__.py +15 -10
  127. mindspore/mint/distributed/__init__.py +4 -0
  128. mindspore/mint/distributed/distributed.py +392 -69
  129. mindspore/mint/nn/__init__.py +2 -16
  130. mindspore/mint/nn/functional.py +4 -110
  131. mindspore/mint/nn/layer/__init__.py +0 -2
  132. mindspore/mint/nn/layer/_functions.py +1 -2
  133. mindspore/mint/nn/layer/activation.py +0 -6
  134. mindspore/mint/nn/layer/basic.py +0 -47
  135. mindspore/mint/nn/layer/conv.py +10 -10
  136. mindspore/mint/nn/layer/normalization.py +11 -16
  137. mindspore/mint/nn/layer/pooling.py +0 -4
  138. mindspore/nn/__init__.py +1 -3
  139. mindspore/nn/cell.py +231 -239
  140. mindspore/nn/layer/activation.py +4 -2
  141. mindspore/nn/layer/basic.py +56 -14
  142. mindspore/nn/layer/container.py +16 -0
  143. mindspore/nn/layer/embedding.py +4 -169
  144. mindspore/nn/layer/image.py +1 -1
  145. mindspore/nn/layer/normalization.py +2 -1
  146. mindspore/nn/layer/thor_layer.py +4 -85
  147. mindspore/nn/optim/ada_grad.py +0 -1
  148. mindspore/nn/optim/adafactor.py +0 -1
  149. mindspore/nn/optim/adam.py +32 -127
  150. mindspore/nn/optim/adamax.py +0 -1
  151. mindspore/nn/optim/asgd.py +0 -1
  152. mindspore/nn/optim/ftrl.py +8 -102
  153. mindspore/nn/optim/lamb.py +1 -4
  154. mindspore/nn/optim/lars.py +0 -3
  155. mindspore/nn/optim/lazyadam.py +25 -218
  156. mindspore/nn/optim/momentum.py +5 -43
  157. mindspore/nn/optim/optimizer.py +6 -55
  158. mindspore/nn/optim/proximal_ada_grad.py +0 -1
  159. mindspore/nn/optim/rmsprop.py +0 -1
  160. mindspore/nn/optim/rprop.py +0 -1
  161. mindspore/nn/optim/sgd.py +0 -1
  162. mindspore/nn/optim/tft_wrapper.py +2 -4
  163. mindspore/nn/optim/thor.py +0 -2
  164. mindspore/nn/probability/bijector/bijector.py +7 -8
  165. mindspore/nn/probability/bijector/gumbel_cdf.py +2 -2
  166. mindspore/nn/probability/bijector/power_transform.py +20 -21
  167. mindspore/nn/probability/bijector/scalar_affine.py +5 -5
  168. mindspore/nn/probability/bijector/softplus.py +13 -14
  169. mindspore/nn/probability/distribution/_utils/utils.py +2 -2
  170. mindspore/nn/wrap/cell_wrapper.py +39 -5
  171. mindspore/nn/wrap/grad_reducer.py +4 -89
  172. mindspore/numpy/array_creations.py +4 -4
  173. mindspore/numpy/fft.py +9 -9
  174. mindspore/numpy/utils_const.py +1 -1
  175. mindspore/{nn/reinforcement → onnx}/__init__.py +5 -8
  176. mindspore/onnx/onnx_export.py +137 -0
  177. mindspore/opencv_core4110.dll +0 -0
  178. mindspore/opencv_imgcodecs4110.dll +0 -0
  179. mindspore/{opencv_imgproc452.dll → opencv_imgproc4110.dll} +0 -0
  180. mindspore/ops/__init__.py +2 -0
  181. mindspore/ops/_grad_experimental/grad_comm_ops.py +38 -2
  182. mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
  183. mindspore/ops/_op_impl/aicpu/__init__.py +0 -10
  184. mindspore/ops/_op_impl/cpu/__init__.py +1 -5
  185. mindspore/ops/_op_impl/cpu/{buffer_append.py → joinedstr_op.py} +8 -8
  186. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +28 -24
  187. mindspore/ops/auto_generate/gen_extend_func.py +6 -11
  188. mindspore/ops/auto_generate/gen_ops_def.py +385 -154
  189. mindspore/ops/auto_generate/gen_ops_prim.py +5676 -5167
  190. mindspore/ops/communication.py +97 -0
  191. mindspore/ops/composite/__init__.py +5 -2
  192. mindspore/ops/composite/base.py +16 -2
  193. mindspore/ops/composite/multitype_ops/__init__.py +3 -1
  194. mindspore/ops/composite/multitype_ops/_compile_utils.py +150 -8
  195. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
  196. mindspore/ops/composite/multitype_ops/add_impl.py +7 -0
  197. mindspore/ops/composite/multitype_ops/mod_impl.py +27 -0
  198. mindspore/ops/function/__init__.py +2 -0
  199. mindspore/ops/function/array_func.py +24 -18
  200. mindspore/ops/function/comm_func.py +3883 -0
  201. mindspore/ops/function/debug_func.py +7 -6
  202. mindspore/ops/function/grad/grad_func.py +4 -12
  203. mindspore/ops/function/math_func.py +89 -86
  204. mindspore/ops/function/nn_func.py +92 -313
  205. mindspore/ops/function/random_func.py +9 -18
  206. mindspore/ops/functional.py +4 -1
  207. mindspore/ops/functional_overload.py +377 -30
  208. mindspore/ops/operations/__init__.py +2 -5
  209. mindspore/ops/operations/_custom_ops_utils.py +7 -9
  210. mindspore/ops/operations/_inner_ops.py +12 -50
  211. mindspore/ops/operations/_rl_inner_ops.py +0 -933
  212. mindspore/ops/operations/array_ops.py +5 -50
  213. mindspore/ops/operations/comm_ops.py +95 -17
  214. mindspore/ops/operations/custom_ops.py +237 -22
  215. mindspore/ops/operations/debug_ops.py +33 -35
  216. mindspore/ops/operations/manually_defined/ops_def.py +39 -318
  217. mindspore/ops/operations/math_ops.py +5 -5
  218. mindspore/ops/operations/nn_ops.py +3 -3
  219. mindspore/ops/operations/sparse_ops.py +0 -83
  220. mindspore/ops/primitive.py +4 -27
  221. mindspore/ops/tensor_method.py +88 -10
  222. mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +5 -5
  223. mindspore/ops_generate/aclnn/gen_aclnn_implement.py +8 -8
  224. mindspore/ops_generate/api/functions_cc_generator.py +53 -4
  225. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +25 -11
  226. mindspore/ops_generate/common/gen_constants.py +11 -10
  227. mindspore/ops_generate/common/op_proto.py +18 -1
  228. mindspore/ops_generate/common/template.py +102 -245
  229. mindspore/ops_generate/common/template_utils.py +212 -0
  230. mindspore/ops_generate/gen_custom_ops.py +69 -0
  231. mindspore/ops_generate/op_def/ops_def_cc_generator.py +78 -7
  232. mindspore/ops_generate/op_def_py/base_op_prim_py_generator.py +360 -0
  233. mindspore/ops_generate/op_def_py/custom_op_prim_py_generator.py +140 -0
  234. mindspore/ops_generate/op_def_py/op_def_py_generator.py +54 -7
  235. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -312
  236. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +74 -17
  237. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +22 -5
  238. mindspore/ops_generate/pyboost/gen_pyboost_func.py +0 -16
  239. mindspore/ops_generate/pyboost/op_template_parser.py +3 -2
  240. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +21 -5
  241. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +2 -2
  242. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +30 -10
  243. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +10 -3
  244. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +1 -1
  245. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +19 -9
  246. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +71 -28
  247. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +10 -9
  248. mindspore/ops_generate/pyboost/pyboost_utils.py +27 -16
  249. mindspore/ops_generate/resources/yaml_loader.py +13 -0
  250. mindspore/ops_generate/tensor_py_cc_generator.py +2 -2
  251. mindspore/parallel/_auto_parallel_context.py +5 -15
  252. mindspore/parallel/_cell_wrapper.py +1 -1
  253. mindspore/parallel/_parallel_serialization.py +4 -6
  254. mindspore/parallel/_ps_context.py +2 -2
  255. mindspore/parallel/_utils.py +34 -17
  256. mindspore/parallel/auto_parallel.py +23 -9
  257. mindspore/parallel/checkpoint_transform.py +20 -2
  258. mindspore/parallel/cluster/process_entity/_api.py +28 -33
  259. mindspore/parallel/cluster/process_entity/_utils.py +9 -5
  260. mindspore/parallel/cluster/run.py +5 -3
  261. mindspore/{experimental/llm_boost/ascend_native → parallel/distributed}/__init__.py +21 -22
  262. mindspore/parallel/distributed/distributed_data_parallel.py +393 -0
  263. mindspore/parallel/distributed/flatten_grad_buffer.py +295 -0
  264. mindspore/parallel/function/reshard_func.py +6 -5
  265. mindspore/parallel/nn/parallel_cell_wrapper.py +40 -3
  266. mindspore/parallel/nn/parallel_grad_reducer.py +0 -8
  267. mindspore/parallel/shard.py +7 -21
  268. mindspore/parallel/strategy.py +336 -0
  269. mindspore/parallel/transform_safetensors.py +127 -20
  270. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +13 -9
  271. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +1 -1
  272. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +1 -1
  273. mindspore/profiler/common/constant.py +5 -0
  274. mindspore/profiler/common/file_manager.py +9 -0
  275. mindspore/profiler/common/msprof_cmd_tool.py +40 -4
  276. mindspore/profiler/common/path_manager.py +65 -24
  277. mindspore/profiler/common/profiler_context.py +27 -14
  278. mindspore/profiler/common/profiler_info.py +3 -3
  279. mindspore/profiler/common/profiler_meta_data.py +1 -0
  280. mindspore/profiler/common/profiler_op_analyse.py +10 -6
  281. mindspore/profiler/common/profiler_path_manager.py +13 -0
  282. mindspore/profiler/common/util.py +30 -3
  283. mindspore/profiler/dynamic_profiler.py +91 -46
  284. mindspore/profiler/envprofiler.py +30 -5
  285. mindspore/profiler/experimental_config.py +18 -2
  286. mindspore/profiler/platform/cpu_profiler.py +10 -4
  287. mindspore/profiler/platform/npu_profiler.py +34 -7
  288. mindspore/profiler/profiler.py +193 -145
  289. mindspore/profiler/profiler_action_controller.py +1 -1
  290. mindspore/profiler/profiler_interface.py +2 -2
  291. mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
  292. mindspore/run_check/_check_version.py +108 -24
  293. mindspore/runtime/__init__.py +9 -6
  294. mindspore/runtime/executor.py +35 -0
  295. mindspore/runtime/memory.py +113 -0
  296. mindspore/runtime/thread_bind_core.py +1 -1
  297. mindspore/swresample-4.dll +0 -0
  298. mindspore/swscale-6.dll +0 -0
  299. mindspore/tinyxml2.dll +0 -0
  300. mindspore/{experimental/llm_boost → tools}/__init__.py +5 -5
  301. mindspore/tools/data_dump.py +130 -0
  302. mindspore/tools/sdc_detect.py +91 -0
  303. mindspore/tools/stress_detect.py +63 -0
  304. mindspore/train/__init__.py +6 -6
  305. mindspore/train/_utils.py +8 -21
  306. mindspore/train/amp.py +6 -7
  307. mindspore/train/callback/_callback.py +2 -1
  308. mindspore/train/callback/_checkpoint.py +1 -17
  309. mindspore/train/callback/_flops_collector.py +10 -6
  310. mindspore/train/callback/_train_fault_tolerance.py +72 -25
  311. mindspore/train/data_sink.py +5 -9
  312. mindspore/train/dataset_helper.py +5 -5
  313. mindspore/train/model.py +41 -230
  314. mindspore/train/serialization.py +160 -401
  315. mindspore/train/train_thor/model_thor.py +2 -2
  316. mindspore/turbojpeg.dll +0 -0
  317. mindspore/utils/__init__.py +6 -3
  318. mindspore/utils/dlpack.py +92 -0
  319. mindspore/utils/dryrun.py +1 -1
  320. mindspore/utils/runtime_execution_order_check.py +10 -0
  321. mindspore/utils/sdc_detect.py +14 -12
  322. mindspore/utils/stress_detect.py +43 -0
  323. mindspore/utils/utils.py +152 -16
  324. mindspore/version.py +1 -1
  325. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/METADATA +3 -2
  326. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/RECORD +330 -344
  327. mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
  328. mindspore/communication/_hccl_management.py +0 -297
  329. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -207
  330. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +0 -52
  331. mindspore/experimental/llm_boost/atb/__init__.py +0 -23
  332. mindspore/experimental/llm_boost/atb/boost_base.py +0 -385
  333. mindspore/experimental/llm_boost/atb/llama_boost.py +0 -137
  334. mindspore/experimental/llm_boost/atb/qwen_boost.py +0 -124
  335. mindspore/experimental/llm_boost/register.py +0 -130
  336. mindspore/experimental/llm_boost/utils.py +0 -31
  337. mindspore/include/OWNERS +0 -7
  338. mindspore/mindspore_cpu_res_manager.dll +0 -0
  339. mindspore/mindspore_ops_kernel_common.dll +0 -0
  340. mindspore/mindspore_res_manager.dll +0 -0
  341. mindspore/nn/optim/_dist_optimizer_registry.py +0 -111
  342. mindspore/nn/reinforcement/_batch_read_write.py +0 -142
  343. mindspore/nn/reinforcement/_tensors_queue.py +0 -152
  344. mindspore/nn/reinforcement/tensor_array.py +0 -145
  345. mindspore/opencv_core452.dll +0 -0
  346. mindspore/opencv_imgcodecs452.dll +0 -0
  347. mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +0 -113
  348. mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +0 -96
  349. mindspore/ops/_op_impl/aicpu/sparse_cross.py +0 -42
  350. mindspore/ops/_op_impl/cpu/buffer_get.py +0 -28
  351. mindspore/ops/_op_impl/cpu/buffer_sample.py +0 -28
  352. mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +0 -42
  353. mindspore/ops/operations/_tensor_array.py +0 -359
  354. mindspore/ops/operations/rl_ops.py +0 -288
  355. mindspore/parallel/_offload_context.py +0 -275
  356. mindspore/parallel/_recovery_context.py +0 -115
  357. mindspore/parallel/_transformer/__init__.py +0 -35
  358. mindspore/parallel/_transformer/layers.py +0 -765
  359. mindspore/parallel/_transformer/loss.py +0 -251
  360. mindspore/parallel/_transformer/moe.py +0 -693
  361. mindspore/parallel/_transformer/op_parallel_config.py +0 -222
  362. mindspore/parallel/_transformer/transformer.py +0 -3124
  363. mindspore/parallel/mpi/_mpi_config.py +0 -116
  364. mindspore/profiler/common/validator/validate_path.py +0 -84
  365. mindspore/train/memory_profiling_pb2.py +0 -298
  366. mindspore/utils/hooks.py +0 -81
  367. /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
  368. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/WHEEL +0 -0
  369. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/entry_points.txt +0 -0
  370. {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/top_level.txt +0 -0
mindspore/experimental/llm_boost/register.py DELETED
@@ -1,130 +0,0 @@
- # Copyright 2024 Huawei Technologies Co., Ltd
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # ============================================================================
- """LlmBoostRegister"""
- import inspect
-
-
- class LlmBoostType:
-     """Class module type for vision pretrain"""
-
-     def __init__(self):
-         pass
-
-     BUILDIN = 'BuildIn'
-     ASCEND_NATIVE = 'LLMBoost'
-
-
- class LlmBoostRegister:
-     """
-     Module class factory.
-     """
-
-     def __init__(self):
-         pass
-
-     registry = {}
-
-     @classmethod
-     def register(cls, boost_type=LlmBoostType.BUILDIN, alias=None):
-         """Register class into registry
-         Args:
-             boost_type:
-                 boost type name, default LlmBoostType.BUILDIN
-             alias (str) : model_name
-
-         Returns:
-             wrapper
-         """
-
-         def wrapper(register_class):
-             """Register-Class with wrapper function.
-
-             Args:
-                 register_class : class need to register
-
-             Returns:
-                 wrapper of register_class
-             """
-             model_name = alias if alias is not None else register_class.__name__
-             if boost_type not in cls.registry:
-                 cls.registry[boost_type] = {model_name: register_class}
-             else:
-                 cls.registry[boost_type][model_name] = register_class
-             return register_class
-
-         return wrapper
-
-     @classmethod
-     def is_exist(cls, boost_type, model_name=None):
-         """Determine whether class name is in the current type group.
-
-         Args:
-             boost_type : Module type
-             model_name : model name
-
-         Returns:
-             True/False
-         """
-         if not model_name:
-             return boost_type in cls.registry
-         registered = boost_type in cls.registry and model_name in cls.registry.get(
-             boost_type)
-         return registered
-
-     @classmethod
-     def get_cls(cls, boost_type, model_name=None):
-         """Get class
-
-         Args:
-             boost_type : Module type
-             model_name : model name
-
-         Returns:
-             register_class
-         """
-         if not cls.is_exist(boost_type, model_name):
-             raise ValueError("Can't find class type {} class name {} \
-                 in class registry".format(boost_type, model_name))
-
-         if not model_name:
-             raise ValueError(
-                 "Can't find model. model name = {}".format(model_name))
-         register_class = cls.registry.get(boost_type).get(model_name)
-         return register_class
-
-     @classmethod
-     def get_instance(cls, boost_type=LlmBoostType.BUILDIN, model_name=None, **kwargs):
-         """Get instance.
-         Args:
-             boost_type : module type
-             model_name : model type
-         Returns:
-             object : The constructed object
-         """
-         if model_name is None:
-             raise ValueError("Class name cannot be None.")
-
-         if isinstance(model_name, str):
-             obj_cls = cls.get_cls(boost_type, model_name)
-         elif inspect.isclass(model_name):
-             obj_cls = model_name
-         else:
-             raise ValueError("Can't find boost type {} model name {} \
-                 in class registry.".format(boost_type, model_name))
-
-         try:
-             return obj_cls(**kwargs)
-         except Exception as e:
-             raise type(e)('{}: {}'.format(obj_cls.__name__, e))
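
For readers unfamiliar with the removed module, here is a minimal sketch of how this registry was used in 2.7.0rc1, based only on the class shown above (the LlamaBoost class, its "Llama" alias, and the seq_len keyword are hypothetical, not taken from this diff):

    from mindspore.experimental.llm_boost.register import LlmBoostRegister, LlmBoostType

    # Register a boost implementation under a type group and an alias.
    @LlmBoostRegister.register(LlmBoostType.BUILDIN, alias="Llama")
    class LlamaBoost:
        def __init__(self, **kwargs):
            # Hypothetical implementation; only the registration pattern matters here.
            self.kwargs = kwargs

    # Look up the registered class by (boost_type, alias) and construct it;
    # extra keyword arguments are forwarded to the class constructor.
    boost = LlmBoostRegister.get_instance(LlmBoostType.BUILDIN, model_name="Llama", seq_len=4096)

Since the experimental llm_boost modules listed above are all deleted in this release, this import path no longer resolves in 2.7.1.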
mindspore/experimental/llm_boost/utils.py DELETED
@@ -1,31 +0,0 @@
- # Copyright 2024 Huawei Technologies Co., Ltd
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # ============================================================================
- """LlmBoostRegister"""
- import os
- from mindspore.communication import get_group_size, get_rank
-
-
- def get_real_rank():
-     try:
-         return get_rank()
-     except RuntimeError:
-         return int(os.getenv("RANK_ID", "0"))
-
-
- def get_real_group_size():
-     try:
-         return get_group_size()
-     except RuntimeError:
-         return int(os.getenv("RANK_SIZE", "1"))
mindspore/include/OWNERS DELETED
@@ -1,7 +0,0 @@
- approvers:
- - jpc_chenjianping #
- - zhang_xue_tong
- reviewers:
- - lx0095
- - xu-yfei
- - wilfchen
Binary file
Binary file
Binary file
mindspore/nn/optim/_dist_optimizer_registry.py DELETED
@@ -1,111 +0,0 @@
- # Copyright 2022 Huawei Technologies Co., Ltd
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # ============================================================================
- """_dist_optimizer_registry"""
- from __future__ import absolute_import
-
- from inspect import isfunction
-
- from mindspore.parallel._ps_context import _get_ps_context, _is_ps_mode
-
-
- _create_func_map = {}
-
-
- def _register_dist_optimizer(optimizer_type, creating_func):
-     """
-     Register distributed optimizers.
-     This method should be called by original optimizers.
-     """
-     if optimizer_type in _create_func_map:
-         return
-     if not isfunction(creating_func):
-         raise TypeError("creating_func is not a function type!")
-     _create_func_map[optimizer_type] = creating_func
-
-
- def empty_creating_func(*args, **kwargs):
-     """Empty function as placeholder."""
-     return
-
-
- _pserver_optmizer_attrs = {
-     "ms_role": "MS_PSERVER",
-     "primitive_target": "CPU",
-     "update_parameter": True
- }
-
-
- def create_optimizers_on_pserver(optimizer_type, parameters, *args, **kwargs):
-     """
-     Create the optimizers on parameter server.
-     This method should be called only in Parameter Server training mode.
-     Return distributed optimizer list and the flag list which indicates whether the parameters use them.
-     The size of the two lists returned should be the same as the size of input 'parameters'
-     """
-     distributed_optimizer_list = []
-     use_flag_list = []
-     for index, param in enumerate(parameters):
-         if param.is_param_ps and (not param.cache_enable):
-             if optimizer_type not in _create_func_map:
-                 raise ValueError("Optimizer type %s is not recognized!" % optimizer_type)
-             distributed_optimizer = _create_func_map.get(optimizer_type)(*args, **kwargs)
-
-             server_rank_id = index % _get_ps_context("server_num")
-             distributed_optimizer.add_prim_attr("rank_id", server_rank_id)
-             for key, value in _pserver_optmizer_attrs.items():
-                 distributed_optimizer.add_prim_attr(key, value)
-             distributed_optimizer_list.append(distributed_optimizer)
-             use_flag_list.append(True)
-         else:
-             distributed_optimizer_list.append(empty_creating_func)
-             use_flag_list.append(False)
-     return distributed_optimizer_list, use_flag_list
-
-
- def no_distributed_optimizer(optimizer_type, parameters, *args, **kwargs):
-     """
-     In some cases, no distributed optimizers are needed.
-     But we still need to return lists so optimizer subclasses can build the network using HyperMap.
-     """
-     empty_list = []
-     use_flag_list = []
-     for _ in parameters:
-         empty_list.append(empty_creating_func)
-         use_flag_list.append(False)
-     return empty_list, use_flag_list
-
-
- def get_creating_func():
-     """
-     Returns creating functions for distributed optimizers.
-     """
-     # Only support optimizers in parameter server mode for now.
-     if _is_ps_mode():
-         return create_optimizers_on_pserver
-     return no_distributed_optimizer
-
-
- def generate_dist_optimizer_list(optimizer_type, parameters, *args, **kwargs):
-     """
-     Generate the distributed optimizers according to the execution mode.
-     Only Parameter Server training mode is supported for now.
-     """
-     func = get_creating_func()
-     opt_list, use_flag_list = func(optimizer_type, parameters, *args, **kwargs)
-     if len(opt_list) != len(parameters) or len(use_flag_list) != len(parameters):
-         raise ValueError(f"Size of distributed optimizer list should be the same as parameter list. "
-                          f"But got len(opt_list):{len(opt_list)}"
-                          f", len(parameters):{len(parameters)}")
-     return opt_list, tuple(use_flag_list)
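
A rough sketch of the registration contract this deleted module provided, based only on the functions shown above (the "FTRL" key and the create_ftrl_ps helper are illustrative placeholders, not taken from the diff):

    from mindspore.nn.optim._dist_optimizer_registry import (
        _register_dist_optimizer,
        generate_dist_optimizer_list,
    )

    def create_ftrl_ps(*args, **kwargs):
        # Placeholder: a real creating function returned the parameter-server
        # primitive for its optimizer type.
        raise NotImplementedError

    # Each optimizer registered one creating function per optimizer type...
    _register_dist_optimizer("FTRL", create_ftrl_ps)

    # ...and later requested one distributed optimizer (or a no-op placeholder)
    # plus a use-flag per parameter; outside parameter-server mode the result
    # degenerates to placeholders:
    # opt_list, use_flags = generate_dist_optimizer_list("FTRL", params)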
mindspore/nn/reinforcement/_batch_read_write.py DELETED
@@ -1,142 +0,0 @@
- # Copyright 2022 Huawei Technologies Co., Ltd
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # ============================================================================
- """
- BatchReadWrite
- """
- from __future__ import absolute_import
-
- from mindspore.nn.cell import Cell
- from mindspore.ops.operations._rl_inner_ops import BatchAssign
-
-
- class BatchWrite(Cell):
-     r"""BatchWrite: write a list of parameters to assign the target.
-
-     .. warning::
-         This is an experiential prototype that is subject to change and/or deletion.
-
-     Supported Platforms:
-         ``GPU`` ``CPU``
-
-     Examples:
-         >>> import mindspore
-         >>> from mindspore import nn
-         >>> from mindspore.common.parameter import Parameter, ParameterTuple
-         >>> from mindspore.nn.reinforcement import BatchWrite
-         >>> class SourceNet(nn.Cell):
-         ...     def __init__(self):
-         ...         super(SourceNet, self).__init__()
-         ...         self.a = Parameter(Tensor(0.5, mstype.float32), name="a")
-         ...         self.dense = nn.Dense(in_channels=16, out_channels=1, weight_init=0)
-         >>> class DstNet(nn.Cell):
-         ...     def __init__(self):
-         ...         super(DstNet, self).__init__()
-         ...         self.a = Parameter(Tensor(0.1, mstype.float32), name="a")
-         ...         self.dense = nn.Dense(in_channels=16, out_channels=1)
-         >>> class Write(nn.Cell):
-         ...     def __init__(self, dst, src):
-         ...         super(Write, self).__init__()
-         ...         self.w = BatchWrite()
-         ...         self.dst = ParameterTuple(dst.trainable_params())
-         ...         self.src = ParameterTuple(src.trainable_params())
-         ...     def construct(self):
-         ...         success = self.w(self.dst, self.src)
-         ...         return success
-         >>> dst_net = DstNet()
-         >>> source_net = SourceNet()
-         >>> nets = nn.CellList()
-         >>> nets.append(dst_net)
-         >>> nets.append(source_net)
-         >>> success = Write(nets[0], nets[1])()
-     """
-     def __init__(self):
-         """Initialize BatchWrite"""
-         super(BatchWrite, self).__init__()
-         self.write = BatchAssign(lock=True)
-
-     def construct(self, dst, src):
-         """
-         Write the source parameter list to assign the dst.
-
-         Inputs:
-             - **dst** (tuple) - A paramameter tuple of the dst model.
-             - **src** (tuple) - A paramameter tuple of the source model.
-
-         Returns:
-             Bool, true.
-         """
-         self.write(dst, src)
-         return True
-
-
- class BatchRead(Cell):
-     r"""BatchRead: read a list of parameters to assign the target.
-
-     .. warning::
-         This is an experiential prototype that is subject to change and/or deletion.
-
-     Supported Platforms:
-         ``GPU`` ``CPU``
-
-     Examples:
-         >>> import mindspore
-         >>> from mindspore import nn
-         >>> from mindspore.common.parameter import Parameter, ParameterTuple
-         >>> from mindspore.nn.reinforcement import BatchRead
-         >>> class SNet(nn.Cell):
-         ...     def __init__(self):
-         ...         super(SNet, self).__init__()
-         ...         self.a = Parameter(Tensor(0.5, mstype.float32), name="a")
-         ...         self.dense = nn.Dense(in_channels=16, out_channels=1, weight_init=0)
-         >>> class DNet(nn.Cell):
-         ...     def __init__(self):
-         ...         super(DNet, self).__init__()
-         ...         self.a = Parameter(Tensor(0.1, mstype.float32), name="a")
-         ...         self.dense = nn.Dense(in_channels=16, out_channels=1)
-         >>> class Read(nn.Cell):
-         ...     def __init__(self, dst, src):
-         ...         super(Read, self).__init__()
-         ...         self.read = BatchRead()
-         ...         self.dst = ParameterTuple(dst.trainable_params())
-         ...         self.src = ParameterTuple(src.trainable_params())
-         ...     def construct(self):
-         ...         success = self.read(self.dst, self.src)
-         ...         return success
-         >>> dst_net = DNet()
-         >>> source_net = SNet()
-         >>> nets = nn.CellList()
-         >>> nets.append(dst_net)
-         >>> nets.append(source_net)
-         >>> success = Read(nets[0], nets[1])()
-
-     """
-     def __init__(self):
-         """Initialize BatchRead"""
-         super(BatchRead, self).__init__()
-         self.read = BatchAssign(lock=False)
-
-     def construct(self, dst, src):
-         """
-         Read the source parameter list to assign the dst.
-
-         Inputs:
-             - **dst** (tuple) - A paramameter tuple of the dst model.
-             - **src** (tuple) - A paramameter tuple of the source model.
-
-         Returns:
-             Bool, true.
-         """
-         self.read(dst, src)
-         return True
mindspore/nn/reinforcement/_tensors_queue.py DELETED
@@ -1,152 +0,0 @@
- # Copyright 2022 Huawei Technologies Co., Ltd
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # ============================================================================
- """
- TensorsQueue, each element in the queue is a list of tensors.
- """
- from __future__ import absolute_import
-
- from mindspore.nn.cell import Cell
- from mindspore.ops.operations import _rl_inner_ops as rl_ops
- from mindspore import _checkparam as Validator
- from mindspore.common import dtype as mstype
-
-
- class TensorsQueue(Cell):
-     r'''
-     TensorsQueue: a queue which stores tensors lists.
-
-     .. warning::
-         This is an experiential prototype that is subject to change and/or deletion.
-
-     Args:
-         dtype (mindspore.dtype): the data type in the TensorsQueue. Each tensor should have the same dtype.
-         shapes (tuple[int64]): the shape of each element in TensorsQueue.
-         size (int): the size of the TensorsQueue.
-         name (str): the name of this TensorsQueue. Default: "TQ".
-
-     Raises:
-         TypeError: If `dtype` is not mindspore number type.
-         ValueError: If `size` is less than 0.
-         ValueError: If `shapes` size is less than 1.
-
-     Supported Platforms:
-         ``GPU`` ``CPU``
-
-     Examples:
-         >>> import mindspore as ms
-         >>> from mindspore import Tensor
-         >>> import mindspore.nn as nn
-         >>> data1 = Tensor([[0, 1], [1, 2]], dtype=ms.float32)
-         >>> data2 = Tensor([1], dtype=ms.float32)
-         >>> tq = nn.TensorsQueue(dtype=ms.float32, shapes=((2, 2), (1,)), size=5)
-         >>> tq.put((data1, data2))
-         >>> ans = tq.pop()
-     '''
-
-     def __init__(self, dtype, shapes, size=0, name="TQ"):
-         """Initialize TensorsQueue"""
-         super(TensorsQueue, self).__init__()
-         Validator.check_subclass("dtype", dtype, mstype.number_type + (mstype.bool_,), self.cls_name)
-         Validator.check_int(size, 0, Validator.GE, "size", self.cls_name)
-         elements_num = len(shapes)
-         Validator.check_int(elements_num, 1, Validator.GE, "len(shapes)", self.cls_name)
-         self.handle_ = rl_ops.TensorsQueueCreate(dtype, shapes, size, name)()
-         self.tensors_q_put = rl_ops.TensorsQueuePut(dtype, shapes)
-         self.tensors_q_get = rl_ops.TensorsQueueGet(dtype, shapes)
-         self.tensors_q_pop = rl_ops.TensorsQueueGet(dtype, shapes, pop_after_get=True)
-         self.tensors_q_clear = rl_ops.TensorsQueueClear()
-         self.tensors_q_close = rl_ops.TensorsQueueClose()
-         self.tensors_q_size = rl_ops.TensorsQueueSize()
-         self.__is_tensors_queue__ = True
-
-     def put(self, element):
-         """
-         Put element(tuple(Tensors)) to TensorsQueue in the end of queue.
-
-         Args:
-             element (tuple(Tensor) or list[tensor]): The input element.
-
-         Returns:
-             Bool, true.
-         """
-         self.tensors_q_put(self.handle_, element)
-         return True
-
-     def get(self):
-         """
-         Get one element int the front of the TensorsQueue.
-
-         Returns:
-             tuple(Tensors), the element in TensorsQueue.
-         """
-         element = self.tensors_q_get(self.handle_)
-         return element
-
-     def pop(self):
-         """
-         Get one element int the front of the TensorsQueue, and remove it.
-
-         Returns:
-             tuple(Tensors), the element in TensorsQueue.
-         """
-         element = self.tensors_q_pop(self.handle_)
-         return element
-
-     def __graph_pop__(self):
-         """
-         Get one element int the front of the TensorsQueue, and remove it.
-         This is only used in graph mode.
-
-         Returns:
-             tuple(Tensors), the element in TensorsQueue.
-         """
-         element = self.tensors_q_pop(self.handle_)
-         return self.handle_, element
-
-     def size(self):
-         """
-         Get the used/available size of the TensorsQueue, and remove it.
-
-         Returns:
-             Tensor(mindspore.int64), the used size of TensorsQueue.
-         """
-         size = self.tensors_q_size(self.handle_)
-         return size
-
-     def close(self):
-         """
-         Close the created TensorsQueue.
-
-         .. warning::
-             Once close the TensorsQueue, every functions belong to this TensorsQueue will be disaviliable.
-             Every resources created in TensorsQueue will be removed. If this TensorsQueue will be used in next step
-             or somewhere, eg: next loop, please use `clear` instead.
-
-         Returns:
-             Bool, true.
-         """
-         self.tensors_q_close(self.handle_)
-         return True
-
-     def clear(self):
-         """
-         Clear the created TensorsQueue. Only reset the TensorsQueue, clear the data and reset the size
-         in TensorsQueue and keep the instance of this TensorsQueue.
-
-         Returns:
-             Bool, true.
-         """
-         self.tensors_q_clear(self.handle_)
-         return True