mindspore-2.4.0-cp39-cp39-manylinux1_x86_64.whl → mindspore-2.4.10-cp39-cp39-manylinux1_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (294)
  1. mindspore/.commit_id +1 -1
  2. mindspore/_c_dataengine.cpython-39-x86_64-linux-gnu.so +0 -0
  3. mindspore/_c_expression.cpython-39-x86_64-linux-gnu.so +0 -0
  4. mindspore/_c_mindrecord.cpython-39-x86_64-linux-gnu.so +0 -0
  5. mindspore/bin/cache_admin +0 -0
  6. mindspore/bin/cache_server +0 -0
  7. mindspore/common/api.py +1 -4
  8. mindspore/common/file_system.py +2 -0
  9. mindspore/common/initializer.py +51 -15
  10. mindspore/common/parameter.py +6 -5
  11. mindspore/common/tensor.py +15 -49
  12. mindspore/communication/_comm_helper.py +5 -0
  13. mindspore/communication/comm_func.py +7 -7
  14. mindspore/context.py +16 -2
  15. mindspore/dataset/engine/datasets_standard_format.py +17 -0
  16. mindspore/dataset/engine/datasets_user_defined.py +27 -1
  17. mindspore/experimental/llm_boost/__init__.py +2 -2
  18. mindspore/experimental/llm_boost/atb/boost_base.py +240 -64
  19. mindspore/experimental/llm_boost/atb/llama_boost.py +46 -29
  20. mindspore/experimental/llm_boost/atb/qwen_boost.py +47 -24
  21. mindspore/include/api/context.h +1 -1
  22. mindspore/include/dataset/constants.h +2 -2
  23. mindspore/include/mindapi/base/format.h +13 -0
  24. mindspore/lib/libavcodec.so.59 +0 -0
  25. mindspore/lib/libavdevice.so.59 +0 -0
  26. mindspore/lib/libavfilter.so.8 +0 -0
  27. mindspore/lib/libavformat.so.59 +0 -0
  28. mindspore/lib/libavutil.so.57 +0 -0
  29. mindspore/lib/libdnnl.so.2 +0 -0
  30. mindspore/lib/libmindspore_backend.so +0 -0
  31. mindspore/lib/libmindspore_common.so +0 -0
  32. mindspore/lib/libmindspore_core.so +0 -0
  33. mindspore/lib/libmindspore_glog.so.0 +0 -0
  34. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  35. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  36. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  37. mindspore/lib/libmindspore_ops.so +0 -0
  38. mindspore/lib/libopencv_core.so.4.5 +0 -0
  39. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  40. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  41. mindspore/lib/libswresample.so.4 +0 -0
  42. mindspore/lib/libswscale.so.6 +0 -0
  43. mindspore/lib/plugin/ascend/custom_aicore_ops/op_proto/libop_proto.so +0 -0
  44. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  45. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  46. mindspore/lib/plugin/ascend/custom_ascendc_910/framework/npu_supported_ops.json +10 -0
  47. mindspore/lib/plugin/ascend/custom_ascendc_910/op_api/lib/libcust_opapi.so +0 -0
  48. mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910}/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +0 -42
  49. mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl → custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl}/dynamic/decoder_kv_cache.py +51 -16
  50. mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl → custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl}/dynamic/prompt_kv_cache.py +51 -16
  51. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.json +158 -0
  52. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.o +0 -0
  53. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.json +158 -0
  54. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.o +0 -0
  55. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.json +158 -0
  56. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.o +0 -0
  57. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.json +158 -0
  58. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.o +0 -0
  59. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.json +158 -0
  60. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.o +0 -0
  61. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.json +158 -0
  62. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.o +0 -0
  63. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.json +158 -0
  64. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.o +0 -0
  65. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.json +158 -0
  66. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.o +0 -0
  67. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.json +167 -0
  68. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.o +0 -0
  69. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.json +167 -0
  70. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.o +0 -0
  71. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.json +167 -0
  72. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.o +0 -0
  73. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.json +167 -0
  74. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.o +0 -0
  75. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.json +167 -0
  76. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.o +0 -0
  77. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.json +167 -0
  78. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.o +0 -0
  79. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.json +167 -0
  80. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.o +0 -0
  81. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.json +167 -0
  82. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/ascend910/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.o +0 -0
  83. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/config/ascend910/binary_info_config.json +302 -0
  84. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/config/ascend910/decoder_kv_cache.json +892 -0
  85. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/kernel/config/ascend910/prompt_kv_cache.json +892 -0
  86. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
  87. mindspore/lib/plugin/ascend/custom_ascendc_910/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  88. mindspore/lib/plugin/ascend/custom_ascendc_910/op_proto/inc/op_proto.h +33 -0
  89. mindspore/lib/plugin/ascend/custom_ascendc_910/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
  90. mindspore/lib/plugin/ascend/custom_ascendc_910/version.info +1 -0
  91. mindspore/lib/plugin/ascend/custom_ascendc_910b/framework/npu_supported_ops.json +14 -0
  92. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_api/include/aclnn_decoder_kv_cache.h +59 -0
  93. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_api/include/aclnn_prompt_kv_cache.h +59 -0
  94. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_api/lib/libcust_opapi.so +0 -0
  95. mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl → custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl}/dynamic/all_finite.py +51 -16
  96. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/decoder_kv_cache.cpp +192 -0
  97. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/decoder_kv_cache.py +215 -0
  98. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/prompt_kv_cache.cpp +274 -0
  99. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl/dynamic/prompt_kv_cache.py +215 -0
  100. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.json +80 -0
  101. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.o +0 -0
  102. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.json +80 -0
  103. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.o +0 -0
  104. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.json +80 -0
  105. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.o +0 -0
  106. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.json +158 -0
  107. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.o +0 -0
  108. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.json +158 -0
  109. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.o +0 -0
  110. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.json +158 -0
  111. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.o +0 -0
  112. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.json +158 -0
  113. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.o +0 -0
  114. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.json +158 -0
  115. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.o +0 -0
  116. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.json +158 -0
  117. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.o +0 -0
  118. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.json +158 -0
  119. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.o +0 -0
  120. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.json +158 -0
  121. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.o +0 -0
  122. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.json +167 -0
  123. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.o +0 -0
  124. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.json +167 -0
  125. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.o +0 -0
  126. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.json +167 -0
  127. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.o +0 -0
  128. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.json +167 -0
  129. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.o +0 -0
  130. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.json +167 -0
  131. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.o +0 -0
  132. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.json +167 -0
  133. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.o +0 -0
  134. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.json +167 -0
  135. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.o +0 -0
  136. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.json +167 -0
  137. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend310p/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.o +0 -0
  138. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.json +78 -0
  139. mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.o → custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_52f59e2a65d9b1bb002de35c2819754a.o} +0 -0
  140. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.json +78 -0
  141. mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.o → custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_6b5e50e30256d85838d6ce83514df20f.o} +0 -0
  142. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.json +78 -0
  143. mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.o → custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_74e4ac02880d452e3308c94af273562e.o} +0 -0
  144. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.json +156 -0
  145. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_0d5520cc587ad44ce634bf3fbcffc272.o +0 -0
  146. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.json +156 -0
  147. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_20390d30b3c4c0d23167ccca6c030c2b.o +0 -0
  148. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.json +156 -0
  149. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_2d151f0b1d2db51faa2968d5b67544e2.o +0 -0
  150. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.json +156 -0
  151. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_561690ec17cc1def3d2fcf68c1b07b56.o +0 -0
  152. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.json +156 -0
  153. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_570f9aaa99e5e773b3dd0a33784363f4.o +0 -0
  154. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.json +156 -0
  155. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_59668a0f0764afb98fda8ab9e84126f1.o +0 -0
  156. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.json +156 -0
  157. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_91d9833e4792b70b670e4e2b916abd86.o +0 -0
  158. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.json +156 -0
  159. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/decoder_kv_cache/DecoderKvCache_c74cdc5fef094383401856f8519504af.o +0 -0
  160. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.json +165 -0
  161. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_0515c7b1a4cd614449e38c5e9a7e3f8d.o +0 -0
  162. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.json +165 -0
  163. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_09f22d898d6358c91e7c4fc48bac48e7.o +0 -0
  164. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.json +165 -0
  165. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_0cb9a6f894b925250227136e5aab7061.o +0 -0
  166. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.json +165 -0
  167. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_2fa8702ffd7ca85e9e194f62644415d5.o +0 -0
  168. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.json +165 -0
  169. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_570b62f187dfd439b64613d881deedb7.o +0 -0
  170. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.json +165 -0
  171. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_585218c11411ff84709b9e725b66c435.o +0 -0
  172. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.json +165 -0
  173. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_5c9365ccde170b358c5b126d69dae13e.o +0 -0
  174. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.json +165 -0
  175. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/ascend910b/prompt_kv_cache/PromptKvCache_6d97c45b7c43bc16fcff8baa5dacac4e.o +0 -0
  176. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend310p/all_finite.json +139 -0
  177. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend310p/binary_info_config.json +361 -0
  178. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend310p/decoder_kv_cache.json +892 -0
  179. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend310p/prompt_kv_cache.json +892 -0
  180. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +139 -0
  181. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +361 -0
  182. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910b/decoder_kv_cache.json +892 -0
  183. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/kernel/config/ascend910b/prompt_kv_cache.json +892 -0
  184. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
  185. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  186. mindspore/lib/plugin/ascend/custom_ascendc_910b/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
  187. mindspore/lib/plugin/ascend/custom_ascendc_910b/version.info +1 -0
  188. mindspore/lib/plugin/ascend/custom_compiler/setup.py +1 -1
  189. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  190. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  191. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  192. mindspore/lib/plugin/ascend/libmindspore_internal_kernels.so +0 -0
  193. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
  194. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
  195. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
  196. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
  197. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/base/types.h +5 -5
  198. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
  199. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
  200. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/liblcal.so +0 -0
  201. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/liblcal_static.a +0 -0
  202. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +1 -0
  203. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/flash_attention_score_op.h +6 -1
  204. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/paged_attention_op.h +6 -1
  205. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/rms_norm_op.h +4 -3
  206. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_quant_acme_impl.so +0 -0
  207. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_310p_impl.so +0 -0
  208. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
  209. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
  210. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
  211. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
  212. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
  213. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
  214. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/flash_attention_score/flash_attention_score_fp16_bsh_full_mix.o +0 -0
  215. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_bf16_bnsd_mix.o +0 -0
  216. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_bf16_bsh_mix.o +0 -0
  217. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_fp16_bnsd_mix.o +0 -0
  218. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/paged_attention/paged_attention_fp16_bsh_mix.o +0 -0
  219. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
  220. mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
  221. mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
  222. mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
  223. mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
  224. mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
  225. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  226. mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
  227. mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
  228. mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
  229. mindspore/mint/__init__.py +490 -2
  230. mindspore/mint/nn/__init__.py +2 -2
  231. mindspore/mint/optim/adamw.py +6 -14
  232. mindspore/nn/__init__.py +2 -0
  233. mindspore/nn/cell.py +16 -4
  234. mindspore/nn/layer/basic.py +24 -7
  235. mindspore/nn/layer/conv.py +3 -0
  236. mindspore/nn/layer/embedding.py +31 -14
  237. mindspore/nn/layer/pooling.py +8 -10
  238. mindspore/nn/optim/tft_wrapper.py +12 -15
  239. mindspore/nn/utils/__init__.py +22 -0
  240. mindspore/nn/utils/init.py +71 -0
  241. mindspore/ops/_grad_experimental/grad_array_ops.py +0 -11
  242. mindspore/ops/_grad_experimental/grad_comm_ops.py +45 -8
  243. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +6 -0
  244. mindspore/ops/auto_generate/gen_extend_func.py +33 -0
  245. mindspore/ops/auto_generate/gen_ops_def.py +52 -3
  246. mindspore/ops/auto_generate/gen_ops_prim.py +158 -8
  247. mindspore/ops/function/array_func.py +2 -0
  248. mindspore/ops/function/math_func.py +12 -5
  249. mindspore/ops/function/random_func.py +221 -7
  250. mindspore/ops/operations/__init__.py +1 -1
  251. mindspore/ops/operations/array_ops.py +3 -1
  252. mindspore/ops/operations/comm_ops.py +25 -1
  253. mindspore/ops/operations/custom_ops.py +6 -4
  254. mindspore/ops/operations/manually_defined/ops_def.py +8 -10
  255. mindspore/ops/operations/nn_ops.py +7 -2
  256. mindspore/parallel/_auto_parallel_context.py +26 -5
  257. mindspore/parallel/_cell_wrapper.py +24 -3
  258. mindspore/parallel/_tensor.py +46 -2
  259. mindspore/parallel/_utils.py +39 -21
  260. mindspore/parallel/transform_safetensors.py +196 -43
  261. mindspore/profiler/profiling.py +5 -1
  262. mindspore/run_check/_check_version.py +20 -9
  263. mindspore/train/_utils.py +92 -32
  264. mindspore/train/callback/_checkpoint.py +12 -9
  265. mindspore/train/callback/_on_request_exit.py +12 -1
  266. mindspore/train/callback/_tft_register.py +33 -9
  267. mindspore/train/dataset_helper.py +10 -2
  268. mindspore/train/model.py +21 -0
  269. mindspore/train/serialization.py +12 -19
  270. mindspore/version.py +1 -1
  271. {mindspore-2.4.0.dist-info → mindspore-2.4.10.dist-info}/METADATA +9 -7
  272. {mindspore-2.4.0.dist-info → mindspore-2.4.10.dist-info}/RECORD +284 -157
  273. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
  274. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.json +0 -58
  275. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.json +0 -58
  276. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.json +0 -58
  277. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +0 -109
  278. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +0 -38
  279. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
  280. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  281. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
  282. mindspore/lib/plugin/ascend/custom_ascendc_ops/version.info +0 -1
  283. /mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910}/op_api/include/aclnn_decoder_kv_cache.h +0 -0
  284. /mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910}/op_api/include/aclnn_prompt_kv_cache.h +0 -0
  285. /mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl → custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl}/dynamic/decoder_kv_cache.cpp +0 -0
  286. /mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl → custom_ascendc_910/op_impl/ai_core/tbe/custom_ascendc_910_impl}/dynamic/prompt_kv_cache.cpp +0 -0
  287. /mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910b}/op_api/include/aclnn_all_finite.h +0 -0
  288. /mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910b}/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +0 -0
  289. /mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910b}/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +0 -0
  290. /mindspore/lib/plugin/ascend/{custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl → custom_ascendc_910b/op_impl/ai_core/tbe/custom_ascendc_910b_impl}/dynamic/all_finite.cpp +0 -0
  291. /mindspore/lib/plugin/ascend/{custom_ascendc_ops → custom_ascendc_910b}/op_proto/inc/op_proto.h +0 -0
  292. {mindspore-2.4.0.dist-info → mindspore-2.4.10.dist-info}/WHEEL +0 -0
  293. {mindspore-2.4.0.dist-info → mindspore-2.4.10.dist-info}/entry_points.txt +0 -0
  294. {mindspore-2.4.0.dist-info → mindspore-2.4.10.dist-info}/top_level.txt +0 -0
mindspore/.commit_id CHANGED
@@ -1 +1 @@
- __commit_id__ = '[sha1]:8c86f33f,[branch]:(HEAD,origin/master,origin/HEAD,master)'
+ __commit_id__ = '[sha1]:8e2ae935,[branch]:(HEAD,origin/r2.4.1,r2.4.1)'
mindspore/bin/cache_admin CHANGED
Binary file
mindspore/common/api.py CHANGED
@@ -1703,7 +1703,6 @@ class _CellGraphExecutor:
         self._graph_executor = GraphExecutor_.get_instance()
         self._graph_executor.set_py_exe_path(sys.executable)
         self._graph_executor.set_kernel_build_server_dir(os.path.split(kernel_build_server.__file__)[0] + os.sep)
-        self._pid = os.getpid()
 
     def init_dataset(self, queue_name, dataset_size, batch_size, dataset_types, dataset_shapes,
                      input_indexs, phase='dataset', need_run=True):
@@ -1934,9 +1933,7 @@ class _CellGraphExecutor:
 
     def del_net_res(self, obj, net_id):
         """Clear the memory resource of a network."""
-        # no need to del net res by gc in independent dataset process which is a subprocess forked by main process
-        if self._pid == os.getpid():
-            self._graph_executor.del_net_res(obj, net_id)
+        self._graph_executor.del_net_res(obj, net_id)
 
     def _get_branch_control_input(self):
         if ('obf_ratio' not in self.obfuscate_config.keys()) or (
mindspore/common/file_system.py CHANGED
@@ -22,6 +22,7 @@ class FileSystem:
         self.create_args = ("ab",)
         self.open = open
         self.open_args = ("rb",)
+        self.backend = "basic"
 
 
 def _register_basic_file_system(fs: FileSystem):
@@ -45,4 +46,5 @@ def _register_mindio_file_system(fs: FileSystem):
     fs.create_args = ()
     fs.open = mindio.open_file
     fs.open_args = ()
+    fs.backend = "mindio"
     return True
mindspore/common/initializer.py CHANGED
@@ -103,6 +103,12 @@ def _numpy_seed():
     return np.random.randint(low=1, high=(1 << 63), dtype=np.int64)
 
 
+def _init_random_normal_inplace(mean, sigma, arr):
+    if sigma < 0:
+        raise ValueError("sigma < 0")
+    _random_normal(_numpy_seed(), arr, mean, sigma)
+
+
 def _init_random_normal(mean, sigma, shape):
     if sigma < 0:
         raise ValueError("sigma < 0")
@@ -111,12 +117,22 @@ def _init_random_normal(mean, sigma, shape):
     return data
 
 
+def _init_random_uniform_inplace(a, b, arr):
+    _random_uniform(_numpy_seed(), arr, a, b)
+
+
 def _init_random_uniform(a, b, shape):
     data = np.ndarray(shape=shape, dtype=np.float32)
     _random_uniform(_numpy_seed(), data, a, b)
     return data
 
 
+def _init_truncated_normal_inplace(a, b, mean, sigma, arr):
+    if sigma < 0:
+        raise ValueError("sigma < 0")
+    _truncated_normal(_numpy_seed(), arr, a, b, mean, sigma)
+
+
 def _init_truncated_normal(a, b, mean, sigma, shape):
     if sigma < 0:
         raise ValueError("sigma < 0")
@@ -298,9 +314,11 @@ class XavierNormal(Initializer):
         fan_in, fan_out = _calculate_fan_in_and_fan_out(arr.shape)
 
         std = self.gain * math.sqrt(2.0 / float(fan_in + fan_out))
-        data = _init_random_normal(0, std, arr.shape)
-
-        _assignment(arr, data)
+        if isinstance(arr, np.ndarray) and arr.dtype == np.float32:
+            _init_random_normal_inplace(0, std, arr)
+        else:
+            data = _init_random_normal(0, std, arr.shape)
+            _assignment(arr, data)
 
 
 @_register('xavier_uniform')
@@ -337,8 +355,11 @@ class XavierUniform(Initializer):
     def _initialize(self, arr):
         n_in, n_out = _calculate_fan_in_and_fan_out(arr.shape)
         boundary = self.gain * math.sqrt(6.0 / (n_in + n_out))
-        data = _init_random_uniform(-boundary, boundary, arr.shape)
-        _assignment(arr, data)
+        if isinstance(arr, np.ndarray) and arr.dtype == np.float32:
+            _init_random_uniform_inplace(-boundary, boundary, arr)
+        else:
+            data = _init_random_uniform(-boundary, boundary, arr.shape)
+            _assignment(arr, data)
 
 
 @_register('he_uniform')
@@ -386,8 +407,11 @@ class HeUniform(Initializer):
         gain = _calculate_gain(self.nonlinearity, self.negative_slope)
         std = gain / math.sqrt(fan)
         boundary = math.sqrt(3.0) * std
-        data = _init_random_uniform(-boundary, boundary, arr.shape)
-        _assignment(arr, data)
+        if isinstance(arr, np.ndarray) and arr.dtype == np.float32:
+            _init_random_uniform_inplace(-boundary, boundary, arr)
+        else:
+            data = _init_random_uniform(-boundary, boundary, arr.shape)
+            _assignment(arr, data)
 
 
 @_register('he_normal')
@@ -432,8 +456,11 @@ class HeNormal(Initializer):
         fan = _calculate_correct_fan(arr.shape, self.mode)
         gain = _calculate_gain(self.nonlinearity, self.negative_slope)
         std = gain / math.sqrt(fan)
-        data = _init_random_normal(0, std, arr.shape)
-        _assignment(arr, data)
+        if isinstance(arr, np.ndarray) and arr.dtype == np.float32:
+            _init_random_normal_inplace(0, std, arr)
+        else:
+            data = _init_random_normal(0, std, arr.shape)
+            _assignment(arr, data)
 
 
 class Constant(Initializer):
@@ -718,8 +745,11 @@ class Uniform(Initializer):
         self.scale = scale
 
     def _initialize(self, arr):
-        tmp = _init_random_uniform(-self.scale, self.scale, arr.shape)
-        _assignment(arr, tmp)
+        if isinstance(arr, np.ndarray) and arr.dtype == np.float32:
+            _init_random_uniform_inplace(-self.scale, self.scale, arr)
+        else:
+            tmp = _init_random_uniform(-self.scale, self.scale, arr.shape)
+            _assignment(arr, tmp)
 
 
 @_register()
@@ -749,8 +779,11 @@ class Normal(Initializer):
         self.mean = mean
 
     def _initialize(self, arr):
-        data = _init_random_normal(self.mean, self.sigma, arr.shape)
-        _assignment(arr, data)
+        if isinstance(arr, np.ndarray) and arr.dtype == np.float32:
+            _init_random_normal_inplace(self.mean, self.sigma, arr)
+        else:
+            data = _init_random_normal(self.mean, self.sigma, arr.shape)
+            _assignment(arr, data)
 
 
 @_register()
@@ -780,8 +813,11 @@ class TruncatedNormal(Initializer):
         self.b = b
 
     def _initialize(self, arr):
-        tmp = _init_truncated_normal(self.a, self.b, self.mean, self.sigma, arr.shape)
-        _assignment(arr, tmp)
+        if isinstance(arr, np.ndarray) and arr.dtype == np.float32:
+            _init_truncated_normal_inplace(self.a, self.b, self.mean, self.sigma, arr)
+        else:
+            tmp = _init_truncated_normal(self.a, self.b, self.mean, self.sigma, arr.shape)
+            _assignment(arr, tmp)
 
 
 def initializer(init, shape=None, dtype=mstype.float32):
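The change above routes the built-in random initializers through an in-place fill whenever the buffer handed to _initialize() is a float32 NumPy array, avoiding one temporary array per parameter. A minimal, hedged usage sketch using the standard public API (the in-place path is an internal optimization and is not visible to the caller):

    import mindspore as ms
    from mindspore.common.initializer import initializer, Normal, TruncatedNormal

    # Same public API as 2.4.0; with a float32 dtype the 2.4.10 _initialize()
    # implementations write the random values directly into the backing buffer.
    weight = initializer(Normal(sigma=0.02, mean=0.0), shape=(1024, 1024), dtype=ms.float32)
    bias = initializer(TruncatedNormal(sigma=0.02), shape=(1024,), dtype=ms.float32)
    weight_data = weight.init_data()  # materializes the values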
mindspore/common/parameter.py CHANGED
@@ -243,7 +243,8 @@ class Parameter(Tensor_):
     def __new__(cls, default_input, *args, **kwargs):
         init_data_flag = bool(isinstance(default_input, Tensor) and default_input.has_init)
         rc = sys.getrefcount(default_input)
-        input_class, *class_init_args = Parameter._get_parameter_new_args(default_input, rc)
+        init_param = getattr(cls, "init_param", True)
+        input_class, *class_init_args = Parameter._get_parameter_new_args(default_input, rc, init_param)
         new_type = Parameter._get_base_class(input_class)
         obj = input_class.__new__(new_type)
         input_class.__init__(obj, *class_init_args)
@@ -355,7 +356,7 @@ class Parameter(Tensor_):
         return new_type
 
     @staticmethod
-    def _get_parameter_new_args(data, rc):
+    def _get_parameter_new_args(data, rc, init_param=True):
         """Set `set_data` of current `Parameter`."""
         if isinstance(data, bool):
             raise ValueError('Parameter data can not be `bool`')
@@ -370,8 +371,8 @@ class Parameter(Tensor_):
                 return (Tensor, data.asnumpy(), mstype.qint4x2)
             return (Tensor, data.asnumpy())
 
-        not_init_data = _is_role_sched() or (_is_role_pserver() and _cache_enable()
-                                             ) or _is_in_auto_parallel_mode() or _is_parallel_mode()
+        not_init_data = not init_param or _is_role_sched() or (_is_role_pserver() and _cache_enable()) \
+                        or _is_in_auto_parallel_mode() or _is_parallel_mode()
         if not_init_data:
             # do not init data while in auto parallel.
             return (Tensor, None, data.dtype, get_slice_shape(data.dtype, data.shape), data.init)
@@ -976,7 +977,7 @@ class Parameter(Tensor_):
         """
         if self.is_default_input_init and self.is_in_parallel != _is_in_auto_parallel_mode():
             raise RuntimeError("Must set or change parallel mode before any initializer Tensor created.")
-        if self.init_mode is None:
+        if self.init_mode is None or not self.has_init:
             return self
         if self.inited_param is not None:
            return self.inited_param
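For context, __new__ now reads an init_param attribute from the class being constructed and forwards it to _get_parameter_new_args(), so a falsy value keeps the initializer lazy in the same way the auto-parallel branches do. A hedged, hypothetical sketch (this attribute is not a documented public API; treat it as an internal hook):

    import mindspore as ms
    from mindspore import Parameter
    from mindspore.common.initializer import initializer

    # Hypothetical illustration only: toggling the class attribute consulted by
    # __new__ defers materialization of the backing data, mirroring the
    # not_init_data branch shown in the diff above.
    Parameter.init_param = False
    w = Parameter(initializer("normal", (4096, 4096), ms.float32), name="w")
    Parameter.init_param = True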
mindspore/common/tensor.py CHANGED
@@ -2896,8 +2896,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             self.slice_shape_of_persistent_data_ = data_shape
             self.slice_num_of_persistent_data_ = slice_num_of_persistent_data
 
+        from mindspore.common.initializer import Zero as ZeroInitializer
+
         try:
-            data = np.ndarray(data_shape, dtype=mstype.dtype_to_nptype(self.dtype))
+            if isinstance(self.init, ZeroInitializer):
+                data = np.zeros(data_shape, dtype=mstype.dtype_to_nptype(self.dtype))
+            else:
+                data = np.ndarray(data_shape, dtype=mstype.dtype_to_nptype(self.dtype))
         except ValueError as e:
             msg = "Error shape={}".format(shape)
             logger.critical(msg)
@@ -2933,7 +2938,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             self.init.seed, _ = self.seed
 
         with seed_context(self.init):
-            if slice_num_of_persistent_data == 1:
+            if not isinstance(self.init, ZeroInitializer) and slice_num_of_persistent_data == 1:
                 self.init(data)
         self.init = None
 
@@ -4749,7 +4754,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('lu_solve')(self, LU_data, LU_pivots)
 
-
     def nextafter(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.nextafter`.
@@ -4763,7 +4767,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         validator.check_value_type('some', some, bool, 'Tensor.qr')
         return tensor_operator_registry.get('qr')(self, 'reduced' if some else 'complete')
 
-
     def ormqr(self, input2, input3, left=True, transpose=False):
         r"""
         For details, please refer to :func:`mindspore.ops.ormqr`,
@@ -4771,7 +4774,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('ormqr')(self, input2, input3, left, transpose)
 
-
    def masked_scatter(self, mask, x):
         r"""
         Returns a Tensor. Updates the value in the "self Tensor" with the `tensor` value according to the mask.
@@ -4812,7 +4814,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get('masked_scatter')()(self, mask, x)
 
-
     def index_put(self, indices, values, accumulate=False):
         r"""
         Returns a Tensor. According to the index number of `indices` ,
@@ -4865,7 +4866,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         _index_put = tensor_operator_registry.get('index_put')(0 if accumulate is False else 1)
         return _index_put(self, values, indices)
 
-
     def move_to(self, to, blocking=True):
         r"""
         Copy Tensor to target device synchronously or asynchronously, default synchronously. only support PyNative mode.
@@ -4899,8 +4899,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         mode = context.get_context("mode")
         if mode != context.PYNATIVE_MODE:
             raise ValueError(f"The method of 'move_to' only supported in pynative mode, but got: {mode}.")
-        return Tensor(Tensor_.move_to(self, to, blocking), device="CPU" if to == "CPU" else None)
-
+        return Tensor_.move_to(self, to, blocking)
 
     def _offload(self):
         r"""
@@ -4946,44 +4945,6 @@ def _vm_compare(*args):
     return Tensor(np.array(fn(y)))
 
 
-def _check_sequence_shape(input_data):
-    """Check the shape of tensor input with type of sequence."""
-    max_dims_reached = False
-    max_ndim = 64 # corresponding to NPY_MAXDIMS
-    out_shape = [0]*max_ndim
-
-    def check_shape_recursive(input_data, curr_ndim):
-        nonlocal max_dims_reached, max_ndim, out_shape
-        if curr_ndim > max_ndim:
-            return False
-        if not isinstance(input_data, (tuple, list)):
-            if max_dims_reached and curr_ndim != max_ndim:
-                max_ndim = curr_ndim
-                return False
-            max_dims_reached = True
-            max_ndim = curr_ndim
-            return True
-        if not max_dims_reached:
-            out_shape[curr_ndim] = len(input_data)
-        else:
-            if out_shape[curr_ndim] != len(input_data):
-                max_ndim = curr_ndim
-                return False
-        if not input_data:
-            # process empty list
-            if not check_shape_recursive(None, curr_ndim + 1):
-                return False
-        for data in input_data:
-            if not check_shape_recursive(data, curr_ndim + 1):
-                return False
-        return True
-
-    if not check_shape_recursive(input_data, 0):
-        raise ValueError(f"When initializing a tensor with a sequence, the sequence has an inhomogeneous shape "
-                         f"after {max_ndim} dimensions. The detected shape was {tuple(out_shape[:max_ndim])} "
-                         f"+ inhomogeneous part.")
-
-
 def _check_tensor_input(input_data=None, dtype=None, shape=None, init=None):
     """Check the tensor input."""
     if input_data is not None and shape is not None:
@@ -4997,8 +4958,13 @@ def _check_tensor_input(input_data=None, dtype=None, shape=None, init=None):
     if isinstance(input_data, np.ndarray) and input_data.ndim >= 1 and input_data.size == 0:
         raise ValueError("input_data can not contain zero dimension.")
     if isinstance(input_data, (tuple, list)):
-        _check_sequence_shape(input_data)
-        if np.array(input_data).ndim >= 1 and np.array(input_data).size == 0:
+        try:
+            np_data = np.array(input_data)
+        except ValueError as e:
+            if "The requested array has an inhomogeneous shape" in str(e):
+                raise TypeError(f"For Tensor, the input_data is {input_data} that contain unsupported element.")
+            raise
+        if np_data.ndim >= 1 and np_data.size == 0:
             raise ValueError("input_data can not contain zero dimension.")
 
     if shape is not None and not (hasattr(init, "__enable_zero_dim__") and init.__enable_zero_dim__) and 0 in shape:
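Two user-visible effects of the tensor.py changes, shown as a short, hedged sketch (exact exception behaviour for ragged input depends on the installed NumPy version):

    import mindspore as ms
    from mindspore import Tensor
    from mindspore.common.initializer import Zero

    # Lazy tensor with a Zero initializer: init_data() now allocates the buffer
    # with np.zeros() instead of filling an uninitialized ndarray afterwards.
    t = Tensor(shape=(2, 3), dtype=ms.float32, init=Zero())
    print(t.init_data())

    # Ragged nested sequences are rejected through NumPy's own error path and
    # surfaced as a TypeError instead of the removed custom shape check.
    try:
        Tensor([[1, 2], [3]])
    except TypeError as err:
        print(err)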
mindspore/communication/_comm_helper.py CHANGED
@@ -499,3 +499,8 @@ def _destroy_group_helper(group):
         hccl.create_group(group)
     else:
         CollectiveManager.get_instance().destroy_group(group)
+
+
+def _get_group_map():
+    """Get the group map"""
+    return CollectiveManager.get_instance().get_group_map()
mindspore/communication/comm_func.py CHANGED
@@ -209,7 +209,7 @@ def all_reduce(tensor, op=ReduceOp.SUM, group=GlobalComm.WORLD_COMM_GROUP, async
         RuntimeError: If device target is invalid, or backend is invalid, or distributed initialization fails.
 
     Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
+        ``Ascend``
 
     Examples:
         .. note::
@@ -275,7 +275,7 @@ def all_gather_into_tensor(tensor, group=GlobalComm.WORLD_COMM_GROUP, async_op=F
         RuntimeError: If device target is invalid, or backend is invalid, or distributed initialization fails.
 
     Supported Platforms:
-        ``Ascend`` ``GPU``
+        ``Ascend``
 
     Examples:
         .. note::
@@ -349,7 +349,7 @@ def reduce_scatter_tensor(tensor, op=ReduceOp.SUM, group=GlobalComm.WORLD_COMM_G
         RuntimeError: If device target is invalid, or backend is invalid, or distributed initialization fails.
 
     Supported Platforms:
-        ``Ascend`` ``GPU``
+        ``Ascend``
 
     Examples:
         .. note::
@@ -909,7 +909,7 @@ def send(tensor, dst=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
         ValueError: If the rank ID of the process is greater than the rank size of the communication group.
 
     Supported Platforms:
-        ``Ascend`` ``GPU``
+        ``Ascend``
 
     Examples:
         .. note::
@@ -970,7 +970,7 @@ def recv(tensor, src=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
         ValueError: If the rank ID of the process is greater than the rank size of the communication group.
 
     Supported Platforms:
-        ``Ascend`` ``GPU``
+        ``Ascend``
 
     Examples:
         .. note::
@@ -1040,7 +1040,7 @@ def isend(tensor, dst=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
         ValueError: If the rank ID of the process is greater than the rank size of the communication group.
 
     Supported Platforms:
-        ``Ascend`` ``GPU``
+        ``Ascend``
 
     Examples:
         .. note::
@@ -1105,7 +1105,7 @@ def irecv(tensor, src=0, group=GlobalComm.WORLD_COMM_GROUP, tag=0):
         ValueError: If the rank ID of the process is greater than the rank size of the communication group.
 
     Supported Platforms:
-        ``Ascend`` ``GPU``
+        ``Ascend``
 
     Examples:
         .. note::
mindspore/context.py CHANGED
@@ -936,6 +936,7 @@ def set_auto_parallel_context(**kwargs):
              \  group_ckpt_save_file
              \  auto_pipeline
              \  dump_local_norm
+             \  dump_device_local_norm
     =========================== ===========================
 
     Args:
@@ -1090,6 +1091,9 @@ def set_auto_parallel_context(**kwargs):
         dump_local_norm (bool): Whether to dump local_norm value, when the `parallel_mode` is set to
                      ``semi_auto_parallel`` or ``auto_parallel``.
                      Default: ``False`` .
+        dump_device_local_norm (bool): Whether to dump device_local_norm value, when the `parallel_mode` is set to
+                     ``semi_auto_parallel`` or ``auto_parallel``.
+                     Default: ``False`` .
 
     Raises:
         ValueError: If input key is not attribute in auto parallel context.
@@ -1165,8 +1169,9 @@ def reset_auto_parallel_context():
     - pipeline_stages: 1.
     - pipeline_result_broadcast: False.
     - fusion_threshold: 64.
-    - dump_local_norm: False.
     - auto_pipeline: False.
+    - dump_local_norm: False.
+    - dump_device_local_norm: False.
 
     Examples:
         >>> import mindspore as ms
@@ -1686,6 +1691,15 @@ def set_context(**kwargs):
           - 3: Optimize dataset reader with all scenes.
         - bias_add_comm_swap (bool): Enable node execution order swap communication operators and add operators
           if ``True``. Only 1-dimension bias node is supported. Default: ``False``.
+        - enable_allreduce_slice_to_reducescatter (bool): Enable allreduce optimization. In the scenario where
+          the batchmatmul model introduces allreduce in parallel, if the subsequent nodes are stridedslice
+          operator with model parallel, allreduce will be optimized as reducescatter according to the identified
+          patterns. Typical used in MoE module with groupwise alltoall. Default: ``False``.
+        - enable_interleave_split_concat_branch (bool): Enable communication computation parallel optimization
+          for branches formed by split and concat operators with ``enable_interleave`` attribute. It is typical
+          used in MoE parallel scenario. After splitting the input data, each slice of data is processed by the
+          MoE module, and then the branch results are concatenated. When the optimization is enable,
+          communication and computation will be executed in parallel between branches. Default: ``False``.
         - host_scheduling_max_threshold(int): The max threshold to control whether the dynamic shape process is
           used when run the static graph, the default value is 0. When the number of operations in the static graph
           is less than the max threshold, this graph will be executed in dynamic shape process. In large model
@@ -1784,7 +1798,7 @@ def set_context(**kwargs):
       When both exist simultaneously, the global jit config will not overwrite the local network's jit config.
 
     - jit_level (str): Used to control the compilation optimization level. Default: ``""`` , The framework
-      automatically selects the execution method based on product, Altas training product is O2, and all other
+      automatically selects the execution method based on product, Atlas training product is O2, and all other
      products are O0. In addition, The option of the dynamic shape must be O0 or O1, O2 is not supported.
      The value range is as follows:
 
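A hedged sketch of how the newly documented dump flag would be switched on alongside the existing one (argument names are taken from the docstring additions above; verify against the installed 2.4.10 build):

    import mindspore as ms

    # dump_local_norm existed in 2.4.0; dump_device_local_norm is new in 2.4.10.
    # Both only take effect under semi_auto_parallel or auto_parallel mode.
    ms.set_auto_parallel_context(parallel_mode="semi_auto_parallel",
                                 dump_local_norm=True,
                                 dump_device_local_norm=True)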
mindspore/dataset/engine/datasets_standard_format.py CHANGED
@@ -33,6 +33,7 @@ from .datasets import UnionBaseDataset, SourceDataset, MappableDataset, Shuffle,
 from .datasets_user_defined import GeneratorDataset
 from .obs.obs_mindrecord_dataset import MindRecordFromOBS
 from .validators import check_csvdataset, check_minddataset, check_tfrecorddataset, check_obsminddataset
+from ..core.validator_helpers import type_check
 from ...mindrecord.config import _get_enc_key, _get_dec_mode, _get_hash_mode, decrypt, verify_file_hash
 
 
@@ -301,6 +302,22 @@ class MindDataset(MappableDataset, UnionBaseDataset):
             else:
                 self.new_padded_sample[k] = v
 
+    def __deepcopy__(self, memodict):
+        if id(self) in memodict:
+            return memodict[id(self)]
+        return self.__safe_deepcopy__(memodict, exclude=("mindrecord_op"))
+
+    def __getitem__(self, index):
+        type_check(index, (int,), "index")
+        if index < 0:
+            raise ValueError("index cannot be negative, but got {0}.".format(index))
+        if not hasattr(self, "mindrecord_op"):
+            minddata_node = cde.MindDataNode(
+                self.dataset_files, self.columns_list, self.sampler, self.new_padded_sample,
+                self.num_padded, shuffle_to_shuffle_mode(self.shuffle_option))
+            self.mindrecord_op = minddata_node.Build()
+        return [t.as_array() for t in self.mindrecord_op[index]]
+
 
 class TFRecordDataset(SourceDataset, UnionBaseDataset):
     """
mindspore/dataset/engine/datasets_user_defined.py CHANGED
@@ -19,6 +19,7 @@ After declaring the dataset object, you can further apply dataset operations
 (e.g. filter, skip, concat, map, batch) on it.
 """
 import builtins
+import copy
 import errno
 import itertools
 import math
@@ -50,6 +51,7 @@ from ..core.config import get_enable_shared_mem, get_prefetch_size, get_multipro
     get_enable_watchdog, get_debug_mode, get_seed, set_seed
 from ..core.datatypes import mstypelist_to_detypelist
 from ..core.py_util_helpers import ExceptionHandler
+from ..core.validator_helpers import type_check
 from ..transforms import transforms
 
 
@@ -427,6 +429,10 @@ class SamplerFn(cde.PythonMultiprocessingRuntime):
             subprocess_file_descriptor = w.sentinel
             st = time.time()
             while _PythonMultiprocessing.is_process_alive(w.pid):
+                process = psutil.Process(w.pid)
+                if process.status() == psutil.STATUS_ZOMBIE:
+                    process.kill()
+                    break
                 time.sleep(0.01)  # sleep 10ms, waiting for the subprocess exit
                 if time.time() - st > check_interval:
                     logger.warning("Waiting for the subprocess worker [{}] to exit.".format(w.pid))
@@ -469,7 +475,7 @@ class SamplerFn(cde.PythonMultiprocessingRuntime):
 
             # let the quit event notify the worker process to exit
             w.join(timeout=5)
-            if w.is_alive():
+            if _PythonMultiprocessing.is_process_alive(w.pid):
                 # if the worker process did not exit, it may hang, try to terminate it
                 w.terminate()
                 w.close()
@@ -907,6 +913,26 @@ class GeneratorDataset(MappableDataset, UnionBaseDataset):
             return memodict[id(self)]
         return self.__safe_deepcopy__(memodict, exclude=("source", "__transfer_dataset__"))
 
+    def __getitem__(self, index):
+        type_check(index, (int, np.number), "index")
+        if not hasattr(self.source, "__getitem__"):
+            raise RuntimeError("Dataset don't support randomized access.")
+        if not hasattr(self, "generator_op"):
+            dataset = copy.deepcopy(self)
+            self.prepared_source = _generator_fn_wrapper(_cpp_sampler_fn, self.source)
+            if self.schema is None:
+                dataset.generator_node = cde.GeneratorNode(self.prepared_source, self.column_names, self.column_types,
+                                                           self.source_len, self.sampler, 1, None)
+            else:
+                schema = self.schema
+                if isinstance(schema, Schema):
+                    schema = self.schema.cpp_schema
+                dataset.generator_node = cde.GeneratorNode(self.prepared_source, schema, self.source_len,
+                                                           self.sampler, 1, None)
+            self.generator_op = dataset.generator_node.Build()
+        sample_id = self.generator_op.GetMappedIndex(index)
+        return self.source[sample_id]
+
     def is_shuffled(self):
         if self.sampler:
             return self.sampler.is_shuffled()
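Both dataset classes above gain index-based access in this release. A hedged usage sketch (the .mindrecord path below is a placeholder, and the GeneratorDataset source must itself implement __getitem__):

    import numpy as np
    import mindspore.dataset as ds

    # MindDataset: returns the indexed record as a list of NumPy arrays,
    # built through a dedicated MindDataNode on first use.
    md = ds.MindDataset(dataset_files="data.mindrecord", shuffle=False)
    first_record = md[0]

    # GeneratorDataset: the index is mapped through the sampler and the
    # underlying source is read directly; sources without __getitem__
    # raise a RuntimeError.
    source = [(np.array([i], dtype=np.int32),) for i in range(10)]
    gd = ds.GeneratorDataset(source, column_names=["data"], shuffle=False)
    print(gd[3])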
mindspore/experimental/llm_boost/__init__.py CHANGED
@@ -15,7 +15,7 @@
 """LlmBoost Register"""
 from __future__ import absolute_import
 
-from mindspore.experimental.llm_boost.atb import *
+from mindspore.experimental.llm_boost.atb import LlamaBoost, QwenBoost
 from mindspore.experimental.llm_boost.register import LlmBoostRegister
 
-__all__ = ['LlmBoostRegister']
+__all__ = ["LlmBoostRegister"]