auto-coder 0.1.292__tar.gz → 0.1.294__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of auto-coder might be problematic.

Files changed (203)
  1. {auto_coder-0.1.292 → auto_coder-0.1.294}/PKG-INFO +1 -1
  2. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/auto_coder.egg-info/PKG-INFO +1 -1
  3. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/auto_coder.egg-info/SOURCES.txt +1 -0
  4. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/auto_coder_runner.py +17 -3
  5. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/auto_coder_server.py +8 -0
  6. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/chat_auto_coder_lang.py +4 -0
  7. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/commands/auto_command.py +31 -5
  8. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/commands/tools.py +22 -0
  9. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/auto_coder_lang.py +6 -2
  10. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/mcp_hub.py +59 -18
  11. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/mcp_server.py +18 -0
  12. auto_coder-0.1.294/src/autocoder/common/mcp_servers/mcp_server_gpt4o_mini_search.py +153 -0
  13. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/api_server.py +8 -0
  14. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/cache/local_duckdb_storage_cache.py +70 -25
  15. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/long_context_rag.py +16 -16
  16. auto_coder-0.1.294/src/autocoder/version.py +1 -0
  17. auto_coder-0.1.292/src/autocoder/version.py +0 -1
  18. {auto_coder-0.1.292 → auto_coder-0.1.294}/LICENSE +0 -0
  19. {auto_coder-0.1.292 → auto_coder-0.1.294}/README.md +0 -0
  20. {auto_coder-0.1.292 → auto_coder-0.1.294}/setup.cfg +0 -0
  21. {auto_coder-0.1.292 → auto_coder-0.1.294}/setup.py +0 -0
  22. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/auto_coder.egg-info/dependency_links.txt +0 -0
  23. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/auto_coder.egg-info/entry_points.txt +0 -0
  24. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/auto_coder.egg-info/requires.txt +0 -0
  25. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/auto_coder.egg-info/top_level.txt +0 -0
  26. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/__init__.py +0 -0
  27. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/agent/__init__.py +0 -0
  28. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/agent/auto_demand_organizer.py +0 -0
  29. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/agent/auto_filegroup.py +0 -0
  30. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/agent/auto_guess_query.py +0 -0
  31. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/agent/auto_learn_from_commit.py +0 -0
  32. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/agent/auto_review_commit.py +0 -0
  33. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/agent/auto_tool.py +0 -0
  34. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/agent/coder.py +0 -0
  35. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/agent/designer.py +0 -0
  36. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/agent/planner.py +0 -0
  37. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/agent/project_reader.py +0 -0
  38. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/auto_coder.py +0 -0
  39. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/auto_coder_rag.py +0 -0
  40. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/auto_coder_rag_client_mcp.py +0 -0
  41. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/auto_coder_rag_mcp.py +0 -0
  42. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/benchmark.py +0 -0
  43. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/chat/__init__.py +0 -0
  44. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/chat_auto_coder.py +0 -0
  45. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/command_args.py +0 -0
  46. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/commands/__init__.py +0 -0
  47. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/commands/auto_web.py +0 -0
  48. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/JupyterClient.py +0 -0
  49. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/ShellClient.py +0 -0
  50. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/__init__.py +0 -0
  51. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/anything2images.py +0 -0
  52. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/anything2img.py +0 -0
  53. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/audio.py +0 -0
  54. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/auto_configure.py +0 -0
  55. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/buildin_tokenizer.py +0 -0
  56. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/chunk_validation.py +0 -0
  57. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/cleaner.py +0 -0
  58. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/code_auto_execute.py +0 -0
  59. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/code_auto_generate.py +0 -0
  60. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/code_auto_generate_diff.py +0 -0
  61. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/code_auto_generate_editblock.py +0 -0
  62. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/code_auto_generate_strict_diff.py +0 -0
  63. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/code_auto_merge.py +0 -0
  64. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/code_auto_merge_diff.py +0 -0
  65. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/code_auto_merge_editblock.py +0 -0
  66. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/code_auto_merge_strict_diff.py +0 -0
  67. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/code_modification_ranker.py +0 -0
  68. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/command_completer.py +0 -0
  69. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/command_generator.py +0 -0
  70. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/command_templates.py +0 -0
  71. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/computer_use.py +0 -0
  72. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/conf_import_export.py +0 -0
  73. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/conf_validator.py +0 -0
  74. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/const.py +0 -0
  75. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/context_pruner.py +0 -0
  76. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/conversation_pruner.py +0 -0
  77. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/files.py +0 -0
  78. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/git_utils.py +0 -0
  79. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/global_cancel.py +0 -0
  80. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/image_to_page.py +0 -0
  81. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/index_import_export.py +0 -0
  82. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/interpreter.py +0 -0
  83. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/llm_rerank.py +0 -0
  84. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/mcp_servers/__init__.py +0 -0
  85. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/mcp_servers/mcp_server_perplexity.py +0 -0
  86. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/mcp_tools.py +0 -0
  87. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/memory_manager.py +0 -0
  88. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/model_speed_test.py +0 -0
  89. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/printer.py +0 -0
  90. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/recall_validation.py +0 -0
  91. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/result_manager.py +0 -0
  92. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/screenshots.py +0 -0
  93. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/search.py +0 -0
  94. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/search_replace.py +0 -0
  95. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/shells.py +0 -0
  96. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/stats_panel.py +0 -0
  97. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/sys_prompt.py +0 -0
  98. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/text.py +0 -0
  99. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/types.py +0 -0
  100. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/utils_code_auto_generate.py +0 -0
  101. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/data/byzerllm.md +0 -0
  102. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/data/tokenizer.json +0 -0
  103. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/db/__init__.py +0 -0
  104. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/db/store.py +0 -0
  105. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/dispacher/__init__.py +0 -0
  106. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/dispacher/actions/__init__.py +0 -0
  107. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/dispacher/actions/action.py +0 -0
  108. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/dispacher/actions/copilot.py +0 -0
  109. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/dispacher/actions/plugins/__init__.py +0 -0
  110. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/dispacher/actions/plugins/action_regex_project.py +0 -0
  111. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/dispacher/actions/plugins/action_translate.py +0 -0
  112. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/index/__init__.py +0 -0
  113. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/index/entry.py +0 -0
  114. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/index/filter/__init__.py +0 -0
  115. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/index/filter/normal_filter.py +0 -0
  116. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/index/filter/quick_filter.py +0 -0
  117. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/index/for_command.py +0 -0
  118. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/index/index.py +0 -0
  119. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/index/symbols_utils.py +0 -0
  120. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/index/types.py +0 -0
  121. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/lang.py +0 -0
  122. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/models.py +0 -0
  123. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/plugins/__init__.py +0 -0
  124. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/plugins/dynamic_completion_example.py +0 -0
  125. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/plugins/git_helper_plugin.py +0 -0
  126. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/plugins/sample_plugin.py +0 -0
  127. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/plugins/token_helper_plugin.py +0 -0
  128. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/plugins/utils.py +0 -0
  129. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/privacy/__init__.py +0 -0
  130. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/privacy/model_filter.py +0 -0
  131. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/pyproject/__init__.py +0 -0
  132. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/__init__.py +0 -0
  133. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/cache/__init__.py +0 -0
  134. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/cache/base_cache.py +0 -0
  135. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/cache/byzer_storage_cache.py +0 -0
  136. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/cache/cache_result_merge.py +0 -0
  137. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/cache/file_monitor_cache.py +0 -0
  138. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/cache/local_byzer_storage_cache.py +0 -0
  139. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/cache/rag_file_meta.py +0 -0
  140. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/cache/simple_cache.py +0 -0
  141. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/conversation_to_queries.py +0 -0
  142. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/doc_filter.py +0 -0
  143. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/document_retriever.py +0 -0
  144. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/lang.py +0 -0
  145. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/llm_wrapper.py +0 -0
  146. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/loaders/__init__.py +0 -0
  147. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/loaders/docx_loader.py +0 -0
  148. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/loaders/excel_loader.py +0 -0
  149. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/loaders/pdf_loader.py +0 -0
  150. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/loaders/ppt_loader.py +0 -0
  151. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/qa_conversation_strategy.py +0 -0
  152. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/rag_config.py +0 -0
  153. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/rag_entry.py +0 -0
  154. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/raw_rag.py +0 -0
  155. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/relevant_utils.py +0 -0
  156. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/searchable.py +0 -0
  157. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/simple_directory_reader.py +0 -0
  158. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/simple_rag.py +0 -0
  159. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/stream_event/__init__.py +0 -0
  160. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/stream_event/event_writer.py +0 -0
  161. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/stream_event/types.py +0 -0
  162. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/token_checker.py +0 -0
  163. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/token_counter.py +0 -0
  164. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/token_limiter.py +0 -0
  165. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/token_limiter_utils.py +0 -0
  166. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/types.py +0 -0
  167. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/utils.py +0 -0
  168. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/variable_holder.py +0 -0
  169. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/regexproject/__init__.py +0 -0
  170. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/suffixproject/__init__.py +0 -0
  171. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/tsproject/__init__.py +0 -0
  172. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/__init__.py +0 -0
  173. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/_markitdown.py +0 -0
  174. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/auto_coder_utils/__init__.py +0 -0
  175. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/auto_coder_utils/chat_stream_out.py +0 -0
  176. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/auto_project_type.py +0 -0
  177. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/chat_auto_coder_utils/__init__.py +0 -0
  178. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/conversation_store.py +0 -0
  179. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/llm_client_interceptors.py +0 -0
  180. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/llms.py +0 -0
  181. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/log_capture.py +0 -0
  182. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/model_provider_selector.py +0 -0
  183. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/multi_turn.py +0 -0
  184. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/operate_config_api.py +0 -0
  185. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/print_table.py +0 -0
  186. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/project_structure.py +0 -0
  187. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/queue_communicate.py +0 -0
  188. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/request_event_queue.py +0 -0
  189. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/request_queue.py +0 -0
  190. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/rest.py +0 -0
  191. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/stream_thinking.py +0 -0
  192. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/tests.py +0 -0
  193. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/thread_utils.py +0 -0
  194. {auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/utils/types.py +0 -0
  195. {auto_coder-0.1.292 → auto_coder-0.1.294}/tests/test_action_regex_project.py +0 -0
  196. {auto_coder-0.1.292 → auto_coder-0.1.294}/tests/test_chat_auto_coder.py +0 -0
  197. {auto_coder-0.1.292 → auto_coder-0.1.294}/tests/test_code_auto_merge_editblock.py +0 -0
  198. {auto_coder-0.1.292 → auto_coder-0.1.294}/tests/test_command_completer.py +0 -0
  199. {auto_coder-0.1.292 → auto_coder-0.1.294}/tests/test_planner.py +0 -0
  200. {auto_coder-0.1.292 → auto_coder-0.1.294}/tests/test_plugins.py +0 -0
  201. {auto_coder-0.1.292 → auto_coder-0.1.294}/tests/test_privacy.py +0 -0
  202. {auto_coder-0.1.292 → auto_coder-0.1.294}/tests/test_queue_communicate.py +0 -0
  203. {auto_coder-0.1.292 → auto_coder-0.1.294}/tests/test_symbols_utils.py +0 -0
{auto_coder-0.1.292 → auto_coder-0.1.294}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.292
+Version: 0.1.294
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence

{auto_coder-0.1.292 → auto_coder-0.1.294}/src/auto_coder.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.292
+Version: 0.1.294
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence

{auto_coder-0.1.292 → auto_coder-0.1.294}/src/auto_coder.egg-info/SOURCES.txt

@@ -92,6 +92,7 @@ src/autocoder/common/text.py
 src/autocoder/common/types.py
 src/autocoder/common/utils_code_auto_generate.py
 src/autocoder/common/mcp_servers/__init__.py
+src/autocoder/common/mcp_servers/mcp_server_gpt4o_mini_search.py
 src/autocoder/common/mcp_servers/mcp_server_perplexity.py
 src/autocoder/data/byzerllm.md
 src/autocoder/data/tokenizer.json

{auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/auto_coder_runner.py

@@ -39,7 +39,7 @@ import git
 from autocoder.common import git_utils
 from autocoder.chat_auto_coder_lang import get_message
 from autocoder.agent.auto_guess_query import AutoGuessQuery
-from autocoder.common.mcp_server import get_mcp_server, McpRequest, McpInstallRequest, McpRemoveRequest, McpListRequest, McpListRunningRequest, McpRefreshRequest
+from autocoder.common.mcp_server import get_mcp_server, McpRequest, McpInstallRequest, McpRemoveRequest, McpListRequest, McpListRunningRequest, McpRefreshRequest,McpServerInfoRequest
 import byzerllm
 from byzerllm.utils import format_str_jinja2
 from autocoder.common.memory_manager import get_global_memory_file_paths
@@ -1133,7 +1133,7 @@ def mcp(query: str):
         printer.print_in_terminal("mcp_list_builtin_title")
         printer.print_str_in_terminal(response.result)
         return
-
+
     # Handle refresh command
     if query.startswith("/refresh"):
         server_name = query.replace("/refresh", "", 1).strip()
@@ -1183,7 +1183,21 @@ def mcp(query: str):
     if os.path.exists(temp_yaml):
         os.remove(temp_yaml)

-    mcp_server = get_mcp_server()
+    mcp_server = get_mcp_server()
+
+
+    if query.startswith("/info"):
+        response = mcp_server.send_request(McpServerInfoRequest(
+            model=args.inference_model or args.model,
+            product_mode=args.product_mode
+        ))
+        if response.error:
+            printer.print_in_terminal("mcp_server_info_error", style="red", error=response.error)
+        else:
+            printer.print_in_terminal("mcp_server_info_title")
+            printer.print_str_in_terminal(response.result)
+        return
+
     response = mcp_server.send_request(
         McpRequest(
             query=query,
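
With the `/info` branch above, the chat command `/mcp /info` now prints a summary of the connected MCP servers. As a rough illustration of the same request made programmatically (a sketch only; the model name and product mode below are placeholders, not values taken from this release):

    from autocoder.common.mcp_server import get_mcp_server, McpServerInfoRequest

    # Ask the MCP server manager for a summary of currently connected servers.
    mcp_server = get_mcp_server()
    response = mcp_server.send_request(
        McpServerInfoRequest(
            model="v3_chat",        # placeholder model name
            product_mode="lite",    # placeholder product mode
        )
    )
    if response.error:
        print("error:", response.error)
    else:
        print(response.result)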

{auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/auto_coder_server.py

@@ -34,6 +34,14 @@ import sys
 import io
 from autocoder.utils.log_capture import LogCapture

+# If support dotenv, use it
+if os.path.exists(".env"):
+    try:
+        from dotenv import load_dotenv
+        load_dotenv()
+    except ImportError:
+        pass
+
 def convert_yaml_config_to_str(yaml_config):
     yaml_content = yaml.safe_dump(
         yaml_config,
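
This block loads a `.env` file from the working directory before the server starts, provided python-dotenv is installed, so secrets can be kept out of the shell environment. A minimal sketch of the same optional-dotenv pattern in isolation (the key name is only an example, chosen because the new gpt4o-mini-search server reads OPENAI_API_KEY):

    import os

    # Load .env only when the file exists and python-dotenv is installed.
    if os.path.exists(".env"):
        try:
            from dotenv import load_dotenv
            load_dotenv()
        except ImportError:
            pass  # dotenv support stays optional

    print(os.getenv("OPENAI_API_KEY"))  # example: a key defined in .env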

{auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/chat_auto_coder_lang.py

@@ -138,6 +138,8 @@ MESSAGES = {
         "official_doc": "Official Documentation: https://uelng8wukz.feishu.cn/wiki/NhPNwSRcWimKFIkQINIckloBncI",
         "plugins_desc": "Manage plugins",
         "plugins_usage": "Usage: /plugins <command>\nAvailable subcommands:\n /plugins /list - List all available plugins\n /plugins /load <name> - Load a plugin\n /plugins /unload <name> - Unload a plugin\n /plugins/dirs - List plugin directories\n /plugins/dirs /add <path> - Add a plugin directory\n /plugins/dirs /remove <path> - Remove a plugin directory\n /plugins/dirs /clear - Clear all plugin directories",
+        "mcp_server_info_error": "Error getting MCP server info: {{ error }}",
+        "mcp_server_info_title": "Connected MCP Server Info",
     },
     "zh": {
         "auto_command_analyzing": "正在分析命令请求",
@@ -275,6 +277,8 @@ MESSAGES = {
         "official_doc": "官方文档: https://uelng8wukz.feishu.cn/wiki/NhPNwSRcWimKFIkQINIckloBncI",
         "plugins_desc": "管理插件",
         "plugins_usage": "用法: /plugins <命令>\n可用的子命令:\n /plugins /list - 列出所有可用插件\n /plugins /load <名称> - 加载一个插件\n /plugins /unload <名称> - 卸载一个插件\n /plugins/dirs - 列出插件目录\n /plugins/dirs /add <路径> - 添加一个插件目录\n /plugins/dirs /remove <路径> - 移除一个插件目录\n /plugins/dirs /clear - 清除所有插件目录",
+        "mcp_server_info_error": "获取MCP服务器信息时出错: {{ error }}",
+        "mcp_server_info_title": "已连接的MCP服务器信息",
     },
 }

{auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/commands/auto_command.py

@@ -24,6 +24,7 @@ from autocoder.common.global_cancel import global_cancel
 from autocoder.common.auto_configure import config_readme
 from autocoder.utils.auto_project_type import ProjectTypeAnalyzer
 from rich.text import Text
+from autocoder.common.mcp_server import get_mcp_server,McpServerInfoRequest

 class CommandMessage(BaseModel):
     role: str
@@ -159,7 +160,17 @@ class CommandAutoTuner:
         self.memory_config = memory_config
         self.command_config = command_config
         self.tools = AutoCommandTools(args=args, llm=self.llm)
-        self.project_type_analyzer = ProjectTypeAnalyzer(args=args, llm=self.llm)
+        self.project_type_analyzer = ProjectTypeAnalyzer(args=args, llm=self.llm)
+        try:
+            self.mcp_server = get_mcp_server()
+            mcp_server_info_response = self.mcp_server.send_request(McpServerInfoRequest(
+                model=args.inference_model or args.model,
+                product_mode=args.product_mode
+            ))
+            self.mcp_server_info = mcp_server_info_response.result
+        except Exception as e:
+            logger.error(f"Error getting MCP server info: {str(e)}")
+            self.mcp_server_info = ""

     def get_conversations(self) -> List[CommandMessage]:
         """Get conversation history from memory file"""
@@ -1210,12 +1221,26 @@ class CommandAutoTuner:
        response_user(response="你好,我是 auto-coder")
        </usage>
        </command>
-       </commands>
-
-
+
+       <command>
+       <name>execute_mcp_server</name>
+       <description>执行MCP服务器</description>
+       <usage>
+       该函数接受一个参数 query, 为要执行的MCP服务器查询字符串。
+
+       你可以根据下面已经连接的 mcp server 信息,来决定个是否调用该函数,注意该函数会更具你的 query
+       自动选择合适的 mcp server 来执行。如果你想某个特定的 server 来执行,你可以在 query 中说明你想哪个 server 执行。
+
+       <mcp_server_info>
+       {{ mcp_server_info }}
+       </mcp_server_info>
+
+       </usage>
+       </command>
        '''
        return {
-           "config_readme": config_readme.prompt()
+           "config_readme": config_readme.prompt(),
+           "mcp_server_info": self.mcp_server_info
        }

    def execute_auto_command(self, command: str, parameters: Dict[str, Any]) -> None:
@@ -1257,6 +1282,7 @@ class CommandAutoTuner:
            "read_file_with_keyword_ranges": self.tools.read_file_with_keyword_ranges,
            "get_project_type": self.project_type_analyzer.analyze,
            "response_user": self.tools.response_user,
+           "execute_mcp_server": self.tools.execute_mcp_server,

        }

{auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/commands/tools.py

@@ -62,6 +62,28 @@ class AutoCommandTools:
         self.result_manager = ResultManager()
         self.printer = Printer()

+    def execute_mcp_server(self, query: str) -> str:
+        from autocoder.common.mcp_server import get_mcp_server, McpRequest, McpInstallRequest, McpRemoveRequest, McpListRequest, McpListRunningRequest, McpRefreshRequest
+        mcp_server = get_mcp_server()
+        response = mcp_server.send_request(
+            McpRequest(
+                query=query,
+                model=self.args.inference_model or self.args.model,
+                product_mode=self.args.product_mode
+            )
+        )
+
+        result = response.result
+
+        self.result_manager.append(content=result, meta = {
+            "action": "execute_mcp_server",
+            "input": {
+                "query": query
+            }
+        })
+        return result
+
+
     def ask_user(self,question:str) -> str:
         '''
         如果你对用户的问题有什么疑问,或者你想从用户收集一些额外信息,可以调用此方法。
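
The new `execute_mcp_server` tool forwards a free-form query to the MCP server process via a regular `McpRequest` and records the result in the `ResultManager`. A hedged usage sketch, assuming `tools` is an `AutoCommandTools` instance already constructed with `args` and `llm` as in `CommandAutoTuner` above (the query text is only an example):

    # `tools` is assumed to be an existing AutoCommandTools instance.
    result = tools.execute_mcp_server(
        "use the gpt4o_mini_search server to look up recent MCP specification changes"
    )
    print(result)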

{auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/auto_coder_lang.py

@@ -191,7 +191,9 @@ MESSAGES = {
         "super_big_filter_failed": "❌ Super big filter failed: {{ error }}.",
         "super_big_filter_stats": "{{ model_names }} Super big filter completed in {{ elapsed_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}, input cost: {{ input_cost }}, output cost: {{ output_cost }}, speed: {{ speed }} tokens/s, chunk_index: {{ chunk_index }}",
         "super_big_filter_splitting": "⚠️ Index file is extremely large ({{ tokens_len }}/{{ max_tokens }}). The query will be split into {{ split_size }} chunks for processing.",
-        "super_big_filter_title": "{{ model_name }} is analyzing how to filter extremely large context..."
+        "super_big_filter_title": "{{ model_name }} is analyzing how to filter extremely large context...",
+        "mcp_server_info_error": "Error getting MCP server info: {{ error }}",
+        "mcp_server_info_title": "Connected MCP Server Info",
     },
     "zh": {
         "file_sliding_window_processing": "文件 {{ file_path }} 过大 ({{ tokens }} tokens),正在使用滑动窗口处理...",
@@ -380,7 +382,9 @@ MESSAGES = {
         "super_big_filter_failed": "❌ 超大过滤器失败: {{ error }}.",
         "super_big_filter_stats": "{{ model_names }} 超大过滤器完成耗时 {{ elapsed_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}, 速度: {{ speed }} tokens/秒, 块索引: {{ chunk_index }}",
         "super_big_filter_splitting": "⚠️ 索引文件极其庞大 ({{ tokens_len }}/{{ max_tokens }})。查询将被分成 {{ split_size }} 个部分进行处理。",
-        "super_big_filter_title": "{{ model_name }} 正在分析如何过滤极大规模上下文..."
+        "super_big_filter_title": "{{ model_name }} 正在分析如何过滤极大规模上下文...",
+        "mcp_server_info_error": "获取MCP服务器信息时出错: {{ error }}",
+        "mcp_server_info_title": "已连接的MCP服务器信息",
     }}


{auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/mcp_hub.py

@@ -2,8 +2,12 @@ import os
 import json
 import asyncio
 import aiohttp
+import importlib
+import pkgutil
+import re
+import inspect
 from datetime import datetime, timedelta
-from typing import Dict, List, Optional, Any, Set, Optional
+from typing import Dict, List, Optional, Any, Set, Optional, Tuple
 from pathlib import Path
 from pydantic import BaseModel, Field

@@ -62,23 +66,53 @@ class McpConnection:
         self.session = session


-MCP_PERPLEXITY_SERVER = '''
-{
-    "perplexity": {
-        "command": "python",
-        "args": [
-            "-m", "autocoder.common.mcp_servers.mcp_server_perplexity"
-        ],
-        "env": {
-            "PERPLEXITY_API_KEY": "{{PERPLEXITY_API_KEY}}"
-        }
-    }
-}
-'''
-
-MCP_BUILD_IN_SERVERS = {
-    "perplexity": json.loads(MCP_PERPLEXITY_SERVER)["perplexity"]
-}
+def _generate_server_configs() -> Tuple[Dict[str, Any], Dict[str, str]]:
+    """
+    Scan the autocoder.common.mcp_servers directory for mcp_server_*.py files
+    and generate server configurations.
+
+    Returns:
+        Tuple of (built-in servers dict, JSON templates dict)
+    """
+    servers = {}
+    templates = {}
+
+    try:
+        package_name = "autocoder.common.mcp_servers"
+        package = importlib.import_module(package_name)
+
+        # Find all modules in the package
+        for _, name, _ in pkgutil.iter_modules(package.__path__, package.__name__ + "."):
+            # Only process modules that start with "mcp_server_"
+            base_name = name.split(".")[-1]
+            if base_name.startswith("mcp_server_"):
+                # Generate a friendly server name
+                friendly_name = base_name[11:]
+
+                # Create env dictionary with placeholders
+                env_dict = {}
+
+                # Create server configuration
+                config = {
+                    "command": "python",
+                    "args": ["-m", name],
+                    "env": env_dict
+                }
+
+                # Store in dictionaries
+                servers[friendly_name] = config
+                templates[friendly_name] = json.dumps({friendly_name: config}, indent=4)
+
+                logger.info(f"Detected MCP server: {friendly_name}")
+
+    except Exception as e:
+        logger.error(f"Error generating server configs: {e}")
+
+    return servers, templates
+
+
+# Automatically generate server configurations
+MCP_BUILD_IN_SERVERS, MCP_SERVER_TEMPLATES = _generate_server_configs()


 class McpHub:
@@ -422,3 +456,10 @@ class McpHub:
         """
         for name in list(self.connections.keys()):
             await self.delete_connection(name)
+
+    @classmethod
+    def get_server_templates(cls) -> Dict[str, str]:
+        """
+        Get all available server templates as JSON strings
+        """
+        return MCP_SERVER_TEMPLATES
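
Built-in server configurations are now discovered by scanning the `mcp_servers` package instead of being hard-coded JSON templates. With the two bundled modules (`mcp_server_perplexity.py` and the new `mcp_server_gpt4o_mini_search.py`), the generated entries should take the shape sketched below; this is derived from `_generate_server_configs` above, not captured output. Note that the generated `env` dict is empty, so keys such as PERPLEXITY_API_KEY presumably now come from the process environment or a `.env` file rather than from the template:

    from autocoder.common.mcp_hub import McpHub

    templates = McpHub.get_server_templates()
    print(templates["gpt4o_mini_search"])
    # Expected shape, derived from the scanning logic above:
    # {
    #     "gpt4o_mini_search": {
    #         "command": "python",
    #         "args": ["-m", "autocoder.common.mcp_servers.mcp_server_gpt4o_mini_search"],
    #         "env": {}
    #     }
    # }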

{auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/common/mcp_server.py

@@ -53,6 +53,12 @@ class McpRefreshRequest:
     """Request to refresh MCP server connections"""
     name: Optional[str] = None

+@dataclass
+class McpServerInfoRequest:
+    """Request to get MCP server info"""
+    model: Optional[str] = None
+    product_mode: Optional[str] = None
+

 @dataclass
 class McpExternalServer(BaseModel):
@@ -346,6 +352,18 @@ class McpServer:
                     await self._response_queue.put(McpResponse(
                         result="", error=get_message_with_format("mcp_list_builtin_error", error=str(e))))

+            elif isinstance(request, McpServerInfoRequest):
+                try:
+                    llm = get_single_llm(request.model, product_mode=request.product_mode)
+                    mcp_executor = McpExecutor(hub, llm)
+                    result = mcp_executor.get_connected_servers_info()
+                    await self._response_queue.put(McpResponse(result=result))
+                except Exception as e:
+                    import traceback
+                    traceback.print_exc()
+                    await self._response_queue.put(McpResponse(
+                        result="", error=get_message_with_format("mcp_server_info_error", error=str(e))))
+
             elif isinstance(request, McpListRunningRequest):
                 try:
                     running_servers = "\n".join(

auto_coder-0.1.294/src/autocoder/common/mcp_servers/mcp_server_gpt4o_mini_search.py (new file)

@@ -0,0 +1,153 @@
+from os import getenv
+from textwrap import dedent
+import sys
+
+import mcp.server.stdio
+import mcp.types as types
+from mcp.server import NotificationOptions, Server
+from mcp.server.models import InitializationOptions
+import json
+from openai import OpenAI
+
+OPENAI_API_KEY = getenv("OPENAI_API_KEY")
+# Check if API key is empty or None
+if not OPENAI_API_KEY:
+    print("Error: OPENAI_API_KEY environment variable is not set. Please set it before running this server.", file=sys.stderr)
+    sys.exit(1)
+
+OPENAI_API_BASE_URL = getenv(
+    "OPENAI_API_BASE_URL", "https://api.openai.com/v1")
+
+server = Server("mcp-server-gpt4o-mini-search")
+
+client = OpenAI(
+    api_key=OPENAI_API_KEY,
+    base_url=OPENAI_API_BASE_URL
+)
+
+
+@server.list_tools()
+async def handle_list_tools() -> list[types.Tool]:
+    return [
+        types.Tool(
+            name="gpt4o_mini_search",
+            description=dedent(
+                """
+                GPT-4o mini with search enables agents to gather information from the internet
+                in real-time, providing up-to-date answers with source citations.
+                This tool is ideal for fact-checking, research, and accessing current information
+                that might not be in the model's training data.
+
+                The search-enhanced responses include relevant web sources to support the information
+                provided, making it useful for obtaining verified and recent information.
+
+                [Response structure]
+                - id: A unique identifier for the response
+                - model: The model used (gpt-4o-mini-search-preview)
+                - object: The object type ("chat.completion")
+                - created: The Unix timestamp when the completion was created
+                - choices[]: The list of completion choices generated
+                - usage: Usage statistics for the completion request
+                """
+            ),
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "system_message": {
+                        "type": "string",
+                        "description": "Optional custom system message. If not provided, a default search-optimized system message will be used.",
+                    },
+                    "messages": {
+                        "type": "array",
+                        "description": "A list of messages comprising the conversation so far (excluding system message which is handled separately).",
+                        "items": {
+                            "type": "object",
+                            "properties": {
+                                "content": {
+                                    "type": "string",
+                                    "description": "The contents of the message in this turn of conversation.",
+                                },
+                                "role": {
+                                    "type": "string",
+                                    "description": "The role of the speaker in this turn of conversation.",
+                                    "enum": ["user", "assistant"],
+                                },
+                            },
+                            "required": ["content", "role"],
+                        },
+                    },
+                },
+                "required": ["messages"],
+            },
+        )
+    ]
+
+
+@server.call_tool()
+async def handle_call_tool(
+    name: str, arguments: dict
+) -> list[types.TextContent | types.ImageContent | types.EmbeddedResource]:
+    if name != "gpt4o_mini_search":
+        raise ValueError(f"Unknown tool: {name}")
+
+    # Extract user messages
+    user_messages = arguments.get("messages", [])
+
+    # Define default system message if not provided
+    default_system_message = (
+        "你是专业搜索助手,需要:\n"
+        "1. 提供基于用户查询的清晰格式化信息\n"
+        "2. 使用[标题](URL)格式嵌入链接\n"
+        "3. 每条信息后附上来源\n"
+        "4. 用'---'分隔不同结果\n"
+        "5. 直接在文本中引用,不使用编号引用\n"
+        "6. 确保提供完整URL"
+    )
+
+    # Use custom system message if provided, otherwise use default
+    system_message = arguments.get("system_message", default_system_message)
+
+    # Prepare full message list with system message first
+    full_messages = [{"role": "system", "content": system_message}]
+    full_messages.extend(user_messages)
+
+    try:
+        # Initialize OpenAI client
+
+        # Make the API call using OpenAI SDK
+        completion = client.chat.completions.create(
+            model="gpt-4o-mini-search-preview",
+            messages=full_messages
+        )
+
+        # Extract content from response
+        content = completion.choices[0].message.content
+
+    except Exception as e:
+        raise RuntimeError(f"API error: {str(e)}")
+
+    return [types.TextContent(
+        type="text",
+        text=content,
+    )]
+
+
+async def main():
+    async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
+        await server.run(
+            read_stream,
+            write_stream,
+            InitializationOptions(
+                server_name="mcp-server-gpt4o-mini-search",
+                server_version="0.1.0",
+                capabilities=server.get_capabilities(
+                    notification_options=NotificationOptions(
+                        tools_changed=True),
+                    experimental_capabilities={},
+                ),
+            ),
+        )
+
+if __name__ == "__main__":
+    import asyncio
+    asyncio.run(main())
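
The new server exposes a single `gpt4o_mini_search` tool whose `inputSchema` accepts a `messages` array of user/assistant turns plus an optional `system_message`. A minimal sketch of an arguments payload a client could pass to that tool (the question text is only an example):

    # Example arguments matching the gpt4o_mini_search inputSchema above.
    arguments = {
        "messages": [
            {"role": "user", "content": "What changed in the latest MCP specification?"}
        ],
        # "system_message" is optional; when omitted, the server falls back to its
        # built-in search-oriented system prompt.
    }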

{auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/api_server.py

@@ -31,6 +31,14 @@ from byzerllm.utils.client.entrypoints.openai.protocol import (
 from pydantic import BaseModel
 from typing import List,Optional

+# If support dotenv, use it
+if os.path.exists(".env"):
+    try:
+        from dotenv import load_dotenv
+        load_dotenv()
+    except ImportError:
+        pass
+
 logger = init_logger(__name__)

 llm_client: ByzerLLM = None

{auto_coder-0.1.292 → auto_coder-0.1.294}/src/autocoder/rag/cache/local_duckdb_storage_cache.py

@@ -79,7 +79,8 @@ class DuckDBLocalContext:

 class LocalDuckdbStorage:
     def __init__(
-        self, llm: Union[ByzerLLM, SimpleByzerLLM] = None, database_name: str = ":memory:", table_name: str = "documents",
+        self, llm: Union[ByzerLLM, SimpleByzerLLM] = None, database_name: str = ":memory:",
+        table_name: str = "documents",
         embed_dim: Optional[int] = None, persist_dir: str = "./storage"
     ) -> None:
         self.llm = llm
@@ -410,9 +411,9 @@
         from autocoder.rag.token_counter import initialize_tokenizer

         with Pool(
-            processes=os.cpu_count(),
-            initializer=initialize_tokenizer,
-            initargs=(VariableHolder.TOKENIZER_PATH,),
+            processes=os.cpu_count(),
+            initializer=initialize_tokenizer,
+            initargs=(VariableHolder.TOKENIZER_PATH,),
         ) as pool:
             target_files_to_process = []
             for file_info in files_to_process:
@@ -451,23 +452,66 @@
             self.write_cache()

         if items:
-            logger.info("Clear cache from Byzer DuckDB Storage")
+            logger.info("[BUILD CACHE] Clearing existing cache from Byzer DuckDB Storage")
             self.storage.truncate_table()
-            logger.info("Save new cache to Byzer DuckDB Storage")
-
-            total_chunks = len(items)
-            completed_chunks = 0
-
-            logger.info(f"进度: 已完成 {0}/{total_chunks} 个文本块")
+            logger.info(f"[BUILD CACHE] Preparing to write to Byzer DuckDB Storage, "
+                        f"total chunks: {len(items)}, total files: {len(files_to_process)}")
+
+            # Use a fixed optimal batch size instead of dividing by worker count
+            batch_size = 100  # Optimal batch size for Byzer Storage
+            item_batches = [items[i:i + batch_size] for i in range(0, len(items), batch_size)]
+
+            total_batches = len(item_batches)
+            completed_batches = 0
+
+            logger.info(f"[BUILD CACHE] Starting to write to Byzer Storage using {batch_size} items per batch, "
+                        f"total batches: {total_batches}")
+            start_time = time.time()
+
+            # Use more workers to process the smaller batches efficiently
+            max_workers = min(10, total_batches)  # Cap at 10 workers or total batch count
+            logger.info(f"[BUILD CACHE] Using {max_workers} parallel workers for processing")
+
+            def batch_add_doc(_batch):
+                for b in _batch:
+                    self.storage.add_doc(b, dim=self.extra_params.rag_duckdb_vector_dim)
+
+            with (ThreadPoolExecutor(max_workers=max_workers) as executor):
+                futures = []
+                # Submit all batches to the executor upfront (non-blocking)
+                for batch in item_batches:
+                    futures.append(
+                        executor.submit(
+                            batch_add_doc, batch
+                        )
+                    )

-            for _chunk in items:
-                try:
-                    self.storage.add_doc(_chunk, dim=self.extra_params.rag_duckdb_vector_dim)
-                    completed_chunks += 1
-                    logger.info(f"进度: 已完成 {completed_chunks}/{total_chunks} 个文本块")
-                    time.sleep(self.extra_params.anti_quota_limit)
-                except Exception as err:
-                    logger.error(f"Error in saving chunk: {str(err)}")
+                # Wait for futures to complete
+                for future in as_completed(futures):
+                    try:
+                        future.result()
+                        completed_batches += 1
+                        elapsed = time.time() - start_time
+                        estimated_total = elapsed / completed_batches * total_batches if completed_batches > 0 else 0
+                        remaining = estimated_total - elapsed
+
+                        # Only log progress at reasonable intervals to reduce log spam
+                        if ((completed_batches == 1) or
+                                (completed_batches == total_batches) or
+                                (completed_batches % max(1, total_batches // 10) == 0)):
+                            logger.info(
+                                f"[BUILD CACHE] Progress: {completed_batches}/{total_batches} batches completed "
+                                f"({(completed_batches / total_batches * 100):.1f}%) "
+                                f"Estimated time remaining: {remaining:.1f}s"
+                            )
+                    except Exception as e:
+                        logger.error(f"[BUILD CACHE] Error saving batch: {str(e)}")
+                        # Add more detailed error information
+                        logger.error(f"[BUILD CACHE] Error details: batch size: "
+                                     f"{len(batch) if 'batch' in locals() else 'unknown'}")
+
+            total_time = time.time() - start_time
+            logger.info(f"[BUILD CACHE] All chunks written, total time: {total_time:.2f}s")

     def update_storage(self, file_info: FileInfo, is_delete: bool):
         results = self.storage.query_by_path(file_info.file_path)
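
The rewritten write path replaces the one-chunk-at-a-time loop (with a sleep after every chunk) with fixed 100-item batches submitted to a small thread pool. A quick sketch of how the batch and worker counts fall out of the code above, using an assumed count of 950 chunks purely for illustration:

    items = list(range(950))  # stand-in for 950 document chunks

    batch_size = 100
    item_batches = [items[i:i + batch_size] for i in range(0, len(items), batch_size)]

    total_batches = len(item_batches)     # 10 batches (9 full, 1 with 50 items)
    max_workers = min(10, total_batches)  # capped at 10 worker threads
    print(total_batches, max_workers)     # -> 10 10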

@@ -596,10 +640,11 @@
         """Search cached documents using query"""
         self.trigger_update() # 检查更新

-        if options is None or "query" not in options:
+        if options is None or "queries" not in options:
             return {file_path: self.cache[file_path].model_dump() for file_path in self.cache}

-        query = options.get("query", "")
+        queries = options.get("queries", "")
+        query = queries[0]
         logger.info(f"正在使用向量搜索检索数据, 你的问题: {query}")
         total_tokens = 0
         results = []
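
Callers of this search path must now pass a `queries` list instead of a single `query` string; only the first entry is used for the vector search. A hedged sketch of the new options shape (the surrounding variable and method names are illustrative, since the enclosing method name is not visible in this hunk):

    # New options shape expected by the DuckDB cache search path.
    options = {"queries": ["how is rag_duckdb_vector_dim configured?"]}

    # Hypothetical call site on a LocalDuckDBStorageCache instance:
    # docs = cache.get_cache(options=options)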

@@ -610,9 +655,9 @@
             # results = self.storage.vector_search(query, similarity_value=0.7, similarity_top_k=200)
             search_results = self.storage.vector_search(
                 query,
-                similarity_value=self.extra_params.duckdb_query_similarity,
-                similarity_top_k=self.extra_params.duckdb_query_top_k,
-                query_dim=self.extra_params.duckdb_vector_dim
+                similarity_value=self.extra_params.rag_duckdb_query_similarity,
+                similarity_top_k=self.extra_params.rag_duckdb_query_top_k,
+                query_dim=self.extra_params.rag_duckdb_vector_dim
             )
             results.extend(search_results)

@@ -644,4 +689,4 @@
             f"用户Hybrid Index Max Tokens设置为:{self.max_output_tokens},"
             f"累计tokens: {total_tokens}, "
             f"经过向量搜索共检索出 {len(result.keys())} 个文档, 共 {len(self.cache.keys())} 个文档")
-        return result
+        return result