auto-coder 0.1.255.tar.gz → 0.1.257.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of auto-coder might be problematic.

Files changed (168)
  1. {auto_coder-0.1.255 → auto_coder-0.1.257}/PKG-INFO +2 -2
  2. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/auto_coder.egg-info/PKG-INFO +2 -2
  3. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/auto_coder.egg-info/SOURCES.txt +4 -0
  4. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/auto_coder.egg-info/requires.txt +1 -1
  5. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/auto_coder.py +44 -50
  6. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/chat_auto_coder.py +16 -17
  7. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/chat_auto_coder_lang.py +1 -1
  8. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/__init__.py +7 -0
  9. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/auto_coder_lang.py +46 -16
  10. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/code_auto_generate.py +45 -5
  11. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/code_auto_generate_diff.py +45 -7
  12. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/code_auto_generate_editblock.py +48 -4
  13. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/code_auto_generate_strict_diff.py +46 -7
  14. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/code_modification_ranker.py +39 -3
  15. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/dispacher/actions/action.py +60 -40
  16. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/dispacher/actions/plugins/action_regex_project.py +12 -6
  17. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/index/entry.py +6 -4
  18. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/index/filter/quick_filter.py +175 -65
  19. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/index/index.py +94 -4
  20. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/models.py +44 -6
  21. auto_coder-0.1.257/src/autocoder/privacy/__init__.py +3 -0
  22. auto_coder-0.1.257/src/autocoder/privacy/model_filter.py +100 -0
  23. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/pyproject/__init__.py +1 -0
  24. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/suffixproject/__init__.py +1 -0
  25. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/tsproject/__init__.py +1 -0
  26. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/utils/llms.py +27 -0
  27. auto_coder-0.1.257/src/autocoder/utils/model_provider_selector.py +192 -0
  28. auto_coder-0.1.257/src/autocoder/version.py +1 -0
  29. auto_coder-0.1.257/tests/test_privacy.py +107 -0
  30. auto_coder-0.1.255/src/autocoder/version.py +0 -1
  31. {auto_coder-0.1.255 → auto_coder-0.1.257}/LICENSE +0 -0
  32. {auto_coder-0.1.255 → auto_coder-0.1.257}/README.md +0 -0
  33. {auto_coder-0.1.255 → auto_coder-0.1.257}/setup.cfg +0 -0
  34. {auto_coder-0.1.255 → auto_coder-0.1.257}/setup.py +0 -0
  35. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/auto_coder.egg-info/dependency_links.txt +0 -0
  36. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/auto_coder.egg-info/entry_points.txt +0 -0
  37. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/auto_coder.egg-info/top_level.txt +0 -0
  38. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/__init__.py +0 -0
  39. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/agent/__init__.py +0 -0
  40. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/agent/auto_demand_organizer.py +0 -0
  41. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/agent/auto_filegroup.py +0 -0
  42. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/agent/auto_guess_query.py +0 -0
  43. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/agent/auto_review_commit.py +0 -0
  44. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/agent/auto_tool.py +0 -0
  45. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/agent/coder.py +0 -0
  46. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/agent/designer.py +0 -0
  47. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/agent/planner.py +0 -0
  48. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/agent/project_reader.py +0 -0
  49. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/auto_coder_rag.py +0 -0
  50. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/auto_coder_rag_client_mcp.py +0 -0
  51. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/auto_coder_rag_mcp.py +0 -0
  52. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/auto_coder_server.py +0 -0
  53. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/benchmark.py +0 -0
  54. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/chat/__init__.py +0 -0
  55. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/command_args.py +0 -0
  56. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/JupyterClient.py +0 -0
  57. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/ShellClient.py +0 -0
  58. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/anything2images.py +0 -0
  59. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/anything2img.py +0 -0
  60. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/audio.py +0 -0
  61. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/buildin_tokenizer.py +0 -0
  62. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/chunk_validation.py +0 -0
  63. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/cleaner.py +0 -0
  64. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/code_auto_execute.py +0 -0
  65. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/code_auto_merge.py +0 -0
  66. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/code_auto_merge_diff.py +0 -0
  67. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/code_auto_merge_editblock.py +0 -0
  68. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/code_auto_merge_strict_diff.py +0 -0
  69. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/command_completer.py +0 -0
  70. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/command_generator.py +0 -0
  71. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/command_templates.py +0 -0
  72. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/const.py +0 -0
  73. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/files.py +0 -0
  74. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/git_utils.py +0 -0
  75. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/global_cancel.py +0 -0
  76. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/image_to_page.py +0 -0
  77. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/interpreter.py +0 -0
  78. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/llm_rerank.py +0 -0
  79. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/mcp_hub.py +0 -0
  80. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/mcp_server.py +0 -0
  81. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/mcp_servers/__init__.py +0 -0
  82. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/mcp_servers/mcp_server_perplexity.py +0 -0
  83. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/mcp_tools.py +0 -0
  84. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/memory_manager.py +0 -0
  85. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/model_speed_test.py +0 -0
  86. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/printer.py +0 -0
  87. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/recall_validation.py +0 -0
  88. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/screenshots.py +0 -0
  89. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/search.py +0 -0
  90. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/search_replace.py +0 -0
  91. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/shells.py +0 -0
  92. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/sys_prompt.py +0 -0
  93. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/text.py +0 -0
  94. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/types.py +0 -0
  95. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/utils_code_auto_generate.py +0 -0
  96. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/data/byzerllm.md +0 -0
  97. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/data/tokenizer.json +0 -0
  98. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/db/__init__.py +0 -0
  99. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/db/store.py +0 -0
  100. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/dispacher/__init__.py +0 -0
  101. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/dispacher/actions/__init__.py +0 -0
  102. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/dispacher/actions/copilot.py +0 -0
  103. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/dispacher/actions/plugins/__init__.py +0 -0
  104. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/dispacher/actions/plugins/action_translate.py +0 -0
  105. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/index/__init__.py +0 -0
  106. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/index/filter/__init__.py +0 -0
  107. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/index/filter/normal_filter.py +0 -0
  108. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/index/for_command.py +0 -0
  109. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/index/symbols_utils.py +0 -0
  110. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/index/types.py +0 -0
  111. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/lang.py +0 -0
  112. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/__init__.py +0 -0
  113. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/api_server.py +0 -0
  114. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/cache/__init__.py +0 -0
  115. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/cache/base_cache.py +0 -0
  116. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/cache/byzer_storage_cache.py +0 -0
  117. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/cache/file_monitor_cache.py +0 -0
  118. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/cache/simple_cache.py +0 -0
  119. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/doc_filter.py +0 -0
  120. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/document_retriever.py +0 -0
  121. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/llm_wrapper.py +0 -0
  122. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/loaders/__init__.py +0 -0
  123. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/loaders/docx_loader.py +0 -0
  124. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/loaders/excel_loader.py +0 -0
  125. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/loaders/pdf_loader.py +0 -0
  126. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/loaders/ppt_loader.py +0 -0
  127. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/long_context_rag.py +0 -0
  128. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/rag_config.py +0 -0
  129. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/rag_entry.py +0 -0
  130. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/raw_rag.py +0 -0
  131. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/relevant_utils.py +0 -0
  132. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/simple_directory_reader.py +0 -0
  133. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/simple_rag.py +0 -0
  134. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/stream_event/__init__.py +0 -0
  135. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/stream_event/event_writer.py +0 -0
  136. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/stream_event/types.py +0 -0
  137. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/token_checker.py +0 -0
  138. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/token_counter.py +0 -0
  139. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/token_limiter.py +0 -0
  140. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/types.py +0 -0
  141. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/utils.py +0 -0
  142. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/rag/variable_holder.py +0 -0
  143. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/regexproject/__init__.py +0 -0
  144. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/utils/__init__.py +0 -0
  145. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/utils/_markitdown.py +0 -0
  146. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/utils/auto_coder_utils/__init__.py +0 -0
  147. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/utils/auto_coder_utils/chat_stream_out.py +0 -0
  148. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/utils/chat_auto_coder_utils/__init__.py +0 -0
  149. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/utils/conversation_store.py +0 -0
  150. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/utils/llm_client_interceptors.py +0 -0
  151. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/utils/log_capture.py +0 -0
  152. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/utils/multi_turn.py +0 -0
  153. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/utils/operate_config_api.py +0 -0
  154. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/utils/print_table.py +0 -0
  155. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/utils/queue_communicate.py +0 -0
  156. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/utils/request_event_queue.py +0 -0
  157. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/utils/request_queue.py +0 -0
  158. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/utils/rest.py +0 -0
  159. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/utils/tests.py +0 -0
  160. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/utils/thread_utils.py +0 -0
  161. {auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/utils/types.py +0 -0
  162. {auto_coder-0.1.255 → auto_coder-0.1.257}/tests/test_action_regex_project.py +0 -0
  163. {auto_coder-0.1.255 → auto_coder-0.1.257}/tests/test_chat_auto_coder.py +0 -0
  164. {auto_coder-0.1.255 → auto_coder-0.1.257}/tests/test_code_auto_merge_editblock.py +0 -0
  165. {auto_coder-0.1.255 → auto_coder-0.1.257}/tests/test_command_completer.py +0 -0
  166. {auto_coder-0.1.255 → auto_coder-0.1.257}/tests/test_planner.py +0 -0
  167. {auto_coder-0.1.255 → auto_coder-0.1.257}/tests/test_queue_communicate.py +0 -0
  168. {auto_coder-0.1.255 → auto_coder-0.1.257}/tests/test_symbols_utils.py +0 -0
{auto_coder-0.1.255 → auto_coder-0.1.257}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.255
+Version: 0.1.257
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -26,7 +26,7 @@ Requires-Dist: tabulate
 Requires-Dist: jupyter_client
 Requires-Dist: prompt-toolkit
 Requires-Dist: tokenizers
-Requires-Dist: byzerllm[saas]>=0.1.163
+Requires-Dist: byzerllm[saas]>=0.1.165
 Requires-Dist: patch
 Requires-Dist: diff_match_patch
 Requires-Dist: GitPython
{auto_coder-0.1.255 → auto_coder-0.1.257}/src/auto_coder.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.255
+Version: 0.1.257
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -26,7 +26,7 @@ Requires-Dist: tabulate
 Requires-Dist: jupyter_client
 Requires-Dist: prompt-toolkit
 Requires-Dist: tokenizers
-Requires-Dist: byzerllm[saas]>=0.1.163
+Requires-Dist: byzerllm[saas]>=0.1.165
 Requires-Dist: patch
 Requires-Dist: diff_match_patch
 Requires-Dist: GitPython
{auto_coder-0.1.255 → auto_coder-0.1.257}/src/auto_coder.egg-info/SOURCES.txt

@@ -98,6 +98,8 @@ src/autocoder/index/types.py
 src/autocoder/index/filter/__init__.py
 src/autocoder/index/filter/normal_filter.py
 src/autocoder/index/filter/quick_filter.py
+src/autocoder/privacy/__init__.py
+src/autocoder/privacy/model_filter.py
 src/autocoder/pyproject/__init__.py
 src/autocoder/rag/__init__.py
 src/autocoder/rag/api_server.py
@@ -139,6 +141,7 @@ src/autocoder/utils/conversation_store.py
 src/autocoder/utils/llm_client_interceptors.py
 src/autocoder/utils/llms.py
 src/autocoder/utils/log_capture.py
+src/autocoder/utils/model_provider_selector.py
 src/autocoder/utils/multi_turn.py
 src/autocoder/utils/operate_config_api.py
 src/autocoder/utils/print_table.py
@@ -157,5 +160,6 @@ tests/test_chat_auto_coder.py
 tests/test_code_auto_merge_editblock.py
 tests/test_command_completer.py
 tests/test_planner.py
+tests/test_privacy.py
 tests/test_queue_communicate.py
 tests/test_symbols_utils.py
{auto_coder-0.1.255 → auto_coder-0.1.257}/src/auto_coder.egg-info/requires.txt

@@ -16,7 +16,7 @@ tabulate
 jupyter_client
 prompt-toolkit
 tokenizers
-byzerllm[saas]>=0.1.163
+byzerllm[saas]>=0.1.165
 patch
 diff_match_patch
 GitPython
{auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/auto_coder.py

@@ -47,6 +47,8 @@ from autocoder.common.utils_code_auto_generate import stream_chat_with_continue
 from autocoder.utils.auto_coder_utils.chat_stream_out import stream_out
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
+from autocoder.privacy.model_filter import ModelPathFilter
+
 console = Console()
 
 
@@ -317,53 +319,7 @@ def main(input_args: Optional[List[str]] = None):
                 "saas.model": model_info["model_name"],
                 "saas.is_reasoning": model_info["is_reasoning"]
             }
-        )
-
-        if models_module.check_model_exists("deepseek_r1_chat"):
-            r1_model_info = models_module.get_model_by_name("deepseek_r1_chat")
-            api_key = r1_model_info["api_key"]
-            chat_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_r1_chat")
-            chat_llm.deploy(
-                model_path="",
-                pretrained_model_type="saas/openai",
-                udf_name="deepseek_r1_chat",
-                infer_params={
-                    "saas.base_url": "https://api.deepseek.com/v1",
-                    "saas.api_key": api_key,
-                    "saas.model": "deepseek-reasoner",
-                    "saas.is_reasoning": True
-                }
-            )
-
-            generate_rerank_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_r1_chat")
-            generate_rerank_llm.deploy(
-                model_path="",
-                pretrained_model_type="saas/openai",
-                udf_name="deepseek_r1_chat",
-                infer_params={
-                    "saas.base_url": "https://api.deepseek.com/v1",
-                    "saas.api_key": api_key,
-                    "saas.model": "deepseek-reasoner",
-                    "saas.is_reasoning": True
-                }
-            )
-
-            index_filter_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_r1_chat")
-            index_filter_llm.deploy(
-                model_path="",
-                pretrained_model_type="saas/openai",
-                udf_name="deepseek_r1_chat",
-                infer_params={
-                    "saas.base_url": "https://api.deepseek.com/v1",
-                    "saas.api_key": api_key,
-                    "saas.model": "deepseek-reasoner",
-                    "saas.is_reasoning": True
-                }
-            )
-
-            llm.setup_sub_client("chat_model", chat_llm)
-            llm.setup_sub_client("generate_rerank_model", generate_rerank_llm)
-            llm.setup_sub_client("index_filter_model", index_filter_llm)
+        )
 
     if args.product_mode == "lite":
         # Set up default models based on configuration
@@ -947,6 +903,17 @@ def main(input_args: Optional[List[str]] = None):
             # )
             return
         elif raw_args.agent_command == "project_reader":
+
+            target_llm = llm.get_sub_client("planner_model")
+            if not target_llm:
+                target_llm = llm
+            model_filter = ModelPathFilter.from_model_object(target_llm, args)
+            if model_filter.has_rules():
+                printer = Printer()
+                msg = printer.get_message_from_key_with_format("model_has_access_restrictions",
+                                                               model_name=",".join(get_llm_names(target_llm)))
+                raise ValueError(msg)
+
             from autocoder.agent.project_reader import ProjectReader
 
             project_reader = ProjectReader(args, llm)
@@ -1194,10 +1161,23 @@ def main(input_args: Optional[List[str]] = None):
         else:
             pp = SuffixProject(args=args, llm=llm, file_filter=None)
             pp.run()
-            sources = pp.sources
-
+            sources = pp.sources
+
+        # Apply model filter for chat_llm
+        model_filter = ModelPathFilter.from_model_object(chat_llm, args)
+        filtered_sources = []
+        printer = Printer()
+        for source in sources:
+            if model_filter.is_accessible(source.module_name):
+                filtered_sources.append(source)
+            else:
+                printer.print_in_terminal("index_file_filtered",
+                                          style="yellow",
+                                          file_path=source.module_name,
+                                          model_name=",".join(get_llm_names(chat_llm)))
+
         s = build_index_and_filter_files(
-            llm=llm, args=args, sources=sources)
+            llm=llm, args=args, sources=filtered_sources).to_str()
 
         if s:
             pre_conversations.append(
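model_filter.py itself is not shown in this diff, so the rule format is unknown; what the call sites above pin down is only the contract: from_model_object builds a filter for a given model, has_rules reports whether any restriction applies, and is_accessible decides per file path. A minimal, runnable sketch of that contract, assuming glob-style deny rules (an assumption, not the package's actual format):

import fnmatch
from typing import List


class PathFilterSketch:
    """Hypothetical stand-in for ModelPathFilter: the method names
    has_rules/is_accessible come from the diff, the rule format is assumed."""

    def __init__(self, forbidden_patterns: List[str]):
        self._patterns = forbidden_patterns

    def has_rules(self) -> bool:
        return bool(self._patterns)

    def is_accessible(self, path: str) -> bool:
        # A path is accessible when it matches no forbidden pattern.
        return not any(fnmatch.fnmatch(path, p) for p in self._patterns)


f = PathFilterSketch(["src/secret/*"])
paths = ["src/app/main.py", "src/secret/keys.py"]
print([p for p in paths if f.is_accessible(p)])  # ['src/app/main.py']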
{auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/auto_coder.py (continued)

@@ -1389,11 +1369,25 @@ def main(input_args: Optional[List[str]] = None):
         elapsed_time = time.time() - start_time
         printer = Printer()
         speed = last_meta.generated_tokens_count / elapsed_time
+
+        # Get model info for pricing
+        from autocoder.utils import llms as llm_utils
+        model_info = llm_utils.get_model_info(model_name, args.product_mode) or {}
+        input_price = model_info.get("input_price", 0.0) if model_info else 0.0
+        output_price = model_info.get("output_price", 0.0) if model_info else 0.0
+
+        # Calculate costs
+        input_cost = (last_meta.input_tokens_count * input_price) / 1000000  # Convert to millions
+        output_cost = (last_meta.generated_tokens_count * output_price) / 1000000  # Convert to millions
+
         printer.print_in_terminal("stream_out_stats",
+                                  model_name=model_name,
                                   elapsed_time=elapsed_time,
                                   first_token_time=last_meta.first_token_time,
                                   input_tokens=last_meta.input_tokens_count,
                                   output_tokens=last_meta.generated_tokens_count,
+                                  input_cost=round(input_cost, 4),
+                                  output_cost=round(output_cost, 4),
                                   speed=round(speed, 2))
 
         chat_history["ask_conversation"].append(
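The division by 1000000 above implies that input_price and output_price are rates per one million tokens. A self-contained sketch of the same arithmetic (the prices here are made-up illustration values, not the package's actual pricing):

def estimate_cost(input_tokens: int, output_tokens: int,
                  input_price: float, output_price: float):
    """Prices are treated as cost per one million tokens, as in the hunk above."""
    input_cost = (input_tokens * input_price) / 1000000
    output_cost = (output_tokens * output_price) / 1000000
    return round(input_cost, 4), round(output_cost, 4)


# 10,000 input tokens at $0.5/M plus 2,000 output tokens at $2.0/M:
print(estimate_cost(10000, 2000, 0.5, 2.0))  # (0.005, 0.004)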
{auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/chat_auto_coder.py

@@ -297,23 +297,22 @@ def initialize_system(args):
 
     init_project()
 
-    if args.product_mode == "lite":
-        # Setup deepseek api key
-        api_key_dir = os.path.expanduser("~/.auto-coder/keys")
-        api_key_file = os.path.join(api_key_dir, "api.deepseek.com")
-
-        if not os.path.exists(api_key_file):
-            print_status(get_message("model_not_available"), "warning")
-            api_key = prompt(HTML(f"<b>{get_message('enter_api_key')} </b>"))
-
-            # Create directory if it doesn't exist
-            os.makedirs(api_key_dir, exist_ok=True)
-
-            # Save the API key
-            with open(api_key_file, "w") as f:
-                f.write(api_key)
-
-            print_status(f"API key saved successfully: {api_key_file}", "success")
+    if args.product_mode == "lite":
+        from autocoder.utils.model_provider_selector import ModelProviderSelector
+        from autocoder import models as models_module
+        if not models_module.check_model_exists("v3_chat") or not models_module.check_model_exists("r1_chat"):
+            model_provider_selector = ModelProviderSelector()
+            model_provider_info = model_provider_selector.select_provider()
+            if model_provider_info is not None:
+                models_json_list = model_provider_selector.to_models_json(model_provider_info)
+                models_module.add_and_activate_models(models_json_list)
+                r1_model = models_json_list[0]['name']
+                v3_model = models_json_list[1]['name']
+                configure(f"model:{v3_model}", skip_print=True)
+                configure(f"chat_model:{r1_model}", skip_print=True)
+                configure(f"generate_rerank_model:{r1_model}", skip_print=True)
+                configure(f"code_model:{v3_model}", skip_print=True)
+                configure(f"index_filter_model:{r1_model}", skip_print=True)
 
     if args.product_mode == "pro":
         # Check if Ray is running
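The new lite-mode bootstrap replaces the old DeepSeek key prompt: a provider is selected interactively, two models are registered (an R1-style reasoner first and a V3-style chat model second, judging by the list indices above), and five model roles are bound via configure(). A sketch of that role-to-model mapping as a plain dict; the role names come from the hunk, the model names are placeholders:

def default_role_bindings(r1_model: str, v3_model: str) -> dict:
    # Mirrors the five configure() calls in the hunk above.
    return {
        "model": v3_model,                  # general default
        "chat_model": r1_model,             # reasoning-heavy chat
        "generate_rerank_model": r1_model,  # candidate ranking
        "code_model": v3_model,             # code generation
        "index_filter_model": r1_model,     # index filtering
    }


print(default_role_bindings("r1_chat", "v3_chat"))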
{auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/chat_auto_coder_lang.py

@@ -128,7 +128,7 @@ MESSAGES = {
         "official_doc": "Official Documentation: https://uelng8wukz.feishu.cn/wiki/NhPNwSRcWimKFIkQINIckloBncI",
     },
     "zh": {
-        "commit_generating": "{{ model_name }} 正在生成提交信息...",
+        "commit_generating": "{{ model_name }} 正在生成提交信息...",
         "commit_message": "{{ model_name }} 生成的提交信息: {{ message }}",
         "commit_failed": "{{ model_name }} 生成提交信息失败: {{ error }}",
         "mcp_remove_error": "移除 MCP 服务器时出错:{error}",
{auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/__init__.py

@@ -14,7 +14,13 @@ class SourceCode(pydantic.BaseModel):
     tokens: int = -1
     metadata: Dict[str, Any] = {}
 
+class SourceCodeList():
+    def __init__(self, sources: List[SourceCode]):
+        self.sources = sources
 
+    def to_str(self):
+        return "\n".join([f"##File: {source.module_name}\n{source.source_code}\n" for source in self.sources])
+
 class TranslateReadme(pydantic.BaseModel):
     filename: str = pydantic.Field(..., description="需要翻译的文件路径")
     content: str = pydantic.Field(..., description="翻译后的内容")
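The new SourceCodeList is a thin container whose to_str() flattens every file into a "##File:"-prefixed block. A runnable sketch of that behavior, using a simplified stand-in for SourceCode (the real one is a pydantic model with more fields):

from typing import List


class SourceCode:
    """Simplified stand-in; only the two fields that to_str() reads."""
    def __init__(self, module_name: str, source_code: str):
        self.module_name = module_name
        self.source_code = source_code


class SourceCodeList:
    def __init__(self, sources: List[SourceCode]):
        self.sources = sources

    def to_str(self):
        # One "##File:" block per source, mirroring the diff above.
        return "\n".join(
            f"##File: {s.module_name}\n{s.source_code}\n" for s in self.sources
        )


sources = SourceCodeList([SourceCode("src/app.py", "print('hi')")])
print(sources.to_str())
# ##File: src/app.py
# print('hi')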
{auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/__init__.py (continued)

@@ -362,6 +368,7 @@ class AutoCoderArgs(pydantic.BaseModel):
     keep_only_reasoning_content: Optional[bool] = False
 
     in_code_apply: bool = False
+    model_filter_path: Optional[str] = None
 
     class Config:
         protected_namespaces = ()
{auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/auto_coder_lang.py

@@ -3,6 +3,20 @@ from byzerllm.utils import format_str_jinja2
 
 MESSAGES = {
     "en": {
+        "model_provider_select_title": "Select Model Provider",
+        "model_provider_select_text": "Please select your model provider:",
+        "model_provider_volcano": "Volcano Engine",
+        "model_provider_guiji": "GuiJi AI",
+        "model_provider_deepseek": "DeepSeek Official",
+        "model_provider_api_key_title": "API Key",
+        "model_provider_volcano_api_key_text": "Please enter your Volcano Engine API key:",
+        "model_provider_volcano_r1_text": "Please enter your Volcano Engine R1 endpoint (format: ep-20250204215011-vzbsg):",
+        "model_provider_volcano_v3_text": "Please enter your Volcano Engine V3 endpoint (format: ep-20250204215011-vzbsg):",
+        "model_provider_guiji_api_key_text": "Please enter your GuiJi AI API key:",
+        "model_provider_deepseek_api_key_text": "Please enter your DeepSeek API key:",
+        "model_provider_selected": "Provider configuration completed successfully! You can use /models command to view, add and modify all models later.",
+        "model_provider_success_title": "Success",
+        "index_file_filtered": "File {{file_path}} is filtered by model {{model_name}} restrictions",
         "models_no_active": "No active models found",
         "models_speed_test_results": "Model Speed Test Results",
         "models_testing": "Testing model: {{name}}...",
@@ -11,7 +25,7 @@ MESSAGES = {
         "generation_cancelled": "[Interrupted] Generation cancelled",
         "model_not_found": "Model {{model_name}} not found",
         "generating_shell_script": "Generating Shell Script",
-        "new_session_started": "New session started. Previous chat history has been archived.",
+        "new_session_started": "New session started. Previous chat history has been archived.",
         "memory_save_success": "✅ Saved to your memory",
         "file_decode_error": "Failed to decode file: {{file_path}}. Tried encodings: {{encodings}}",
         "file_write_error": "Failed to write file: {{file_path}}. Error: {{error}}",
@@ -21,7 +35,7 @@ MESSAGES = {
         "no_latest_commit": "Unable to get latest commit information",
         "code_review_error": "Code review process error: {{error}}",
         "index_file_too_large": "⚠️ File {{ file_path }} is too large ({{ file_size }} > {{ max_length }}), splitting into chunks...",
-        "index_update_success": "✅ {{ model_name }} Successfully updated index for {{ file_path }} (md5: {{ md5 }}) in {{ duration }}s",
+        "index_update_success": "✅ {{ model_name }} Successfully updated index for {{ file_path }} (md5: {{ md5 }}) in {{ duration }}s, input_tokens: {{ input_tokens }}, output_tokens: {{ output_tokens }}, input_cost: {{ input_cost }}, output_cost: {{ output_cost }}",
         "index_build_error": "❌ {{ model_name }} Error building index for {{ file_path }}: {{ error }}",
         "index_build_summary": "📊 Total Files: {{ total_files }}, Need to Build Index: {{ num_files }}",
         "building_index_progress": "⏳ Building Index: {{ counter }}/{{ num_files }}...",
@@ -30,7 +44,7 @@ MESSAGES = {
         "index_threads_completed": "✅ Completed {{ completed_threads }}/{{ total_threads }} threads",
         "index_related_files_fail": "⚠️ Failed to find related files for chunk {{ chunk_count }}",
         "index_file_removed": "🗑️ Removed non-existent file index: {{ file_path }}",
-        "index_file_saved": "💾 Saved index file, updated {{ updated_files }} files, removed {{ removed_files }} files",
+        "index_file_saved": "💾 Saved index file, updated {{ updated_files }} files, removed {{ removed_files }} files, input_tokens: {{ input_tokens }}, output_tokens: {{ output_tokens }}, input_cost: {{ input_cost }}, output_cost: {{ output_cost }}",
         "human_as_model_instructions": (
             "You are now in Human as Model mode. The content has been copied to your clipboard.\n"
             "The system is waiting for your input. When finished, enter 'EOF' on a new line to submit.\n"
@@ -57,7 +71,7 @@ MESSAGES = {
             "Paste the answer to the input box below, use '/break' to exit, '/clear' to clear the screen, '/eof' to submit."
         ),
         "code_generation_start": "Auto generate the code...",
-        "code_generation_complete": "Code generation completed in {{ duration }} seconds, input_tokens_count: {{ input_tokens }}, generated_tokens_count: {{ output_tokens }}, speed: {{ speed }} tokens/s",
+        "code_generation_complete": "{{ model_names}} Code generation completed in {{ duration }} seconds, input_tokens_count: {{ input_tokens }}, generated_tokens_count: {{ output_tokens }}, input_cost: {{ input_cost }}, output_cost: {{ output_cost }}, speed: {{ speed }} tokens/s",
         "code_merge_start": "Auto merge the code...",
         "code_execution_warning": "Content(send to model) is {{ content_length }} tokens (you may collect too much files), which is larger than the maximum input length {{ max_length }}",
         "quick_filter_start": "{{ model_name }} Starting filter context(quick_filter)...",
@@ -75,13 +89,13 @@ MESSAGES = {
         "ranking_start": "Start ranking {{ count }} candidates using model {{ model_name }}",
         "ranking_failed_request": "Ranking request failed: {{ error }}",
         "ranking_all_failed": "All ranking requests failed",
-        "ranking_complete": "Ranking completed in {{ elapsed }}s, total voters: {{ total_tasks }}, best candidate index: {{ best_candidate }}, scores: {{ scores }}, input_tokens: {{ input_tokens }}, output_tokens: {{ output_tokens }}",
+        "ranking_complete": "{{ model_names }} Ranking completed in {{ elapsed }}s, total voters: {{ total_tasks }}, best candidate index: {{ best_candidate }}, scores: {{ scores }}, input_tokens: {{ input_tokens }}, output_tokens: {{ output_tokens }}, input_cost: {{ input_cost }}, output_cost: {{ output_cost }}",
         "ranking_process_failed": "Ranking process failed: {{ error }}",
         "ranking_failed": "Ranking failed in {{ elapsed }}s, using original order",
         "begin_index_source_code": "🚀 Begin to index source code in {{ source_dir }}",
-        "stream_out_stats": "Elapsed time {{ elapsed_time }} seconds, first token time: {{ first_token_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}, speed: {{ speed }} tokens/s",
-        "quick_filter_stats": "快速过滤器完成,耗时 {{ elapsed_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}",
-        "upsert_file": "✅ Updated file: {{ file_path }}",
+        "stream_out_stats": "Model: {{ model_name }}, Total time: {{ elapsed_time }} seconds, First token time: {{ first_token_time }} seconds, Speed: {{ speed }} tokens/s, Input tokens: {{ input_tokens }}, Output tokens: {{ output_tokens }}, Input cost: {{ input_cost }}, Output cost: {{ output_cost }}",
+        "quick_filter_stats": "{{ model_names }} 快速过滤器完成,耗时 {{ elapsed_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}",
+        "upsert_file": "✅ Updated file: {{ file_path }}",
         "unmerged_blocks_title": "Unmerged Blocks",
         "quick_filter_title": "{{ model_name }} is analyzing how to filter context...",
         "quick_filter_failed": "❌ Quick filter failed: {{ error }}. ",
@@ -95,8 +109,23 @@ MESSAGES = {
         "quick_filter_tokens_len": "📊 Current index size: {{ tokens_len }} tokens",
         "estimated_chat_input_tokens": "Estimated chat input tokens: {{ estimated_input_tokens }}",
         "estimated_input_tokens_in_generate": "Estimated input tokens in generate ({{ generate_mode }}): {{ estimated_input_tokens }}",
+        "model_has_access_restrictions": "{{model_name}} has access restrictions, cannot use the current function",
     },
     "zh": {
+        "model_provider_select_title": "选择模型供应商",
+        "model_provider_select_text": "请选择您的模型供应商:",
+        "model_provider_volcano": "火山方舟",
+        "model_provider_guiji": "硅基流动",
+        "model_provider_deepseek": "DeepSeek官方",
+        "model_provider_api_key_title": "API密钥",
+        "model_provider_volcano_api_key_text": "请输入您的火山方舟API密钥:",
+        "model_provider_volcano_r1_text": "请输入您的火山方舟 R1 推理点(格式如: ep-20250204215011-vzbsg):",
+        "model_provider_volcano_v3_text": "请输入您的火山方舟 V3 推理点(格式如: ep-20250204215011-vzbsg):",
+        "model_provider_guiji_api_key_text": "请输入您的硅基流动API密钥:",
+        "model_provider_deepseek_api_key_text": "请输入您的DeepSeek API密钥:",
+        "model_provider_selected": "供应商配置已成功完成!后续你可以使用 /models 命令,查看,新增和修改所有模型",
+        "model_provider_success_title": "成功",
+        "index_file_filtered": "文件 {{file_path}} 被模型 {{model_name}} 的访问限制过滤",
         "models_no_active": "未找到激活的模型",
         "models_speed_test_results": "模型速度测试结果",
         "models_testing": "正在测试模型: {{name}}...",
@@ -114,7 +143,7 @@ MESSAGES = {
         "no_latest_commit": "无法获取最新的提交信息",
         "code_review_error": "代码审查过程出错: {{error}}",
         "index_file_too_large": "⚠️ 文件 {{ file_path }} 过大 ({{ file_size }} > {{ max_length }}), 正在分块处理...",
-        "index_update_success": "✅ {{ model_name }} 成功更新 {{ file_path }} 的索引 (md5: {{ md5 }}), 耗时 {{ duration }} ",
+        "index_update_success": "✅ {{ model_name }} 成功更新 {{ file_path }} 的索引 (md5: {{ md5 }}), 耗时 {{ duration }} 秒, 输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}",
         "index_build_error": "❌ {{ model_name }} 构建 {{ file_path }} 索引时出错: {{ error }}",
         "index_build_summary": "📊 总文件数: {{ total_files }}, 需要构建索引: {{ num_files }}",
         "building_index_progress": "⏳ 正在构建索引: {{ counter }}/{{ num_files }}...",
@@ -123,7 +152,7 @@ MESSAGES = {
         "index_threads_completed": "✅ 已完成 {{ completed_threads }}/{{ total_threads }} 个线程",
         "index_related_files_fail": "⚠️ 无法为块 {{ chunk_count }} 找到相关文件",
         "index_file_removed": "🗑️ 已移除不存在的文件索引:{{ file_path }}",
-        "index_file_saved": "💾 已保存索引文件,更新了 {{ updated_files }} 个文件,移除了 {{ removed_files }} 个文件",
+        "index_file_saved": "💾 已保存索引文件,更新了 {{ updated_files }} 个文件,移除了 {{ removed_files }} 个文件,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}",
         "human_as_model_instructions": (
             "您现在处于人类作为模型模式。内容已复制到您的剪贴板。\n"
             "系统正在等待您的输入。完成后,在新行输入'EOF'提交。\n"
@@ -150,7 +179,7 @@ MESSAGES = {
             "将获得答案黏贴到下面的输入框,换行后,使用 '/break' 退出,'/clear' 清屏,'/eof' 提交。"
         ),
         "code_generation_start": "正在自动生成代码...",
-        "code_generation_complete": "代码生成完成,耗时 {{ duration }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 速度: {{ speed }} tokens/秒",
+        "code_generation_complete": "{{ model_names}} 代码生成完成,耗时 {{ duration }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}, 速度: {{ speed }} tokens/秒",
         "code_merge_start": "正在自动合并代码...",
         "code_execution_warning": "发送给模型的内容长度为 {{ content_length }} tokens(您可能收集了太多文件),超过了最大输入长度 {{ max_length }}",
         "quick_filter_start": "{{ model_name }} 开始查找上下文(quick_filter)...",
@@ -179,17 +208,17 @@ MESSAGES = {
         "ranking_start": "开始对 {{ count }} 个候选项进行排序,使用模型 {{ model_name }} 打分",
         "ranking_failed_request": "排序请求失败: {{ error }}",
         "ranking_all_failed": "所有排序请求都失败",
-        "ranking_complete": "排序完成,耗时 {{ elapsed }} 秒,总投票数: {{ total_tasks }},最佳候选索引: {{ best_candidate }},得分: {{ scores }},输入token数: {{ input_tokens }},输出token数: {{ output_tokens }}",
+        "ranking_complete": "{{ model_names }} 排序完成,耗时 {{ elapsed }} 秒,总投票数: {{ total_tasks }},最佳候选索引: {{ best_candidate }},得分: {{ scores }},输入token数: {{ input_tokens }},输出token数: {{ output_tokens }} 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}",
         "ranking_process_failed": "排序过程失败: {{ error }}",
         "ranking_failed": "排序失败,耗时 {{ elapsed }} 秒,使用原始顺序",
-        "stream_out_stats": "总耗时 {{ elapsed_time }} 秒,首token时间: {{ first_token_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 速度: {{ speed }} tokens/秒",
-        "quick_filter_stats": "Quick filter completed in {{ elapsed_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}",
+        "stream_out_stats": "模型: {{ model_name }},总耗时 {{ elapsed_time }} 秒,首token时间: {{ first_token_time }} 秒, 速度: {{ speed }} tokens/秒, 输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}",
+        "quick_filter_stats": "{{ model_names }} Quick filter completed in {{ elapsed_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}, input cost: {{ input_cost }}, output cost: {{ output_cost }}",
         "quick_filter_title": "{{ model_name }} 正在分析如何筛选上下文...",
         "quick_filter_failed": "❌ 快速过滤器失败: {{ error }}. ",
         "estimated_chat_input_tokens": "对话输入token预估为: {{ estimated_input_tokens }}",
         "estimated_input_tokens_in_generate": "生成代码({{ generate_mode }})预计输入token数: {{ estimated_input_tokens_in_generate }}",
-    },
-}
+        "model_has_access_restrictions": "{{model_name}} 有访问限制,无法使用当前功能",
+    }}
 
 
 def get_system_language():
@@ -203,5 +232,6 @@ def get_message(key):
     lang = get_system_language()
     return MESSAGES.get(lang, MESSAGES['en']).get(key, MESSAGES['en'][key])
 
+
 def get_message_with_format(msg_key: str, **kwargs):
     return format_str_jinja2(get_message(msg_key), **kwargs)
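These message templates use jinja2-style {{ placeholder }} syntax, and get_message_with_format delegates to byzerllm's format_str_jinja2. A sketch of the same behavior using the jinja2 package directly as a stand-in, assuming format_str_jinja2 behaves like a plain template render:

from jinja2 import Template


def format_str_jinja2(template: str, **kwargs) -> str:
    # Stand-in for byzerllm.utils.format_str_jinja2 (assumed equivalent).
    return Template(template).render(**kwargs)


msg = "Model: {{ model_name }}, Input cost: {{ input_cost }}"
print(format_str_jinja2(msg, model_name="v3_chat", input_cost=0.0032))
# Model: v3_chat, Input cost: 0.0032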
{auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/code_auto_generate.py

@@ -10,6 +10,9 @@ from autocoder.common.utils_code_auto_generate import chat_with_continue
 import json
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
+from autocoder.utils import llms as llm_utils
+from autocoder.common import SourceCodeList
+from autocoder.privacy.model_filter import ModelPathFilter
 
 
 class CodeAutoGenerate:
@@ -155,10 +158,27 @@ class CodeAutoGenerate:
         }
 
     def single_round_run(
-        self, query: str, source_content: str
+        self, query: str, source_code_list: SourceCodeList
     ) -> Tuple[List[str], Dict[str, str]]:
         llm_config = {"human_as_model": self.args.human_as_model}
 
+        # Apply model filter for code_llm
+        printer = Printer()
+        for llm in self.llms:
+            model_filter = ModelPathFilter.from_model_object(llm, self.args)
+            filtered_sources = []
+            for source in source_code_list.sources:
+                if model_filter.is_accessible(source.module_name):
+                    filtered_sources.append(source)
+                else:
+                    printer.print_in_terminal("index_file_filtered",
+                                              style="yellow",
+                                              file_path=source.module_name,
+                                              model_name=",".join(llm_utils.get_llm_names(llm)))
+
+        source_code_list = SourceCodeList(filtered_sources)
+        source_content = source_code_list.to_str()
+
         if self.args.request_id and not self.args.skip_events:
             queue_communicate.send_event_no_wait(
                 request_id=self.args.request_id,
@@ -193,6 +213,9 @@ class CodeAutoGenerate:
         results = []
         input_tokens_count = 0
         generated_tokens_count = 0
+        input_tokens_cost = 0
+        generated_tokens_cost = 0
+        model_names = []
 
         printer = Printer()
         estimated_input_tokens = count_tokens(json.dumps(conversations, ensure_ascii=False))
@@ -206,13 +229,27 @@ class CodeAutoGenerate:
             futures = []
             for llm in self.llms:
                 for _ in range(self.generate_times_same_model):
-                    futures.append(executor.submit(
-                        chat_with_continue, llm=llm, conversations=conversations, llm_config=llm_config))
+
+                    model_names_list = llm_utils.get_llm_names(llm)
+                    model_name = None
+                    if model_names_list:
+                        model_name = model_names_list[0]
+
+                    for _ in range(self.generate_times_same_model):
+                        model_names.append(model_name)
+                        futures.append(executor.submit(
+                            chat_with_continue, llm=llm, conversations=conversations, llm_config=llm_config))
+
             temp_results = [future.result() for future in futures]
             for result in temp_results:
                 results.append(result.content)
                 input_tokens_count += result.input_tokens_count
                 generated_tokens_count += result.generated_tokens_count
+                model_info = llm_utils.get_model_info(model_name, self.args.product_mode)
+                input_cost = model_info.get("input_price", 0) if model_info else 0
+                output_cost = model_info.get("output_price", 0) if model_info else 0
+                input_tokens_cost += input_cost * result.input_tokens_count / 1000000
+                generated_tokens_cost += output_cost * result.generated_tokens_count / 1000000
 
         for result in results:
             conversations_list.append(
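The generation path fans requests out over a thread pool: every configured model is submitted several times and the per-request token counts are summed once all futures resolve. A simplified, self-contained sketch of that fan-out pattern, with a dummy worker standing in for chat_with_continue:

from concurrent.futures import ThreadPoolExecutor


def fake_chat(llm_name: str, prompt: str) -> str:
    # Dummy stand-in for chat_with_continue.
    return f"{llm_name}: answer to {prompt!r}"


llms = ["v3_chat", "r1_chat"]
generate_times_same_model = 2

with ThreadPoolExecutor(max_workers=4) as executor:
    # One submission per (model, repetition) pair; results keep submit order.
    futures = [
        executor.submit(fake_chat, name, "refactor foo()")
        for name in llms
        for _ in range(generate_times_same_model)
    ]
    results = [f.result() for f in futures]

print(len(results))  # 4 == len(llms) * generate_times_same_model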
{auto_coder-0.1.255 → auto_coder-0.1.257}/src/autocoder/common/code_auto_generate.py (continued)

@@ -227,7 +264,9 @@ class CodeAutoGenerate:
 
         statistics = {
             "input_tokens_count": input_tokens_count,
-            "generated_tokens_count": generated_tokens_count
+            "generated_tokens_count": generated_tokens_count,
+            "input_tokens_cost": input_tokens_cost,
+            "generated_tokens_cost": generated_tokens_cost
         }
 
         if self.args.request_id and not self.args.skip_events:
@@ -242,10 +281,11 @@ class CodeAutoGenerate:
         return CodeGenerateResult(contents=results, conversations=conversations_list, metadata=statistics)
 
     def multi_round_run(
-        self, query: str, source_content: str, max_steps: int = 10
+        self, query: str, source_code_list: SourceCodeList, max_steps: int = 10
     ) -> Tuple[List[str], List[Dict[str, str]]]:
         llm_config = {"human_as_model": self.args.human_as_model}
         result = []
+        source_content = source_code_list.to_str()
 
         if self.args.template == "common":
             init_prompt = self.multi_round_instruction.prompt(