auto-coder 0.1.262__tar.gz → 0.1.264__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of auto-coder has been flagged as possibly problematic by the registry.
- {auto_coder-0.1.262 → auto_coder-0.1.264}/PKG-INFO +1 -1
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/auto_coder.egg-info/PKG-INFO +1 -1
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/auto_coder.egg-info/SOURCES.txt +1 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/auto_coder.py +14 -13
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/chat_auto_coder.py +55 -50
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/__init__.py +6 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/auto_coder_lang.py +8 -2
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/code_auto_generate_diff.py +9 -9
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/code_auto_merge.py +23 -3
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/code_auto_merge_diff.py +28 -3
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/code_auto_merge_editblock.py +24 -4
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/code_auto_merge_strict_diff.py +23 -3
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/code_modification_ranker.py +65 -3
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/conf_validator.py +6 -0
- auto_coder-0.1.264/src/autocoder/common/context_pruner.py +305 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/index/entry.py +8 -2
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/index/filter/normal_filter.py +13 -2
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/index/filter/quick_filter.py +143 -13
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/index/index.py +3 -2
- auto_coder-0.1.264/src/autocoder/utils/project_structure.py +270 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/thread_utils.py +6 -1
- auto_coder-0.1.264/src/autocoder/version.py +1 -0
- auto_coder-0.1.262/src/autocoder/utils/project_structure.py +0 -15
- auto_coder-0.1.262/src/autocoder/version.py +0 -1
- {auto_coder-0.1.262 → auto_coder-0.1.264}/LICENSE +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/README.md +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/setup.cfg +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/setup.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/auto_coder.egg-info/dependency_links.txt +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/auto_coder.egg-info/entry_points.txt +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/auto_coder.egg-info/requires.txt +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/auto_coder.egg-info/top_level.txt +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/agent/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/agent/auto_demand_organizer.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/agent/auto_filegroup.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/agent/auto_guess_query.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/agent/auto_review_commit.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/agent/auto_tool.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/agent/coder.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/agent/designer.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/agent/planner.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/agent/project_reader.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/auto_coder_rag.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/auto_coder_rag_client_mcp.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/auto_coder_rag_mcp.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/auto_coder_server.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/benchmark.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/chat/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/chat_auto_coder_lang.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/command_args.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/commands/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/commands/auto_command.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/commands/tools.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/JupyterClient.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/ShellClient.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/anything2images.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/anything2img.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/audio.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/auto_configure.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/buildin_tokenizer.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/chunk_validation.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/cleaner.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/code_auto_execute.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/code_auto_generate.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/code_auto_generate_editblock.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/code_auto_generate_strict_diff.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/command_completer.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/command_generator.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/command_templates.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/const.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/conversation_pruner.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/files.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/git_utils.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/global_cancel.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/image_to_page.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/index_import_export.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/interpreter.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/llm_rerank.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/mcp_hub.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/mcp_server.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/mcp_servers/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/mcp_servers/mcp_server_perplexity.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/mcp_tools.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/memory_manager.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/model_speed_test.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/printer.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/recall_validation.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/result_manager.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/screenshots.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/search.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/search_replace.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/shells.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/sys_prompt.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/text.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/types.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/common/utils_code_auto_generate.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/data/byzerllm.md +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/data/tokenizer.json +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/db/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/db/store.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/dispacher/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/dispacher/actions/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/dispacher/actions/action.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/dispacher/actions/copilot.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/dispacher/actions/plugins/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/dispacher/actions/plugins/action_regex_project.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/dispacher/actions/plugins/action_translate.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/index/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/index/filter/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/index/for_command.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/index/symbols_utils.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/index/types.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/lang.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/models.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/privacy/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/privacy/model_filter.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/pyproject/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/api_server.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/cache/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/cache/base_cache.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/cache/byzer_storage_cache.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/cache/file_monitor_cache.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/cache/simple_cache.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/doc_filter.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/document_retriever.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/llm_wrapper.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/loaders/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/loaders/docx_loader.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/loaders/excel_loader.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/loaders/pdf_loader.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/loaders/ppt_loader.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/long_context_rag.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/rag_config.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/rag_entry.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/raw_rag.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/relevant_utils.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/simple_directory_reader.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/simple_rag.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/stream_event/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/stream_event/event_writer.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/stream_event/types.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/token_checker.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/token_counter.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/token_limiter.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/types.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/utils.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/rag/variable_holder.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/regexproject/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/suffixproject/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/tsproject/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/_markitdown.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/auto_coder_utils/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/auto_coder_utils/chat_stream_out.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/chat_auto_coder_utils/__init__.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/conversation_store.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/llm_client_interceptors.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/llms.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/log_capture.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/model_provider_selector.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/multi_turn.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/operate_config_api.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/print_table.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/queue_communicate.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/request_event_queue.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/request_queue.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/rest.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/tests.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/src/autocoder/utils/types.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/tests/test_action_regex_project.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/tests/test_chat_auto_coder.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/tests/test_code_auto_merge_editblock.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/tests/test_command_completer.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/tests/test_planner.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/tests/test_privacy.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/tests/test_queue_communicate.py +0 -0
- {auto_coder-0.1.262 → auto_coder-0.1.264}/tests/test_symbols_utils.py +0 -0
src/auto_coder.egg-info/SOURCES.txt

@@ -60,6 +60,7 @@ src/autocoder/common/command_generator.py
 src/autocoder/common/command_templates.py
 src/autocoder/common/conf_validator.py
 src/autocoder/common/const.py
+src/autocoder/common/context_pruner.py
 src/autocoder/common/conversation_pruner.py
 src/autocoder/common/files.py
 src/autocoder/common/git_utils.py
src/autocoder/auto_coder.py

@@ -1164,19 +1164,20 @@ def main(input_args: Optional[List[str]] = None):
                         file_path=source.module_name,
                         model_name=",".join(get_llm_names(chat_llm)))

-
-
-
-
-
-
-
-
-
-
-
-
-
+        if "no_context" not in args.action:
+            s = build_index_and_filter_files(
+                llm=llm, args=args, sources=filtered_sources).to_str()
+
+            if s:
+                pre_conversations.append(
+                    {
+                        "role": "user",
+                        "content": f"请阅读下面的代码和文档:\n\n <files>\n{s}\n</files>",
+                    }
+                )
+                pre_conversations.append(
+                    {"role": "assistant", "content": "read"})
+                source_count += 1

         loaded_conversations = pre_conversations + \
             chat_history["ask_conversation"]
src/autocoder/chat_auto_coder.py

@@ -268,7 +268,7 @@ def initialize_system(args):
     from autocoder.utils.model_provider_selector import ModelProviderSelector
     from autocoder import models as models_module
     print(f"\n\033[1;34m{get_message('initializing')}\033[0m")
-
+
     first_time = [False]
     configure_success = [False]

@@ -391,7 +391,7 @@ def initialize_system(args):
     except subprocess.CalledProcessError:
         print_status(get_message("deploy_fail"), "error")
         return
-
+

     deploy_cmd = [
         "byzerllm",
@@ -672,12 +672,12 @@ completer = CommandCompleter(commands,

 def print_conf(content:Dict[str,Any]):
     """Display configuration dictionary in a Rich table format with enhanced visual styling.
-
+
     Args:
         conf (Dict[str, Any]): Configuration dictionary to display
     """
     console = Console()
-
+
     # Create a styled table with rounded borders
     table = Table(
         show_header=True,
@@ -687,11 +687,11 @@ def print_conf(content:Dict[str,Any]):
         border_style="blue",
         show_lines=True
     )
-
+
     # Add columns with explicit width and alignment
     table.add_column(get_message("conf_key"), style="cyan", justify="right", width=30, no_wrap=False)
     table.add_column(get_message("conf_value"), style="green", justify="left", width=50, no_wrap=False)
-
+
     # Sort keys for consistent display
     for key in sorted(content.keys()):
         value = content[key]
@@ -704,9 +704,9 @@ def print_conf(content:Dict[str,Any]):
             formatted_value = Text(str(value), style="bright_cyan")
         else:
             formatted_value = Text(str(value), style="green")
-
+
         table.add_row(str(key), formatted_value)
-
+
     # Add padding and print with a panel
     console.print(Panel(
         table,
@@ -742,7 +742,7 @@ def revert():


 def add_files(args: List[str]):
-
+
     result_manager = ResultManager()
     if "groups" not in memory["current_files"]:
         memory["current_files"]["groups"] = {}
@@ -837,7 +837,7 @@ def add_files(args: List[str]):
         )
         result_manager.append(content=f"Added group '{group_name}' with current files.",
                               meta={"action": "add_files","success":True, "input":{ "args": args}})
-
+
     elif len(args) >= 3 and args[1] == "/drop":
         group_name = args[2]
         if group_name in groups:
@@ -1272,14 +1272,14 @@ def mcp(query: str):
     os.makedirs(mcp_dir, exist_ok=True)
     timestamp = str(int(time.time()))
     file_path = os.path.join(mcp_dir, f"{timestamp}.md")
-
+
     # Format response as markdown
     markdown_content = response.result
-
+
     # Save to file
     with open(file_path, "w", encoding="utf-8") as f:
         f.write(markdown_content)
-
+
     console = Console()
     console.print(
         Panel(
@@ -1424,13 +1424,13 @@ def commit(query: str):
     finally:
         if os.path.exists(temp_yaml):
             os.remove(temp_yaml)
-
+
     target_model = args.commit_model or args.model
     llm = get_single_llm(target_model, product_mode)
     printer = Printer()
     printer.print_in_terminal("commit_generating", style="yellow", model_name=target_model)
     commit_message = ""
-
+
     try:
         uncommitted_changes = git_utils.get_uncommitted_changes(".")
         commit_message = git_utils.generate_commit_message.with_llm(llm).run(
@@ -1441,7 +1441,7 @@ def commit(query: str):
     except Exception as e:
         printer.print_in_terminal("commit_failed", style="red", error=str(e), model_name=target_model)
         return
-
+
     yaml_config["query"] = commit_message
     yaml_content = convert_yaml_config_to_str(yaml_config=yaml_config)
     with open(os.path.join(execute_file), "w") as f:
@@ -1513,14 +1513,14 @@ def coding(query: str):
         converted_value = convert_config_value(key, value)
         if converted_value is not None:
             yaml_config[key] = converted_value
-
+
     yaml_config["urls"] = current_files + get_llm_friendly_package_docs(
         return_paths=True
     )

     if conf.get("enable_global_memory", "true") in ["true", "True",True]:
         yaml_config["urls"] += get_global_memory_file_paths()
-
+
     # handle image
     v = Image.convert_image_paths_from(query)
     yaml_config["query"] = v
@@ -1667,7 +1667,7 @@ def chat(query: str):
     if "/save" in query:
         yaml_config["action"].append("save")
         query = query.replace("/save", "", 1).strip()
-
+
     if "/review" in query and "/commit" in query:
         yaml_config["action"].append("review_commit")
         query = query.replace("/review", "", 1).replace("/commit", "", 1).strip()
@@ -1680,9 +1680,10 @@ def chat(query: str):
         else:
             query = code_review.prompt(query)

-    is_no_context = query.strip()
+    is_no_context = "/no_context" in query.strip()
     if is_no_context:
         query = query.replace("/no_context", "", 1).strip()
+        yaml_config["action"].append("no_context")

     for key, value in conf.items():
         converted_value = convert_config_value(key, value)
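Taken together with the auto_coder.py hunk above, the /no_context flag now round-trips as an explicit action: chat() records "no_context" in the action list, and main() skips injecting project files into the pre-conversation when it is present. Below is a minimal, self-contained sketch of that flow; the helper names are hypothetical, not the package's API.

```python
# Sketch of the /no_context round trip introduced in 0.1.264.
# parse_chat_flags / build_pre_conversations are illustrative helpers only.
from typing import Dict, List, Tuple

def parse_chat_flags(query: str) -> Tuple[str, List[str]]:
    """Strip /no_context from the query and record it as an action."""
    actions: List[str] = []
    if "/no_context" in query:
        query = query.replace("/no_context", "", 1).strip()
        actions.append("no_context")
    return query, actions

def build_pre_conversations(actions: List[str], files_snippet: str) -> List[Dict[str, str]]:
    """Inject project files into the conversation only when no_context is absent."""
    pre_conversations: List[Dict[str, str]] = []
    if "no_context" not in actions and files_snippet:
        pre_conversations.append({
            "role": "user",
            "content": f"请阅读下面的代码和文档:\n\n <files>\n{files_snippet}\n</files>",
        })
        pre_conversations.append({"role": "assistant", "content": "read"})
    return pre_conversations

query, actions = parse_chat_flags("/no_context explain the ranker")
assert actions == ["no_context"]
assert build_pre_conversations(actions, "<file content>") == []   # context injection skipped
```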
@@ -1891,22 +1892,22 @@ def manage_models(query: str):
     """
     printer = Printer()
     console = Console()
-
+
     product_mode = memory.get("product_mode", "lite")
     if product_mode != "lite":
         printer.print_in_terminal("models_lite_only", style="red")
         return
-
+
     models_data = models_module.load_models()
     subcmd = ""
     if "/list" in query:
         subcmd = "/list"
         query = query.replace("/list", "", 1).strip()
-
+
     if "/add_model" in query:
         subcmd = "/add_model"
         query = query.replace("/add_model", "", 1).strip()
-
+
     if "/add" in query:
         subcmd = "/add"
         query = query.replace("/add", "", 1).strip()
@@ -1915,7 +1916,7 @@ def manage_models(query: str):
     if "/activate" in query:
         subcmd = "/add"
         query = query.replace("/activate", "", 1).strip()
-
+
     if "/remove" in query:
         subcmd = "/remove"
         query = query.replace("/remove", "", 1).strip()
@@ -1935,23 +1936,23 @@ def manage_models(query: str):
     if "output_price" in query:
         subcmd = "/output_price"
         query = query.replace("/output_price", "", 1).strip()
-
+
     if "/speed" in query:
         subcmd = "/speed"
         query = query.replace("/speed", "", 1).strip()
-
-
+
+

     if not subcmd:
         printer.print_in_terminal("models_usage")
-
+
     result_manager = ResultManager()
     if subcmd == "/list":
         if models_data:
             # Sort models by speed (average_speed)
             sorted_models = sorted(models_data, key=lambda x: float(x.get('average_speed', 0)))
             sorted_models.reverse()
-
+
             table = Table(
                 title=printer.get_message_from_key("models_title"),
                 expand=True,
@@ -1972,7 +1973,7 @@ def manage_models(query: str):
             if not api_key:
                 printer.print_in_terminal("models_api_key_empty", style="yellow", name=name)
                 name = f"{name} *"
-
+
             table.add_row(
                 name,
                 m.get("model_name", ""),
@@ -1988,7 +1989,7 @@ def manage_models(query: str):
                     "query": query
                 }
             })
-
+
         else:
             printer.print_in_terminal("models_no_models", style="yellow")
             result_manager.add_result(content="No models found",meta={
@@ -2036,7 +2037,7 @@ def manage_models(query: str):
                 }
             })
             printer.print_in_terminal("models_input_price_usage", style="red")
-
+
     elif subcmd == "/output_price":
         args = query.strip().split()
         if len(args) >= 2:
@@ -2114,11 +2115,11 @@ def manage_models(query: str):
                 }
             })
             printer.print_in_terminal("models_speed_usage", style="red")
-
+
     elif subcmd == "/speed-test":
         from autocoder.common.model_speed_test import render_speed_test_in_terminal
         test_rounds = 1  # 默认测试轮数
-
+
         enable_long_context = False
         if "/long_context" in query:
             enable_long_context = True
@@ -2132,7 +2133,7 @@ def manage_models(query: str):
         args = query.strip().split()
         if args and args[0].isdigit():
             test_rounds = int(args[0])
-
+
         render_speed_test_in_terminal(product_mode, test_rounds,enable_long_context=enable_long_context)
         ## 等待优化,获取明细数据
         result_manager.add_result(content="models test success",meta={
@@ -2141,7 +2142,7 @@ def manage_models(query: str):
                 "query": query
             }
         })
-
+
     elif subcmd == "/add":
         # Support both simplified and legacy formats
         args = query.strip().split(" ")
@@ -2547,12 +2548,12 @@ def auto_command(params,query: str):
     from autocoder.commands.auto_command import CommandAutoTuner, AutoCommandRequest, CommandConfig, MemoryConfig
     args = get_final_config()
     # help(query)
-
+
     # 准备请求参数
     request = AutoCommandRequest(
         user_input=query
     )
-
+
     # 初始化调优器
     llm = get_single_llm(args.chat_model or args.model,product_mode=args.product_mode)
     tuner = CommandAutoTuner(llm,
@@ -2580,7 +2581,7 @@ def auto_command(params,query: str):
         execute_shell_command=execute_shell_command,
         generate_shell_command=generate_shell_command
     ))
-
+
     # 生成建议
     response = tuner.analyze(request)
     printer = Printer()
@@ -2592,7 +2593,7 @@ def auto_command(params,query: str):
         border_style="blue",
         padding=(1, 2)
     ))
-
+

 def main():
     from autocoder.rag.variable_holder import VariableHolder
@@ -2605,20 +2606,20 @@ def main():
         VariableHolder.TOKENIZER_MODEL = Tokenizer.from_file(tokenizer_path)
     except FileNotFoundError:
         tokenizer_path = None
-
+
     ARGS = parse_arguments()
-
+
     if ARGS.lite:
         ARGS.product_mode = "lite"
-
+
     if ARGS.pro:
         ARGS.product_mode = "pro"

     if not ARGS.quick:
         initialize_system(ARGS)
-
+
     load_memory()
-
+
     configure(f"product_mode:{ARGS.product_mode}")

     MODES = {
@@ -2679,7 +2680,11 @@ def main():
         human_as_model = memory["conf"].get("human_as_model", "false")
         if mode not in MODES:
             mode = "auto_detect"
-
+        pwd = os.getcwd()
+        pwd_parts = pwd.split(os.sep)
+        if len(pwd_parts) > 3:
+            pwd = os.sep.join(pwd_parts[-3:])
+        return f"Current Dir: {pwd} \nMode: {MODES[mode]} | Human as Model: {human_as_model} "

         session = PromptSession(
             history=InMemoryHistory(),
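The bottom-toolbar change above shortens the displayed working directory to its last three path components before appending the mode indicators. A small illustrative sketch; the mode and human-as-model values shown are placeholders, not the package's MODES mapping.

```python
# Illustrative only: truncate the cwd the same way the new toolbar text does.
import os

pwd = os.getcwd()
pwd_parts = pwd.split(os.sep)
if len(pwd_parts) > 3:
    pwd = os.sep.join(pwd_parts[-3:])   # keep only the last three components
print(f"Current Dir: {pwd} \nMode: auto_detect | Human as Model: false ")
```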
@@ -2772,14 +2777,14 @@ def main():

             elif user_input.startswith("/index/build"):
                 index_build()
-
+
             elif user_input.startswith("/index/export"):
                 export_path = user_input[len("/index/export"):].strip()
                 if not export_path:
                     print("Please specify the export path")
                 else:
                     index_export(export_path)
-
+
             elif user_input.startswith("/index/import"):
                 import_path = user_input[len("/index/import"):].strip()
                 if not import_path:
@@ -2821,7 +2826,7 @@ def main():
                     show_help()
                 else:
                     help(query)
-
+
             elif user_input.startswith("/exclude_dirs"):
                 dir_names = user_input[len(
                     "/exclude_dirs"):].strip().split(",")
src/autocoder/common/__init__.py

@@ -359,6 +359,9 @@ class AutoCoderArgs(pydantic.BaseModel):
     data_cells_max_num: Optional[int] = 2000
     generate_times_same_model: Optional[int] = 1
     rank_times_same_model: Optional[int] = 1
+
+    # block:给定每个文件修改的代码块 file: 给定每个文件修改前后内容
+    rank_strategy: Optional[str] = "file"

     action: List[str] = []
     enable_global_memory: Optional[bool] = True
@@ -374,6 +377,9 @@ class AutoCoderArgs(pydantic.BaseModel):
     conversation_prune_group_size: Optional[int] = 4
     conversation_prune_strategy: Optional[str] = "summarize"

+    context_prune_strategy: Optional[str] = "score"
+    context_prune: Optional[bool] = True
+
     auto_command_max_iterations: Optional[int] = 10

     skip_commit: Optional[bool] = False
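The three new AutoCoderArgs fields above all carry defaults, so existing configurations keep working. The Chinese comment in the diff glosses rank_strategy as "block" = per-file edit blocks versus "file" = whole before/after file contents. A hedged sketch of the values they accept; constructing the model directly assumes the remaining fields also have defaults, and the override shown is illustrative only.

```python
# Hedged sketch: the new AutoCoderArgs knobs added in 0.1.264 and their defaults,
# taken from the diff above. Not an excerpt from the package's documentation.
from autocoder.common import AutoCoderArgs

args = AutoCoderArgs()
print(args.rank_strategy)           # "file"  -> rank candidates by whole before/after file contents
print(args.context_prune)           # True    -> prune pulled-in context files
print(args.context_prune_strategy)  # "score" -> score-based pruning (see the new context_pruner.py)

args.rank_strategy = "block"        # alternative: rank by per-file edit blocks
```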
src/autocoder/common/auto_coder_lang.py

@@ -136,7 +136,10 @@ MESSAGES = {
         "auto_command_analyzed": "Selected command",
         "invalid_enum_value": "Value '{{value}}' is not in allowed values ({{allowed}})",
         "no_changes_made": "⚠️ no changes made, the reason may be that the text block generated by the coding function has a problem, so it cannot be merged into the project",
-        "conversation_pruning_start": "Conversation pruning started, total tokens: {{total_tokens}}, safe zone: {{safe_zone}}",
+        "conversation_pruning_start": "⚠️ Conversation pruning started, total tokens: {{total_tokens}}, safe zone: {{safe_zone}}",
+        "invalid_file_number": "⚠️ Invalid file number {{file_number}}, total files: {{total_files}}",
+        "all_merge_results_failed": "⚠️ All merge attempts failed, returning first candidate",
+        "only_one_merge_result_success": "✅ Only one merge result succeeded, returning that candidate"
     },
     "zh": {
         "config_validation_error": "配置验证错误: {{error}}",
@@ -270,7 +273,10 @@ MESSAGES = {
         "satisfied_prompt": "已满足需求,无需进一步操作",
         "auto_command_analyzed": "被选择指令",
         "invalid_enum_value": "值 '{{value}}' 不在允许的值列表中 ({{allowed}})",
-        "conversation_pruning_start": "⚠️ 对话长度 {{total_tokens}} tokens 超过安全阈值 {{safe_zone}},开始修剪对话。"
+        "conversation_pruning_start": "⚠️ 对话长度 {{total_tokens}} tokens 超过安全阈值 {{safe_zone}},开始修剪对话。",
+        "invalid_file_number": "⚠️ 无效的文件编号 {{file_number}},总文件数为 {{total_files}}",
+        "all_merge_results_failed": "⚠️ 所有合并尝试都失败,返回第一个候选",
+        "only_one_merge_result_success": "✅ 只有一个合并结果成功,返回该候选"
     }}


src/autocoder/common/code_auto_generate_diff.py

@@ -359,16 +359,16 @@ class CodeAutoGenerateDiff:
         with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
             futures = []
             for llm in self.llms:
+
+                model_names_list = llm_utils.get_llm_names(llm)
+                model_name = None
+                if model_names_list:
+                    model_name = model_names_list[0]
+
                 for _ in range(self.generate_times_same_model):
-
-
-
-                    model_name = model_names_list[0]
-
-                    for _ in range(self.generate_times_same_model):
-                        model_names.append(model_name)
-                        futures.append(executor.submit(
-                            chat_with_continue, llm=llm, conversations=conversations, llm_config=llm_config))
+                    model_names.append(model_name)
+                    futures.append(executor.submit(
+                        chat_with_continue, llm=llm, conversations=conversations, llm_config=llm_config))

             temp_results = [future.result() for future in futures]
             for result in temp_results:
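The CodeAutoGenerateDiff change resolves each llm's display name once per llm, guards against an empty name list, and then submits generate_times_same_model requests per llm instead of re-entering a duplicated inner loop. A minimal sketch of that fan-out; fake_chat and the llm dicts are placeholders, not the package's chat_with_continue API.

```python
# Illustrative fan-out: one name lookup per llm, N submissions per llm.
from concurrent.futures import ThreadPoolExecutor

def fake_chat(llm, conversations):
    # Placeholder for chat_with_continue.
    return f"result from {llm['names'][0] if llm['names'] else 'unknown'}"

llms = [{"names": ["deepseek-chat"]}, {"names": []}]
generate_times_same_model = 2

model_names, futures = [], []
with ThreadPoolExecutor(max_workers=len(llms) * generate_times_same_model) as executor:
    for llm in llms:
        names = llm["names"]
        model_name = names[0] if names else None   # resolved once, outside the inner loop
        for _ in range(generate_times_same_model):
            model_names.append(model_name)
            futures.append(executor.submit(fake_chat, llm, conversations=[]))

results = [f.result() for f in futures]
print(model_names)   # ['deepseek-chat', 'deepseek-chat', None, None]
```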
src/autocoder/common/code_auto_merge.py

@@ -73,15 +73,35 @@ class CodeAutoMerge:
     def choose_best_choice(self, generate_result: CodeGenerateResult) -> CodeGenerateResult:
         if len(generate_result.contents) == 1:
             return generate_result
+
+        merge_results = []
+        for content,conversations in zip(generate_result.contents,generate_result.conversations):
+            merge_result = self._merge_code_without_effect(content)
+            merge_results.append(merge_result)

+        # If all merge results are None, return first one
+        if all(len(result.failed_blocks) != 0 for result in merge_results):
+            self.printer.print_in_terminal("all_merge_results_failed")
+            return CodeGenerateResult(contents=[generate_result.contents[0]], conversations=[generate_result.conversations[0]])
+
+        # If only one merge result is not None, return that one
+        not_none_indices = [i for i, result in enumerate(merge_results) if len(result.failed_blocks) == 0]
+        if len(not_none_indices) == 1:
+            idx = not_none_indices[0]
+            self.printer.print_in_terminal("only_one_merge_result_success")
+            return CodeGenerateResult(contents=[generate_result.contents[idx]], conversations=[generate_result.conversations[idx]])
+
+        # 最后,如果有多个,那么根据质量排序再返回
         ranker = CodeModificationRanker(self.llm, self.args)
-        ranked_result = ranker.rank_modifications(generate_result)
-
+        ranked_result = ranker.rank_modifications(generate_result,merge_results)
+
+        ## 得到的结果,再做一次合并,第一个通过的返回 , 返回做合并有点重复低效,未来修改。
         for content,conversations in zip(ranked_result.contents,ranked_result.conversations):
             merge_result = self._merge_code_without_effect(content)
             if not merge_result.failed_blocks:
                 return CodeGenerateResult(contents=[content], conversations=[conversations])
-
+
+        # 最后保底,但实际不会出现
         return CodeGenerateResult(contents=[ranked_result.contents[0]], conversations=[ranked_result.conversations[0]])


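The merge classes now apply the same pre-merge selection policy before falling back to CodeModificationRanker: dry-run merge every candidate, keep the first candidate if all merges fail, short-circuit if exactly one succeeds, and only rank when several merge cleanly. A standalone sketch of just that policy with simplified stand-in types; it is not the package's classes.

```python
# Simplified stand-ins for CodeGenerateResult / merge results, for illustration only.
from dataclasses import dataclass, field
from typing import Callable, List

@dataclass
class MergeResult:
    failed_blocks: List[str] = field(default_factory=list)   # empty -> merge succeeded

def select_candidate(contents: List[str],
                     merge_results: List[MergeResult],
                     rank: Callable[[List[str], List[MergeResult]], List[str]]) -> str:
    ok = [i for i, r in enumerate(merge_results) if not r.failed_blocks]
    if not ok:                       # all merges failed -> keep the first candidate
        return contents[0]
    if len(ok) == 1:                 # exactly one clean merge -> no ranking needed
        return contents[ok[0]]
    ranked = rank(contents, merge_results)   # several clean merges -> ask the ranker
    return ranked[0]

candidates = ["edit A", "edit B", "edit C"]
merges = [MergeResult(failed_blocks=["x"]), MergeResult(), MergeResult()]
print(select_candidate(candidates, merges, rank=lambda c, m: ["edit C", "edit B"]))  # edit C
```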
src/autocoder/common/code_auto_merge_diff.py

@@ -387,15 +387,35 @@ class CodeAutoMergeDiff:
     def choose_best_choice(self, generate_result: CodeGenerateResult) -> CodeGenerateResult:
         if len(generate_result.contents) == 1:
             return generate_result
+
+        merge_results = []
+        for content,conversations in zip(generate_result.contents,generate_result.conversations):
+            merge_result = self._merge_code_without_effect(content)
+            merge_results.append(merge_result)

+        # If all merge results are None, return first one
+        if all(len(result.failed_blocks) != 0 for result in merge_results):
+            self.printer.print_in_terminal("all_merge_results_failed")
+            return CodeGenerateResult(contents=[generate_result.contents[0]], conversations=[generate_result.conversations[0]])
+
+        # If only one merge result is not None, return that one
+        not_none_indices = [i for i, result in enumerate(merge_results) if len(result.failed_blocks) == 0]
+        if len(not_none_indices) == 1:
+            idx = not_none_indices[0]
+            self.printer.print_in_terminal("only_one_merge_result_success")
+            return CodeGenerateResult(contents=[generate_result.contents[idx]], conversations=[generate_result.conversations[idx]])
+
+        # 最后,如果有多个,那么根据质量排序再返回
         ranker = CodeModificationRanker(self.llm, self.args)
-        ranked_result = ranker.rank_modifications(generate_result)
-
+        ranked_result = ranker.rank_modifications(generate_result,merge_results)
+
+        ## 得到的结果,再做一次合并,第一个通过的返回 , 返回做合并有点重复低效,未来修改。
         for content,conversations in zip(ranked_result.contents,ranked_result.conversations):
             merge_result = self._merge_code_without_effect(content)
             if not merge_result.failed_blocks:
                 return CodeGenerateResult(contents=[content], conversations=[conversations])
-
+
+        # 最后保底,但实际不会出现
         return CodeGenerateResult(contents=[ranked_result.contents[0]], conversations=[ranked_result.conversations[0]])

     @byzerllm.prompt(render="jinja2")
@@ -440,6 +460,11 @@ class CodeAutoMergeDiff:
         errors = []
         for path, hunk in uniq:
             full_path = self.abs_root_path(path)
+
+            if not os.path.exists(full_path):
+                with open(full_path, "w",encoding="utf-8") as f:
+                    f.write("")
+
             content = FileUtils.read_file(full_path)

             original, _ = hunk_to_before_after(hunk)
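The guard above lets a hunk that targets a not-yet-existing file apply by creating an empty file first. A tiny illustrative sketch of the same idea; the target path and helper name are hypothetical.

```python
# Mirror of the guard added above: create an empty file so a "new file" hunk can apply.
import os

def ensure_target_exists(full_path: str) -> None:
    if not os.path.exists(full_path):
        with open(full_path, "w", encoding="utf-8") as f:
            f.write("")

ensure_target_exists("new_module_demo.py")    # hypothetical target of a new-file hunk
print(os.path.getsize("new_module_demo.py"))  # 0
```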
src/autocoder/common/code_auto_merge_editblock.py

@@ -164,15 +164,35 @@ class CodeAutoMergeEditBlock:
     def choose_best_choice(self, generate_result: CodeGenerateResult) -> CodeGenerateResult:
         if len(generate_result.contents) == 1:
             return generate_result
-
+
+        merge_results = []
+        for content,conversations in zip(generate_result.contents,generate_result.conversations):
+            merge_result = self._merge_code_without_effect(content)
+            merge_results.append(merge_result)
+
+        # If all merge results are None, return first one
+        if all(len(result.failed_blocks) != 0 for result in merge_results):
+            self.printer.print_in_terminal("all_merge_results_failed")
+            return CodeGenerateResult(contents=[generate_result.contents[0]], conversations=[generate_result.conversations[0]])
+
+        # If only one merge result is not None, return that one
+        not_none_indices = [i for i, result in enumerate(merge_results) if len(result.failed_blocks) == 0]
+        if len(not_none_indices) == 1:
+            idx = not_none_indices[0]
+            self.printer.print_in_terminal("only_one_merge_result_success")
+            return CodeGenerateResult(contents=[generate_result.contents[idx]], conversations=[generate_result.conversations[idx]])
+
+        # 最后,如果有多个,那么根据质量排序再返回
         ranker = CodeModificationRanker(self.llm, self.args)
-        ranked_result = ranker.rank_modifications(generate_result)
-
+        ranked_result = ranker.rank_modifications(generate_result,merge_results)
+
+        ## 得到的结果,再做一次合并,第一个通过的返回 , 返回做合并有点重复低效,未来修改。
         for content,conversations in zip(ranked_result.contents,ranked_result.conversations):
             merge_result = self._merge_code_without_effect(content)
             if not merge_result.failed_blocks:
                 return CodeGenerateResult(contents=[content], conversations=[conversations])
-
+
+        # 最后保底,但实际不会出现
         return CodeGenerateResult(contents=[ranked_result.contents[0]], conversations=[ranked_result.conversations[0]])

     @byzerllm.prompt()