auto-coder 0.1.256__tar.gz → 0.1.258__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of auto-coder might be problematic.
- {auto_coder-0.1.256 → auto_coder-0.1.258}/PKG-INFO +2 -2
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/auto_coder.egg-info/PKG-INFO +2 -2
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/auto_coder.egg-info/SOURCES.txt +4 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/auto_coder.egg-info/requires.txt +1 -1
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/auto_coder.py +30 -50
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/chat_auto_coder.py +16 -17
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/chat_auto_coder_lang.py +1 -1
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/__init__.py +7 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/auto_coder_lang.py +38 -8
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/code_auto_generate.py +22 -2
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/code_auto_generate_diff.py +23 -4
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/code_auto_generate_editblock.py +24 -2
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/code_auto_generate_strict_diff.py +23 -3
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/dispacher/actions/action.py +38 -28
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/dispacher/actions/plugins/action_regex_project.py +8 -6
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/index/entry.py +6 -4
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/index/index.py +94 -4
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/models.py +14 -0
- auto_coder-0.1.258/src/autocoder/privacy/__init__.py +3 -0
- auto_coder-0.1.258/src/autocoder/privacy/model_filter.py +100 -0
- auto_coder-0.1.258/src/autocoder/utils/model_provider_selector.py +214 -0
- auto_coder-0.1.258/src/autocoder/version.py +1 -0
- auto_coder-0.1.258/tests/test_privacy.py +107 -0
- auto_coder-0.1.256/src/autocoder/version.py +0 -1
- {auto_coder-0.1.256 → auto_coder-0.1.258}/LICENSE +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/README.md +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/setup.cfg +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/setup.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/auto_coder.egg-info/dependency_links.txt +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/auto_coder.egg-info/entry_points.txt +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/auto_coder.egg-info/top_level.txt +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/agent/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/agent/auto_demand_organizer.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/agent/auto_filegroup.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/agent/auto_guess_query.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/agent/auto_review_commit.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/agent/auto_tool.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/agent/coder.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/agent/designer.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/agent/planner.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/agent/project_reader.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/auto_coder_rag.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/auto_coder_rag_client_mcp.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/auto_coder_rag_mcp.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/auto_coder_server.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/benchmark.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/chat/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/command_args.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/JupyterClient.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/ShellClient.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/anything2images.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/anything2img.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/audio.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/buildin_tokenizer.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/chunk_validation.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/cleaner.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/code_auto_execute.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/code_auto_merge.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/code_auto_merge_diff.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/code_auto_merge_editblock.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/code_auto_merge_strict_diff.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/code_modification_ranker.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/command_completer.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/command_generator.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/command_templates.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/const.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/files.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/git_utils.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/global_cancel.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/image_to_page.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/interpreter.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/llm_rerank.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/mcp_hub.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/mcp_server.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/mcp_servers/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/mcp_servers/mcp_server_perplexity.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/mcp_tools.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/memory_manager.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/model_speed_test.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/printer.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/recall_validation.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/screenshots.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/search.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/search_replace.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/shells.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/sys_prompt.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/text.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/types.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/utils_code_auto_generate.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/data/byzerllm.md +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/data/tokenizer.json +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/db/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/db/store.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/dispacher/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/dispacher/actions/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/dispacher/actions/copilot.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/dispacher/actions/plugins/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/dispacher/actions/plugins/action_translate.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/index/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/index/filter/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/index/filter/normal_filter.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/index/filter/quick_filter.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/index/for_command.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/index/symbols_utils.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/index/types.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/lang.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/pyproject/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/api_server.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/cache/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/cache/base_cache.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/cache/byzer_storage_cache.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/cache/file_monitor_cache.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/cache/simple_cache.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/doc_filter.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/document_retriever.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/llm_wrapper.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/loaders/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/loaders/docx_loader.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/loaders/excel_loader.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/loaders/pdf_loader.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/loaders/ppt_loader.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/long_context_rag.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/rag_config.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/rag_entry.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/raw_rag.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/relevant_utils.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/simple_directory_reader.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/simple_rag.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/stream_event/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/stream_event/event_writer.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/stream_event/types.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/token_checker.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/token_counter.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/token_limiter.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/types.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/utils.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/rag/variable_holder.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/regexproject/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/suffixproject/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/tsproject/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/utils/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/utils/_markitdown.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/utils/auto_coder_utils/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/utils/auto_coder_utils/chat_stream_out.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/utils/chat_auto_coder_utils/__init__.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/utils/conversation_store.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/utils/llm_client_interceptors.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/utils/llms.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/utils/log_capture.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/utils/multi_turn.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/utils/operate_config_api.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/utils/print_table.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/utils/queue_communicate.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/utils/request_event_queue.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/utils/request_queue.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/utils/rest.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/utils/tests.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/utils/thread_utils.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/utils/types.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/tests/test_action_regex_project.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/tests/test_chat_auto_coder.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/tests/test_code_auto_merge_editblock.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/tests/test_command_completer.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/tests/test_planner.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/tests/test_queue_communicate.py +0 -0
- {auto_coder-0.1.256 → auto_coder-0.1.258}/tests/test_symbols_utils.py +0 -0
{auto_coder-0.1.256 → auto_coder-0.1.258}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.256
+Version: 0.1.258
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -26,7 +26,7 @@ Requires-Dist: tabulate
 Requires-Dist: jupyter_client
 Requires-Dist: prompt-toolkit
 Requires-Dist: tokenizers
-Requires-Dist: byzerllm[saas]>=0.1.
+Requires-Dist: byzerllm[saas]>=0.1.165
 Requires-Dist: patch
 Requires-Dist: diff_match_patch
 Requires-Dist: GitPython
{auto_coder-0.1.256 → auto_coder-0.1.258}/src/auto_coder.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.256
+Version: 0.1.258
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -26,7 +26,7 @@ Requires-Dist: tabulate
 Requires-Dist: jupyter_client
 Requires-Dist: prompt-toolkit
 Requires-Dist: tokenizers
-Requires-Dist: byzerllm[saas]>=0.1.
+Requires-Dist: byzerllm[saas]>=0.1.165
 Requires-Dist: patch
 Requires-Dist: diff_match_patch
 Requires-Dist: GitPython
{auto_coder-0.1.256 → auto_coder-0.1.258}/src/auto_coder.egg-info/SOURCES.txt

@@ -98,6 +98,8 @@ src/autocoder/index/types.py
 src/autocoder/index/filter/__init__.py
 src/autocoder/index/filter/normal_filter.py
 src/autocoder/index/filter/quick_filter.py
+src/autocoder/privacy/__init__.py
+src/autocoder/privacy/model_filter.py
 src/autocoder/pyproject/__init__.py
 src/autocoder/rag/__init__.py
 src/autocoder/rag/api_server.py
@@ -139,6 +141,7 @@ src/autocoder/utils/conversation_store.py
 src/autocoder/utils/llm_client_interceptors.py
 src/autocoder/utils/llms.py
 src/autocoder/utils/log_capture.py
+src/autocoder/utils/model_provider_selector.py
 src/autocoder/utils/multi_turn.py
 src/autocoder/utils/operate_config_api.py
 src/autocoder/utils/print_table.py
@@ -157,5 +160,6 @@ tests/test_chat_auto_coder.py
 tests/test_code_auto_merge_editblock.py
 tests/test_command_completer.py
 tests/test_planner.py
+tests/test_privacy.py
 tests/test_queue_communicate.py
 tests/test_symbols_utils.py
{auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/auto_coder.py

@@ -47,6 +47,8 @@ from autocoder.common.utils_code_auto_generate import stream_chat_with_continue
 from autocoder.utils.auto_coder_utils.chat_stream_out import stream_out
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
+from autocoder.privacy.model_filter import ModelPathFilter
+
 console = Console()


@@ -317,53 +319,7 @@ def main(input_args: Optional[List[str]] = None):
                 "saas.model": model_info["model_name"],
                 "saas.is_reasoning": model_info["is_reasoning"]
             }
-            )
-
-            if models_module.check_model_exists("deepseek_r1_chat"):
-                r1_model_info = models_module.get_model_by_name("deepseek_r1_chat")
-                api_key = r1_model_info["api_key"]
-                chat_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_r1_chat")
-                chat_llm.deploy(
-                    model_path="",
-                    pretrained_model_type="saas/openai",
-                    udf_name="deepseek_r1_chat",
-                    infer_params={
-                        "saas.base_url": "https://api.deepseek.com/v1",
-                        "saas.api_key": api_key,
-                        "saas.model": "deepseek-reasoner",
-                        "saas.is_reasoning": True
-                    }
-                )
-
-                generate_rerank_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_r1_chat")
-                generate_rerank_llm.deploy(
-                    model_path="",
-                    pretrained_model_type="saas/openai",
-                    udf_name="deepseek_r1_chat",
-                    infer_params={
-                        "saas.base_url": "https://api.deepseek.com/v1",
-                        "saas.api_key": api_key,
-                        "saas.model": "deepseek-reasoner",
-                        "saas.is_reasoning": True
-                    }
-                )
-
-                index_filter_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_r1_chat")
-                index_filter_llm.deploy(
-                    model_path="",
-                    pretrained_model_type="saas/openai",
-                    udf_name="deepseek_r1_chat",
-                    infer_params={
-                        "saas.base_url": "https://api.deepseek.com/v1",
-                        "saas.api_key": api_key,
-                        "saas.model": "deepseek-reasoner",
-                        "saas.is_reasoning": True
-                    }
-                )
-
-                llm.setup_sub_client("chat_model", chat_llm)
-                llm.setup_sub_client("generate_rerank_model", generate_rerank_llm)
-                llm.setup_sub_client("index_filter_model", index_filter_llm)
+        )

     if args.product_mode == "lite":
         # Set up default models based on configuration
@@ -947,6 +903,17 @@ def main(input_args: Optional[List[str]] = None):
         # )
         return
     elif raw_args.agent_command == "project_reader":
+
+        target_llm = llm.get_sub_client("planner_model")
+        if not target_llm:
+            target_llm = llm
+        model_filter = ModelPathFilter.from_model_object(target_llm, args)
+        if model_filter.has_rules():
+            printer = Printer()
+            msg = printer.get_message_from_key_with_format("model_has_access_restrictions",
+                                                           model_name=",".join(get_llm_names(target_llm)))
+            raise ValueError(msg)
+
         from autocoder.agent.project_reader import ProjectReader

         project_reader = ProjectReader(args, llm)
@@ -1194,10 +1161,23 @@ def main(input_args: Optional[List[str]] = None):
         else:
             pp = SuffixProject(args=args, llm=llm, file_filter=None)
             pp.run()
-            sources = pp.sources
-
+        sources = pp.sources
+
+        # Apply model filter for chat_llm
+        model_filter = ModelPathFilter.from_model_object(chat_llm, args)
+        filtered_sources = []
+        printer = Printer()
+        for source in sources:
+            if model_filter.is_accessible(source.module_name):
+                filtered_sources.append(source)
+            else:
+                printer.print_in_terminal("index_file_filtered",
+                                          style="yellow",
+                                          file_path=source.module_name,
+                                          model_name=",".join(get_llm_names(chat_llm)))
+
         s = build_index_and_filter_files(
-            llm=llm, args=args, sources=
+            llm=llm, args=args, sources=filtered_sources).to_str()

         if s:
             pre_conversations.append(
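This project_reader hook is the first call site of the new privacy module: it resolves the effective model (the planner_model sub-client when present, otherwise the root LLM) and refuses the command outright when that model carries path-restriction rules. A minimal standalone sketch of the same gate, assuming only the ModelPathFilter API visible in this diff (from_model_object, has_rules) and get_llm_names from autocoder.utils.llms:

# Sketch only; not the shipped implementation.
from autocoder.privacy.model_filter import ModelPathFilter
from autocoder.utils.llms import get_llm_names

def guard_restricted_model(llm, args):
    # Prefer the dedicated planner sub-client, fall back to the root LLM,
    # exactly as the diffed code does.
    target_llm = llm.get_sub_client("planner_model") or llm
    model_filter = ModelPathFilter.from_model_object(target_llm, args)
    if model_filter.has_rules():
        names = ",".join(get_llm_names(target_llm))
        # Mirrors the "model_has_access_restrictions" locale message.
        raise ValueError(f"{names} has access restrictions, cannot use the current function")
    return target_llm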
{auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/chat_auto_coder.py

@@ -297,23 +297,22 @@ def initialize_system(args):

     init_project()

-    if args.product_mode == "lite":
-        ...
-        print_status(f"API key saved successfully: {api_key_file}", "success")
+    if args.product_mode == "lite":
+        from autocoder.utils.model_provider_selector import ModelProviderSelector
+        from autocoder import models as models_module
+        if not models_module.check_model_exists("v3_chat") or not models_module.check_model_exists("r1_chat"):
+            model_provider_selector = ModelProviderSelector()
+            model_provider_info = model_provider_selector.select_provider()
+            if model_provider_info is not None:
+                models_json_list = model_provider_selector.to_models_json(model_provider_info)
+                models_module.add_and_activate_models(models_json_list)
+                r1_model = models_json_list[0]['name']
+                v3_model = models_json_list[1]['name']
+                configure(f"model:{v3_model}", skip_print=True)
+                configure(f"chat_model:{r1_model}", skip_print=True)
+                configure(f"generate_rerank_model:{r1_model}", skip_print=True)
+                configure(f"code_model:{v3_model}", skip_print=True)
+                configure(f"index_filter_model:{r1_model}", skip_print=True)

     if args.product_mode == "pro":
         # Check if Ray is running
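The old lite-mode bootstrap (most of which was lost in extraction, ending in a saved API-key message) is replaced by an interactive provider selection. The payload of to_models_json() is not shown in this diff; the sketch below assumes only what the diffed code itself relies on, namely that it returns a list whose first entry is the reasoning (r1) model and whose second is the v3 model, each carrying a 'name' key:

# Sketch of the new lite-mode bootstrap flow; payload shape is inferred.
from autocoder.utils.model_provider_selector import ModelProviderSelector
from autocoder import models as models_module

def bootstrap_lite_models(configure):
    # Skip the prompt entirely when both default models already exist.
    if models_module.check_model_exists("v3_chat") and models_module.check_model_exists("r1_chat"):
        return
    selector = ModelProviderSelector()
    info = selector.select_provider()  # interactive: Volcano / SiliconFlow / DeepSeek
    if info is None:
        return
    models_json = selector.to_models_json(info)  # assumed: [{'name': r1}, {'name': v3}, ...]
    models_module.add_and_activate_models(models_json)
    r1_model, v3_model = models_json[0]["name"], models_json[1]["name"]
    for key, model in (("model", v3_model), ("chat_model", r1_model),
                       ("generate_rerank_model", r1_model),
                       ("code_model", v3_model), ("index_filter_model", r1_model)):
        configure(f"{key}:{model}", skip_print=True)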
{auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/chat_auto_coder_lang.py

@@ -128,7 +128,7 @@ MESSAGES = {
         "official_doc": "Official Documentation: https://uelng8wukz.feishu.cn/wiki/NhPNwSRcWimKFIkQINIckloBncI",
     },
     "zh": {
-        "commit_generating": "{{ model_name }} 正在生成提交信息...",
+        "commit_generating": "{{ model_name }} 正在生成提交信息...",
         "commit_message": "{{ model_name }} 生成的提交信息: {{ message }}",
         "commit_failed": "{{ model_name }} 生成提交信息失败: {{ error }}",
         "mcp_remove_error": "移除 MCP 服务器时出错:{error}",
{auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/__init__.py

@@ -14,7 +14,13 @@ class SourceCode(pydantic.BaseModel):
     tokens: int = -1
     metadata: Dict[str, Any] = {}

+class SourceCodeList():
+    def __init__(self, sources: List[SourceCode]):
+        self.sources = sources

+    def to_str(self):
+        return "\n".join([f"##File: {source.module_name}\n{source.source_code}\n" for source in self.sources])
+
 class TranslateReadme(pydantic.BaseModel):
     filename: str = pydantic.Field(..., description="需要翻译的文件路径")
     content: str = pydantic.Field(..., description="翻译后的内容")
@@ -362,6 +368,7 @@ class AutoCoderArgs(pydantic.BaseModel):
     keep_only_reasoning_content: Optional[bool] = False

     in_code_apply: bool = False
+    model_filter_path: Optional[str] = None

     class Config:
         protected_namespaces = ()
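SourceCodeList is the new container threaded through every generator's single_round_run/multi_round_run, and the model_filter_path argument added to AutoCoderArgs is presumably where ModelPathFilter reads its rules from (the filter implementation itself is not part of the visible diff). A small usage sketch; the SourceCode constructor arguments are assumed from the fields that to_str() reads:

# Usage sketch for the new SourceCodeList container.
from autocoder.common import SourceCode, SourceCodeList

source_code_list = SourceCodeList([
    SourceCode(module_name="src/app/main.py", source_code="print('hello')"),
    SourceCode(module_name="src/app/util.py", source_code="ANSWER = 42"),
])

# to_str() flattens every file into the "##File: <path>" prompt layout
# that the generators hand to the LLM as context.
print(source_code_list.to_str())
# ##File: src/app/main.py
# print('hello')
#
# ##File: src/app/util.py
# ANSWER = 42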
{auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/auto_coder_lang.py

@@ -3,6 +3,20 @@ from byzerllm.utils import format_str_jinja2

 MESSAGES = {
     "en": {
+        "model_provider_select_title": "Select Model Provider",
+        "model_provider_select_text": "Please select your model provider:",
+        "model_provider_volcano": "Volcano Engine",
+        "model_provider_siliconflow": "SiliconFlow AI",
+        "model_provider_deepseek": "DeepSeek Official",
+        "model_provider_api_key_title": "API Key",
+        "model_provider_volcano_api_key_text": "Please enter your Volcano Engine API key:",
+        "model_provider_volcano_r1_text": "Please enter your Volcano Engine R1 endpoint (format: ep-20250204215011-vzbsg):",
+        "model_provider_volcano_v3_text": "Please enter your Volcano Engine V3 endpoint (format: ep-20250204215011-vzbsg):",
+        "model_provider_siliconflow_api_key_text": "Please enter your SiliconFlow AI API key:",
+        "model_provider_deepseek_api_key_text": "Please enter your DeepSeek API key:",
+        "model_provider_selected": "Provider configuration completed successfully! You can use /models command to view, add and modify all models later.",
+        "model_provider_success_title": "Success",
+        "index_file_filtered": "File {{file_path}} is filtered by model {{model_name}} restrictions",
         "models_no_active": "No active models found",
         "models_speed_test_results": "Model Speed Test Results",
         "models_testing": "Testing model: {{name}}...",
@@ -11,7 +25,7 @@ MESSAGES = {
         "generation_cancelled": "[Interrupted] Generation cancelled",
         "model_not_found": "Model {{model_name}} not found",
         "generating_shell_script": "Generating Shell Script",
-        "new_session_started": "New session started. Previous chat history has been archived.",
+        "new_session_started": "New session started. Previous chat history has been archived.",
         "memory_save_success": "✅ Saved to your memory",
         "file_decode_error": "Failed to decode file: {{file_path}}. Tried encodings: {{encodings}}",
         "file_write_error": "Failed to write file: {{file_path}}. Error: {{error}}",
@@ -21,7 +35,7 @@ MESSAGES = {
         "no_latest_commit": "Unable to get latest commit information",
         "code_review_error": "Code review process error: {{error}}",
         "index_file_too_large": "⚠️ File {{ file_path }} is too large ({{ file_size }} > {{ max_length }}), splitting into chunks...",
-        "index_update_success": "✅ {{ model_name }} Successfully updated index for {{ file_path }} (md5: {{ md5 }}) in {{ duration }}s",
+        "index_update_success": "✅ {{ model_name }} Successfully updated index for {{ file_path }} (md5: {{ md5 }}) in {{ duration }}s, input_tokens: {{ input_tokens }}, output_tokens: {{ output_tokens }}, input_cost: {{ input_cost }}, output_cost: {{ output_cost }}",
         "index_build_error": "❌ {{ model_name }} Error building index for {{ file_path }}: {{ error }}",
         "index_build_summary": "📊 Total Files: {{ total_files }}, Need to Build Index: {{ num_files }}",
         "building_index_progress": "⏳ Building Index: {{ counter }}/{{ num_files }}...",
@@ -30,7 +44,7 @@ MESSAGES = {
         "index_threads_completed": "✅ Completed {{ completed_threads }}/{{ total_threads }} threads",
         "index_related_files_fail": "⚠️ Failed to find related files for chunk {{ chunk_count }}",
         "index_file_removed": "🗑️ Removed non-existent file index: {{ file_path }}",
-        "index_file_saved": "💾 Saved index file, updated {{ updated_files }} files, removed {{ removed_files }} files",
+        "index_file_saved": "💾 Saved index file, updated {{ updated_files }} files, removed {{ removed_files }} files, input_tokens: {{ input_tokens }}, output_tokens: {{ output_tokens }}, input_cost: {{ input_cost }}, output_cost: {{ output_cost }}",
         "human_as_model_instructions": (
             "You are now in Human as Model mode. The content has been copied to your clipboard.\n"
             "The system is waiting for your input. When finished, enter 'EOF' on a new line to submit.\n"
@@ -81,7 +95,7 @@ MESSAGES = {
         "begin_index_source_code": "🚀 Begin to index source code in {{ source_dir }}",
         "stream_out_stats": "Model: {{ model_name }}, Total time: {{ elapsed_time }} seconds, First token time: {{ first_token_time }} seconds, Speed: {{ speed }} tokens/s, Input tokens: {{ input_tokens }}, Output tokens: {{ output_tokens }}, Input cost: {{ input_cost }}, Output cost: {{ output_cost }}",
         "quick_filter_stats": "{{ model_names }} 快速过滤器完成,耗时 {{ elapsed_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}",
-        "upsert_file": "✅ Updated file: {{ file_path }}",
+        "upsert_file": "✅ Updated file: {{ file_path }}",
         "unmerged_blocks_title": "Unmerged Blocks",
         "quick_filter_title": "{{ model_name }} is analyzing how to filter context...",
         "quick_filter_failed": "❌ Quick filter failed: {{ error }}. ",
@@ -95,8 +109,23 @@ MESSAGES = {
         "quick_filter_tokens_len": "📊 Current index size: {{ tokens_len }} tokens",
         "estimated_chat_input_tokens": "Estimated chat input tokens: {{ estimated_input_tokens }}",
         "estimated_input_tokens_in_generate": "Estimated input tokens in generate ({{ generate_mode }}): {{ estimated_input_tokens }}",
+        "model_has_access_restrictions": "{{model_name}} has access restrictions, cannot use the current function",
     },
     "zh": {
+        "model_provider_select_title": "选择模型供应商",
+        "model_provider_select_text": "请选择您的模型供应商:",
+        "model_provider_volcano": "火山方舟",
+        "model_provider_siliconflow": "硅基流动",
+        "model_provider_deepseek": "DeepSeek官方",
+        "model_provider_api_key_title": "API密钥",
+        "model_provider_volcano_api_key_text": "请输入您的火山方舟API密钥:",
+        "model_provider_volcano_r1_text": "请输入您的火山方舟 R1 推理点(格式如: ep-20250204215011-vzbsg):",
+        "model_provider_volcano_v3_text": "请输入您的火山方舟 V3 推理点(格式如: ep-20250204215011-vzbsg):",
+        "model_provider_siliconflow_api_key_text": "请输入您的硅基流动API密钥:",
+        "model_provider_deepseek_api_key_text": "请输入您的DeepSeek API密钥:",
+        "model_provider_selected": "供应商配置已成功完成!后续你可以使用 /models 命令,查看,新增和修改所有模型",
+        "model_provider_success_title": "成功",
+        "index_file_filtered": "文件 {{file_path}} 被模型 {{model_name}} 的访问限制过滤",
         "models_no_active": "未找到激活的模型",
         "models_speed_test_results": "模型速度测试结果",
         "models_testing": "正在测试模型: {{name}}...",
@@ -114,7 +143,7 @@ MESSAGES = {
         "no_latest_commit": "无法获取最新的提交信息",
         "code_review_error": "代码审查过程出错: {{error}}",
         "index_file_too_large": "⚠️ 文件 {{ file_path }} 过大 ({{ file_size }} > {{ max_length }}), 正在分块处理...",
-        "index_update_success": "✅ {{ model_name }} 成功更新 {{ file_path }} 的索引 (md5: {{ md5 }}), 耗时 {{ duration }}
+        "index_update_success": "✅ {{ model_name }} 成功更新 {{ file_path }} 的索引 (md5: {{ md5 }}), 耗时 {{ duration }} 秒, 输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}",
         "index_build_error": "❌ {{ model_name }} 构建 {{ file_path }} 索引时出错: {{ error }}",
         "index_build_summary": "📊 总文件数: {{ total_files }}, 需要构建索引: {{ num_files }}",
         "building_index_progress": "⏳ 正在构建索引: {{ counter }}/{{ num_files }}...",
@@ -123,7 +152,7 @@ MESSAGES = {
         "index_threads_completed": "✅ 已完成 {{ completed_threads }}/{{ total_threads }} 个线程",
         "index_related_files_fail": "⚠️ 无法为块 {{ chunk_count }} 找到相关文件",
         "index_file_removed": "🗑️ 已移除不存在的文件索引:{{ file_path }}",
-        "index_file_saved": "💾 已保存索引文件,更新了 {{ updated_files }} 个文件,移除了 {{ removed_files }}
+        "index_file_saved": "💾 已保存索引文件,更新了 {{ updated_files }} 个文件,移除了 {{ removed_files }} 个文件,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}",
         "human_as_model_instructions": (
             "您现在处于人类作为模型模式。内容已复制到您的剪贴板。\n"
             "系统正在等待您的输入。完成后,在新行输入'EOF'提交。\n"
@@ -188,8 +217,8 @@ MESSAGES = {
         "quick_filter_failed": "❌ 快速过滤器失败: {{ error }}. ",
         "estimated_chat_input_tokens": "对话输入token预估为: {{ estimated_input_tokens }}",
         "estimated_input_tokens_in_generate": "生成代码({{ generate_mode }})预计输入token数: {{ estimated_input_tokens_in_generate }}",
-
-    }
+        "model_has_access_restrictions": "{{model_name}} 有访问限制,无法使用当前功能",
+    }}


 def get_system_language():
@@ -203,5 +232,6 @@ def get_message(key):
     lang = get_system_language()
     return MESSAGES.get(lang, MESSAGES['en']).get(key, MESSAGES['en'][key])

+
 def get_message_with_format(msg_key: str, **kwargs):
     return format_str_jinja2(get_message(msg_key), **kwargs)
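All of these keys are rendered through the existing Jinja2 helper at the bottom of this file, so the new token/cost fields are simply extra template variables. A short sketch of how a caller renders the new index_file_filtered key (output shown is the en variant; the language is resolved from the system as before):

# Rendering sketch for the new locale keys.
from autocoder.common.auto_coder_lang import get_message_with_format

msg = get_message_with_format(
    "index_file_filtered",
    file_path="src/secret/config.py",
    model_name="deepseek_r1_chat",
)
print(msg)
# en: File src/secret/config.py is filtered by model deepseek_r1_chat restrictions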
{auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/code_auto_generate.py

@@ -11,6 +11,8 @@ import json
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
 from autocoder.utils import llms as llm_utils
+from autocoder.common import SourceCodeList
+from autocoder.privacy.model_filter import ModelPathFilter


 class CodeAutoGenerate:
@@ -156,10 +158,27 @@ class CodeAutoGenerate:
     }

     def single_round_run(
-        self, query: str,
+        self, query: str, source_code_list: SourceCodeList
     ) -> Tuple[List[str], Dict[str, str]]:
         llm_config = {"human_as_model": self.args.human_as_model}

+        # Apply model filter for code_llm
+        printer = Printer()
+        for llm in self.llms:
+            model_filter = ModelPathFilter.from_model_object(llm, self.args)
+            filtered_sources = []
+            for source in source_code_list.sources:
+                if model_filter.is_accessible(source.module_name):
+                    filtered_sources.append(source)
+                else:
+                    printer.print_in_terminal("index_file_filtered",
+                                              style="yellow",
+                                              file_path=source.module_name,
+                                              model_name=",".join(llm_utils.get_llm_names(llm)))
+
+        source_code_list = SourceCodeList(filtered_sources)
+        source_content = source_code_list.to_str()
+
         if self.args.request_id and not self.args.skip_events:
             queue_communicate.send_event_no_wait(
                 request_id=self.args.request_id,
@@ -262,10 +281,11 @@ class CodeAutoGenerate:
         return CodeGenerateResult(contents=results, conversations=conversations_list, metadata=statistics)

     def multi_round_run(
-        self, query: str,
+        self, query: str, source_code_list: SourceCodeList, max_steps: int = 10
     ) -> Tuple[List[str], List[Dict[str, str]]]:
         llm_config = {"human_as_model": self.args.human_as_model}
         result = []
+        source_content = source_code_list.to_str()

         if self.args.template == "common":
             init_prompt = self.multi_round_instruction.prompt(
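The same filter-then-rebuild loop recurs in all four generator classes (plain, diff, editblock, strict_diff). Note that in the diffed code filtered_sources is reinitialized on each pass of "for llm in self.llms", so with several target LLMs only the last one's rules determine which sources survive. The sketch below is a standalone version of the pattern that instead intersects every LLM's restrictions, which may be stricter than the shipped behavior:

# Standalone sketch of the per-LLM source filtering pattern; intersects
# restrictions across all LLMs rather than keeping only the last pass.
from autocoder.common import SourceCodeList
from autocoder.privacy.model_filter import ModelPathFilter

def filter_sources_for_llms(llms, source_code_list, args):
    sources = source_code_list.sources
    for llm in llms:
        model_filter = ModelPathFilter.from_model_object(llm, args)
        sources = [s for s in sources if model_filter.is_accessible(s.module_name)]
    return SourceCodeList(sources)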
{auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/code_auto_generate_diff.py

@@ -2,6 +2,7 @@ from typing import List, Dict, Tuple
 from autocoder.common.types import Mode, CodeGenerateResult
 from autocoder.common import AutoCoderArgs
 import byzerllm
+from autocoder.privacy.model_filter import ModelPathFilter
 from autocoder.utils.queue_communicate import queue_communicate, CommunicateEvent, CommunicateEventType
 from autocoder.common import sys_prompt
 from concurrent.futures import ThreadPoolExecutor
@@ -10,7 +11,7 @@ from autocoder.common.utils_code_auto_generate import chat_with_continue
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
 from autocoder.utils import llms as llm_utils
-
+from autocoder.common import SourceCodeList

 class CodeAutoGenerateDiff:
     def __init__(
@@ -302,9 +303,10 @@ class CodeAutoGenerateDiff:
     }

     def single_round_run(
-        self, query: str,
+        self, query: str, source_code_list: SourceCodeList
     ) -> CodeGenerateResult:
         llm_config = {"human_as_model": self.args.human_as_model}
+        source_content = source_code_list.to_str()

         if self.args.template == "common":
             init_prompt = self.single_round_instruction.prompt(
@@ -410,11 +412,28 @@ class CodeAutoGenerateDiff:
         return CodeGenerateResult(contents=results, conversations=conversations_list, metadata=statistics)

     def multi_round_run(
-        self, query: str,
+        self, query: str, source_code_list: SourceCodeList, max_steps: int = 10
     ) -> CodeGenerateResult:
+
+        # Apply model filter for code_llm
+        printer = Printer()
+        for llm in self.llms:
+            model_filter = ModelPathFilter.from_model_object(llm, self.args)
+            filtered_sources = []
+            for source in source_code_list.sources:
+                if model_filter.is_accessible(source.module_name):
+                    filtered_sources.append(source)
+                else:
+                    printer.print_in_terminal("index_file_filtered",
+                                              style="yellow",
+                                              file_path=source.path,
+                                              model_name=",".join(llm_utils.get_llm_names(llm)))
+
+        source_code_list = SourceCodeList(filtered_sources)
+
         llm_config = {"human_as_model": self.args.human_as_model}
         result = []
-
+        source_content = source_code_list.to_str()
         if self.args.template == "common":
             init_prompt = self.multi_round_instruction.prompt(
                 instruction=query, content=source_content, context=self.args.context
{auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/code_auto_generate_editblock.py
RENAMED

@@ -3,6 +3,7 @@ from autocoder.common.types import Mode, CodeGenerateResult
 from autocoder.common import AutoCoderArgs
 import byzerllm
 from autocoder.common import sys_prompt
+from autocoder.privacy.model_filter import ModelPathFilter
 from autocoder.utils.queue_communicate import (
     queue_communicate,
     CommunicateEvent,
@@ -14,6 +15,7 @@ from autocoder.common.utils_code_auto_generate import chat_with_continue
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
 from autocoder.utils import llms as llm_utils
+from autocoder.common import SourceCodeList


 class CodeAutoGenerateEditBlock:
@@ -384,10 +386,29 @@ class CodeAutoGenerateEditBlock:
     }

     def single_round_run(
-        self, query: str,
+        self, query: str, source_code_list: SourceCodeList
     ) -> CodeGenerateResult:
+
+        # Apply model filter for code_llm
+        printer = Printer()
+        for llm in self.llms:
+            model_filter = ModelPathFilter.from_model_object(llm, self.args)
+            filtered_sources = []
+            for source in source_code_list.sources:
+                if model_filter.is_accessible(source.module_name):
+                    filtered_sources.append(source)
+                else:
+                    printer.print_in_terminal("index_file_filtered",
+                                              style="yellow",
+                                              file_path=source.module_name,
+                                              model_name=",".join(llm_utils.get_llm_names(llm)))
+
+        source_code_list = SourceCodeList(filtered_sources)
+
         llm_config = {"human_as_model": self.args.human_as_model}

+        source_content = source_code_list.to_str()
+
         if self.args.template == "common":
             init_prompt = self.single_round_instruction.prompt(
                 instruction=query, content=source_content, context=self.args.context
@@ -498,10 +519,11 @@ class CodeAutoGenerateEditBlock:
         return CodeGenerateResult(contents=results, conversations=conversations_list, metadata=statistics)

     def multi_round_run(
-        self, query: str,
+        self, query: str, source_code_list: SourceCodeList, max_steps: int = 10
     ) -> CodeGenerateResult:
         llm_config = {"human_as_model": self.args.human_as_model}
         result = []
+        source_content = source_code_list.to_str()

         if self.args.template == "common":
             init_prompt = self.multi_round_instruction.prompt(
{auto_coder-0.1.256 → auto_coder-0.1.258}/src/autocoder/common/code_auto_generate_strict_diff.py
RENAMED

@@ -10,7 +10,8 @@ from autocoder.common.utils_code_auto_generate import chat_with_continue
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
 from autocoder.utils import llms as llm_utils
-
+from autocoder.common import SourceCodeList
+from autocoder.privacy.model_filter import ModelPathFilter
 class CodeAutoGenerateStrictDiff:
     def __init__(
         self, llm: byzerllm.ByzerLLM, args: AutoCoderArgs, action=None
@@ -272,9 +273,10 @@ class CodeAutoGenerateStrictDiff:
     }

     def single_round_run(
-        self, query: str,
+        self, query: str, source_code_list: SourceCodeList
     ) -> CodeGenerateResult:
         llm_config = {"human_as_model": self.args.human_as_model}
+        source_content = source_code_list.to_str()

         if self.args.template == "common":
             init_prompt = self.single_round_instruction.prompt(
@@ -379,10 +381,28 @@ class CodeAutoGenerateStrictDiff:
         return CodeGenerateResult(contents=results, conversations=conversations_list, metadata=statistics)

     def multi_round_run(
-        self, query: str,
+        self, query: str, source_code_list: SourceCodeList, max_steps: int = 10
     ) -> CodeGenerateResult:
+
+        # Apply model filter for code_llm
+        printer = Printer()
+        for llm in self.llms:
+            model_filter = ModelPathFilter.from_model_object(llm, self.args)
+            filtered_sources = []
+            for source in source_code_list.sources:
+                if model_filter.is_accessible(source.module_name):
+                    filtered_sources.append(source)
+                else:
+                    printer.print_in_terminal("index_file_filtered",
+                                              style="yellow",
+                                              file_path=source.module_name,
+                                              model_name=",".join(llm_utils.get_llm_names(llm)))
+
+        source_code_list = SourceCodeList(filtered_sources)
+
         llm_config = {"human_as_model": self.args.human_as_model}
         result = []
+        source_content = source_code_list.to_str()

         if self.args.template == "common":
             init_prompt = self.multi_round_instruction.prompt(