auto-coder 0.1.251.tar.gz → 0.1.252.tar.gz
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Potentially problematic release: this version of auto-coder might be problematic. Details are available on the registry page.
- {auto_coder-0.1.251 → auto_coder-0.1.252}/PKG-INFO +2 -2
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/auto_coder.egg-info/PKG-INFO +2 -2
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/auto_coder.egg-info/SOURCES.txt +2 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/auto_coder.egg-info/requires.txt +1 -1
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/auto_coder.py +28 -4
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/auto_coder_rag.py +198 -35
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/chat_auto_coder.py +56 -3
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/chat_auto_coder_lang.py +21 -3
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/__init__.py +1 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/auto_coder_lang.py +6 -4
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/code_modification_ranker.py +3 -3
- auto_coder-0.1.252/src/autocoder/common/global_cancel.py +21 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/dispacher/actions/action.py +29 -8
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/dispacher/actions/plugins/action_regex_project.py +17 -5
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/index/filter/quick_filter.py +4 -6
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/index/index.py +13 -6
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/models.py +87 -6
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/doc_filter.py +1 -3
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/long_context_rag.py +7 -5
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/token_limiter.py +1 -3
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/utils/auto_coder_utils/chat_stream_out.py +13 -2
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/utils/llms.py +15 -1
- auto_coder-0.1.252/src/autocoder/utils/thread_utils.py +201 -0
- auto_coder-0.1.252/src/autocoder/version.py +1 -0
- auto_coder-0.1.251/src/autocoder/version.py +0 -1
- {auto_coder-0.1.251 → auto_coder-0.1.252}/LICENSE +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/README.md +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/setup.cfg +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/setup.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/auto_coder.egg-info/dependency_links.txt +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/auto_coder.egg-info/entry_points.txt +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/auto_coder.egg-info/top_level.txt +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/agent/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/agent/auto_demand_organizer.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/agent/auto_filegroup.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/agent/auto_guess_query.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/agent/auto_review_commit.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/agent/auto_tool.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/agent/coder.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/agent/designer.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/agent/planner.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/agent/project_reader.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/auto_coder_rag_client_mcp.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/auto_coder_rag_mcp.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/auto_coder_server.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/benchmark.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/chat/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/command_args.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/JupyterClient.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/ShellClient.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/anything2images.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/anything2img.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/audio.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/buildin_tokenizer.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/chunk_validation.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/cleaner.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/code_auto_execute.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/code_auto_generate.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/code_auto_generate_diff.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/code_auto_generate_editblock.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/code_auto_generate_strict_diff.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/code_auto_merge.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/code_auto_merge_diff.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/code_auto_merge_editblock.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/code_auto_merge_strict_diff.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/command_completer.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/command_generator.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/command_templates.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/const.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/files.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/git_utils.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/image_to_page.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/interpreter.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/llm_rerank.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/mcp_hub.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/mcp_server.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/mcp_servers/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/mcp_servers/mcp_server_perplexity.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/mcp_tools.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/memory_manager.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/printer.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/recall_validation.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/screenshots.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/search.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/search_replace.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/shells.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/sys_prompt.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/text.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/types.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/utils_code_auto_generate.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/data/tokenizer.json +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/db/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/db/store.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/dispacher/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/dispacher/actions/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/dispacher/actions/copilot.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/dispacher/actions/plugins/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/dispacher/actions/plugins/action_translate.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/index/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/index/entry.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/index/filter/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/index/filter/normal_filter.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/index/for_command.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/index/symbols_utils.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/index/types.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/lang.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/pyproject/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/api_server.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/cache/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/cache/base_cache.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/cache/byzer_storage_cache.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/cache/file_monitor_cache.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/cache/simple_cache.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/document_retriever.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/llm_wrapper.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/loaders/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/loaders/docx_loader.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/loaders/excel_loader.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/loaders/pdf_loader.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/loaders/ppt_loader.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/rag_config.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/rag_entry.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/raw_rag.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/relevant_utils.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/simple_directory_reader.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/simple_rag.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/stream_event/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/stream_event/event_writer.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/stream_event/types.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/token_checker.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/token_counter.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/types.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/utils.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/rag/variable_holder.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/regexproject/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/suffixproject/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/tsproject/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/utils/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/utils/_markitdown.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/utils/auto_coder_utils/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/utils/chat_auto_coder_utils/__init__.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/utils/conversation_store.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/utils/llm_client_interceptors.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/utils/log_capture.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/utils/multi_turn.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/utils/operate_config_api.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/utils/print_table.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/utils/queue_communicate.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/utils/request_event_queue.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/utils/request_queue.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/utils/rest.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/utils/tests.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/utils/types.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/tests/test_action_regex_project.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/tests/test_chat_auto_coder.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/tests/test_code_auto_merge_editblock.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/tests/test_command_completer.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/tests/test_planner.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/tests/test_queue_communicate.py +0 -0
- {auto_coder-0.1.251 → auto_coder-0.1.252}/tests/test_symbols_utils.py +0 -0
{auto_coder-0.1.251 → auto_coder-0.1.252}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.251
+Version: 0.1.252
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -26,7 +26,7 @@ Requires-Dist: tabulate
 Requires-Dist: jupyter_client
 Requires-Dist: prompt-toolkit
 Requires-Dist: tokenizers
-Requires-Dist: byzerllm[saas]>=0.1.
+Requires-Dist: byzerllm[saas]>=0.1.163
 Requires-Dist: patch
 Requires-Dist: diff_match_patch
 Requires-Dist: GitPython
```
{auto_coder-0.1.251 → auto_coder-0.1.252}/src/auto_coder.egg-info/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.251
+Version: 0.1.252
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -26,7 +26,7 @@ Requires-Dist: tabulate
 Requires-Dist: jupyter_client
 Requires-Dist: prompt-toolkit
 Requires-Dist: tokenizers
-Requires-Dist: byzerllm[saas]>=0.1.
+Requires-Dist: byzerllm[saas]>=0.1.163
 Requires-Dist: patch
 Requires-Dist: diff_match_patch
 Requires-Dist: GitPython
```
{auto_coder-0.1.251 → auto_coder-0.1.252}/src/auto_coder.egg-info/SOURCES.txt

```diff
@@ -57,6 +57,7 @@ src/autocoder/common/command_templates.py
 src/autocoder/common/const.py
 src/autocoder/common/files.py
 src/autocoder/common/git_utils.py
+src/autocoder/common/global_cancel.py
 src/autocoder/common/image_to_page.py
 src/autocoder/common/interpreter.py
 src/autocoder/common/llm_rerank.py
@@ -144,6 +145,7 @@ src/autocoder/utils/request_event_queue.py
 src/autocoder/utils/request_queue.py
 src/autocoder/utils/rest.py
 src/autocoder/utils/tests.py
+src/autocoder/utils/thread_utils.py
 src/autocoder/utils/types.py
 src/autocoder/utils/auto_coder_utils/__init__.py
 src/autocoder/utils/auto_coder_utils/chat_stream_out.py
```
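Neither new module's body appears in this diff; SOURCES.txt only registers them. The rest of the changeset hints at their roles: thread_utils.py exports run_in_thread and run_in_raw_thread (imported in chat_auto_coder.py below), and global_cancel.py pairs with the new "generation_cancelled" messages. As a purely hypothetical sketch of what a small global-cancel helper of this kind usually looks like (every name below is an assumption, not the package's actual API):

```python
import threading

# Hypothetical sketch only; the real global_cancel.py (+21 lines) is not
# shown anywhere in this diff.
class GlobalCancel:
    """Process-wide cancellation flag that long-running loops can poll."""

    def __init__(self) -> None:
        self._event = threading.Event()

    def request(self) -> None:
        self._event.set()        # e.g. from a keyboard-interrupt handler

    def requested(self) -> bool:
        return self._event.is_set()

    def reset(self) -> None:
        self._event.clear()      # clear before starting a new generation


global_cancel = GlobalCancel()   # module-level singleton
```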
{auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/auto_coder.py

```diff
@@ -6,6 +6,7 @@ from autocoder.common import git_utils, code_auto_execute
 from autocoder.utils.llm_client_interceptors import token_counter_interceptor
 from autocoder.db.store import Store
 
+from autocoder.utils.llms import get_llm_names
 from autocoder.utils.queue_communicate import (
     queue_communicate,
     CommunicateEvent,
@@ -765,6 +766,23 @@ def main(input_args: Optional[List[str]] = None):
         )
         llm.setup_sub_client("planner_model", planner_model)
 
+        if args.commit_model:
+            model_name = args.commit_model.strip()
+            model_info = models_module.get_model_by_name(model_name)
+            commit_model = byzerllm.SimpleByzerLLM(default_model_name=model_name)
+            commit_model.deploy(
+                model_path="",
+                pretrained_model_type=model_info["model_type"],
+                udf_name=model_name,
+                infer_params={
+                    "saas.base_url": model_info["base_url"],
+                    "saas.api_key": model_info["api_key"],
+                    "saas.model": model_info["model_name"],
+                    "saas.is_reasoning": model_info["is_reasoning"]
+                }
+            )
+            llm.setup_sub_client("commit_model", commit_model)
+
@@ -875,6 +893,11 @@ def main(input_args: Optional[List[str]] = None):
         designer_model.setup_default_model_name(args.designer_model)
         llm.setup_sub_client("designer_model", designer_model)
 
+        if args.commit_model:
+            commit_model = byzerllm.ByzerLLM()
+            commit_model.setup_default_model_name(args.commit_model)
+            llm.setup_sub_client("commit_model", commit_model)
+
     else:
         llm = None
@@ -1348,9 +1371,7 @@ def main(input_args: Optional[List[str]] = None):
             )
 
 
-            model_name = 
-            if not model_name:
-                model_name = "unknown(without default model name)"
+            model_name = ",".join(get_llm_names(chat_llm))
 
             assistant_response, last_meta = stream_out(
                 v,
@@ -1364,10 +1385,13 @@ def main(input_args: Optional[List[str]] = None):
             if last_meta:
                 elapsed_time = time.time() - start_time
                 printer = Printer()
+                speed = last_meta.generated_tokens_count / elapsed_time
                 printer.print_in_terminal("stream_out_stats",
                                           elapsed_time=elapsed_time,
+                                          first_token_time=last_meta.first_token_time,
                                           input_tokens=last_meta.input_tokens_count,
-                                          output_tokens=last_meta.generated_tokens_count
+                                          output_tokens=last_meta.generated_tokens_count,
+                                          speed=round(speed, 2))
 
                 chat_history["ask_conversation"].append(
                     {"role": "assistant", "content": assistant_response}
```
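Two details in these hunks are worth calling out. First, the commit-model block repeats a deployment pattern that recurs throughout this release: read an entry from models_module.get_model_by_name and forward five of its keys into SimpleByzerLLM.deploy. A minimal sketch of the entry shape that implies (the keys come from the hunks above; the values are invented):

```python
# Assumed shape of a model entry, inferred only from the keys the hunks read.
# All values here are illustrative, not from the package.
model_info = {
    "model_type": "saas/openai",               # -> pretrained_model_type
    "model_name": "deepseek-chat",             # -> infer_params["saas.model"]
    "base_url": "https://api.example.com/v1",  # -> infer_params["saas.base_url"]
    "api_key": "<api key>",                    # -> infer_params["saas.api_key"]
    "is_reasoning": False,                     # -> infer_params["saas.is_reasoning"]
}
```

Second, the new speed figure is plain division over the whole call: generated_tokens_count / elapsed_time, so 500 tokens in 10.0 s reports round(50.0, 2) = 50.0 tokens/s. The denominator includes the wait before the first token, which the new first_token_time field now surfaces separately.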
{auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/auto_coder_rag.py

```diff
@@ -32,7 +32,10 @@ if platform.system() == "Windows":
     init()
 
 
-def initialize_system():
+def initialize_system(args):
+    if args.product_mode == "lite":
+        return
+
     print(f"\n\033[1;34m{get_message('initializing')}\033[0m")
 
     def print_status(message, status):
@@ -316,13 +319,30 @@ def main(input_args: Optional[List[str]] = None):
         action="store_true",
         help="Whether to return responses without contexts. only works when pro plugin is installed",
     )
-
-
+
+    serve_parser.add_argument("--data_cells_max_num",
                               type=int,
                               default=2000,
                               help="Maximum number of data cells to process",
                               )
 
+    serve_parser.add_argument(
+        "--product_mode",
+        type=str,
+        default="pro",
+        help="The mode of the auto-coder.rag, lite/pro default is pro",
+    )
+    serve_parser.add_argument(
+        "--lite",
+        action="store_true",
+        help="Run in lite mode (equivalent to --product_mode=lite)",
+    )
+    serve_parser.add_argument(
+        "--pro",
+        action="store_true",
+        help="Run in pro mode (equivalent to --product_mode=pro)",
+    )
+
     serve_parser.add_argument(
         "--recall_model",
         default="",
@@ -373,6 +393,22 @@ def main(input_args: Optional[List[str]] = None):
     # Tools command
     tools_parser = subparsers.add_parser("tools", help="Various tools")
     tools_subparsers = tools_parser.add_subparsers(dest="tool", help="Available tools")
+    tools_parser.add_argument(
+        "--product_mode",
+        type=str,
+        default="pro",
+        help="The mode of the auto-coder.rag, lite/pro default is pro",
+    )
+    tools_parser.add_argument(
+        "--lite",
+        action="store_true",
+        help="Run in lite mode (equivalent to --product_mode=lite)",
+    )
+    tools_parser.add_argument(
+        "--pro",
+        action="store_true",
+        help="Run in pro mode (equivalent to --product_mode=pro)",
+    )
 
     # Count tool
     count_parser = tools_subparsers.add_parser("count", help="Count tokens in a file")
@@ -431,8 +467,15 @@ def main(input_args: Optional[List[str]] = None):
         benchmark_byzerllm(args.model, args.parallel, args.rounds, args.query)
 
     elif args.command == "serve":
+        # Handle lite/pro flags
+        if args.lite:
+            args.product_mode = "lite"
+        elif args.pro:
+            args.product_mode = "pro"
+
         if not args.quick:
-            initialize_system()
+            initialize_system(args)
+
         server_args = ServerArgs(
             **{
                 arg: getattr(args, arg)
@@ -448,7 +491,11 @@ def main(input_args: Optional[List[str]] = None):
             }
         )
 
-
+
+        if auto_coder_args.enable_hybrid_index and args.product_mode == "lite":
+            raise Exception("Hybrid index is not supported in lite mode")
+
+        if auto_coder_args.enable_hybrid_index and args.product_mode == "pro":
            # 尝试连接storage
            try:
                from byzerllm.apps.byzer_storage.simple_api import ByzerStorage
@@ -460,36 +507,107 @@ def main(input_args: Optional[List[str]] = None):
                     "When enable_hybrid_index is true, ByzerStorage must be started"
                 )
                 logger.error("Please run 'byzerllm storage start' first")
-                return
-
-
-        llm = byzerllm.ByzerLLM()
-        llm.setup_default_model_name(args.model)
-
-        # Setup sub models if specified
-        if args.recall_model:
-            recall_model = byzerllm.ByzerLLM()
-            recall_model.setup_default_model_name(args.recall_model)
-            llm.setup_sub_client("recall_model", recall_model)
-
-        if args.chunk_model:
-            chunk_model = byzerllm.ByzerLLM()
-            chunk_model.setup_default_model_name(args.chunk_model)
-            llm.setup_sub_client("chunk_model", chunk_model)
+                return
+
+
 
-        if args.
-
-
-            llm.
+        if args.product_mode == "pro":
+            byzerllm.connect_cluster(address=args.ray_address)
+            llm = byzerllm.ByzerLLM()
+            llm.skip_nontext_check = True
+            llm.setup_default_model_name(args.model)
+
+            # Setup sub models if specified
+            if args.recall_model:
+                recall_model = byzerllm.ByzerLLM()
+                recall_model.setup_default_model_name(args.recall_model)
+                recall_model.skip_nontext_check = True
+                llm.setup_sub_client("recall_model", recall_model)
+
+            if args.chunk_model:
+                chunk_model = byzerllm.ByzerLLM()
+                chunk_model.setup_default_model_name(args.chunk_model)
+                llm.setup_sub_client("chunk_model", chunk_model)
+
+            if args.qa_model:
+                qa_model = byzerllm.ByzerLLM()
+                qa_model.setup_default_model_name(args.qa_model)
+                qa_model.skip_nontext_check = True
+                llm.setup_sub_client("qa_model", qa_model)
+
+            # 当启用hybrid_index时,检查必要的组件
+            if auto_coder_args.enable_hybrid_index:
+                if not llm.is_model_exist("emb"):
+                    logger.error(
+                        "When enable_hybrid_index is true, an 'emb' model must be deployed"
+                    )
+                    return
+                llm.setup_default_emb_model_name("emb")
+
+        elif args.product_mode == "lite":
+            from autocoder import models as models_module
+            model_info = models_module.get_model_by_name(args.model)
+            llm = byzerllm.SimpleByzerLLM(default_model_name=args.model)
+            llm.deploy(
+                model_path="",
+                pretrained_model_type=model_info["model_type"],
+                udf_name=args.model,
+                infer_params={
+                    "saas.base_url": model_info["base_url"],
+                    "saas.api_key": model_info["api_key"],
+                    "saas.model": model_info["model_name"],
+                    "saas.is_reasoning": model_info["is_reasoning"]
+                }
+            )
 
-
-
-
-
+            # Setup sub models if specified
+            if args.recall_model:
+                model_info = models_module.get_model_by_name(args.recall_model)
+                recall_model = byzerllm.SimpleByzerLLM(default_model_name=args.recall_model)
+                recall_model.deploy(
+                    model_path="",
+                    pretrained_model_type=model_info["model_type"],
+                    udf_name=args.recall_model,
+                    infer_params={
+                        "saas.base_url": model_info["base_url"],
+                        "saas.api_key": model_info["api_key"],
+                        "saas.model": model_info["model_name"],
+                        "saas.is_reasoning": model_info["is_reasoning"]
+                    }
                 )
-
+                llm.setup_sub_client("recall_model", recall_model)
+
+            if args.chunk_model:
+                model_info = models_module.get_model_by_name(args.chunk_model)
+                chunk_model = byzerllm.SimpleByzerLLM(default_model_name=args.chunk_model)
+                chunk_model.deploy(
+                    model_path="",
+                    pretrained_model_type=model_info["model_type"],
+                    udf_name=args.chunk_model,
+                    infer_params={
+                        "saas.base_url": model_info["base_url"],
+                        "saas.api_key": model_info["api_key"],
+                        "saas.model": model_info["model_name"],
+                        "saas.is_reasoning": model_info["is_reasoning"]
+                    }
+                )
+                llm.setup_sub_client("chunk_model", chunk_model)
+
+            if args.qa_model:
+                model_info = models_module.get_model_by_name(args.qa_model)
+                qa_model = byzerllm.SimpleByzerLLM(default_model_name=args.qa_model)
+                qa_model.deploy(
+                    model_path="",
+                    pretrained_model_type=model_info["model_type"],
+                    udf_name=args.qa_model,
+                    infer_params={
+                        "saas.base_url": model_info["base_url"],
+                        "saas.api_key": model_info["api_key"],
+                        "saas.model": model_info["model_name"],
+                        "saas.is_reasoning": model_info["is_reasoning"]
+                    }
+                )
+                llm.setup_sub_client("qa_model", qa_model)
 
         if server_args.doc_dir:
             auto_coder_args.rag_type = "simple"
@@ -577,16 +695,61 @@ def main(input_args: Optional[List[str]] = None):
             count_tokens(args.tokenizer_path, args.file)
         elif args.tool == "recall":
             from .common.recall_validation import validate_recall
-
-
+            from autocoder import models as models_module
+
+            # Handle lite/pro flags
+            if args.lite:
+                args.product_mode = "lite"
+            elif args.pro:
+                args.product_mode = "pro"
+
+            if args.product_mode == "pro":
+                llm = byzerllm.ByzerLLM.from_default_model(args.model)
+            else:  # lite mode
+                model_info = models_module.get_model_by_name(args.model)
+                llm = byzerllm.SimpleByzerLLM(default_model_name=args.model)
+                llm.deploy(
+                    model_path="",
+                    pretrained_model_type=model_info["model_type"],
+                    udf_name=args.model,
+                    infer_params={
+                        "saas.base_url": model_info["base_url"],
+                        "saas.api_key": model_info["api_key"],
+                        "saas.model": model_info["model_name"],
+                        "saas.is_reasoning": model_info["is_reasoning"]
+                    }
+                )
 
             content = None if not args.content else [args.content]
             result = validate_recall(llm, content=content, query=args.query)
             print(f"Recall Validation Result:\n{result}")
+
         elif args.tool == "chunk":
             from .common.chunk_validation import validate_chunk
+            from autocoder import models as models_module
+
+            if args.lite:
+                args.product_mode = "lite"
+            elif args.pro:
+                args.product_mode = "pro"
+
+            if args.product_mode == "pro":
+                llm = byzerllm.ByzerLLM.from_default_model(args.model)
+            else:  # lite mode
+                model_info = models_module.get_model_by_name(args.model)
+                llm = byzerllm.SimpleByzerLLM(default_model_name=args.model)
+                llm.deploy(
+                    model_path="",
+                    pretrained_model_type=model_info["model_type"],
+                    udf_name=args.model,
+                    infer_params={
+                        "saas.base_url": model_info["base_url"],
+                        "saas.api_key": model_info["api_key"],
+                        "saas.model": model_info["model_name"],
+                        "saas.is_reasoning": model_info["is_reasoning"]
+                    }
+                )
 
-            llm = byzerllm.ByzerLLM.from_default_model(args.model)
             content = None if not args.content else [args.content]
             result = validate_chunk(llm, content=content, query=args.query)
             print(f"Chunk Model Validation Result:\n{result}")
```
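The same lite/pro flag normalization now appears three times (serve, tools recall, tools chunk). Restated as a standalone function to make the precedence explicit; this is a sketch of the logic above, not code from the package:

```python
def resolve_product_mode(lite: bool, pro: bool, product_mode: str = "pro") -> str:
    """Mirror of the flag handling added in the hunks above."""
    if lite:
        return "lite"       # --lite is checked first, so it wins over --pro
    if pro:
        return "pro"
    return product_mode     # otherwise fall back to --product_mode

# Consequence of the ordering: passing both flags selects lite mode.
assert resolve_product_mode(lite=True, pro=True) == "lite"
```

Note also that initialize_system(args) now returns immediately in lite mode, so `auto-coder.rag serve --lite` skips the environment initialization entirely, and enable_hybrid_index raises in lite mode rather than silently degrading.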
{auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/chat_auto_coder.py

```diff
@@ -54,7 +54,7 @@ import shlex
 from autocoder.utils.llms import get_single_llm
 import pkg_resources
 from autocoder.common.printer import Printer
-from 
+from autocoder.utils.thread_utils import run_in_thread,run_in_raw_thread
 
 class SymbolItem(BaseModel):
     symbol_name: str
@@ -136,6 +136,8 @@ commands = [
 
 
 def show_help():
+    print(f"\033[1m{get_message('official_doc')}\033[0m")
+    print()
     print(f"\033[1m{get_message('supported_commands')}\033[0m")
     print()
     print(
@@ -1713,7 +1715,7 @@ def commit(query: str):
         if os.path.exists(temp_yaml):
             os.remove(temp_yaml)
 
-        target_model = args.
+        target_model = args.commit_model or args.model
         llm = get_single_llm(target_model, product_mode)
         printer = Printer()
         printer.print_in_terminal("commit_generating", style="yellow", model_name=target_model)
@@ -2208,6 +2210,9 @@ def manage_models(params, query: str):
             table.add_column("Name", style="cyan", width=40, no_wrap=False)
             table.add_column("Model Name", style="magenta", width=30, overflow="fold")
             table.add_column("Base URL", style="white", width=50, overflow="fold")
+            table.add_column("Input Price (M)", style="magenta", width=15)
+            table.add_column("Output Price (M)", style="magenta", width=15)
+            table.add_column("Speed (s/req)", style="blue", width=15)
             for m in models_data:
                 # Check if api_key_path exists and file exists
                 is_api_key_set = "api_key" in m
@@ -2221,12 +2226,60 @@ def manage_models(params, query: str):
                 table.add_row(
                     name,
                     m.get("model_name", ""),
-                    m.get("base_url", "")
+                    m.get("base_url", ""),
+                    f"{m.get('input_price', 0.0):.2f}",
+                    f"{m.get('output_price', 0.0):.2f}",
+                    f"{m.get('average_speed', 0.0):.3f}"
                 )
             console.print(table)
         else:
             printer.print_in_terminal("models_no_models", style="yellow")
 
+    elif subcmd == "/input_price":
+        args = query.strip().split()
+        if len(args) >= 2:
+            name = args[0]
+            try:
+                price = float(args[1])
+                if models.update_model_input_price(name, price):
+                    printer.print_in_terminal("models_input_price_updated", style="green", name=name, price=price)
+                else:
+                    printer.print_in_terminal("models_not_found", style="red", name=name)
+            except ValueError as e:
+                printer.print_in_terminal("models_invalid_price", style="red", error=str(e))
+        else:
+            printer.print_in_terminal("models_input_price_usage", style="red")
+
+    elif subcmd == "/output_price":
+        args = query.strip().split()
+        if len(args) >= 2:
+            name = args[0]
+            try:
+                price = float(args[1])
+                if models.update_model_output_price(name, price):
+                    printer.print_in_terminal("models_output_price_updated", style="green", name=name, price=price)
+                else:
+                    printer.print_in_terminal("models_not_found", style="red", name=name)
+            except ValueError as e:
+                printer.print_in_terminal("models_invalid_price", style="red", error=str(e))
+        else:
+            printer.print_in_terminal("models_output_price_usage", style="red")
+
+    elif subcmd == "/speed":
+        args = query.strip().split()
+        if len(args) >= 2:
+            name = args[0]
+            try:
+                speed = float(args[1])
+                if models.update_model_speed(name, speed):
+                    printer.print_in_terminal("models_speed_updated", style="green", name=name, speed=speed)
+                else:
+                    printer.print_in_terminal("models_not_found", style="red", name=name)
+            except ValueError as e:
+                printer.print_in_terminal("models_invalid_speed", style="red", error=str(e))
+        else:
+            printer.print_in_terminal("models_speed_usage", style="red")
+
     elif subcmd == "/add":
         # Support both simplified and legacy formats
         args = query.strip().split(" ")
```
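The three new /models subcommands share one parse-validate-update pattern; the only differences are the updater called (update_model_input_price, update_model_output_price, update_model_speed) and the messages printed. Condensed, with a hypothetical input:

```python
# User input: /models /speed deepseek_chat 3.5
query = "deepseek_chat 3.5"              # text following "/models /speed"
parts = query.strip().split()
if len(parts) >= 2:
    name = parts[0]
    value = float(parts[1])              # ValueError -> "models_invalid_speed"
    ok = models.update_model_speed(name, value)  # False -> "models_not_found"
```

Per the message strings and the new table columns, prices are quoted per million tokens and speed in seconds per request (the "Speed (s/req)" column, shown to three decimals).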
{auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/chat_auto_coder_lang.py

```diff
@@ -85,7 +85,7 @@ MESSAGES = {
         "design_desc": "Generate SVG image based on the provided description",
         "commit_desc": "Auto generate yaml file and commit changes based on user's manual changes",
         "models_desc": "Manage model configurations, only available in lite mode",
-        "models_usage": "Usage: /models /list|/add|/add_model|/remove ...",
+        "models_usage": "Usage: /models /list|/add|/add_model|/remove|/price|/speed ...",
         "models_added": "Added/Updated model '{{name}}' successfully.",
         "models_add_failed": "Failed to add model '{{name}}'. Model not found in defaults.",
         "models_add_usage": "Usage: /models /add <name> <api_key> or\n/models /add <name> <model_type> <model_name> <base_url> <api_key_path> [description]",
@@ -96,6 +96,14 @@ MESSAGES = {
         "models_add_model_remove": "Model '{{name}}' not found.",
         "models_add_model_removed": "Removed model: {{name}}",
         "models_unknown_subcmd": "Unknown subcommand: {{subcmd}}",
+        "models_input_price_updated": "Updated input price for model {{name}} to {{price}} M/token",
+        "models_output_price_updated": "Updated output price for model {{name}} to {{price}} M/token",
+        "models_invalid_price": "Invalid price value: {{error}}",
+        "models_input_price_usage": "Usage: /models /input_price <name> <value>",
+        "models_output_price_usage": "Usage: /models /output_price <name> <value>",
+        "models_speed_updated": "Updated speed for model {{name}} to {{speed}} s/request",
+        "models_invalid_speed": "Invalid speed value: {{error}}",
+        "models_speed_usage": "Usage: /models /speed <name> <value>",
         "models_title": "All Models (内置 + models.json)",
         "models_no_models": "No models found.",
         "models_lite_only": "The /models command is only available in lite mode",
@@ -117,6 +125,7 @@ MESSAGES = {
         "commit_message": "{{ model_name }} Generated commit message: {{ message }}",
         "commit_failed": "{{ model_name }} Failed to generate commit message: {{ error }}",
         "confirm_execute": "Do you want to execute this script?",
+        "official_doc": "Official Documentation: https://uelng8wukz.feishu.cn/wiki/NhPNwSRcWimKFIkQINIckloBncI",
     },
     "zh": {
         "commit_generating": "{{ model_name }} 正在生成提交信息...",
@@ -204,7 +213,7 @@ MESSAGES = {
         "conf_value": "值",
         "conf_title": "配置设置",
         "conf_subtitle": "使用 /conf <key>:<value> 修改这些设置",
-        "models_usage": "用法: /models /list|/add|/add_model|/remove ...",
+        "models_usage": "用法: /models /list|/add|/add_model|/remove|/price|/speed ...",
         "models_added": "成功添加/更新模型 '{{name}}'。",
         "models_add_failed": "添加模型 '{{name}}' 失败。在默认模型中未找到该模型。",
         "models_add_usage": "用法: /models /add <name> <api_key> 或\n/models /add <name> <model_type> <model_name> <base_url> <api_key_path> [description]",
@@ -215,6 +224,14 @@ MESSAGES = {
         "models_add_model_remove": "找不到模型 '{{name}}'。",
         "models_add_model_removed": "已移除模型: {{name}}",
         "models_unknown_subcmd": "未知的子命令: {{subcmd}}",
+        "models_input_price_updated": "已更新模型 {{name}} 的输入价格为 {{price}} M/token",
+        "models_output_price_updated": "已更新模型 {{name}} 的输出价格为 {{price}} M/token",
+        "models_invalid_price": "无效的价格值: {{error}}",
+        "models_input_price_usage": "用法: /models /input_price <name> <value>",
+        "models_output_price_usage": "用法: /models /output_price <name> <value>",
+        "models_speed_updated": "已更新模型 {{name}} 的速度为 {{speed}} 秒/请求",
+        "models_invalid_speed": "无效的速度值: {{error}}",
+        "models_speed_usage": "用法: /models /speed <name> <value>",
         "models_title": "所有模型 (内置 + models.json)",
         "models_no_models": "未找到任何模型。",
         "models_lite_only": "/models 命令仅在 lite 模式下可用",
@@ -232,7 +249,8 @@ MESSAGES = {
         "remove_files_none": "没有文件被移除。",
         "files_removed": "移除的文件",
         "models_api_key_empty": "警告: {{name}} API key 为空。请设置一个有效的 API key。",
-        "confirm_execute": "
+        "confirm_execute": "是否执行此脚本?",
+        "official_doc": "官方文档: https://uelng8wukz.feishu.cn/wiki/NhPNwSRcWimKFIkQINIckloBncI",
     }
 }
 
```
{auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/__init__.py

```diff
@@ -254,6 +254,7 @@ class AutoCoderArgs(pydantic.BaseModel):
     planner_model: Optional[str] = ""
     voice2text_model: Optional[str] = ""
     text2voice_model: Optional[str] = ""
+    commit_model: Optional[str] = ""
 
     skip_build_index: Optional[bool] = False
     skip_filter_index: Optional[bool] = False
```
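This single pydantic field ties the commit-model pieces together: AutoCoderArgs carries the option, auto_coder.py registers a commit_model sub-client when it is set, and /commit in chat_auto_coder.py selects it with an or-fallback. Because the field defaults to the empty string, which is falsy, the fallback needs no explicit None check:

```python
# From the chat_auto_coder.py hunk above: "" (the default) falls through
# to the session's main model.
target_model = args.commit_model or args.model
```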
{auto_coder-0.1.251 → auto_coder-0.1.252}/src/autocoder/common/auto_coder_lang.py

```diff
@@ -3,6 +3,7 @@ from byzerllm.utils import format_str_jinja2
 
 MESSAGES = {
     "en": {
+        "generation_cancelled": "[Interrupted] Generation cancelled",
         "model_not_found": "Model {{model_name}} not found",
         "generating_shell_script": "Generating Shell Script",
         "new_session_started": "New session started. Previous chat history has been archived.",
@@ -51,7 +52,7 @@ MESSAGES = {
         "Paste the answer to the input box below, use '/break' to exit, '/clear' to clear the screen, '/eof' to submit."
         ),
         "code_generation_start": "Auto generate the code...",
-        "code_generation_complete": "Code generation completed in {{ duration }} seconds, input_tokens_count: {{ input_tokens }}, generated_tokens_count: {{ output_tokens }}",
+        "code_generation_complete": "Code generation completed in {{ duration }} seconds, input_tokens_count: {{ input_tokens }}, generated_tokens_count: {{ output_tokens }}, speed: {{ speed }} tokens/s",
         "code_merge_start": "Auto merge the code...",
         "code_execution_warning": "Content(send to model) is {{ content_length }} tokens (you may collect too much files), which is larger than the maximum input length {{ max_length }}",
         "quick_filter_start": "{{ model_name }} Starting filter context(quick_filter)...",
@@ -73,7 +74,7 @@ MESSAGES = {
         "ranking_process_failed": "Ranking process failed: {{ error }}",
         "ranking_failed": "Ranking failed in {{ elapsed }}s, using original order",
         "begin_index_source_code": "🚀 Begin to index source code in {{ source_dir }}",
-        "stream_out_stats": "Elapsed time {{ elapsed_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}",
+        "stream_out_stats": "Elapsed time {{ elapsed_time }} seconds, first token time: {{ first_token_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}, speed: {{ speed }} tokens/s",
         "quick_filter_stats": "快速过滤器完成,耗时 {{ elapsed_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}",
         "upsert_file": "✅ Updated file: {{ file_path }}",
         "unmerged_blocks_title": "Unmerged Blocks",
@@ -91,6 +92,7 @@ MESSAGES = {
         "estimated_input_tokens_in_generate": "Estimated input tokens in generate ({{ generate_mode }}): {{ estimated_input_tokens }}",
     },
     "zh": {
+        "generation_cancelled": "[已中断] 生成已取消",
         "model_not_found": "未找到模型: {{model_name}}",
         "generating_shell_script": "正在生成 Shell 脚本",
         "new_session_started": "新会话已开始。之前的聊天历史已存档。",
@@ -139,7 +141,7 @@ MESSAGES = {
         "将获得答案黏贴到下面的输入框,换行后,使用 '/break' 退出,'/clear' 清屏,'/eof' 提交。"
         ),
         "code_generation_start": "正在自动生成代码...",
-        "code_generation_complete": "代码生成完成,耗时 {{ duration }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}",
+        "code_generation_complete": "代码生成完成,耗时 {{ duration }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 速度: {{ speed }} tokens/秒",
         "code_merge_start": "正在自动合并代码...",
         "code_execution_warning": "发送给模型的内容长度为 {{ content_length }} tokens(您可能收集了太多文件),超过了最大输入长度 {{ max_length }}",
         "quick_filter_start": "{{ model_name }} 开始查找上下文(quick_filter)...",
@@ -171,7 +173,7 @@ MESSAGES = {
         "ranking_complete": "排序完成,耗时 {{ elapsed }} 秒,总投票数: {{ total_tasks }},最佳候选索引: {{ best_candidate }},得分: {{ scores }},输入token数: {{ input_tokens }},输出token数: {{ output_tokens }}",
         "ranking_process_failed": "排序过程失败: {{ error }}",
         "ranking_failed": "排序失败,耗时 {{ elapsed }} 秒,使用原始顺序",
-        "stream_out_stats": "
+        "stream_out_stats": "总耗时 {{ elapsed_time }} 秒,首token时间: {{ first_token_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 速度: {{ speed }} tokens/秒",
         "quick_filter_stats": "Quick filter completed in {{ elapsed_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}",
         "quick_filter_title": "{{ model_name }} 正在分析如何筛选上下文...",
         "quick_filter_failed": "❌ 快速过滤器失败: {{ error }}. ",
```