auto-coder 0.1.253.tar.gz → 0.1.256.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of auto-coder might be problematic.
- {auto_coder-0.1.253 → auto_coder-0.1.256}/PKG-INFO +2 -2
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/auto_coder.egg-info/PKG-INFO +2 -2
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/auto_coder.egg-info/SOURCES.txt +2 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/auto_coder.egg-info/requires.txt +1 -1
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/auto_coder.py +21 -5
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/chat_auto_coder.py +54 -9
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/chat_auto_coder_lang.py +2 -2
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/auto_coder_lang.py +17 -8
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/code_auto_generate.py +23 -3
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/code_auto_generate_diff.py +22 -3
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/code_auto_generate_editblock.py +24 -2
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/code_auto_generate_strict_diff.py +23 -4
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/code_modification_ranker.py +39 -3
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/command_completer.py +5 -1
- auto_coder-0.1.256/src/autocoder/common/model_speed_test.py +392 -0
- auto_coder-0.1.256/src/autocoder/data/byzerllm.md +1549 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/dispacher/actions/action.py +22 -12
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/dispacher/actions/plugins/action_regex_project.py +4 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/index/entry.py +9 -2
- auto_coder-0.1.256/src/autocoder/index/filter/quick_filter.py +375 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/models.py +31 -10
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/pyproject/__init__.py +1 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/doc_filter.py +2 -1
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/long_context_rag.py +1 -14
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/suffixproject/__init__.py +1 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/tsproject/__init__.py +1 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/utils/llms.py +27 -0
- auto_coder-0.1.256/src/autocoder/version.py +1 -0
- auto_coder-0.1.253/src/autocoder/index/filter/quick_filter.py +0 -230
- auto_coder-0.1.253/src/autocoder/version.py +0 -1
- {auto_coder-0.1.253 → auto_coder-0.1.256}/LICENSE +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/README.md +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/setup.cfg +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/setup.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/auto_coder.egg-info/dependency_links.txt +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/auto_coder.egg-info/entry_points.txt +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/auto_coder.egg-info/top_level.txt +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/__init__.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/agent/__init__.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/agent/auto_demand_organizer.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/agent/auto_filegroup.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/agent/auto_guess_query.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/agent/auto_review_commit.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/agent/auto_tool.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/agent/coder.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/agent/designer.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/agent/planner.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/agent/project_reader.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/auto_coder_rag.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/auto_coder_rag_client_mcp.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/auto_coder_rag_mcp.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/auto_coder_server.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/benchmark.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/chat/__init__.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/command_args.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/JupyterClient.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/ShellClient.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/__init__.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/anything2images.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/anything2img.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/audio.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/buildin_tokenizer.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/chunk_validation.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/cleaner.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/code_auto_execute.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/code_auto_merge.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/code_auto_merge_diff.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/code_auto_merge_editblock.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/code_auto_merge_strict_diff.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/command_generator.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/command_templates.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/const.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/files.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/git_utils.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/global_cancel.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/image_to_page.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/interpreter.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/llm_rerank.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/mcp_hub.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/mcp_server.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/mcp_servers/__init__.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/mcp_servers/mcp_server_perplexity.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/mcp_tools.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/memory_manager.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/printer.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/recall_validation.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/screenshots.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/search.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/search_replace.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/shells.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/sys_prompt.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/text.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/types.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/utils_code_auto_generate.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/data/tokenizer.json +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/db/__init__.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/db/store.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/dispacher/__init__.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/dispacher/actions/__init__.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/dispacher/actions/copilot.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/dispacher/actions/plugins/__init__.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/dispacher/actions/plugins/action_translate.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/index/__init__.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/index/filter/__init__.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/index/filter/normal_filter.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/index/for_command.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/index/index.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/index/symbols_utils.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/index/types.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/lang.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/__init__.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/api_server.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/cache/__init__.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/cache/base_cache.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/cache/byzer_storage_cache.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/cache/file_monitor_cache.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/cache/simple_cache.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/document_retriever.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/llm_wrapper.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/loaders/__init__.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/loaders/docx_loader.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/loaders/excel_loader.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/loaders/pdf_loader.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/loaders/ppt_loader.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/rag_config.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/rag_entry.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/raw_rag.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/relevant_utils.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/simple_directory_reader.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/simple_rag.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/stream_event/__init__.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/stream_event/event_writer.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/stream_event/types.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/token_checker.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/token_counter.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/token_limiter.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/types.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/utils.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/rag/variable_holder.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/regexproject/__init__.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/utils/__init__.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/utils/_markitdown.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/utils/auto_coder_utils/__init__.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/utils/auto_coder_utils/chat_stream_out.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/utils/chat_auto_coder_utils/__init__.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/utils/conversation_store.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/utils/llm_client_interceptors.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/utils/log_capture.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/utils/multi_turn.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/utils/operate_config_api.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/utils/print_table.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/utils/queue_communicate.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/utils/request_event_queue.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/utils/request_queue.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/utils/rest.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/utils/tests.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/utils/thread_utils.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/utils/types.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/tests/test_action_regex_project.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/tests/test_chat_auto_coder.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/tests/test_code_auto_merge_editblock.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/tests/test_command_completer.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/tests/test_planner.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/tests/test_queue_communicate.py +0 -0
- {auto_coder-0.1.253 → auto_coder-0.1.256}/tests/test_symbols_utils.py +0 -0
{auto_coder-0.1.253 → auto_coder-0.1.256}/PKG-INFO RENAMED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.253
+Version: 0.1.256
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -26,7 +26,7 @@ Requires-Dist: tabulate
 Requires-Dist: jupyter_client
 Requires-Dist: prompt-toolkit
 Requires-Dist: tokenizers
-Requires-Dist: byzerllm[saas]>=0.1.
+Requires-Dist: byzerllm[saas]>=0.1.164
 Requires-Dist: patch
 Requires-Dist: diff_match_patch
 Requires-Dist: GitPython
```
{auto_coder-0.1.253 → auto_coder-0.1.256}/src/auto_coder.egg-info/PKG-INFO RENAMED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.253
+Version: 0.1.256
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -26,7 +26,7 @@ Requires-Dist: tabulate
 Requires-Dist: jupyter_client
 Requires-Dist: prompt-toolkit
 Requires-Dist: tokenizers
-Requires-Dist: byzerllm[saas]>=0.1.
+Requires-Dist: byzerllm[saas]>=0.1.164
 Requires-Dist: patch
 Requires-Dist: diff_match_patch
 Requires-Dist: GitPython
```
{auto_coder-0.1.253 → auto_coder-0.1.256}/src/auto_coder.egg-info/SOURCES.txt RENAMED

```diff
@@ -65,6 +65,7 @@ src/autocoder/common/mcp_hub.py
 src/autocoder/common/mcp_server.py
 src/autocoder/common/mcp_tools.py
 src/autocoder/common/memory_manager.py
+src/autocoder/common/model_speed_test.py
 src/autocoder/common/printer.py
 src/autocoder/common/recall_validation.py
 src/autocoder/common/screenshots.py
@@ -77,6 +78,7 @@ src/autocoder/common/types.py
 src/autocoder/common/utils_code_auto_generate.py
 src/autocoder/common/mcp_servers/__init__.py
 src/autocoder/common/mcp_servers/mcp_server_perplexity.py
+src/autocoder/data/byzerllm.md
 src/autocoder/data/tokenizer.json
 src/autocoder/db/__init__.py
 src/autocoder/db/store.py
```
{auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/auto_coder.py RENAMED

```diff
@@ -256,11 +256,13 @@ def main(input_args: Optional[List[str]] = None):
     libs_dir = os.path.join(auto_coder_dir, "storage", "libs")
     code_search_path = None
     if os.path.exists(libs_dir):
-
-
-
-
-
+        latest_retrieval_lib_dir = get_latest_byzer_retrieval_lib(libs_dir)
+        if latest_retrieval_lib_dir :
+            retrieval_libs_dir = os.path.join(
+                libs_dir, latest_retrieval_lib_dir
+            )
+            if os.path.exists(retrieval_libs_dir):
+                code_search_path = [retrieval_libs_dir]
 
     try:
         init_options = {}
```
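`get_latest_byzer_retrieval_lib` itself is not part of this diff; judging by its name and how the new branch uses its return value, it appears to select the newest versioned byzer-retrieval directory under `storage/libs`. A purely hypothetical sketch of such a selector, to illustrate the shape of what the new code consumes (the function name, directory naming pattern, and version parsing below are assumptions, not auto-coder's actual implementation):

```python
import os
from typing import Optional

# Hypothetical stand-in for get_latest_byzer_retrieval_lib; the real helper
# is not shown in this diff and may behave differently.
def pick_latest_lib_dir(libs_dir: str) -> Optional[str]:
    def version_key(name: str):
        # Treat the trailing dotted segment as a version,
        # e.g. "byzer-retrieval-lib-0.1.14" -> (0, 1, 14).
        parts = name.rsplit("-", 1)[-1].split(".")
        return tuple(int(p) if p.isdigit() else -1 for p in parts)

    candidates = [d for d in os.listdir(libs_dir)
                  if os.path.isdir(os.path.join(libs_dir, d))]
    return max(candidates, key=version_key) if candidates else None
```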
```diff
@@ -1387,11 +1389,25 @@ def main(input_args: Optional[List[str]] = None):
     elapsed_time = time.time() - start_time
     printer = Printer()
     speed = last_meta.generated_tokens_count / elapsed_time
+
+    # Get model info for pricing
+    from autocoder.utils import llms as llm_utils
+    model_info = llm_utils.get_model_info(model_name, args.product_mode) or {}
+    input_price = model_info.get("input_price", 0.0) if model_info else 0.0
+    output_price = model_info.get("output_price", 0.0) if model_info else 0.0
+
+    # Calculate costs
+    input_cost = (last_meta.input_tokens_count * input_price) / 1000000  # Convert to millions
+    output_cost = (last_meta.generated_tokens_count * output_price) / 1000000  # Convert to millions
+
     printer.print_in_terminal("stream_out_stats",
+                              model_name=model_name,
                               elapsed_time=elapsed_time,
                               first_token_time=last_meta.first_token_time,
                               input_tokens=last_meta.input_tokens_count,
                               output_tokens=last_meta.generated_tokens_count,
+                              input_cost=round(input_cost, 4),
+                              output_cost=round(output_cost, 4),
                               speed=round(speed, 2))
 
     chat_history["ask_conversation"].append(
```
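All of the cost fields introduced in this release use the same pricing rule: model prices are stored per million tokens (the `/models` table below labels them "Input Price (M)" and "Output Price (M)"), so a request's cost is token count times price divided by 1,000,000, rounded to four decimals. A minimal sketch of that arithmetic; the rates below are made-up placeholders, not auto-coder defaults:

```python
# Pricing arithmetic behind the new input_cost/output_cost fields.
# Prices are expressed per million tokens.
def request_cost(input_tokens: int, output_tokens: int,
                 input_price: float, output_price: float):
    input_cost = (input_tokens * input_price) / 1000000
    output_cost = (output_tokens * output_price) / 1000000
    return round(input_cost, 4), round(output_cost, 4)

# 12,000 prompt tokens at 2.0/M plus 3,500 generated tokens at 6.0/M:
print(request_cost(12000, 3500, 2.0, 6.0))  # -> (0.024, 0.021)
```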
{auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/chat_auto_coder.py RENAMED

```diff
@@ -2197,24 +2197,49 @@ def manage_models(params, query: str):
         subcmd = "/remove"
         query = query.replace("/remove", "", 1).strip()
 
+    if "/speed-test" in query:
+        subcmd = "/speed-test"
+        query = query.replace("/speed-test", "", 1).strip()
+
+    if "/speed_test" in query:
+        subcmd = "/speed-test"
+        query = query.replace("/speed_test", "", 1).strip()
+
+    if "input_price" in query:
+        subcmd = "/input_price"
+        query = query.replace("/input_price", "", 1).strip()
+
+    if "output_price" in query:
+        subcmd = "/output_price"
+        query = query.replace("/output_price", "", 1).strip()
+
+    if "/speed" in query:
+        subcmd = "/speed"
+        query = query.replace("/speed", "", 1).strip()
+
+
     if not subcmd:
-        printer.print_in_terminal("models_usage")
-        return
+        printer.print_in_terminal("models_usage")
 
     if subcmd == "/list":
         if models_data:
+            # Sort models by speed (average_speed)
+            sorted_models = sorted(models_data, key=lambda x: float(x.get('average_speed', 0)))
+            sorted_models.reverse()
+
             table = Table(
                 title=printer.get_message_from_key("models_title"),
                 expand=True,
                 show_lines=True
             )
-            table.add_column("Name", style="cyan", width=
-            table.add_column("Model Name", style="magenta", width=30, overflow="fold")
-            table.add_column("Base URL", style="white", width=
-            table.add_column("Input Price (M)", style="magenta", width=15)
-            table.add_column("Output Price (M)", style="magenta", width=15)
-            table.add_column("Speed (s/req)", style="blue", width=15)
-            for m in
+            table.add_column("Name", style="cyan", width=30, overflow="fold", no_wrap=False)
+            table.add_column("Model Name", style="magenta", width=30, overflow="fold", no_wrap=False)
+            table.add_column("Base URL", style="white", width=40, overflow="fold", no_wrap=False)
+            table.add_column("Input Price (M)", style="magenta", width=15, overflow="fold", no_wrap=False)
+            table.add_column("Output Price (M)", style="magenta", width=15, overflow="fold", no_wrap=False)
+            table.add_column("Speed (s/req)", style="blue", width=15, overflow="fold", no_wrap=False)
+            for m in sorted_models:
                 # Check if api_key_path exists and file exists
                 is_api_key_set = "api_key" in m
                 name = m.get("name", "")
```
```diff
@@ -2281,6 +2306,26 @@ def manage_models(params, query: str):
         else:
             printer.print_in_terminal("models_speed_usage", style="red")
 
+    elif subcmd == "/speed-test":
+        from autocoder.common.model_speed_test import render_speed_test_in_terminal
+        test_rounds = 1  # default number of test rounds
+
+        enable_long_context = False
+        if "/long_context" in query:
+            enable_long_context = True
+            query = query.replace("/long_context", "", 1).strip()
+
+        if "/long-context" in query:
+            enable_long_context = True
+            query = query.replace("/long-context", "", 1).strip()
+
+        # Parse the optional test-rounds argument
+        args = query.strip().split()
+        if args and args[0].isdigit():
+            test_rounds = int(args[0])
+
+        render_speed_test_in_terminal(params.product_mode, test_rounds,enable_long_context=enable_long_context)
+
     elif subcmd == "/add":
         # Support both simplified and legacy formats
         args = query.strip().split(" ")
```
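Taken together with the parsing above, the speed-test subcommand accepts an optional leading integer for the number of rounds and a long-context flag in either spelling: `/models /speed-test 3 /long_context` would run three long-context rounds per active model, while a bare `/models /speed-test` runs a single short round.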
{auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/chat_auto_coder_lang.py RENAMED

```diff
@@ -85,7 +85,7 @@ MESSAGES = {
     "design_desc": "Generate SVG image based on the provided description",
     "commit_desc": "Auto generate yaml file and commit changes based on user's manual changes",
     "models_desc": "Manage model configurations, only available in lite mode",
-    "models_usage": "Usage: /models /list
+    "models_usage": "Usage: /models <command>\nAvailable subcommands:\n /list - List all models\n /add <name> <api_key> - Add a built-in model\n /add_model - Add a custom model\n /remove <name> - Remove a model\n /input_price <name> <value> - Set model input price\n /output_price <name> <value> - Set model output price\n /speed <name> <value> - Set model speed\n /speed-test - Test models speed\n /speed-test-long - Test models speed with long context",
     "models_added": "Added/Updated model '{{name}}' successfully.",
     "models_add_failed": "Failed to add model '{{name}}'. Model not found in defaults.",
     "models_add_usage": "Usage: /models /add <name> <api_key> or\n/models /add <name> <model_type> <model_name> <base_url> <api_key_path> [description]",
```
```diff
@@ -213,7 +213,7 @@ MESSAGES = {
     "conf_value": "值",
     "conf_title": "配置设置",
     "conf_subtitle": "使用 /conf <key>:<value> 修改这些设置",
-    "models_usage": "用法: /models /list
+    "models_usage": "用法: /models <命令>\n可用的子命令:\n /list - 列出所有模型\n /add <名称> <API密钥> - 添加内置模型\n /add_model - 添加自定义模型\n /remove <名称> - 移除模型\n /input_price <名称> <价格> - 设置模型输入价格\n /output_price <名称> <价格> - 设置模型输出价格\n /speed <名称> <速度> - 设置模型速度\n /speed-test - 测试模型速度\n /speed-test-long - 使用长文本上下文测试模型速度",
     "models_added": "成功添加/更新模型 '{{name}}'。",
     "models_add_failed": "添加模型 '{{name}}' 失败。在默认模型中未找到该模型。",
     "models_add_usage": "用法: /models /add <name> <api_key> 或\n/models /add <name> <model_type> <model_name> <base_url> <api_key_path> [description]",
```
{auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/auto_coder_lang.py RENAMED

```diff
@@ -3,6 +3,11 @@ from byzerllm.utils import format_str_jinja2
 
 MESSAGES = {
     "en": {
+        "models_no_active": "No active models found",
+        "models_speed_test_results": "Model Speed Test Results",
+        "models_testing": "Testing model: {{name}}...",
+        "models_testing_start": "Starting speed test for all active models...",
+        "models_testing_progress": "Testing progress: {{ completed }}/{{ total }} models",
         "generation_cancelled": "[Interrupted] Generation cancelled",
         "model_not_found": "Model {{model_name}} not found",
         "generating_shell_script": "Generating Shell Script",
@@ -52,7 +57,7 @@ MESSAGES = {
             "Paste the answer to the input box below, use '/break' to exit, '/clear' to clear the screen, '/eof' to submit."
         ),
         "code_generation_start": "Auto generate the code...",
-        "code_generation_complete": "Code generation completed in {{ duration }} seconds, input_tokens_count: {{ input_tokens }}, generated_tokens_count: {{ output_tokens }}, speed: {{ speed }} tokens/s",
+        "code_generation_complete": "{{ model_names}} Code generation completed in {{ duration }} seconds, input_tokens_count: {{ input_tokens }}, generated_tokens_count: {{ output_tokens }}, input_cost: {{ input_cost }}, output_cost: {{ output_cost }}, speed: {{ speed }} tokens/s",
         "code_merge_start": "Auto merge the code...",
         "code_execution_warning": "Content(send to model) is {{ content_length }} tokens (you may collect too much files), which is larger than the maximum input length {{ max_length }}",
         "quick_filter_start": "{{ model_name }} Starting filter context(quick_filter)...",
@@ -70,12 +75,12 @@ MESSAGES = {
         "ranking_start": "Start ranking {{ count }} candidates using model {{ model_name }}",
         "ranking_failed_request": "Ranking request failed: {{ error }}",
         "ranking_all_failed": "All ranking requests failed",
-        "ranking_complete": "Ranking completed in {{ elapsed }}s, total voters: {{ total_tasks }}, best candidate index: {{ best_candidate }}, scores: {{ scores }}, input_tokens: {{ input_tokens }}, output_tokens: {{ output_tokens }}",
+        "ranking_complete": "{{ model_names }} Ranking completed in {{ elapsed }}s, total voters: {{ total_tasks }}, best candidate index: {{ best_candidate }}, scores: {{ scores }}, input_tokens: {{ input_tokens }}, output_tokens: {{ output_tokens }}, input_cost: {{ input_cost }}, output_cost: {{ output_cost }}",
         "ranking_process_failed": "Ranking process failed: {{ error }}",
         "ranking_failed": "Ranking failed in {{ elapsed }}s, using original order",
         "begin_index_source_code": "🚀 Begin to index source code in {{ source_dir }}",
-        "stream_out_stats": "
-        "quick_filter_stats": "快速过滤器完成,耗时 {{ elapsed_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}",
+        "stream_out_stats": "Model: {{ model_name }}, Total time: {{ elapsed_time }} seconds, First token time: {{ first_token_time }} seconds, Speed: {{ speed }} tokens/s, Input tokens: {{ input_tokens }}, Output tokens: {{ output_tokens }}, Input cost: {{ input_cost }}, Output cost: {{ output_cost }}",
+        "quick_filter_stats": "{{ model_names }} 快速过滤器完成,耗时 {{ elapsed_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}",
         "upsert_file": "✅ Updated file: {{ file_path }}",
         "unmerged_blocks_title": "Unmerged Blocks",
         "quick_filter_title": "{{ model_name }} is analyzing how to filter context...",
@@ -92,6 +97,10 @@ MESSAGES = {
         "estimated_input_tokens_in_generate": "Estimated input tokens in generate ({{ generate_mode }}): {{ estimated_input_tokens }}",
     },
     "zh": {
+        "models_no_active": "未找到激活的模型",
+        "models_speed_test_results": "模型速度测试结果",
+        "models_testing": "正在测试模型: {{name}}...",
+        "models_testing_start": "开始对所有激活的模型进行速度测试...",
         "generation_cancelled": "[已中断] 生成已取消",
         "model_not_found": "未找到模型: {{model_name}}",
         "generating_shell_script": "正在生成 Shell 脚本",
@@ -141,7 +150,7 @@ MESSAGES = {
             "将获得答案黏贴到下面的输入框,换行后,使用 '/break' 退出,'/clear' 清屏,'/eof' 提交。"
         ),
         "code_generation_start": "正在自动生成代码...",
-        "code_generation_complete": "代码生成完成,耗时 {{ duration }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 速度: {{ speed }} tokens/秒",
+        "code_generation_complete": "{{ model_names}} 代码生成完成,耗时 {{ duration }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}, 速度: {{ speed }} tokens/秒",
         "code_merge_start": "正在自动合并代码...",
         "code_execution_warning": "发送给模型的内容长度为 {{ content_length }} tokens(您可能收集了太多文件),超过了最大输入长度 {{ max_length }}",
         "quick_filter_start": "{{ model_name }} 开始查找上下文(quick_filter)...",
@@ -170,11 +179,11 @@ MESSAGES = {
         "ranking_start": "开始对 {{ count }} 个候选项进行排序,使用模型 {{ model_name }} 打分",
         "ranking_failed_request": "排序请求失败: {{ error }}",
         "ranking_all_failed": "所有排序请求都失败",
-        "ranking_complete": "排序完成,耗时 {{ elapsed }} 秒,总投票数: {{ total_tasks }},最佳候选索引: {{ best_candidate }},得分: {{ scores }},输入token数: {{ input_tokens }},输出token数: {{ output_tokens }}",
+        "ranking_complete": "{{ model_names }} 排序完成,耗时 {{ elapsed }} 秒,总投票数: {{ total_tasks }},最佳候选索引: {{ best_candidate }},得分: {{ scores }},输入token数: {{ input_tokens }},输出token数: {{ output_tokens }} 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}",
         "ranking_process_failed": "排序过程失败: {{ error }}",
         "ranking_failed": "排序失败,耗时 {{ elapsed }} 秒,使用原始顺序",
-        "stream_out_stats": "
-        "quick_filter_stats": "Quick filter completed in {{ elapsed_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}",
+        "stream_out_stats": "模型: {{ model_name }},总耗时 {{ elapsed_time }} 秒,首token时间: {{ first_token_time }} 秒, 速度: {{ speed }} tokens/秒, 输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}",
+        "quick_filter_stats": "{{ model_names }} Quick filter completed in {{ elapsed_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}, input cost: {{ input_cost }}, output cost: {{ output_cost }}",
         "quick_filter_title": "{{ model_name }} 正在分析如何筛选上下文...",
         "quick_filter_failed": "❌ 快速过滤器失败: {{ error }}. ",
         "estimated_chat_input_tokens": "对话输入token预估为: {{ estimated_input_tokens }}",
```
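Both catalogs are plain dicts of Jinja2 template strings, and the file already imports `format_str_jinja2` from byzerllm, so rendering is presumably a key lookup plus a template render with the printer's keyword arguments passed through. A rough sketch of that step; the lookup helper and the example values below are illustrative, not the project's actual `Printer` (which lives in `src/autocoder/common/printer.py` and is not shown in this diff):

```python
from byzerllm.utils import format_str_jinja2  # same import this module uses

# Illustrative lookup-and-render helper, assuming MESSAGES[lang][key] holds
# a Jinja2 template string as in the hunks above.
def render_message(messages: dict, lang: str, key: str, **kwargs) -> str:
    template = messages[lang][key]
    return format_str_jinja2(template, **kwargs)

# Example (made-up values) filling the new cost fields of stream_out_stats:
# render_message(MESSAGES, "en", "stream_out_stats",
#                model_name="deepseek-chat", elapsed_time=3.2,
#                first_token_time=0.4, speed=110.5,
#                input_tokens=1200, output_tokens=350,
#                input_cost=0.0024, output_cost=0.0021)
```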
{auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/code_auto_generate.py RENAMED

```diff
@@ -10,6 +10,7 @@ from autocoder.common.utils_code_auto_generate import chat_with_continue
 import json
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
+from autocoder.utils import llms as llm_utils
 
 
 class CodeAutoGenerate:
@@ -193,6 +194,9 @@ class CodeAutoGenerate:
         results = []
         input_tokens_count = 0
         generated_tokens_count = 0
+        input_tokens_cost = 0
+        generated_tokens_cost = 0
+        model_names = []
 
         printer = Printer()
         estimated_input_tokens = count_tokens(json.dumps(conversations, ensure_ascii=False))
@@ -206,13 +210,27 @@ class CodeAutoGenerate:
             futures = []
             for llm in self.llms:
                 for _ in range(self.generate_times_same_model):
-
-
+
+                    model_names_list = llm_utils.get_llm_names(llm)
+                    model_name = None
+                    if model_names_list:
+                        model_name = model_names_list[0]
+
+                    for _ in range(self.generate_times_same_model):
+                        model_names.append(model_name)
+                        futures.append(executor.submit(
+                            chat_with_continue, llm=llm, conversations=conversations, llm_config=llm_config))
+
             temp_results = [future.result() for future in futures]
             for result in temp_results:
                 results.append(result.content)
                 input_tokens_count += result.input_tokens_count
                 generated_tokens_count += result.generated_tokens_count
+                model_info = llm_utils.get_model_info(model_name, self.args.product_mode)
+                input_cost = model_info.get("input_price", 0) if model_info else 0
+                output_cost = model_info.get("output_price", 0) if model_info else 0
+                input_tokens_cost += input_cost * result.input_tokens_count / 1000000
+                generated_tokens_cost += output_cost * result.generated_tokens_count / 1000000
 
             for result in results:
                 conversations_list.append(
@@ -227,7 +245,9 @@ class CodeAutoGenerate:
 
         statistics = {
             "input_tokens_count": input_tokens_count,
-            "generated_tokens_count": generated_tokens_count
+            "generated_tokens_count": generated_tokens_count,
+            "input_tokens_cost": input_tokens_cost,
+            "generated_tokens_cost": generated_tokens_cost
         }
 
         if self.args.request_id and not self.args.skip_events:
```
{auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/code_auto_generate_diff.py RENAMED

```diff
@@ -9,6 +9,7 @@ import json
 from autocoder.common.utils_code_auto_generate import chat_with_continue
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
+from autocoder.utils import llms as llm_utils
 
 
 class CodeAutoGenerateDiff:
@@ -341,6 +342,9 @@ class CodeAutoGenerateDiff:
         results = []
         input_tokens_count = 0
         generated_tokens_count = 0
+        input_tokens_cost = 0
+        generated_tokens_cost = 0
+        model_names = []
 
         printer = Printer()
         estimated_input_tokens = count_tokens(json.dumps(conversations, ensure_ascii=False))
@@ -354,13 +358,26 @@ class CodeAutoGenerateDiff:
             futures = []
             for llm in self.llms:
                 for _ in range(self.generate_times_same_model):
-
-
+
+                    model_names_list = llm_utils.get_llm_names(llm)
+                    model_name = None
+                    if model_names_list:
+                        model_name = model_names_list[0]
+
+                    for _ in range(self.generate_times_same_model):
+                        model_names.append(model_name)
+                        futures.append(executor.submit(
+                            chat_with_continue, llm=llm, conversations=conversations, llm_config=llm_config))
 
             temp_results = [future.result() for future in futures]
             for result in temp_results:
                 results.append(result.content)
                 input_tokens_count += result.input_tokens_count
                 generated_tokens_count += result.generated_tokens_count
+                model_info = llm_utils.get_model_info(model_name, self.args.product_mode)
+                input_cost = model_info.get("input_price",0) if model_info else 0
+                output_cost = model_info.get("output_price",0) if model_info else 0
+                input_tokens_cost += input_cost * result.input_tokens_count / 1000000
+                generated_tokens_cost += output_cost * result.generated_tokens_count / 1000000
 
             for result in results:
                 conversations_list.append(
@@ -376,7 +393,9 @@ class CodeAutoGenerateDiff:
 
         statistics = {
             "input_tokens_count": input_tokens_count,
-            "generated_tokens_count": generated_tokens_count
+            "generated_tokens_count": generated_tokens_count,
+            "input_tokens_cost": input_tokens_cost,
+            "generated_tokens_cost": generated_tokens_cost
         }
 
         if self.args.request_id and not self.args.skip_events:
```
{auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/code_auto_generate_editblock.py RENAMED

```diff
@@ -13,6 +13,7 @@ from concurrent.futures import ThreadPoolExecutor
 from autocoder.common.utils_code_auto_generate import chat_with_continue
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
+from autocoder.utils import llms as llm_utils
 
 
 class CodeAutoGenerateEditBlock:
@@ -424,6 +425,11 @@ class CodeAutoGenerateEditBlock:
         input_tokens_count = 0
         generated_tokens_count = 0
 
+        input_tokens_cost = 0
+        generated_tokens_cost = 0
+
+        model_names = []
+
         printer = Printer()
         estimated_input_tokens = count_tokens(
             json.dumps(conversations, ensure_ascii=False))
@@ -437,14 +443,28 @@ class CodeAutoGenerateEditBlock:
         with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
             futures = []
             for llm in self.llms:
+
+                model_names_list = llm_utils.get_llm_names(llm)
+                model_name = None
+                if model_names_list:
+                    model_name = model_names_list[0]
+
                 for _ in range(self.generate_times_same_model):
+                    model_names.append(model_name)
                     futures.append(executor.submit(
                         chat_with_continue, llm=llm, conversations=conversations, llm_config=llm_config))
+
             temp_results = [future.result() for future in futures]
-
+
+            for result,model_name in zip(temp_results,model_names):
                 results.append(result.content)
                 input_tokens_count += result.input_tokens_count
                 generated_tokens_count += result.generated_tokens_count
+                model_info = llm_utils.get_model_info(model_name,self.args.product_mode)
+                input_cost = model_info.get("input_price", 0) if model_info else 0
+                output_cost = model_info.get("output_price", 0) if model_info else 0
+                input_tokens_cost += input_cost * result.input_tokens_count / 1000000
+                generated_tokens_cost += output_cost * result.generated_tokens_count / 1000000
 
             for result in results:
                 conversations_list.append(
@@ -461,7 +481,9 @@ class CodeAutoGenerateEditBlock:
 
         statistics = {
             "input_tokens_count": input_tokens_count,
-            "generated_tokens_count": generated_tokens_count
+            "generated_tokens_count": generated_tokens_count,
+            "input_tokens_cost": input_tokens_cost,
+            "generated_tokens_cost": generated_tokens_cost
         }
 
         if self.args.request_id and not self.args.skip_events:
```
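All four generator variants share one fan-out shape: one executor task per (model, repetition), with a `model_names` list built in the same order as `futures` so each result can later be priced against the model that produced it; the editblock file makes the pairing explicit with `zip(temp_results,model_names)`. A self-contained sketch of that pattern with stub tasks (nothing below is an auto-coder API):

```python
from concurrent.futures import ThreadPoolExecutor

def fake_chat(model: str, prompt: str) -> str:
    # Stub standing in for chat_with_continue.
    return f"{model} answered: {prompt!r}"

models = ["model-a", "model-b"]
repeats = 2

futures, model_names = [], []
with ThreadPoolExecutor(max_workers=len(models) * repeats) as executor:
    for model in models:
        for _ in range(repeats):
            # Append to both lists in lockstep: index i of model_names
            # names the model behind futures[i].
            model_names.append(model)
            futures.append(executor.submit(fake_chat, model, "hello"))

    # zip is only safe because both lists were built in the same order.
    for name, future in zip(model_names, futures):
        print(name, "->", future.result())
```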
{auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/code_auto_generate_strict_diff.py RENAMED

```diff
@@ -9,6 +9,7 @@ import json
 from autocoder.common.utils_code_auto_generate import chat_with_continue
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
+from autocoder.utils import llms as llm_utils
 
 class CodeAutoGenerateStrictDiff:
     def __init__(
@@ -311,6 +312,9 @@ class CodeAutoGenerateStrictDiff:
         results = []
         input_tokens_count = 0
         generated_tokens_count = 0
+        input_tokens_cost = 0
+        generated_tokens_cost = 0
+        model_names = []
 
         printer = Printer()
         estimated_input_tokens = count_tokens(json.dumps(conversations, ensure_ascii=False))
@@ -324,14 +328,27 @@ class CodeAutoGenerateStrictDiff:
             futures = []
             for llm in self.llms:
                 for _ in range(self.generate_times_same_model):
-
-
+
+                    model_names_list = llm_utils.get_llm_names(llm)
+                    model_name = None
+                    if model_names_list:
+                        model_name = model_names_list[0]
+
+                    for _ in range(self.generate_times_same_model):
+                        model_names.append(model_name)
+                        futures.append(executor.submit(
+                            chat_with_continue, llm=llm, conversations=conversations, llm_config=llm_config))
+
             temp_results = [future.result() for future in futures]
             for result in temp_results:
                 results.append(result.content)
                 input_tokens_count += result.input_tokens_count
                 generated_tokens_count += result.generated_tokens_count
-
+                model_info = llm_utils.get_model_info(model_name, self.args.product_mode)
+                input_cost = model_info.get("input_price", 0) if model_info else 0
+                output_cost = model_info.get("output_price", 0) if model_info else 0
+                input_tokens_cost += input_cost * result.input_tokens_count / 1000000
+                generated_tokens_cost += output_cost * result.generated_tokens_count / 1000000
             for result in results:
                 conversations_list.append(
                     conversations + [{"role": "assistant", "content": result}])
@@ -345,7 +362,9 @@ class CodeAutoGenerateStrictDiff:
 
         statistics = {
             "input_tokens_count": input_tokens_count,
-            "generated_tokens_count": generated_tokens_count
+            "generated_tokens_count": generated_tokens_count,
+            "input_tokens_cost": input_tokens_cost,
+            "generated_tokens_cost": generated_tokens_cost
         }
 
         if self.args.request_id and not self.args.skip_events:
```
{auto_coder-0.1.253 → auto_coder-0.1.256}/src/autocoder/common/code_modification_ranker.py RENAMED

```diff
@@ -8,8 +8,8 @@ from concurrent.futures import ThreadPoolExecutor, as_completed
 import traceback
 from autocoder.common.utils_code_auto_generate import chat_with_continue
 from byzerllm.utils.str2model import to_model
+from autocoder.utils.llms import get_llm_names, get_model_info
 
-from autocoder.utils.llms import get_llm_names
 class RankResult(BaseModel):
     rank_result: List[int]
 
@@ -97,13 +97,42 @@ class CodeModificationRanker:
 
             # Collect all results
             results = []
-
+            # Build the list of model names
+            model_names = []
+            for llm in self.llms:
+                # Names registered for the current llm instance
+                names = get_llm_names(llm)
+                model_names.extend(names)
+
+            # Look up pricing info for each model
+            model_info_map = {}
+            for name in model_names:
+                # The second argument is the product mode, taken from args
+                info = get_model_info(name, self.args.product_mode)
+                if info:
+                    model_info_map[name] = {
+                        "input_cost": info.get("input_price", 0.0),  # cost per million tokens
+                        "output_cost": info.get("output_price", 0.0)  # cost per million tokens
+                    }
+
+            # Running totals for the overall cost
+            total_input_cost = 0.0
+            total_output_cost = 0.0
+
+            for future, model_name in zip(futures, model_names):
                 try:
                     result = future.result()
                     input_tokens_count += result.input_tokens_count
                     generated_tokens_count += result.generated_tokens_count
                     v = to_model(result.content,RankResult)
                     results.append(v.rank_result)
+
+                    # Accumulate the cost of this request
+                    info = model_info_map.get(model_name, {})
+                    # Formula: token count * unit price / 1000000
+                    total_input_cost += (result.input_tokens_count * info.get("input_cost", 0.0)) / 1000000
+                    total_output_cost += (result.generated_tokens_count * info.get("output_cost", 0.0)) / 1000000
+
                 except Exception as e:
                     self.printer.print_in_terminal(
                         "ranking_failed_request", style="yellow", error=str(e))
@@ -113,6 +142,10 @@ class CodeModificationRanker:
                 raise Exception(
                     self.printer.get_message_from_key("ranking_all_failed"))
 
+            # Round to 4 decimal places
+            total_input_cost = round(total_input_cost, 4)
+            total_output_cost = round(total_output_cost, 4)
+
             # Calculate scores for each candidate
             candidate_scores = defaultdict(float)
             for rank_result in results:
@@ -137,7 +170,10 @@ class CodeModificationRanker:
                 best_candidate=sorted_candidates[0],
                 scores=score_details,
                 input_tokens=input_tokens_count,
-                output_tokens=generated_tokens_count
+                output_tokens=generated_tokens_count,
+                input_cost=total_input_cost,
+                output_cost=total_output_cost,
+                model_names=", ".join(model_names)
             )
 
             rerank_contents = [generate_result.contents[i]
```