auto-coder 0.1.255.tar.gz → 0.1.256.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of auto-coder might be problematic.
- {auto_coder-0.1.255 → auto_coder-0.1.256}/PKG-INFO +2 -2
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/auto_coder.egg-info/PKG-INFO +2 -2
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/auto_coder.egg-info/requires.txt +1 -1
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/auto_coder.py +14 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/auto_coder_lang.py +8 -8
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/code_auto_generate.py +23 -3
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/code_auto_generate_diff.py +22 -3
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/code_auto_generate_editblock.py +24 -2
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/code_auto_generate_strict_diff.py +23 -4
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/code_modification_ranker.py +39 -3
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/dispacher/actions/action.py +22 -12
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/dispacher/actions/plugins/action_regex_project.py +4 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/index/filter/quick_filter.py +175 -65
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/models.py +30 -6
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/pyproject/__init__.py +1 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/suffixproject/__init__.py +1 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/tsproject/__init__.py +1 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/utils/llms.py +27 -0
- auto_coder-0.1.256/src/autocoder/version.py +1 -0
- auto_coder-0.1.255/src/autocoder/version.py +0 -1
- {auto_coder-0.1.255 → auto_coder-0.1.256}/LICENSE +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/README.md +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/setup.cfg +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/setup.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/auto_coder.egg-info/SOURCES.txt +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/auto_coder.egg-info/dependency_links.txt +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/auto_coder.egg-info/entry_points.txt +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/auto_coder.egg-info/top_level.txt +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/__init__.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/agent/__init__.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/agent/auto_demand_organizer.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/agent/auto_filegroup.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/agent/auto_guess_query.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/agent/auto_review_commit.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/agent/auto_tool.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/agent/coder.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/agent/designer.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/agent/planner.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/agent/project_reader.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/auto_coder_rag.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/auto_coder_rag_client_mcp.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/auto_coder_rag_mcp.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/auto_coder_server.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/benchmark.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/chat/__init__.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/chat_auto_coder.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/chat_auto_coder_lang.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/command_args.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/JupyterClient.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/ShellClient.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/__init__.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/anything2images.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/anything2img.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/audio.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/buildin_tokenizer.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/chunk_validation.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/cleaner.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/code_auto_execute.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/code_auto_merge.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/code_auto_merge_diff.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/code_auto_merge_editblock.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/code_auto_merge_strict_diff.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/command_completer.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/command_generator.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/command_templates.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/const.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/files.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/git_utils.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/global_cancel.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/image_to_page.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/interpreter.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/llm_rerank.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/mcp_hub.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/mcp_server.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/mcp_servers/__init__.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/mcp_servers/mcp_server_perplexity.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/mcp_tools.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/memory_manager.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/model_speed_test.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/printer.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/recall_validation.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/screenshots.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/search.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/search_replace.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/shells.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/sys_prompt.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/text.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/types.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/utils_code_auto_generate.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/data/byzerllm.md +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/data/tokenizer.json +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/db/__init__.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/db/store.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/dispacher/__init__.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/dispacher/actions/__init__.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/dispacher/actions/copilot.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/dispacher/actions/plugins/__init__.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/dispacher/actions/plugins/action_translate.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/index/__init__.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/index/entry.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/index/filter/__init__.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/index/filter/normal_filter.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/index/for_command.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/index/index.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/index/symbols_utils.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/index/types.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/lang.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/__init__.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/api_server.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/cache/__init__.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/cache/base_cache.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/cache/byzer_storage_cache.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/cache/file_monitor_cache.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/cache/simple_cache.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/doc_filter.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/document_retriever.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/llm_wrapper.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/loaders/__init__.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/loaders/docx_loader.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/loaders/excel_loader.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/loaders/pdf_loader.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/loaders/ppt_loader.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/long_context_rag.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/rag_config.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/rag_entry.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/raw_rag.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/relevant_utils.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/simple_directory_reader.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/simple_rag.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/stream_event/__init__.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/stream_event/event_writer.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/stream_event/types.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/token_checker.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/token_counter.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/token_limiter.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/types.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/utils.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/rag/variable_holder.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/regexproject/__init__.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/utils/__init__.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/utils/_markitdown.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/utils/auto_coder_utils/__init__.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/utils/auto_coder_utils/chat_stream_out.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/utils/chat_auto_coder_utils/__init__.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/utils/conversation_store.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/utils/llm_client_interceptors.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/utils/log_capture.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/utils/multi_turn.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/utils/operate_config_api.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/utils/print_table.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/utils/queue_communicate.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/utils/request_event_queue.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/utils/request_queue.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/utils/rest.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/utils/tests.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/utils/thread_utils.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/utils/types.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/tests/test_action_regex_project.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/tests/test_chat_auto_coder.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/tests/test_code_auto_merge_editblock.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/tests/test_command_completer.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/tests/test_planner.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/tests/test_queue_communicate.py +0 -0
- {auto_coder-0.1.255 → auto_coder-0.1.256}/tests/test_symbols_utils.py +0 -0
{auto_coder-0.1.255 → auto_coder-0.1.256}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.255
+Version: 0.1.256
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -26,7 +26,7 @@ Requires-Dist: tabulate
 Requires-Dist: jupyter_client
 Requires-Dist: prompt-toolkit
 Requires-Dist: tokenizers
-Requires-Dist: byzerllm[saas]>=0.1.
+Requires-Dist: byzerllm[saas]>=0.1.164
 Requires-Dist: patch
 Requires-Dist: diff_match_patch
 Requires-Dist: GitPython
{auto_coder-0.1.255 → auto_coder-0.1.256}/src/auto_coder.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.255
+Version: 0.1.256
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -26,7 +26,7 @@ Requires-Dist: tabulate
 Requires-Dist: jupyter_client
 Requires-Dist: prompt-toolkit
 Requires-Dist: tokenizers
-Requires-Dist: byzerllm[saas]>=0.1.
+Requires-Dist: byzerllm[saas]>=0.1.164
 Requires-Dist: patch
 Requires-Dist: diff_match_patch
 Requires-Dist: GitPython
{auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/auto_coder.py

@@ -1389,11 +1389,25 @@ def main(input_args: Optional[List[str]] = None):
             elapsed_time = time.time() - start_time
             printer = Printer()
             speed = last_meta.generated_tokens_count / elapsed_time
+
+            # Get model info for pricing
+            from autocoder.utils import llms as llm_utils
+            model_info = llm_utils.get_model_info(model_name, args.product_mode) or {}
+            input_price = model_info.get("input_price", 0.0) if model_info else 0.0
+            output_price = model_info.get("output_price", 0.0) if model_info else 0.0
+
+            # Calculate costs
+            input_cost = (last_meta.input_tokens_count * input_price) / 1000000  # Convert to millions
+            output_cost = (last_meta.generated_tokens_count * output_price) / 1000000  # Convert to millions
+
             printer.print_in_terminal("stream_out_stats",
+                                      model_name=model_name,
                                       elapsed_time=elapsed_time,
                                       first_token_time=last_meta.first_token_time,
                                       input_tokens=last_meta.input_tokens_count,
                                       output_tokens=last_meta.generated_tokens_count,
+                                      input_cost=round(input_cost, 4),
+                                      output_cost=round(output_cost, 4),
                                       speed=round(speed, 2))

             chat_history["ask_conversation"].append(
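A note on the arithmetic: every cost figure added in this release follows the same convention, where `input_price` and `output_price` are treated as a price per one million tokens, so each call site computes `tokens * price / 1000000`. A minimal standalone sketch of that formula follows; the prices and token counts are illustrative values, not auto-coder's actual pricing data.

# Sketch of the per-million-token cost convention used across this release.
# All numbers below are made up for illustration.

def token_cost(input_tokens, output_tokens, input_price, output_price):
    """input_price/output_price are per one million tokens, matching the
    input_price/output_price fields read from model info."""
    input_cost = (input_tokens * input_price) / 1000000
    output_cost = (output_tokens * output_price) / 1000000
    return round(input_cost, 4), round(output_cost, 4)

# 12,000 input tokens at 2.0 per million, 3,500 output tokens at 6.0 per million:
print(token_cost(12000, 3500, 2.0, 6.0))  # -> (0.024, 0.021)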
{auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/auto_coder_lang.py

@@ -57,7 +57,7 @@ MESSAGES = {
         "Paste the answer to the input box below, use '/break' to exit, '/clear' to clear the screen, '/eof' to submit."
     ),
     "code_generation_start": "Auto generate the code...",
-    "code_generation_complete": "Code generation completed in {{ duration }} seconds, input_tokens_count: {{ input_tokens }}, generated_tokens_count: {{ output_tokens }}, speed: {{ speed }} tokens/s",
+    "code_generation_complete": "{{ model_names }} Code generation completed in {{ duration }} seconds, input_tokens_count: {{ input_tokens }}, generated_tokens_count: {{ output_tokens }}, input_cost: {{ input_cost }}, output_cost: {{ output_cost }}, speed: {{ speed }} tokens/s",
     "code_merge_start": "Auto merge the code...",
     "code_execution_warning": "Content(send to model) is {{ content_length }} tokens (you may collect too much files), which is larger than the maximum input length {{ max_length }}",
     "quick_filter_start": "{{ model_name }} Starting filter context(quick_filter)...",
@@ -75,12 +75,12 @@ MESSAGES = {
     "ranking_start": "Start ranking {{ count }} candidates using model {{ model_name }}",
     "ranking_failed_request": "Ranking request failed: {{ error }}",
     "ranking_all_failed": "All ranking requests failed",
-    "ranking_complete": "Ranking completed in {{ elapsed }}s, total voters: {{ total_tasks }}, best candidate index: {{ best_candidate }}, scores: {{ scores }}, input_tokens: {{ input_tokens }}, output_tokens: {{ output_tokens }}",
+    "ranking_complete": "{{ model_names }} Ranking completed in {{ elapsed }}s, total voters: {{ total_tasks }}, best candidate index: {{ best_candidate }}, scores: {{ scores }}, input_tokens: {{ input_tokens }}, output_tokens: {{ output_tokens }}, input_cost: {{ input_cost }}, output_cost: {{ output_cost }}",
     "ranking_process_failed": "Ranking process failed: {{ error }}",
     "ranking_failed": "Ranking failed in {{ elapsed }}s, using original order",
     "begin_index_source_code": "🚀 Begin to index source code in {{ source_dir }}",
-    "stream_out_stats": "
-    "quick_filter_stats": "快速过滤器完成,耗时 {{ elapsed_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}",
+    "stream_out_stats": "Model: {{ model_name }}, Total time: {{ elapsed_time }} seconds, First token time: {{ first_token_time }} seconds, Speed: {{ speed }} tokens/s, Input tokens: {{ input_tokens }}, Output tokens: {{ output_tokens }}, Input cost: {{ input_cost }}, Output cost: {{ output_cost }}",
+    "quick_filter_stats": "{{ model_names }} Quick filter completed in {{ elapsed_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}, input cost: {{ input_cost }}, output cost: {{ output_cost }}",
     "upsert_file": "✅ Updated file: {{ file_path }}",
     "unmerged_blocks_title": "Unmerged Blocks",
     "quick_filter_title": "{{ model_name }} is analyzing how to filter context...",
@@ -150,7 +150,7 @@ MESSAGES = {
         "将获得答案黏贴到下面的输入框,换行后,使用 '/break' 退出,'/clear' 清屏,'/eof' 提交。"
     ),
     "code_generation_start": "正在自动生成代码...",
-    "code_generation_complete": "代码生成完成,耗时 {{ duration }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 速度: {{ speed }} tokens/秒",
+    "code_generation_complete": "{{ model_names }} 代码生成完成,耗时 {{ duration }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}, 速度: {{ speed }} tokens/秒",
     "code_merge_start": "正在自动合并代码...",
     "code_execution_warning": "发送给模型的内容长度为 {{ content_length }} tokens(您可能收集了太多文件),超过了最大输入长度 {{ max_length }}",
     "quick_filter_start": "{{ model_name }} 开始查找上下文(quick_filter)...",
@@ -179,11 +179,11 @@ MESSAGES = {
     "ranking_start": "开始对 {{ count }} 个候选项进行排序,使用模型 {{ model_name }} 打分",
     "ranking_failed_request": "排序请求失败: {{ error }}",
     "ranking_all_failed": "所有排序请求都失败",
-    "ranking_complete": "排序完成,耗时 {{ elapsed }} 秒,总投票数: {{ total_tasks }},最佳候选索引: {{ best_candidate }},得分: {{ scores }},输入token数: {{ input_tokens }},输出token数: {{ output_tokens }}",
+    "ranking_complete": "{{ model_names }} 排序完成,耗时 {{ elapsed }} 秒,总投票数: {{ total_tasks }},最佳候选索引: {{ best_candidate }},得分: {{ scores }},输入token数: {{ input_tokens }},输出token数: {{ output_tokens }} 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}",
     "ranking_process_failed": "排序过程失败: {{ error }}",
     "ranking_failed": "排序失败,耗时 {{ elapsed }} 秒,使用原始顺序",
-    "stream_out_stats": "
-    "quick_filter_stats": "Quick filter completed in {{ elapsed_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}",
+    "stream_out_stats": "模型: {{ model_name }},总耗时 {{ elapsed_time }} 秒,首token时间: {{ first_token_time }} 秒, 速度: {{ speed }} tokens/秒, 输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}",
+    "quick_filter_stats": "{{ model_names }} Quick filter completed in {{ elapsed_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}, input cost: {{ input_cost }}, output cost: {{ output_cost }}",
     "quick_filter_title": "{{ model_name }} 正在分析如何筛选上下文...",
     "quick_filter_failed": "❌ 快速过滤器失败: {{ error }}. ",
     "estimated_chat_input_tokens": "对话输入token预估为: {{ estimated_input_tokens }}",
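The message values above use `{{ placeholder }}` syntax, which suggests Jinja2-style rendering inside auto-coder's Printer; how the project actually renders them is not shown in this diff. A minimal sketch with plain jinja2 and one of the updated templates, using hypothetical values:

# Assumes plain jinja2 rendering; auto-coder's real Printer API may differ.
from jinja2 import Template

template = Template(
    "{{ model_names }} Code generation completed in {{ duration }} seconds, "
    "input_cost: {{ input_cost }}, output_cost: {{ output_cost }}, "
    "speed: {{ speed }} tokens/s"
)
print(template.render(model_names="model-a,model-b", duration=12.3,
                      input_cost=0.024, output_cost=0.021, speed=45.6))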
{auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/code_auto_generate.py

@@ -10,6 +10,7 @@ from autocoder.common.utils_code_auto_generate import chat_with_continue
 import json
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
+from autocoder.utils import llms as llm_utils


 class CodeAutoGenerate:
@@ -193,6 +194,9 @@ class CodeAutoGenerate:
         results = []
         input_tokens_count = 0
         generated_tokens_count = 0
+        input_tokens_cost = 0
+        generated_tokens_cost = 0
+        model_names = []

         printer = Printer()
         estimated_input_tokens = count_tokens(json.dumps(conversations, ensure_ascii=False))
@@ -206,13 +210,27 @@ class CodeAutoGenerate:
             futures = []
             for llm in self.llms:
                 for _ in range(self.generate_times_same_model):
-                    futures.append(executor.submit(
-                        chat_with_continue, llm=llm, conversations=conversations, llm_config=llm_config))
+
+                    model_names_list = llm_utils.get_llm_names(llm)
+                    model_name = None
+                    if model_names_list:
+                        model_name = model_names_list[0]
+
+                    for _ in range(self.generate_times_same_model):
+                        model_names.append(model_name)
+                        futures.append(executor.submit(
+                            chat_with_continue, llm=llm, conversations=conversations, llm_config=llm_config))
+
             temp_results = [future.result() for future in futures]
             for result in temp_results:
                 results.append(result.content)
                 input_tokens_count += result.input_tokens_count
                 generated_tokens_count += result.generated_tokens_count
+                model_info = llm_utils.get_model_info(model_name, self.args.product_mode)
+                input_cost = model_info.get("input_price", 0) if model_info else 0
+                output_cost = model_info.get("output_price", 0) if model_info else 0
+                input_tokens_cost += input_cost * result.input_tokens_count / 1000000
+                generated_tokens_cost += output_cost * result.generated_tokens_count / 1000000

         for result in results:
             conversations_list.append(
@@ -227,7 +245,9 @@ class CodeAutoGenerate:

         statistics = {
             "input_tokens_count": input_tokens_count,
-            "generated_tokens_count": generated_tokens_count
+            "generated_tokens_count": generated_tokens_count,
+            "input_tokens_cost": input_tokens_cost,
+            "generated_tokens_cost": generated_tokens_cost
         }

         if self.args.request_id and not self.args.skip_events:
{auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/code_auto_generate_diff.py

@@ -9,6 +9,7 @@ import json
 from autocoder.common.utils_code_auto_generate import chat_with_continue
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
+from autocoder.utils import llms as llm_utils


 class CodeAutoGenerateDiff:
@@ -341,6 +342,9 @@ class CodeAutoGenerateDiff:
         results = []
         input_tokens_count = 0
         generated_tokens_count = 0
+        input_tokens_cost = 0
+        generated_tokens_cost = 0
+        model_names = []

         printer = Printer()
         estimated_input_tokens = count_tokens(json.dumps(conversations, ensure_ascii=False))
@@ -354,13 +358,26 @@ class CodeAutoGenerateDiff:
             futures = []
             for llm in self.llms:
                 for _ in range(self.generate_times_same_model):
-                    futures.append(executor.submit(
-                        chat_with_continue, llm=llm, conversations=conversations, llm_config=llm_config))
+                    model_names_list = llm_utils.get_llm_names(llm)
+                    model_name = None
+                    if model_names_list:
+                        model_name = model_names_list[0]
+
+                    for _ in range(self.generate_times_same_model):
+                        model_names.append(model_name)
+                        futures.append(executor.submit(
+                            chat_with_continue, llm=llm, conversations=conversations, llm_config=llm_config))
+
             temp_results = [future.result() for future in futures]
             for result in temp_results:
                 results.append(result.content)
                 input_tokens_count += result.input_tokens_count
                 generated_tokens_count += result.generated_tokens_count
+                model_info = llm_utils.get_model_info(model_name, self.args.product_mode)
+                input_cost = model_info.get("input_price",0) if model_info else 0
+                output_cost = model_info.get("output_price",0) if model_info else 0
+                input_tokens_cost += input_cost * result.input_tokens_count / 1000000
+                generated_tokens_cost += output_cost * result.generated_tokens_count / 1000000

         for result in results:
             conversations_list.append(
@@ -376,7 +393,9 @@ class CodeAutoGenerateDiff:

         statistics = {
             "input_tokens_count": input_tokens_count,
-            "generated_tokens_count": generated_tokens_count
+            "generated_tokens_count": generated_tokens_count,
+            "input_tokens_cost": input_tokens_cost,
+            "generated_tokens_cost": generated_tokens_cost
         }

         if self.args.request_id and not self.args.skip_events:
{auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/code_auto_generate_editblock.py
RENAMED

@@ -13,6 +13,7 @@ from concurrent.futures import ThreadPoolExecutor
 from autocoder.common.utils_code_auto_generate import chat_with_continue
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
+from autocoder.utils import llms as llm_utils


 class CodeAutoGenerateEditBlock:
@@ -424,6 +425,11 @@ class CodeAutoGenerateEditBlock:
         input_tokens_count = 0
         generated_tokens_count = 0

+        input_tokens_cost = 0
+        generated_tokens_cost = 0
+
+        model_names = []
+
         printer = Printer()
         estimated_input_tokens = count_tokens(
             json.dumps(conversations, ensure_ascii=False))
@@ -437,14 +443,28 @@ class CodeAutoGenerateEditBlock:
         with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
             futures = []
             for llm in self.llms:
+
+                model_names_list = llm_utils.get_llm_names(llm)
+                model_name = None
+                if model_names_list:
+                    model_name = model_names_list[0]
+
                 for _ in range(self.generate_times_same_model):
+                    model_names.append(model_name)
                     futures.append(executor.submit(
                         chat_with_continue, llm=llm, conversations=conversations, llm_config=llm_config))
+
             temp_results = [future.result() for future in futures]
-            for result in temp_results:
+
+            for result,model_name in zip(temp_results,model_names):
                 results.append(result.content)
                 input_tokens_count += result.input_tokens_count
                 generated_tokens_count += result.generated_tokens_count
+                model_info = llm_utils.get_model_info(model_name,self.args.product_mode)
+                input_cost = model_info.get("input_price", 0) if model_info else 0
+                output_cost = model_info.get("output_price", 0) if model_info else 0
+                input_tokens_cost += input_cost * result.input_tokens_count / 1000000
+                generated_tokens_cost += output_cost * result.generated_tokens_count / 1000000

         for result in results:
             conversations_list.append(
@@ -461,7 +481,9 @@ class CodeAutoGenerateEditBlock:

         statistics = {
             "input_tokens_count": input_tokens_count,
-            "generated_tokens_count": generated_tokens_count
+            "generated_tokens_count": generated_tokens_count,
+            "input_tokens_cost": input_tokens_cost,
+            "generated_tokens_cost": generated_tokens_cost
         }

         if self.args.request_id and not self.args.skip_events:
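The editblock variant above pairs each result with the model that produced it via `zip(temp_results, model_names)`. That pairing is sound only because `model_names` is appended in the same loop and order in which futures are submitted, and iterating the futures list (rather than using as_completed) preserves submission order. A toy sketch of that ordering invariant follows; the model names and the fake work function are illustrative, not auto-coder code.

# Toy demonstration: results collected in list order line up with
# per-task metadata appended during submission. Illustrative only.
from concurrent.futures import ThreadPoolExecutor

def fake_generate(model, prompt):
    return f"{model}:{prompt}"

models = ["model-a", "model-b"]
generate_times_same_model = 2

with ThreadPoolExecutor(max_workers=4) as executor:
    futures, model_names = [], []
    for model in models:
        for _ in range(generate_times_same_model):
            model_names.append(model)
            futures.append(executor.submit(fake_generate, model, "hi"))
    # Iterating the list preserves submission order, so each result
    # matches its recorded model name.
    results = [f.result() for f in futures]

for result, name in zip(results, model_names):
    assert result.startswith(name)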
{auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/code_auto_generate_strict_diff.py
RENAMED

@@ -9,6 +9,7 @@ import json
 from autocoder.common.utils_code_auto_generate import chat_with_continue
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
+from autocoder.utils import llms as llm_utils

 class CodeAutoGenerateStrictDiff:
     def __init__(
@@ -311,6 +312,9 @@ class CodeAutoGenerateStrictDiff:
         results = []
         input_tokens_count = 0
         generated_tokens_count = 0
+        input_tokens_cost = 0
+        generated_tokens_cost = 0
+        model_names = []

         printer = Printer()
         estimated_input_tokens = count_tokens(json.dumps(conversations, ensure_ascii=False))
@@ -324,14 +328,27 @@ class CodeAutoGenerateStrictDiff:
             futures = []
             for llm in self.llms:
                 for _ in range(self.generate_times_same_model):
-                    futures.append(executor.submit(
-                        chat_with_continue, llm=llm, conversations=conversations, llm_config=llm_config))
+
+                    model_names_list = llm_utils.get_llm_names(llm)
+                    model_name = None
+                    if model_names_list:
+                        model_name = model_names_list[0]
+
+                    for _ in range(self.generate_times_same_model):
+                        model_names.append(model_name)
+                        futures.append(executor.submit(
+                            chat_with_continue, llm=llm, conversations=conversations, llm_config=llm_config))
+
             temp_results = [future.result() for future in futures]
             for result in temp_results:
                 results.append(result.content)
                 input_tokens_count += result.input_tokens_count
                 generated_tokens_count += result.generated_tokens_count
-
+                model_info = llm_utils.get_model_info(model_name, self.args.product_mode)
+                input_cost = model_info.get("input_price", 0) if model_info else 0
+                output_cost = model_info.get("output_price", 0) if model_info else 0
+                input_tokens_cost += input_cost * result.input_tokens_count / 1000000
+                generated_tokens_cost += output_cost * result.generated_tokens_count / 1000000
         for result in results:
             conversations_list.append(
                 conversations + [{"role": "assistant", "content": result}])
@@ -345,7 +362,9 @@ class CodeAutoGenerateStrictDiff:

         statistics = {
             "input_tokens_count": input_tokens_count,
-            "generated_tokens_count": generated_tokens_count
+            "generated_tokens_count": generated_tokens_count,
+            "input_tokens_cost": input_tokens_cost,
+            "generated_tokens_cost": generated_tokens_cost
         }

         if self.args.request_id and not self.args.skip_events:
{auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/common/code_modification_ranker.py

@@ -8,8 +8,8 @@ from concurrent.futures import ThreadPoolExecutor, as_completed
 import traceback
 from autocoder.common.utils_code_auto_generate import chat_with_continue
 from byzerllm.utils.str2model import to_model
+from autocoder.utils.llms import get_llm_names, get_model_info

-from autocoder.utils.llms import get_llm_names
 class RankResult(BaseModel):
     rank_result: List[int]

@@ -97,13 +97,42 @@ class CodeModificationRanker:

             # Collect all results
             results = []
-            for future in futures:
+            # Collect the list of model names
+            model_names = []
+            for llm in self.llms:
+                # Model names for the current llm instance
+                names = get_llm_names(llm)
+                model_names.extend(names)
+
+            # Fetch model pricing info
+            model_info_map = {}
+            for name in model_names:
+                # The second argument is the product mode, taken from args
+                info = get_model_info(name, self.args.product_mode)
+                if info:
+                    model_info_map[name] = {
+                        "input_cost": info.get("input_price", 0.0),  # cost per million tokens
+                        "output_cost": info.get("output_price", 0.0)  # cost per million tokens
+                    }
+
+            # Accumulate total cost
+            total_input_cost = 0.0
+            total_output_cost = 0.0
+
+            for future, model_name in zip(futures, model_names):
                 try:
                     result = future.result()
                     input_tokens_count += result.input_tokens_count
                     generated_tokens_count += result.generated_tokens_count
                     v = to_model(result.content,RankResult)
                     results.append(v.rank_result)
+
+                    # Compute cost
+                    info = model_info_map.get(model_name, {})
+                    # Formula: token count * unit price / 1000000
+                    total_input_cost += (result.input_tokens_count * info.get("input_cost", 0.0)) / 1000000
+                    total_output_cost += (result.generated_tokens_count * info.get("output_cost", 0.0)) / 1000000
+
                 except Exception as e:
                     self.printer.print_in_terminal(
                         "ranking_failed_request", style="yellow", error=str(e))
@@ -113,6 +142,10 @@ class CodeModificationRanker:
             raise Exception(
                 self.printer.get_message_from_key("ranking_all_failed"))

+        # Round to 4 decimal places
+        total_input_cost = round(total_input_cost, 4)
+        total_output_cost = round(total_output_cost, 4)
+
         # Calculate scores for each candidate
         candidate_scores = defaultdict(float)
         for rank_result in results:
@@ -137,7 +170,10 @@ class CodeModificationRanker:
             best_candidate=sorted_candidates[0],
             scores=score_details,
             input_tokens=input_tokens_count,
-            output_tokens=generated_tokens_count
+            output_tokens=generated_tokens_count,
+            input_cost=total_input_cost,
+            output_cost=total_output_cost,
+            model_names=", ".join(model_names)
         )

         rerank_contents = [generate_result.contents[i]
{auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/dispacher/actions/action.py

@@ -125,13 +125,17 @@ class ActionTSProject(BaseAction):
             query=args.query, source_content=content
         )
         elapsed_time = time.time() - start_time
-        speed = generate_result.metadata.get('generated_tokens_count', 0) / elapsed_time if elapsed_time > 0 else 0
-
+        speed = generate_result.metadata.get('generated_tokens_count', 0) / elapsed_time if elapsed_time > 0 else 0
+        input_tokens_cost = generate_result.metadata.get('input_tokens_cost', 0)
+        generated_tokens_cost = generate_result.metadata.get('generated_tokens_cost', 0)
+        model_names = ",".join(get_llm_names(generate.llms))
         self.printer.print_in_terminal(
             "code_generation_complete",
             duration=elapsed_time,
             input_tokens=generate_result.metadata.get('input_tokens_count', 0),
             output_tokens=generate_result.metadata.get('generated_tokens_count', 0),
+            input_cost=input_tokens_cost,
+            output_cost=generated_tokens_cost,
             speed=round(speed, 2),
             model_names=model_names
         )
@@ -221,12 +225,16 @@ class ActionPyScriptProject(BaseAction):

         elapsed_time = time.time() - start_time
         speed = generate_result.metadata.get('generated_tokens_count', 0) / elapsed_time if elapsed_time > 0 else 0
-        model_names = ",".join(get_llm_names(
+        model_names = ",".join(get_llm_names(generate.llms))
+        input_tokens_cost = generate_result.metadata.get('input_tokens_cost', 0)
+        generated_tokens_cost = generate_result.metadata.get('generated_tokens_cost', 0)
         self.printer.print_in_terminal(
             "code_generation_complete",
             duration=elapsed_time,
             input_tokens=generate_result.metadata.get('input_tokens_count', 0),
             output_tokens=generate_result.metadata.get('generated_tokens_count', 0),
+            input_cost=input_tokens_cost,
+            output_cost=generated_tokens_cost,
             speed=round(speed, 2),
             model_names=model_names
         )
@@ -264,13 +272,7 @@ class ActionPyScriptProject(BaseAction):
             model=self.llm.default_model_name,
         )

-        end_time = time.time()
-        self.printer.print_in_terminal(
-            "code_generation_complete",
-            duration=end_time - start_time,
-            input_tokens=generate_result.metadata.get('input_tokens_count', 0),
-            output_tokens=generate_result.metadata.get('generated_tokens_count', 0)
-        )
+        end_time = time.time()
         with open(self.args.target_file, "w") as file:
             file.write(content)

@@ -348,12 +350,16 @@ class ActionPyProject(BaseAction):
         )
         elapsed_time = time.time() - start_time
         speed = generate_result.metadata.get('generated_tokens_count', 0) / elapsed_time if elapsed_time > 0 else 0
-        model_names = ",".join(get_llm_names(
+        model_names = ",".join(get_llm_names(generate.llms))
+        input_tokens_cost = generate_result.metadata.get('input_tokens_cost', 0)
+        generated_tokens_cost = generate_result.metadata.get('generated_tokens_cost', 0)
         self.printer.print_in_terminal(
             "code_generation_complete",
             duration=elapsed_time,
             input_tokens=generate_result.metadata.get('input_tokens_count', 0),
             output_tokens=generate_result.metadata.get('generated_tokens_count', 0),
+            input_cost=input_tokens_cost,
+            output_cost=generated_tokens_cost,
             speed=round(speed, 2),
             model_names=model_names
         )
@@ -458,12 +464,16 @@ class ActionSuffixProject(BaseAction):

         elapsed_time = time.time() - start_time
         speed = generate_result.metadata.get('generated_tokens_count', 0) / elapsed_time if elapsed_time > 0 else 0
-        model_names = ",".join(get_llm_names(
+        model_names = ",".join(get_llm_names(generate.llms))
+        input_tokens_cost = generate_result.metadata.get('input_tokens_cost', 0)
+        generated_tokens_cost = generate_result.metadata.get('generated_tokens_cost', 0)
         self.printer.print_in_terminal(
             "code_generation_complete",
             duration=elapsed_time,
             input_tokens=generate_result.metadata.get('input_tokens_count', 0),
             output_tokens=generate_result.metadata.get('generated_tokens_count', 0),
+            input_cost=input_tokens_cost,
+            output_cost=generated_tokens_cost,
             speed=round(speed, 2),
             model_names=model_names
         )
{auto_coder-0.1.255 → auto_coder-0.1.256}/src/autocoder/dispacher/actions/plugins/action_regex_project.py

@@ -88,11 +88,15 @@ class ActionRegexProject:
         elapsed_time = time.time() - start_time
         speed = generate_result.metadata.get('generated_tokens_count', 0) / elapsed_time if elapsed_time > 0 else 0
         model_names = ",".join(get_llm_names(self.llm))
+        input_tokens_cost = generate_result.metadata.get('input_tokens_cost', 0)
+        generated_tokens_cost = generate_result.metadata.get('generated_tokens_cost', 0)
         self.printer.print_in_terminal(
             "code_generation_complete",
             duration=elapsed_time,
             input_tokens=generate_result.metadata.get('input_tokens_count', 0),
             output_tokens=generate_result.metadata.get('generated_tokens_count', 0),
+            input_cost=input_tokens_cost,
+            output_cost=generated_tokens_cost,
             speed=round(speed, 2),
             model_names=model_names
         )