auto-coder 1.0.0-py3-none-any.whl → 2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of auto-coder might be problematic.
- auto_coder-2.0.0.dist-info/LICENSE +158 -0
- auto_coder-2.0.0.dist-info/METADATA +558 -0
- auto_coder-2.0.0.dist-info/RECORD +795 -0
- {auto_coder-1.0.0.dist-info → auto_coder-2.0.0.dist-info}/WHEEL +1 -1
- {auto_coder-1.0.0.dist-info → auto_coder-2.0.0.dist-info}/entry_points.txt +3 -3
- autocoder/__init__.py +31 -0
- autocoder/agent/auto_filegroup.py +32 -13
- autocoder/agent/auto_learn_from_commit.py +9 -1
- autocoder/agent/base_agentic/__init__.py +3 -0
- autocoder/agent/base_agentic/agent_hub.py +1 -1
- autocoder/agent/base_agentic/base_agent.py +235 -136
- autocoder/agent/base_agentic/default_tools.py +119 -118
- autocoder/agent/base_agentic/test_base_agent.py +1 -1
- autocoder/agent/base_agentic/tool_registry.py +32 -20
- autocoder/agent/base_agentic/tools/read_file_tool_resolver.py +24 -3
- autocoder/agent/base_agentic/tools/write_to_file_tool_resolver.py +24 -11
- autocoder/agent/base_agentic/types.py +42 -0
- autocoder/agent/entry_command_agent/chat.py +73 -59
- autocoder/auto_coder.py +31 -40
- autocoder/auto_coder_rag.py +11 -1084
- autocoder/auto_coder_runner.py +970 -2345
- autocoder/auto_coder_terminal.py +26 -0
- autocoder/auto_coder_terminal_v3.py +190 -0
- autocoder/chat/conf_command.py +224 -124
- autocoder/chat/models_command.py +361 -299
- autocoder/chat/rules_command.py +79 -31
- autocoder/chat_auto_coder.py +988 -398
- autocoder/chat_auto_coder_lang.py +23 -732
- autocoder/commands/auto_command.py +25 -8
- autocoder/commands/auto_web.py +1 -1
- autocoder/commands/tools.py +44 -44
- autocoder/common/__init__.py +150 -128
- autocoder/common/ac_style_command_parser/__init__.py +39 -2
- autocoder/common/ac_style_command_parser/config.py +422 -0
- autocoder/common/ac_style_command_parser/parser.py +292 -78
- autocoder/common/ac_style_command_parser/test_parser.py +241 -16
- autocoder/common/ac_style_command_parser/test_typed_parser.py +342 -0
- autocoder/common/ac_style_command_parser/typed_parser.py +653 -0
- autocoder/common/action_yml_file_manager.py +25 -13
- autocoder/common/agent_events/__init__.py +52 -0
- autocoder/common/agent_events/agent_event_emitter.py +193 -0
- autocoder/common/agent_events/event_factory.py +177 -0
- autocoder/common/agent_events/examples.py +307 -0
- autocoder/common/agent_events/types.py +113 -0
- autocoder/common/agent_events/utils.py +68 -0
- autocoder/common/agent_hooks/__init__.py +44 -0
- autocoder/common/agent_hooks/examples.py +582 -0
- autocoder/common/agent_hooks/hook_executor.py +217 -0
- autocoder/common/agent_hooks/hook_manager.py +288 -0
- autocoder/common/agent_hooks/types.py +133 -0
- autocoder/common/agent_hooks/utils.py +99 -0
- autocoder/common/agent_query_queue/queue_executor.py +324 -0
- autocoder/common/agent_query_queue/queue_manager.py +325 -0
- autocoder/common/agents/__init__.py +11 -0
- autocoder/common/agents/agent_manager.py +323 -0
- autocoder/common/agents/agent_parser.py +189 -0
- autocoder/common/agents/example_usage.py +344 -0
- autocoder/common/agents/integration_example.py +330 -0
- autocoder/common/agents/test_agent_parser.py +545 -0
- autocoder/common/async_utils.py +101 -0
- autocoder/common/auto_coder_lang.py +23 -972
- autocoder/common/autocoderargs_parser/__init__.py +14 -0
- autocoder/common/autocoderargs_parser/parser.py +184 -0
- autocoder/common/autocoderargs_parser/tests/__init__.py +1 -0
- autocoder/common/autocoderargs_parser/tests/test_args_parser.py +235 -0
- autocoder/common/autocoderargs_parser/tests/test_token_parser.py +195 -0
- autocoder/common/autocoderargs_parser/token_parser.py +290 -0
- autocoder/common/buildin_tokenizer.py +2 -4
- autocoder/common/code_auto_generate.py +149 -74
- autocoder/common/code_auto_generate_diff.py +163 -70
- autocoder/common/code_auto_generate_editblock.py +179 -89
- autocoder/common/code_auto_generate_strict_diff.py +167 -72
- autocoder/common/code_auto_merge_editblock.py +13 -6
- autocoder/common/code_modification_ranker.py +1 -1
- autocoder/common/command_completer.py +3 -3
- autocoder/common/command_file_manager/manager.py +183 -47
- autocoder/common/command_file_manager/test_command_file_manager.py +507 -0
- autocoder/common/command_templates.py +1 -1
- autocoder/common/conf_utils.py +2 -4
- autocoder/common/conversations/config.py +11 -3
- autocoder/common/conversations/get_conversation_manager.py +100 -2
- autocoder/common/conversations/llm_stats_models.py +264 -0
- autocoder/common/conversations/manager.py +112 -28
- autocoder/common/conversations/models.py +16 -2
- autocoder/common/conversations/storage/index_manager.py +134 -10
- autocoder/common/core_config/__init__.py +63 -0
- autocoder/common/core_config/agentic_mode_manager.py +109 -0
- autocoder/common/core_config/base_manager.py +123 -0
- autocoder/common/core_config/compatibility.py +151 -0
- autocoder/common/core_config/config_manager.py +156 -0
- autocoder/common/core_config/conversation_manager.py +31 -0
- autocoder/common/core_config/exclude_manager.py +72 -0
- autocoder/common/core_config/file_manager.py +177 -0
- autocoder/common/core_config/human_as_model_manager.py +129 -0
- autocoder/common/core_config/lib_manager.py +54 -0
- autocoder/common/core_config/main_manager.py +81 -0
- autocoder/common/core_config/mode_manager.py +126 -0
- autocoder/common/core_config/models.py +70 -0
- autocoder/common/core_config/test_memory_manager.py +1056 -0
- autocoder/common/env_manager.py +282 -0
- autocoder/common/env_manager_usage_example.py +211 -0
- autocoder/common/file_checkpoint/conversation_checkpoint.py +19 -19
- autocoder/common/file_checkpoint/manager.py +264 -48
- autocoder/common/file_checkpoint/test_backup.py +1 -18
- autocoder/common/file_checkpoint/test_manager.py +270 -1
- autocoder/common/file_checkpoint/test_store.py +1 -17
- autocoder/common/file_handler/__init__.py +23 -0
- autocoder/common/file_handler/active_context_handler.py +159 -0
- autocoder/common/file_handler/add_files_handler.py +409 -0
- autocoder/common/file_handler/chat_handler.py +180 -0
- autocoder/common/file_handler/coding_handler.py +401 -0
- autocoder/common/file_handler/commit_handler.py +200 -0
- autocoder/common/file_handler/lib_handler.py +156 -0
- autocoder/common/file_handler/list_files_handler.py +111 -0
- autocoder/common/file_handler/mcp_handler.py +268 -0
- autocoder/common/file_handler/models_handler.py +493 -0
- autocoder/common/file_handler/remove_files_handler.py +172 -0
- autocoder/common/git_utils.py +44 -8
- autocoder/common/global_cancel.py +15 -6
- autocoder/common/ignorefiles/test_ignore_file_utils.py +1 -1
- autocoder/common/international/__init__.py +31 -0
- autocoder/common/international/demo_international.py +92 -0
- autocoder/common/international/message_manager.py +157 -0
- autocoder/common/international/messages/__init__.py +56 -0
- autocoder/common/international/messages/async_command_messages.py +507 -0
- autocoder/common/international/messages/auto_coder_messages.py +2208 -0
- autocoder/common/international/messages/chat_auto_coder_messages.py +1547 -0
- autocoder/common/international/messages/command_help_messages.py +986 -0
- autocoder/common/international/messages/conversation_command_messages.py +191 -0
- autocoder/common/international/messages/git_helper_plugin_messages.py +159 -0
- autocoder/common/international/messages/queue_command_messages.py +751 -0
- autocoder/common/international/messages/rules_command_messages.py +77 -0
- autocoder/common/international/messages/sdk_messages.py +1707 -0
- autocoder/common/international/messages/token_helper_plugin_messages.py +361 -0
- autocoder/common/international/messages/tool_display_messages.py +1212 -0
- autocoder/common/international/messages/workflow_exception_messages.py +473 -0
- autocoder/common/international/test_international.py +612 -0
- autocoder/common/linter_core/__init__.py +28 -0
- autocoder/common/linter_core/base_linter.py +61 -0
- autocoder/common/linter_core/config_loader.py +271 -0
- autocoder/common/linter_core/formatters/__init__.py +0 -0
- autocoder/common/linter_core/formatters/base_formatter.py +38 -0
- autocoder/common/linter_core/formatters/raw_formatter.py +17 -0
- autocoder/common/linter_core/linter.py +166 -0
- autocoder/common/linter_core/linter_factory.py +216 -0
- autocoder/common/linter_core/linter_manager.py +333 -0
- autocoder/common/linter_core/linters/__init__.py +9 -0
- autocoder/common/linter_core/linters/java_linter.py +342 -0
- autocoder/common/linter_core/linters/python_linter.py +115 -0
- autocoder/common/linter_core/linters/typescript_linter.py +119 -0
- autocoder/common/linter_core/models/__init__.py +7 -0
- autocoder/common/linter_core/models/lint_result.py +91 -0
- autocoder/common/linter_core/models.py +33 -0
- autocoder/common/linter_core/tests/__init__.py +3 -0
- autocoder/common/linter_core/tests/test_config_loader.py +323 -0
- autocoder/common/linter_core/tests/test_config_loading.py +308 -0
- autocoder/common/linter_core/tests/test_factory_manager.py +234 -0
- autocoder/common/linter_core/tests/test_formatters.py +147 -0
- autocoder/common/linter_core/tests/test_integration.py +317 -0
- autocoder/common/linter_core/tests/test_java_linter.py +496 -0
- autocoder/common/linter_core/tests/test_linters.py +265 -0
- autocoder/common/linter_core/tests/test_models.py +81 -0
- autocoder/common/linter_core/tests/verify_config_loading.py +296 -0
- autocoder/common/linter_core/tests/verify_fixes.py +183 -0
- autocoder/common/llm_friendly_package/__init__.py +31 -0
- autocoder/common/llm_friendly_package/base_manager.py +102 -0
- autocoder/common/llm_friendly_package/docs_manager.py +121 -0
- autocoder/common/llm_friendly_package/library_manager.py +171 -0
- autocoder/common/{llm_friendly_package.py → llm_friendly_package/main_manager.py} +204 -231
- autocoder/common/llm_friendly_package/models.py +40 -0
- autocoder/common/llm_friendly_package/test_llm_friendly_package.py +536 -0
- autocoder/common/llms/__init__.py +15 -0
- autocoder/common/llms/demo_error_handling.py +85 -0
- autocoder/common/llms/factory.py +142 -0
- autocoder/common/llms/manager.py +264 -0
- autocoder/common/llms/pricing.py +121 -0
- autocoder/common/llms/registry.py +288 -0
- autocoder/common/llms/schema.py +77 -0
- autocoder/common/llms/simple_demo.py +45 -0
- autocoder/common/llms/test_quick_model.py +116 -0
- autocoder/common/llms/test_remove_functionality.py +182 -0
- autocoder/common/llms/tests/__init__.py +1 -0
- autocoder/common/llms/tests/test_manager.py +330 -0
- autocoder/common/llms/tests/test_registry.py +364 -0
- autocoder/common/mcp_tools/__init__.py +62 -0
- autocoder/common/{mcp_tools.py → mcp_tools/executor.py} +49 -40
- autocoder/common/{mcp_hub.py → mcp_tools/hub.py} +42 -68
- autocoder/common/{mcp_server_install.py → mcp_tools/installer.py} +16 -28
- autocoder/common/{mcp_server.py → mcp_tools/server.py} +176 -48
- autocoder/common/mcp_tools/test_keyboard_interrupt.py +93 -0
- autocoder/common/mcp_tools/test_mcp_tools.py +391 -0
- autocoder/common/{mcp_server_types.py → mcp_tools/types.py} +121 -48
- autocoder/common/mcp_tools/verify_functionality.py +202 -0
- autocoder/common/model_speed_tester.py +32 -26
- autocoder/common/priority_directory_finder/__init__.py +142 -0
- autocoder/common/priority_directory_finder/examples.py +230 -0
- autocoder/common/priority_directory_finder/finder.py +283 -0
- autocoder/common/priority_directory_finder/models.py +236 -0
- autocoder/common/priority_directory_finder/test_priority_directory_finder.py +431 -0
- autocoder/common/project_scanner/__init__.py +18 -0
- autocoder/common/project_scanner/compat.py +77 -0
- autocoder/common/project_scanner/scanner.py +436 -0
- autocoder/common/project_tracker/__init__.py +27 -0
- autocoder/common/project_tracker/api.py +228 -0
- autocoder/common/project_tracker/demo.py +272 -0
- autocoder/common/project_tracker/tracker.py +487 -0
- autocoder/common/project_tracker/types.py +53 -0
- autocoder/common/pruner/__init__.py +67 -0
- autocoder/common/pruner/agentic_conversation_pruner.py +651 -102
- autocoder/common/pruner/conversation_message_ids_api.py +386 -0
- autocoder/common/pruner/conversation_message_ids_manager.py +347 -0
- autocoder/common/pruner/conversation_message_ids_pruner.py +473 -0
- autocoder/common/pruner/conversation_normalizer.py +347 -0
- autocoder/common/pruner/conversation_pruner.py +26 -6
- autocoder/common/pruner/test_agentic_conversation_pruner.py +554 -112
- autocoder/common/pruner/test_conversation_normalizer.py +502 -0
- autocoder/common/pruner/test_tool_content_detector.py +324 -0
- autocoder/common/pruner/tool_content_detector.py +227 -0
- autocoder/common/pruner/tools/__init__.py +18 -0
- autocoder/common/pruner/tools/query_message_ids.py +264 -0
- autocoder/common/pruner/tools/test_agentic_pruning_logic.py +432 -0
- autocoder/common/pruner/tools/test_message_ids_pruning_only.py +192 -0
- autocoder/common/pull_requests/__init__.py +9 -1
- autocoder/common/pull_requests/utils.py +122 -1
- autocoder/common/rag_manager/rag_manager.py +36 -40
- autocoder/common/rulefiles/__init__.py +53 -1
- autocoder/common/rulefiles/api.py +250 -0
- autocoder/common/rulefiles/core/__init__.py +14 -0
- autocoder/common/rulefiles/core/manager.py +241 -0
- autocoder/common/rulefiles/core/selector.py +805 -0
- autocoder/common/rulefiles/models/__init__.py +20 -0
- autocoder/common/rulefiles/models/index.py +16 -0
- autocoder/common/rulefiles/models/init_rule.py +18 -0
- autocoder/common/rulefiles/models/rule_file.py +18 -0
- autocoder/common/rulefiles/models/rule_relevance.py +14 -0
- autocoder/common/rulefiles/models/summary.py +16 -0
- autocoder/common/rulefiles/test_rulefiles.py +776 -0
- autocoder/common/rulefiles/utils/__init__.py +34 -0
- autocoder/common/rulefiles/utils/monitor.py +86 -0
- autocoder/common/rulefiles/utils/parser.py +230 -0
- autocoder/common/save_formatted_log.py +67 -10
- autocoder/common/search_replace.py +8 -1
- autocoder/common/search_replace_patch/__init__.py +24 -0
- autocoder/common/search_replace_patch/base.py +115 -0
- autocoder/common/search_replace_patch/manager.py +248 -0
- autocoder/common/search_replace_patch/patch_replacer.py +304 -0
- autocoder/common/search_replace_patch/similarity_replacer.py +306 -0
- autocoder/common/search_replace_patch/string_replacer.py +181 -0
- autocoder/common/search_replace_patch/tests/__init__.py +3 -0
- autocoder/common/search_replace_patch/tests/run_tests.py +126 -0
- autocoder/common/search_replace_patch/tests/test_base.py +188 -0
- autocoder/common/search_replace_patch/tests/test_empty_line_insert.py +233 -0
- autocoder/common/search_replace_patch/tests/test_integration.py +389 -0
- autocoder/common/search_replace_patch/tests/test_manager.py +351 -0
- autocoder/common/search_replace_patch/tests/test_patch_replacer.py +316 -0
- autocoder/common/search_replace_patch/tests/test_regex_replacer.py +306 -0
- autocoder/common/search_replace_patch/tests/test_similarity_replacer.py +384 -0
- autocoder/common/shell_commands/__init__.py +197 -0
- autocoder/common/shell_commands/background_process_notifier.py +346 -0
- autocoder/common/shell_commands/command_executor.py +1127 -0
- autocoder/common/shell_commands/error_recovery.py +541 -0
- autocoder/common/shell_commands/exceptions.py +120 -0
- autocoder/common/shell_commands/interactive_executor.py +476 -0
- autocoder/common/shell_commands/interactive_pexpect_process.py +623 -0
- autocoder/common/shell_commands/interactive_process.py +744 -0
- autocoder/common/shell_commands/interactive_session_manager.py +1014 -0
- autocoder/common/shell_commands/monitoring.py +529 -0
- autocoder/common/shell_commands/process_cleanup.py +386 -0
- autocoder/common/shell_commands/process_manager.py +606 -0
- autocoder/common/shell_commands/test_interactive_pexpect_process.py +281 -0
- autocoder/common/shell_commands/tests/__init__.py +6 -0
- autocoder/common/shell_commands/tests/conftest.py +118 -0
- autocoder/common/shell_commands/tests/test_background_process_notifier.py +703 -0
- autocoder/common/shell_commands/tests/test_command_executor.py +448 -0
- autocoder/common/shell_commands/tests/test_error_recovery.py +305 -0
- autocoder/common/shell_commands/tests/test_exceptions.py +299 -0
- autocoder/common/shell_commands/tests/test_execute_batch.py +588 -0
- autocoder/common/shell_commands/tests/test_indented_batch_commands.py +244 -0
- autocoder/common/shell_commands/tests/test_integration.py +664 -0
- autocoder/common/shell_commands/tests/test_monitoring.py +546 -0
- autocoder/common/shell_commands/tests/test_performance.py +632 -0
- autocoder/common/shell_commands/tests/test_process_cleanup.py +397 -0
- autocoder/common/shell_commands/tests/test_process_manager.py +606 -0
- autocoder/common/shell_commands/tests/test_timeout_config.py +343 -0
- autocoder/common/shell_commands/tests/test_timeout_manager.py +520 -0
- autocoder/common/shell_commands/timeout_config.py +315 -0
- autocoder/common/shell_commands/timeout_manager.py +352 -0
- autocoder/common/terminal_paste/__init__.py +14 -0
- autocoder/common/terminal_paste/demo.py +145 -0
- autocoder/common/terminal_paste/demo_paste_functionality.py +95 -0
- autocoder/common/terminal_paste/paste_handler.py +200 -0
- autocoder/common/terminal_paste/paste_manager.py +118 -0
- autocoder/common/terminal_paste/tests/__init__.py +1 -0
- autocoder/common/terminal_paste/tests/test_paste_handler.py +182 -0
- autocoder/common/terminal_paste/tests/test_paste_manager.py +126 -0
- autocoder/common/terminal_paste/utils.py +163 -0
- autocoder/common/test_autocoder_args.py +232 -0
- autocoder/common/test_env_manager.py +173 -0
- autocoder/common/test_env_manager_integration.py +159 -0
- autocoder/common/text_similarity/__init__.py +9 -0
- autocoder/common/text_similarity/demo.py +216 -0
- autocoder/common/text_similarity/examples.py +266 -0
- autocoder/common/text_similarity/test_text_similarity.py +306 -0
- autocoder/common/text_similarity/text_similarity.py +194 -0
- autocoder/common/text_similarity/utils.py +125 -0
- autocoder/common/todos/__init__.py +61 -0
- autocoder/common/todos/cache/__init__.py +16 -0
- autocoder/common/todos/cache/base_cache.py +89 -0
- autocoder/common/todos/cache/cache_manager.py +228 -0
- autocoder/common/todos/cache/memory_cache.py +225 -0
- autocoder/common/todos/config.py +155 -0
- autocoder/common/todos/exceptions.py +35 -0
- autocoder/common/todos/get_todo_manager.py +161 -0
- autocoder/common/todos/manager.py +537 -0
- autocoder/common/todos/models.py +239 -0
- autocoder/common/todos/storage/__init__.py +14 -0
- autocoder/common/todos/storage/base_storage.py +76 -0
- autocoder/common/todos/storage/file_storage.py +278 -0
- autocoder/common/tokens/counter.py +24 -2
- autocoder/common/tools_manager/__init__.py +17 -0
- autocoder/common/tools_manager/examples.py +162 -0
- autocoder/common/tools_manager/manager.py +385 -0
- autocoder/common/tools_manager/models.py +39 -0
- autocoder/common/tools_manager/test_tools_manager.py +303 -0
- autocoder/common/tools_manager/utils.py +191 -0
- autocoder/common/v2/agent/agentic_callbacks.py +270 -0
- autocoder/common/v2/agent/agentic_edit.py +2699 -1856
- autocoder/common/v2/agent/agentic_edit_change_manager.py +474 -0
- autocoder/common/v2/agent/agentic_edit_tools/__init__.py +35 -1
- autocoder/common/v2/agent/agentic_edit_tools/ac_mod_list_tool_resolver.py +279 -0
- autocoder/common/v2/agent/agentic_edit_tools/ac_mod_write_tool_resolver.py +10 -1
- autocoder/common/v2/agent/agentic_edit_tools/background_task_tool_resolver.py +1167 -0
- autocoder/common/v2/agent/agentic_edit_tools/base_tool_resolver.py +2 -2
- autocoder/common/v2/agent/agentic_edit_tools/conversation_message_ids_read_tool_resolver.py +214 -0
- autocoder/common/v2/agent/agentic_edit_tools/conversation_message_ids_write_tool_resolver.py +299 -0
- autocoder/common/v2/agent/agentic_edit_tools/count_tokens_tool_resolver.py +290 -0
- autocoder/common/v2/agent/agentic_edit_tools/execute_command_tool_resolver.py +564 -29
- autocoder/common/v2/agent/agentic_edit_tools/execute_workflow_tool_resolver.py +485 -0
- autocoder/common/v2/agent/agentic_edit_tools/extract_to_text_tool_resolver.py +225 -0
- autocoder/common/v2/agent/agentic_edit_tools/lint_report.py +79 -0
- autocoder/common/v2/agent/agentic_edit_tools/linter_config_models.py +343 -0
- autocoder/common/v2/agent/agentic_edit_tools/linter_enabled_tool_resolver.py +189 -0
- autocoder/common/v2/agent/agentic_edit_tools/list_files_tool_resolver.py +169 -101
- autocoder/common/v2/agent/agentic_edit_tools/load_extra_document_tool_resolver.py +349 -0
- autocoder/common/v2/agent/agentic_edit_tools/read_file_tool_resolver.py +243 -50
- autocoder/common/v2/agent/agentic_edit_tools/replace_in_file_tool_resolver.py +667 -147
- autocoder/common/v2/agent/agentic_edit_tools/run_named_subagents_tool_resolver.py +691 -0
- autocoder/common/v2/agent/agentic_edit_tools/search_files_tool_resolver.py +410 -86
- autocoder/common/v2/agent/agentic_edit_tools/session_interactive_tool_resolver.py +115 -0
- autocoder/common/v2/agent/agentic_edit_tools/session_start_tool_resolver.py +190 -0
- autocoder/common/v2/agent/agentic_edit_tools/session_stop_tool_resolver.py +76 -0
- autocoder/common/v2/agent/agentic_edit_tools/test_write_to_file_tool_resolver.py +207 -192
- autocoder/common/v2/agent/agentic_edit_tools/todo_read_tool_resolver.py +80 -63
- autocoder/common/v2/agent/agentic_edit_tools/todo_write_tool_resolver.py +237 -233
- autocoder/common/v2/agent/agentic_edit_tools/use_mcp_tool_resolver.py +2 -2
- autocoder/common/v2/agent/agentic_edit_tools/web_crawl_tool_resolver.py +557 -0
- autocoder/common/v2/agent/agentic_edit_tools/web_search_tool_resolver.py +600 -0
- autocoder/common/v2/agent/agentic_edit_tools/write_to_file_tool_resolver.py +56 -121
- autocoder/common/v2/agent/agentic_edit_types.py +343 -9
- autocoder/common/v2/agent/runner/__init__.py +3 -3
- autocoder/common/v2/agent/runner/base_runner.py +12 -26
- autocoder/common/v2/agent/runner/{event_runner.py → file_based_event_runner.py} +3 -2
- autocoder/common/v2/agent/runner/sdk_runner.py +150 -8
- autocoder/common/v2/agent/runner/terminal_runner.py +170 -57
- autocoder/common/v2/agent/runner/tool_display.py +557 -159
- autocoder/common/v2/agent/test_agentic_callbacks.py +265 -0
- autocoder/common/v2/agent/test_agentic_edit.py +194 -0
- autocoder/common/v2/agent/tool_caller/__init__.py +24 -0
- autocoder/common/v2/agent/tool_caller/default_tool_resolver_map.py +135 -0
- autocoder/common/v2/agent/tool_caller/integration_test.py +172 -0
- autocoder/common/v2/agent/tool_caller/plugins/__init__.py +14 -0
- autocoder/common/v2/agent/tool_caller/plugins/base_plugin.py +126 -0
- autocoder/common/v2/agent/tool_caller/plugins/examples/__init__.py +13 -0
- autocoder/common/v2/agent/tool_caller/plugins/examples/logging_plugin.py +164 -0
- autocoder/common/v2/agent/tool_caller/plugins/examples/security_filter_plugin.py +198 -0
- autocoder/common/v2/agent/tool_caller/plugins/plugin_interface.py +141 -0
- autocoder/common/v2/agent/tool_caller/test_tool_caller.py +278 -0
- autocoder/common/v2/agent/tool_caller/tool_call_plugin_manager.py +331 -0
- autocoder/common/v2/agent/tool_caller/tool_caller.py +337 -0
- autocoder/common/v2/agent/tool_caller/usage_example.py +193 -0
- autocoder/common/v2/code_agentic_editblock_manager.py +4 -4
- autocoder/common/v2/code_auto_generate.py +136 -78
- autocoder/common/v2/code_auto_generate_diff.py +135 -79
- autocoder/common/v2/code_auto_generate_editblock.py +174 -99
- autocoder/common/v2/code_auto_generate_strict_diff.py +151 -71
- autocoder/common/v2/code_auto_merge.py +1 -1
- autocoder/common/v2/code_auto_merge_editblock.py +13 -1
- autocoder/common/v2/code_diff_manager.py +3 -3
- autocoder/common/v2/code_editblock_manager.py +4 -14
- autocoder/common/v2/code_manager.py +1 -1
- autocoder/common/v2/code_strict_diff_manager.py +2 -2
- autocoder/common/wrap_llm_hint/__init__.py +10 -0
- autocoder/common/wrap_llm_hint/test_wrap_llm_hint.py +1067 -0
- autocoder/common/wrap_llm_hint/utils.py +432 -0
- autocoder/common/wrap_llm_hint/wrap_llm_hint.py +323 -0
- autocoder/completer/__init__.py +8 -0
- autocoder/completer/command_completer_v2.py +1051 -0
- autocoder/default_project/__init__.py +501 -0
- autocoder/dispacher/__init__.py +4 -12
- autocoder/dispacher/actions/action.py +165 -7
- autocoder/dispacher/actions/plugins/action_regex_project.py +2 -2
- autocoder/index/entry.py +116 -124
- autocoder/{agent → index/filter}/agentic_filter.py +322 -333
- autocoder/index/filter/normal_filter.py +5 -11
- autocoder/index/filter/quick_filter.py +1 -1
- autocoder/index/index.py +36 -9
- autocoder/index/tests/__init__.py +1 -0
- autocoder/index/tests/run_tests.py +195 -0
- autocoder/index/tests/test_entry.py +303 -0
- autocoder/index/tests/test_index_manager.py +314 -0
- autocoder/index/tests/test_module_integration.py +300 -0
- autocoder/index/tests/test_symbols_utils.py +183 -0
- autocoder/inner/__init__.py +4 -0
- autocoder/inner/agentic.py +932 -0
- autocoder/inner/async_command_handler.py +992 -0
- autocoder/inner/conversation_command_handlers.py +623 -0
- autocoder/inner/merge_command_handler.py +213 -0
- autocoder/inner/queue_command_handler.py +684 -0
- autocoder/models.py +95 -266
- autocoder/plugins/git_helper_plugin.py +31 -29
- autocoder/plugins/token_helper_plugin.py +65 -46
- autocoder/pyproject/__init__.py +32 -29
- autocoder/rag/agentic_rag.py +215 -75
- autocoder/rag/cache/simple_cache.py +1 -2
- autocoder/rag/loaders/image_loader.py +1 -1
- autocoder/rag/long_context_rag.py +42 -26
- autocoder/rag/qa_conversation_strategy.py +1 -1
- autocoder/rag/terminal/__init__.py +17 -0
- autocoder/rag/terminal/args.py +581 -0
- autocoder/rag/terminal/bootstrap.py +61 -0
- autocoder/rag/terminal/command_handlers.py +653 -0
- autocoder/rag/terminal/formatters/__init__.py +20 -0
- autocoder/rag/terminal/formatters/base.py +70 -0
- autocoder/rag/terminal/formatters/json_format.py +66 -0
- autocoder/rag/terminal/formatters/stream_json.py +95 -0
- autocoder/rag/terminal/formatters/text.py +28 -0
- autocoder/rag/terminal/init.py +120 -0
- autocoder/rag/terminal/utils.py +106 -0
- autocoder/rag/test_agentic_rag.py +389 -0
- autocoder/rag/test_doc_filter.py +3 -3
- autocoder/rag/test_long_context_rag.py +1 -1
- autocoder/rag/test_token_limiter.py +517 -10
- autocoder/rag/token_counter.py +3 -0
- autocoder/rag/token_limiter.py +19 -15
- autocoder/rag/tools/__init__.py +26 -2
- autocoder/rag/tools/bochaai_example.py +343 -0
- autocoder/rag/tools/bochaai_sdk.py +541 -0
- autocoder/rag/tools/metaso_example.py +268 -0
- autocoder/rag/tools/metaso_sdk.py +417 -0
- autocoder/rag/tools/recall_tool.py +28 -7
- autocoder/rag/tools/run_integration_tests.py +204 -0
- autocoder/rag/tools/test_all_providers.py +318 -0
- autocoder/rag/tools/test_bochaai_integration.py +482 -0
- autocoder/rag/tools/test_final_integration.py +215 -0
- autocoder/rag/tools/test_metaso_integration.py +424 -0
- autocoder/rag/tools/test_metaso_real.py +171 -0
- autocoder/rag/tools/test_web_crawl_tool.py +639 -0
- autocoder/rag/tools/test_web_search_tool.py +509 -0
- autocoder/rag/tools/todo_read_tool.py +202 -0
- autocoder/rag/tools/todo_write_tool.py +412 -0
- autocoder/rag/tools/web_crawl_tool.py +634 -0
- autocoder/rag/tools/web_search_tool.py +558 -0
- autocoder/rag/tools/web_tools_example.py +119 -0
- autocoder/rag/types.py +16 -0
- autocoder/rag/variable_holder.py +4 -2
- autocoder/rags.py +86 -79
- autocoder/regexproject/__init__.py +23 -21
- autocoder/sdk/__init__.py +46 -190
- autocoder/sdk/api.py +370 -0
- autocoder/sdk/async_runner/__init__.py +26 -0
- autocoder/sdk/async_runner/async_executor.py +650 -0
- autocoder/sdk/async_runner/async_handler.py +356 -0
- autocoder/sdk/async_runner/markdown_processor.py +595 -0
- autocoder/sdk/async_runner/task_metadata.py +284 -0
- autocoder/sdk/async_runner/worktree_manager.py +438 -0
- autocoder/sdk/cli/__init__.py +2 -5
- autocoder/sdk/cli/formatters.py +28 -204
- autocoder/sdk/cli/handlers.py +77 -44
- autocoder/sdk/cli/main.py +154 -171
- autocoder/sdk/cli/options.py +95 -22
- autocoder/sdk/constants.py +139 -51
- autocoder/sdk/core/auto_coder_core.py +484 -109
- autocoder/sdk/core/bridge.py +297 -115
- autocoder/sdk/exceptions.py +18 -12
- autocoder/sdk/formatters/__init__.py +19 -0
- autocoder/sdk/formatters/input.py +64 -0
- autocoder/sdk/formatters/output.py +247 -0
- autocoder/sdk/formatters/stream.py +54 -0
- autocoder/sdk/models/__init__.py +6 -5
- autocoder/sdk/models/options.py +55 -18
- autocoder/sdk/utils/formatters.py +27 -195
- autocoder/suffixproject/__init__.py +28 -25
- autocoder/terminal/__init__.py +14 -0
- autocoder/terminal/app.py +454 -0
- autocoder/terminal/args.py +32 -0
- autocoder/terminal/bootstrap.py +178 -0
- autocoder/terminal/command_processor.py +521 -0
- autocoder/terminal/command_registry.py +57 -0
- autocoder/terminal/help.py +97 -0
- autocoder/terminal/tasks/__init__.py +5 -0
- autocoder/terminal/tasks/background.py +77 -0
- autocoder/terminal/tasks/task_event.py +70 -0
- autocoder/terminal/ui/__init__.py +13 -0
- autocoder/terminal/ui/completer.py +268 -0
- autocoder/terminal/ui/keybindings.py +75 -0
- autocoder/terminal/ui/session.py +41 -0
- autocoder/terminal/ui/toolbar.py +64 -0
- autocoder/terminal/utils/__init__.py +13 -0
- autocoder/terminal/utils/errors.py +18 -0
- autocoder/terminal/utils/paths.py +19 -0
- autocoder/terminal/utils/shell.py +43 -0
- autocoder/terminal_v3/__init__.py +10 -0
- autocoder/terminal_v3/app.py +201 -0
- autocoder/terminal_v3/handlers/__init__.py +5 -0
- autocoder/terminal_v3/handlers/command_handler.py +131 -0
- autocoder/terminal_v3/models/__init__.py +6 -0
- autocoder/terminal_v3/models/conversation_buffer.py +214 -0
- autocoder/terminal_v3/models/message.py +50 -0
- autocoder/terminal_v3/models/tool_display.py +247 -0
- autocoder/terminal_v3/ui/__init__.py +7 -0
- autocoder/terminal_v3/ui/keybindings.py +56 -0
- autocoder/terminal_v3/ui/layout.py +141 -0
- autocoder/terminal_v3/ui/styles.py +43 -0
- autocoder/tsproject/__init__.py +23 -23
- autocoder/utils/auto_coder_utils/chat_stream_out.py +1 -1
- autocoder/utils/llms.py +88 -80
- autocoder/utils/math_utils.py +101 -0
- autocoder/utils/model_provider_selector.py +16 -4
- autocoder/utils/operate_config_api.py +33 -5
- autocoder/utils/thread_utils.py +2 -2
- autocoder/version.py +4 -2
- autocoder/workflow_agents/__init__.py +84 -0
- autocoder/workflow_agents/agent.py +143 -0
- autocoder/workflow_agents/exceptions.py +573 -0
- autocoder/workflow_agents/executor.py +489 -0
- autocoder/workflow_agents/loader.py +737 -0
- autocoder/workflow_agents/runner.py +267 -0
- autocoder/workflow_agents/types.py +172 -0
- autocoder/workflow_agents/utils.py +434 -0
- autocoder/workflow_agents/workflow_manager.py +211 -0
- auto_coder-1.0.0.dist-info/METADATA +0 -396
- auto_coder-1.0.0.dist-info/RECORD +0 -442
- auto_coder-1.0.0.dist-info/licenses/LICENSE +0 -201
- autocoder/auto_coder_server.py +0 -672
- autocoder/benchmark.py +0 -138
- autocoder/common/ac_style_command_parser/example.py +0 -7
- autocoder/common/cleaner.py +0 -31
- autocoder/common/command_completer_v2.py +0 -615
- autocoder/common/context_pruner.py +0 -477
- autocoder/common/conversation_pruner.py +0 -132
- autocoder/common/directory_cache/__init__.py +0 -1
- autocoder/common/directory_cache/cache.py +0 -192
- autocoder/common/directory_cache/test_cache.py +0 -190
- autocoder/common/file_checkpoint/examples.py +0 -217
- autocoder/common/llm_friendly_package_example.py +0 -138
- autocoder/common/llm_friendly_package_test.py +0 -63
- autocoder/common/pull_requests/test_module.py +0 -1
- autocoder/common/rulefiles/autocoderrules_utils.py +0 -484
- autocoder/common/text.py +0 -30
- autocoder/common/v2/agent/agentic_edit_tools/list_package_info_tool_resolver.py +0 -42
- autocoder/common/v2/agent/agentic_edit_tools/test_execute_command_tool_resolver.py +0 -70
- autocoder/common/v2/agent/agentic_edit_tools/test_search_files_tool_resolver.py +0 -163
- autocoder/common/v2/agent/agentic_tool_display.py +0 -183
- autocoder/plugins/dynamic_completion_example.py +0 -148
- autocoder/plugins/sample_plugin.py +0 -160
- autocoder/sdk/cli/__main__.py +0 -26
- autocoder/sdk/cli/completion_wrapper.py +0 -38
- autocoder/sdk/cli/install_completion.py +0 -301
- autocoder/sdk/models/messages.py +0 -209
- autocoder/sdk/session/__init__.py +0 -32
- autocoder/sdk/session/session.py +0 -106
- autocoder/sdk/session/session_manager.py +0 -56
- {auto_coder-1.0.0.dist-info → auto_coder-2.0.0.dist-info}/top_level.txt +0 -0
- /autocoder/{sdk/example.py → common/agent_query_queue/__init__.py} +0 -0
autocoder/auto_coder_rag.py
CHANGED
|
@@ -1,1093 +1,20 @@
|
|
|
1
|
-
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
import argparse
|
|
5
|
-
from typing import Optional, List
|
|
6
|
-
import byzerllm
|
|
7
|
-
from autocoder.rag.api_server import serve, ServerArgs
|
|
8
|
-
from autocoder.rag.rag_entry import RAGFactory
|
|
9
|
-
from autocoder.rag.agentic_rag import AgenticRAG
|
|
10
|
-
from autocoder.rag.long_context_rag import LongContextRAG
|
|
11
|
-
from autocoder.rag.llm_wrapper import LLWrapper
|
|
12
|
-
from autocoder.common import AutoCoderArgs
|
|
13
|
-
from autocoder.lang import lang_desc
|
|
14
|
-
import locale
|
|
15
|
-
from autocoder.chat_auto_coder_lang import get_message
|
|
16
|
-
from prompt_toolkit import prompt
|
|
17
|
-
from prompt_toolkit.shortcuts import radiolist_dialog
|
|
18
|
-
from prompt_toolkit.formatted_text import HTML
|
|
19
|
-
import platform
|
|
20
|
-
import subprocess
|
|
21
|
-
import shlex
|
|
22
|
-
from rich.console import Console
|
|
23
|
-
from rich.table import Table
|
|
24
|
-
import os
|
|
25
|
-
import hashlib
|
|
26
|
-
from loguru import logger
|
|
27
|
-
import sys
|
|
28
|
-
import asyncio
|
|
29
|
-
from datetime import datetime
|
|
30
|
-
from autocoder.common.file_monitor.monitor import FileMonitor
|
|
31
|
-
from autocoder.common.rulefiles.autocoderrules_utils import get_rules
|
|
32
|
-
|
|
33
|
-
from autocoder.rag.utils import process_file_local
|
|
34
|
-
import pkg_resources
|
|
35
|
-
from autocoder.rag.token_counter import TokenCounter
|
|
36
|
-
from autocoder.rag.types import RAGServiceInfo
|
|
37
|
-
from autocoder.version import __version__
|
|
38
|
-
from autocoder.rags import get_rag_config
|
|
39
|
-
|
|
40
|
-
if platform.system() == "Windows":
|
|
41
|
-
from colorama import init
|
|
42
|
-
|
|
43
|
-
init()
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
def generate_unique_name_from_path(path: str) -> str:
|
|
47
|
-
"""
|
|
48
|
-
Generate a unique name (MD5 hash) from a path after normalizing it.
|
|
49
|
-
For Linux/Unix systems, trailing path separators are removed.
|
|
50
|
-
"""
|
|
51
|
-
if not path:
|
|
52
|
-
return ""
|
|
53
|
-
|
|
54
|
-
# Normalize the path (resolve absolute path and remove trailing separators)
|
|
55
|
-
normalized_path = os.path.normpath(os.path.abspath(path))
|
|
56
|
-
|
|
57
|
-
# Generate MD5 hash from the normalized path
|
|
58
|
-
return hashlib.md5(normalized_path.encode("utf-8")).hexdigest()
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
def initialize_system(args):
|
|
62
|
-
if args.product_mode == "lite":
|
|
63
|
-
return
|
|
64
|
-
|
|
65
|
-
print(f"\n\033[1;34m{get_message('initializing')}\033[0m")
|
|
66
|
-
|
|
67
|
-
def print_status(message, status):
|
|
68
|
-
if status == "success":
|
|
69
|
-
print(f"\033[32m✓ {message}\033[0m")
|
|
70
|
-
elif status == "warning":
|
|
71
|
-
print(f"\033[33m! {message}\033[0m")
|
|
72
|
-
elif status == "error":
|
|
73
|
-
print(f"\033[31m✗ {message}\033[0m")
|
|
74
|
-
else:
|
|
75
|
-
print(f" {message}")
|
|
76
|
-
|
|
77
|
-
# Check if Ray is running
|
|
78
|
-
print_status(get_message("checking_ray"), "")
|
|
79
|
-
ray_status = subprocess.run(["ray", "status"], capture_output=True, text=True)
|
|
80
|
-
if ray_status.returncode != 0:
|
|
81
|
-
print_status(get_message("ray_not_running"), "warning")
|
|
82
|
-
try:
|
|
83
|
-
subprocess.run(["ray", "start", "--head"], check=True)
|
|
84
|
-
print_status(get_message("ray_start_success"), "success")
|
|
85
|
-
except subprocess.CalledProcessError:
|
|
86
|
-
print_status(get_message("ray_start_fail"), "error")
|
|
87
|
-
return
|
|
88
|
-
else:
|
|
89
|
-
print_status(get_message("ray_running"), "success")
|
|
90
|
-
|
|
91
|
-
# Check if deepseek_chat model is available
|
|
92
|
-
print_status(get_message("checking_model"), "")
|
|
93
|
-
try:
|
|
94
|
-
result = subprocess.run(
|
|
95
|
-
["easy-byzerllm", "chat", "v3_chat", "你好"],
|
|
96
|
-
capture_output=True,
|
|
97
|
-
text=True,
|
|
98
|
-
timeout=30,
|
|
99
|
-
)
|
|
100
|
-
if result.returncode == 0:
|
|
101
|
-
print_status(get_message("model_available"), "success")
|
|
102
|
-
print_status(get_message("init_complete_final"), "success")
|
|
103
|
-
return
|
|
104
|
-
except subprocess.TimeoutExpired:
|
|
105
|
-
print_status(get_message("model_timeout"), "error")
|
|
106
|
-
except subprocess.CalledProcessError:
|
|
107
|
-
print_status(get_message("model_error"), "error")
|
|
108
|
-
|
|
109
|
-
# If deepseek_chat is not available, prompt user to choose a provider
|
|
110
|
-
print_status(get_message("model_not_available"), "warning")
|
|
111
|
-
choice = radiolist_dialog(
|
|
112
|
-
title=get_message("provider_selection"),
|
|
113
|
-
text=get_message("provider_selection"),
|
|
114
|
-
values=[
|
|
115
|
-
("1", "Deepseek官方(https://www.deepseek.com/)"),
|
|
116
|
-
],
|
|
117
|
-
).run()
|
|
118
|
-
|
|
119
|
-
if choice is None:
|
|
120
|
-
print_status(get_message("no_provider"), "error")
|
|
121
|
-
return
|
|
122
|
-
|
|
123
|
-
api_key = prompt(HTML(f"<b>{get_message('enter_api_key')} </b>"))
|
|
124
|
-
|
|
125
|
-
if choice == "1":
|
|
126
|
-
print_status(get_message("deploying_model").format("Deepseek官方"), "")
|
|
127
|
-
|
|
128
|
-
deploy_cmd = [
|
|
129
|
-
"byzerllm",
|
|
130
|
-
"deploy",
|
|
131
|
-
"--pretrained_model_type",
|
|
132
|
-
"saas/openai",
|
|
133
|
-
"--cpus_per_worker",
|
|
134
|
-
"0.001",
|
|
135
|
-
"--gpus_per_worker",
|
|
136
|
-
"0",
|
|
137
|
-
"--worker_concurrency",
|
|
138
|
-
"1000",
|
|
139
|
-
"--num_workers",
|
|
140
|
-
"1",
|
|
141
|
-
"--infer_params",
|
|
142
|
-
f"saas.base_url=https://api.deepseek.com/v1 saas.api_key={api_key} saas.model=deepseek-chat",
|
|
143
|
-
"--model",
|
|
144
|
-
"v3_chat",
|
|
145
|
-
]
|
|
146
|
-
|
|
147
|
-
try:
|
|
148
|
-
subprocess.run(deploy_cmd, check=True)
|
|
149
|
-
print_status(get_message("deploy_complete"), "success")
|
|
150
|
-
except subprocess.CalledProcessError:
|
|
151
|
-
print_status(get_message("deploy_fail"), "error")
|
|
152
|
-
return
|
|
153
|
-
|
|
154
|
-
# Validate the deployment
|
|
155
|
-
print_status(get_message("validating_deploy"), "")
|
|
156
|
-
try:
|
|
157
|
-
validation_result = subprocess.run(
|
|
158
|
-
["easy-byzerllm", "chat", "v3_chat", "你好"],
|
|
159
|
-
capture_output=True,
|
|
160
|
-
text=True,
|
|
161
|
-
timeout=30,
|
|
162
|
-
check=True,
|
|
163
|
-
)
|
|
164
|
-
print_status(get_message("validation_success"), "success")
|
|
165
|
-
except (subprocess.TimeoutExpired, subprocess.CalledProcessError):
|
|
166
|
-
print_status(get_message("validation_fail"), "error")
|
|
167
|
-
print_status(get_message("manual_start"), "warning")
|
|
168
|
-
print_status("easy-byzerllm chat v3_chat 你好", "")
|
|
169
|
-
|
|
170
|
-
print_status(get_message("init_complete_final"), "success")
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
def merge_args_with_config(args, config, arg_class, parser):
|
|
174
|
-
"""
|
|
175
|
-
合并命令行参数和配置文件参数,优先级如下:
|
|
176
|
-
1. 命令行参数非默认值,以命令行为准
|
|
177
|
-
2. 命令行参数为默认值,且配置文件有值,以配置文件为准
|
|
178
|
-
3. 否则用命令行参数
|
|
179
|
-
"""
|
|
180
|
-
merged = {}
|
|
181
|
-
for arg in vars(arg_class()):
|
|
182
|
-
# 获取默认值
|
|
183
|
-
try:
|
|
184
|
-
default = parser.get_default(arg)
|
|
185
|
-
except Exception:
|
|
186
|
-
default = None
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
if not hasattr(args,arg) and arg not in config:
|
|
190
|
-
continue
|
|
191
|
-
|
|
192
|
-
cli_value = getattr(args, arg, None)
|
|
193
|
-
config_value = config.get(arg, None)
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
# 判断优先级
|
|
197
|
-
if cli_value != default:
|
|
198
|
-
merged[arg] = cli_value
|
|
199
|
-
elif config_value is not None:
|
|
200
|
-
merged[arg] = config_value
|
|
201
|
-
else:
|
|
202
|
-
merged[arg] = cli_value
|
|
203
|
-
|
|
204
|
-
return arg_class(**merged)
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
def main(input_args: Optional[List[str]] = None):
|
|
208
|
-
print(
|
|
209
|
-
f"""
|
|
210
|
-
\033[1;32m
|
|
211
|
-
_ _ __ __ _ _ _ _____ _____ _______ ____ _ ____
|
|
212
|
-
| | | | | \/ | | \ | | / \|_ _|_ _\ \ / / ____| | _ \ / \ / ___|
|
|
213
|
-
| | | | | |\/| |_____| \| | / _ \ | | | | \ \ / /| _| | |_) | / _ \| | _
|
|
214
|
-
| |___| |___| | | |_____| |\ |/ ___ \| | | | \ V / | |___ | _ < / ___ \ |_| |
|
|
215
|
-
|_____|_____|_| |_| |_| \_/_/ \_\_| |___| \_/ |_____| |_| \_\/_/ \_\____|
|
|
216
|
-
v{__version__}
|
|
217
|
-
\033[0m"""
|
|
218
|
-
)
|
|
219
|
-
|
|
220
|
-
try:
|
|
221
|
-
tokenizer_path = pkg_resources.resource_filename(
|
|
222
|
-
"autocoder", "data/tokenizer.json"
|
|
223
|
-
)
|
|
224
|
-
except FileNotFoundError:
|
|
225
|
-
tokenizer_path = None
|
|
226
|
-
|
|
227
|
-
system_lang, _ = locale.getdefaultlocale()
|
|
228
|
-
lang = "zh" if system_lang and system_lang.startswith("zh") else "en"
|
|
229
|
-
desc = lang_desc[lang]
|
|
230
|
-
parser = argparse.ArgumentParser(description="Auto Coder RAG Server")
|
|
231
|
-
subparsers = parser.add_subparsers(dest="command", help="Available commands")
|
|
232
|
-
|
|
233
|
-
# Build hybrid index command
|
|
234
|
-
build_index_parser = subparsers.add_parser(
|
|
235
|
-
"build_hybrid_index", help="Build hybrid index for RAG"
|
|
236
|
-
)
|
|
237
|
-
|
|
238
|
-
build_index_parser.add_argument(
|
|
239
|
-
"--rag_storage_type",
|
|
240
|
-
type=str,
|
|
241
|
-
default="duckdb",
|
|
242
|
-
help="The storage type of the RAG, duckdb or byzer-storage",
|
|
243
|
-
)
|
|
244
|
-
|
|
245
|
-
build_index_parser.add_argument(
|
|
246
|
-
"--rag_index_build_workers",
|
|
247
|
-
type=int,
|
|
248
|
-
default=5,
|
|
249
|
-
help="The number of workers to build the RAG index",
|
|
250
|
-
)
|
|
251
|
-
|
|
252
|
-
build_index_parser.add_argument(
|
|
253
|
-
"--quick", action="store_true", help="Skip system initialization"
|
|
254
|
-
)
|
|
255
|
-
build_index_parser.add_argument("--file", default="", help=desc["file"])
|
|
256
|
-
build_index_parser.add_argument(
|
|
257
|
-
"--model", default="v3_chat", help=desc["model"]
|
|
258
|
-
)
|
|
259
|
-
|
|
260
|
-
build_index_parser.add_argument(
|
|
261
|
-
"--on_ray", action="store_true", help="Run on Ray"
|
|
262
|
-
)
|
|
263
|
-
|
|
264
|
-
build_index_parser.add_argument(
|
|
265
|
-
"--index_model", default="", help=desc["index_model"]
|
|
266
|
-
)
|
|
267
|
-
build_index_parser.add_argument("--emb_model", default="", help=desc["emb_model"])
|
|
268
|
-
build_index_parser.add_argument(
|
|
269
|
-
"--ray_address", default="auto", help=desc["ray_address"]
|
|
270
|
-
)
|
|
271
|
-
build_index_parser.add_argument(
|
|
272
|
-
"--required_exts", default="", help=desc["doc_build_parse_required_exts"]
|
|
273
|
-
)
|
|
274
|
-
build_index_parser.add_argument(
|
|
275
|
-
"--source_dir", default=".", help="Source directory path"
|
|
276
|
-
)
|
|
277
|
-
build_index_parser.add_argument(
|
|
278
|
-
"--tokenizer_path", default=tokenizer_path, help="Path to tokenizer file"
|
|
279
|
-
)
|
|
280
|
-
build_index_parser.add_argument(
|
|
281
|
-
"--doc_dir", default="", help="Document directory path"
|
|
282
|
-
)
|
|
283
|
-
build_index_parser.add_argument(
|
|
284
|
-
"--enable_hybrid_index", action="store_true", help="Enable hybrid index"
|
|
285
|
-
)
|
|
286
|
-
|
|
287
|
-
# Serve command
|
|
288
|
-
serve_parser = subparsers.add_parser("serve", help="Start the RAG server")
|
|
289
|
-
serve_parser.add_argument(
|
|
290
|
-
"--quick", action="store_true", help="Skip system initialization"
|
|
291
|
-
)
|
|
292
|
-
serve_parser.add_argument("--file", default="", help=desc["file"])
|
|
293
|
-
serve_parser.add_argument("--model", default="v3_chat", help=desc["model"])
|
|
294
|
-
serve_parser.add_argument("--index_model", default="", help=desc["index_model"])
|
|
295
|
-
serve_parser.add_argument("--ray_address", default="auto", help=desc["ray_address"])
|
|
296
|
-
serve_parser.add_argument(
|
|
297
|
-
"--index_filter_workers",
|
|
298
|
-
type=int,
|
|
299
|
-
default=100,
|
|
300
|
-
help=desc["index_filter_workers"],
|
|
301
|
-
)
|
|
302
|
-
serve_parser.add_argument(
|
|
303
|
-
"--index_filter_file_num",
|
|
304
|
-
type=int,
|
|
305
|
-
default=3,
|
|
306
|
-
help=desc["index_filter_file_num"],
|
|
307
|
-
)
|
|
308
|
-
serve_parser.add_argument(
|
|
309
|
-
"--rag_context_window_limit",
|
|
310
|
-
type=int,
|
|
311
|
-
default=56000,
|
|
312
|
-
help="The input context window limit for RAG",
|
|
313
|
-
)
|
|
314
|
-
serve_parser.add_argument(
|
|
315
|
-
"--full_text_ratio",
|
|
316
|
-
type=float,
|
|
317
|
-
default=0.7,
|
|
318
|
-
help="The ratio of full text area in the input context window (0.0 to 1.0)",
|
|
319
|
-
)
|
|
320
|
-
serve_parser.add_argument(
|
|
321
|
-
"--segment_ratio",
|
|
322
|
-
type=float,
|
|
323
|
-
default=0.2,
|
|
324
|
-
help="The ratio of segment area in the input context window (0.0 to 1.0)",
|
|
325
|
-
)
|
|
326
|
-
serve_parser.add_argument(
|
|
327
|
-
"--required_exts", default="", help=desc["doc_build_parse_required_exts"]
|
|
328
|
-
)
|
|
329
|
-
serve_parser.add_argument(
|
|
330
|
-
"--rag_doc_filter_relevance", type=int, default=5, help=""
|
|
331
|
-
)
|
|
332
|
-
serve_parser.add_argument("--source_dir", default=".", help="")
|
|
333
|
-
serve_parser.add_argument("--host", default="", help="")
|
|
334
|
-
serve_parser.add_argument("--port", type=int, default=8000, help="")
|
|
335
|
-
serve_parser.add_argument("--name", default="", help="RAG服务的名称(可选)")
|
|
336
|
-
serve_parser.add_argument("--workers", type=int, default=4, help="")
|
|
337
|
-
serve_parser.add_argument("--uvicorn_log_level", default="info", help="")
|
|
338
|
-
serve_parser.add_argument("--allow_credentials", action="store_true", help="")
|
|
339
|
-
serve_parser.add_argument("--allowed_origins", default=["*"], help="")
|
|
340
|
-
serve_parser.add_argument("--allowed_methods", default=["*"], help="")
|
|
341
|
-
serve_parser.add_argument("--allowed_headers", default=["*"], help="")
|
|
342
|
-
serve_parser.add_argument("--api_key", default="", help="")
|
|
343
|
-
serve_parser.add_argument("--served_model_name", default="", help="")
|
|
344
|
-
serve_parser.add_argument("--prompt_template", default="", help="")
|
|
345
|
-
serve_parser.add_argument("--ssl_keyfile", default="", help="")
|
|
346
|
-
serve_parser.add_argument("--ssl_certfile", default="", help="")
|
|
347
|
-
serve_parser.add_argument("--response_role", default="assistant", help="")
|
|
348
|
-
serve_parser.add_argument(
|
|
349
|
-
"--doc_dir",
|
|
350
|
-
default="",
|
|
351
|
-
help="Document directory path, also used as the root directory for serving static files"
|
|
352
|
-
)
|
|
353
|
-
serve_parser.add_argument("--enable_local_image_host", action="store_true", help=" enable local image host for local Chat app")
|
|
354
|
-
serve_parser.add_argument("--agentic", action="store_true", help="使用 AgenticRAG 而不是 LongContextRAG")
|
|
355
|
-
serve_parser.add_argument("--tokenizer_path", default=tokenizer_path, help="")
|
|
356
|
-
serve_parser.add_argument(
|
|
357
|
-
"--collections", default="", help="Collection name for indexing"
|
|
358
|
-
)
|
|
359
|
-
serve_parser.add_argument(
|
|
360
|
-
"--base_dir",
|
|
361
|
-
default="",
|
|
362
|
-
help="Path where the processed text embeddings were stored",
|
|
363
|
-
)
|
|
364
|
-
serve_parser.add_argument(
|
|
365
|
-
"--monitor_mode",
|
|
366
|
-
action="store_true",
|
|
367
|
-
help="Monitor mode for the doc update",
|
|
368
|
-
)
|
|
369
|
-
serve_parser.add_argument(
|
|
370
|
-
"--max_static_path_length",
|
|
371
|
-
type=int,
|
|
372
|
-
default=3000,
|
|
373
|
-
help="Maximum length allowed for static file paths (larger value to better support Chinese characters)"
|
|
374
|
-
)
|
|
375
|
-
serve_parser.add_argument(
|
|
376
|
-
"--enable_nginx_x_accel",
|
|
377
|
-
action="store_true",
|
|
378
|
-
help="Enable Nginx X-Accel-Redirect for static file serving when behind Nginx"
|
|
379
|
-
)
|
|
380
|
-
serve_parser.add_argument(
|
|
381
|
-
"--disable_auto_window",
|
|
382
|
-
action="store_true",
|
|
383
|
-
help="Disable automatic window adaptation for documents",
|
|
384
|
-
)
|
|
385
|
-
serve_parser.add_argument(
|
|
386
|
-
"--disable_segment_reorder",
|
|
387
|
-
action="store_true",
|
|
388
|
-
help="Disable reordering of document segments after retrieval",
|
|
389
|
-
)
|
|
390
|
-
|
|
391
|
-
serve_parser.add_argument(
|
|
392
|
-
"--disable_inference_enhance",
|
|
393
|
-
action="store_true",
|
|
394
|
-
help="Disable enhanced inference mode",
|
|
395
|
-
)
|
|
396
|
-
serve_parser.add_argument(
|
|
397
|
-
"--inference_deep_thought",
|
|
398
|
-
action="store_true",
|
|
399
|
-
help="Enable deep thought in inference mode",
|
|
400
|
-
)
|
|
401
|
-
serve_parser.add_argument(
|
|
402
|
-
"--inference_slow_without_deep_thought",
|
|
403
|
-
action="store_true",
|
|
404
|
-
help="Enable slow inference without deep thought",
|
|
405
|
-
)
|
|
406
|
-
serve_parser.add_argument(
|
|
407
|
-
"--inference_compute_precision",
|
|
408
|
-
type=int,
|
|
409
|
-
default=64,
|
|
410
|
-
help="The precision of the inference compute",
|
|
411
|
-
)
|
|
412
|
-
|
|
413
|
-
serve_parser.add_argument(
|
|
414
|
-
"--enable_hybrid_index",
|
|
415
|
-
action="store_true",
|
|
416
|
-
help="Enable hybrid index",
|
|
417
|
-
)
|
|
418
|
-
|
|
419
|
-
serve_parser.add_argument(
|
|
420
|
-
"--rag_storage_type",
|
|
421
|
-
type=str,
|
|
422
|
-
default="duckdb",
|
|
423
|
-
help="The storage type of the RAG, duckdb or byzer-storage",
|
|
424
|
-
)
|
|
425
|
-
|
|
426
|
-
serve_parser.add_argument(
|
|
427
|
-
"--hybrid_index_max_output_tokens",
|
|
428
|
-
type=int,
|
|
429
|
-
default=1000000,
|
|
430
|
-
help="The maximum number of tokens in the output. This is only used when enable_hybrid_index is true.",
|
|
431
|
-
)
|
|
432
|
-
|
|
433
|
-
serve_parser.add_argument(
|
|
434
|
-
"--without_contexts",
|
|
435
|
-
action="store_true",
|
|
436
|
-
help="Whether to return responses without contexts. only works when pro plugin is installed",
|
|
437
|
-
)
|
|
438
|
-
|
|
439
|
-
serve_parser.add_argument("--data_cells_max_num",
|
|
440
|
-
type=int,
|
|
441
|
-
default=2000,
|
|
442
|
-
help="Maximum number of data cells to process",
|
|
443
|
-
)
|
|
444
|
-
|
|
445
|
-
serve_parser.add_argument(
|
|
446
|
-
"--product_mode",
|
|
447
|
-
type=str,
|
|
448
|
-
default="pro",
|
|
449
|
-
help="The mode of the auto-coder.rag, lite/pro default is pro",
|
|
450
|
-
)
|
|
451
|
-
serve_parser.add_argument(
|
|
452
|
-
"--lite",
|
|
453
|
-
action="store_true",
|
|
454
|
-
help="Run in lite mode (equivalent to --product_mode=lite)",
|
|
455
|
-
)
|
|
456
|
-
serve_parser.add_argument(
|
|
457
|
-
"--pro",
|
|
458
|
-
action="store_true",
|
|
459
|
-
help="Run in pro mode (equivalent to --product_mode=pro)",
|
|
460
|
-
)
|
|
461
|
-
|
|
462
|
-
serve_parser.add_argument(
|
|
463
|
-
"--recall_model",
|
|
464
|
-
default="",
|
|
465
|
-
help="The model used for recall documents",
|
|
466
|
-
)
|
|
467
|
-
|
|
468
|
-
serve_parser.add_argument(
|
|
469
|
-
"--chunk_model",
|
|
470
|
-
default="",
|
|
471
|
-
help="The model used for chunk documents",
|
|
472
|
-
)
|
|
473
|
-
|
|
474
|
-
serve_parser.add_argument(
|
|
475
|
-
"--qa_model",
|
|
476
|
-
default="",
|
|
477
|
-
help="The model used for question answering",
|
|
478
|
-
)
|
|
479
|
-
|
|
480
|
-
serve_parser.add_argument(
|
|
481
|
-
"--emb_model",
|
|
482
|
-
default="",
|
|
483
|
-
help="The model used for embedding documents",
|
|
484
|
-
)
|
|
485
|
-
|
|
486
|
-
serve_parser.add_argument(
|
|
487
|
-
"--agentic_model",
|
|
488
|
-
default="",
|
|
489
|
-
help="The model used for agentic operations",
|
|
490
|
-
)
|
|
491
|
-
|
|
492
|
-
serve_parser.add_argument(
|
|
493
|
-
"--context_prune_model",
|
|
494
|
-
default="",
|
|
495
|
-
help="The model used for context pruning",
|
|
496
|
-
)
|
|
497
|
-
|
|
498
|
-
# Benchmark command
|
|
499
|
-
benchmark_parser = subparsers.add_parser(
|
|
500
|
-
"benchmark", help="Benchmark LLM client performance"
|
|
501
|
-
)
|
|
502
|
-
benchmark_parser.add_argument(
|
|
503
|
-
"--model", default="v3_chat", help="Model to benchmark"
|
|
504
|
-
)
|
|
505
|
-
benchmark_parser.add_argument(
|
|
506
|
-
"--parallel", type=int, default=10, help="Number of parallel requests"
|
|
507
|
-
)
|
|
508
|
-
benchmark_parser.add_argument(
|
|
509
|
-
"--rounds", type=int, default=1, help="Number of rounds to run"
|
|
510
|
-
)
|
|
511
|
-
benchmark_parser.add_argument(
|
|
512
|
-
"--type",
|
|
513
|
-
choices=["openai", "byzerllm"],
|
|
514
|
-
default="byzerllm",
|
|
515
|
-
help="Client type to benchmark",
|
|
516
|
-
)
|
|
517
|
-
benchmark_parser.add_argument(
|
|
518
|
-
"--api_key", default="", help="OpenAI API key for OpenAI client"
|
|
519
|
-
)
|
|
520
|
-
benchmark_parser.add_argument(
|
|
521
|
-
"--base_url", default="", help="Base URL for OpenAI client"
|
|
522
|
-
)
|
|
523
|
-
benchmark_parser.add_argument(
|
|
524
|
-
"--query", default="Hello, how are you?", help="Query to use for benchmarking"
|
|
525
|
-
)
|
|
526
|
-
|
|
527
|
-
# Tools command
|
|
528
|
-
tools_parser = subparsers.add_parser("tools", help="Various tools")
|
|
529
|
-
tools_subparsers = tools_parser.add_subparsers(dest="tool", help="Available tools")
|
|
530
|
-
tools_parser.add_argument(
|
|
531
|
-
"--product_mode",
|
|
532
|
-
type=str,
|
|
533
|
-
default="pro",
|
|
534
|
-
help="The mode of the auto-coder.rag, lite/pro default is pro",
|
|
535
|
-
)
|
|
536
|
-
tools_parser.add_argument(
|
|
537
|
-
"--lite",
|
|
538
|
-
action="store_true",
|
|
539
|
-
help="Run in lite mode (equivalent to --product_mode=lite)",
|
|
540
|
-
)
|
|
541
|
-
tools_parser.add_argument(
|
|
542
|
-
"--pro",
|
|
543
|
-
action="store_true",
|
|
544
|
-
help="Run in pro mode (equivalent to --product_mode=pro)",
|
|
545
|
-
)
|
|
546
|
-
|
|
547
|
-
# Count tool
|
|
548
|
-
count_parser = tools_subparsers.add_parser("count", help="Count tokens in a file")
|
|
1
|
+
"""
|
|
2
|
+
Auto Coder RAG - RAG 系统的终端入口
|
|
549
3
|
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
|
|
553
|
-
)
|
|
554
|
-
recall_parser.add_argument(
|
|
555
|
-
"--model", required=True, help="Model to use for recall validation"
|
|
556
|
-
)
|
|
557
|
-
recall_parser.add_argument(
|
|
558
|
-
"--content", default=None, help="Content to validate against"
|
|
559
|
-
)
|
|
560
|
-
recall_parser.add_argument(
|
|
561
|
-
"--query", default=None, help="Query to use for validation"
|
|
562
|
-
)
|
|
4
|
+
这是重构后的 RAG 终端交互入口,代码结构更清晰,职责更分明。
|
|
5
|
+
具体实现逻辑已拆分到 autocoder.rag.terminal 模块中。
|
|
6
|
+
"""
|
|
563
7
|
|
|
564
|
-
|
|
565
|
-
chunk_parser = tools_subparsers.add_parser(
|
|
566
|
-
"chunk", help="Validate chunk model performance"
|
|
567
|
-
)
|
|
568
|
-
chunk_parser.add_argument(
|
|
569
|
-
"--model", required=True, help="Model to use for chunk validation"
|
|
570
|
-
)
|
|
571
|
-
chunk_parser.add_argument(
|
|
572
|
-
"--content", default=None, help="Content to validate against"
|
|
573
|
-
)
|
|
574
|
-
chunk_parser.add_argument(
|
|
575
|
-
"--query", default=None, help="Query to use for validation"
|
|
576
|
-
)
|
|
577
|
-
count_parser.add_argument(
|
|
578
|
-
"--tokenizer_path",
|
|
579
|
-
default=tokenizer_path,
|
|
580
|
-
help="Path to the tokenizer",
|
|
581
|
-
)
|
|
582
|
-
count_parser.add_argument(
|
|
583
|
-
"--file", required=True, help="Path to the file to count tokens"
|
|
584
|
-
)
|
|
585
|
-
|
|
586
|
-
-    args = parser.parse_args(input_args)
-
-    if args.command == "benchmark":
-        from .benchmark import benchmark_openai, benchmark_byzerllm
-
-        if args.type == "openai":
-            if not args.api_key:
-                print("OpenAI API key is required for OpenAI client benchmark")
-                return
-            asyncio.run(
-                benchmark_openai(
-                    args.model, args.parallel, args.api_key, args.base_url, args.rounds, args.query
-                )
-            )
-        else: # byzerllm
-            benchmark_byzerllm(args.model, args.parallel, args.rounds, args.query)
-
elif args.command == "serve":
|
|
604
|
-
# 如果用户传递了 --name 参数,则加载已保存的配置并与命令行参数合并
|
|
605
|
-
server_args_config = {}
|
|
606
|
-
auto_coder_args_config = {}
|
|
607
|
-
if args.name:
|
|
608
|
-
saved_config = get_rag_config(args.name)
|
|
609
|
-
if saved_config:
|
|
610
|
-
logger.info(f"加载已保存的RAG配置: {args.name}")
|
|
611
|
-
# 将保存的配置合并到 args 中(命令行参数优先)
|
|
612
|
-
for key, value in saved_config.items():
|
|
613
|
-
# 跳过一些不应该被合并的字段
|
|
614
|
-
skip_fields = {'name', 'status', 'created_at', 'updated_at', 'process_id', 'stdout_fd', 'stderr_fd', 'cache_build_task_id'}
|
|
615
|
-
if key in skip_fields:
|
|
616
|
-
continue
|
|
617
|
-
server_args_config[key] = value
|
|
618
|
-
setattr(args, key, value)
|
|
619
|
-
# 特殊处理 infer_params 字段
|
|
620
|
-
if 'infer_params' in saved_config and saved_config['infer_params']:
|
|
621
|
-
for infer_key, infer_value in saved_config['infer_params'].items():
|
|
622
|
-
auto_coder_args_config[infer_key] = infer_value
|
|
623
|
-
logger.info(f"配置合并完成,使用文档目录: {getattr(args, 'doc_dir', 'N/A')}")
|
|
624
|
-
else:
|
|
625
|
-
logger.warning(f"未找到名为 '{args.name}' 的RAG配置")
|
|
626
|
-
|
|
627
|
-
# Handle lite/pro flags
|
|
628
|
-
if args.pro:
|
|
629
|
-
args.product_mode = "pro"
|
|
630
|
-
else:
|
|
631
|
-
args.product_mode = "lite"
|
|
632
|
-
|
|
633
|
-
if not args.quick:
|
|
634
|
-
initialize_system(args)
|
|
635
|
-
|
|
636
|
-
# 使用新的合并逻辑
|
|
637
|
-
server_args = merge_args_with_config(args, server_args_config, ServerArgs, serve_parser)
|
|
638
|
-
auto_coder_args = merge_args_with_config(args, server_args_config, AutoCoderArgs, serve_parser)
|
|
639
|
-
# 设置本地图床的地址
|
|
640
|
-
if args.enable_local_image_host:
|
|
641
|
-
host = server_args.host or "127.0.0.1"
|
|
642
|
-
if host == "0.0.0.0":
|
|
643
|
-
host = "127.0.0.1"
|
|
644
|
-
port = str(server_args.port)
|
|
645
|
-
auto_coder_args.local_image_host = f"{host}:{port}"
|
|
646
|
-
|
|
647
|
-
|
|
648
|
-
# Generate unique name for RAG build if doc_dir exists
|
|
649
|
-
if server_args.doc_dir:
|
|
650
|
-
auto_coder_args.rag_build_name = generate_unique_name_from_path(server_args.doc_dir)
|
|
651
|
-
auto_coder_args.source_dir = server_args.doc_dir
|
|
652
|
-
logger.info(f"Generated RAG build name: {auto_coder_args.rag_build_name}")
|
|
653
|
-
|
|
654
|
-
if auto_coder_args.enable_hybrid_index and args.product_mode == "pro":
|
|
655
|
-
# 尝试连接storage
|
|
656
|
-
try:
|
|
657
|
-
from byzerllm.apps.byzer_storage.simple_api import ByzerStorage
|
|
658
|
-
|
|
659
|
-
storage = ByzerStorage("byzerai_store", "rag", auto_coder_args.rag_build_name)
|
|
660
|
-
storage.retrieval.cluster_info("byzerai_store")
|
|
661
|
-
except Exception as e:
|
|
662
|
-
logger.error(
|
|
663
|
-
"When enable_hybrid_index is true, ByzerStorage must be started"
|
|
664
|
-
)
|
|
665
|
-
logger.error("Please run 'byzerllm storage start' first")
|
|
666
|
-
return
|
|
667
|
-
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
if args.product_mode == "pro":
|
|
671
|
-
byzerllm.connect_cluster(address=args.ray_address)
|
|
672
|
-
llm = byzerllm.ByzerLLM()
|
|
673
|
-
llm.skip_nontext_check = True
|
|
674
|
-
llm.setup_default_model_name(args.model)
|
|
675
|
-
|
|
676
|
-
# Setup sub models if specified
|
|
677
|
-
if args.recall_model:
|
|
678
|
-
recall_model = byzerllm.ByzerLLM()
|
|
679
|
-
recall_model.setup_default_model_name(args.recall_model)
|
|
680
|
-
recall_model.skip_nontext_check = True
|
|
681
|
-
llm.setup_sub_client("recall_model", recall_model)
|
|
682
|
-
|
|
683
|
-
if args.chunk_model:
|
|
684
|
-
chunk_model = byzerllm.ByzerLLM()
|
|
685
|
-
chunk_model.setup_default_model_name(args.chunk_model)
|
|
686
|
-
llm.setup_sub_client("chunk_model", chunk_model)
|
|
687
|
-
|
|
688
|
-
if args.qa_model:
|
|
689
|
-
qa_model = byzerllm.ByzerLLM()
|
|
690
|
-
qa_model.setup_default_model_name(args.qa_model)
|
|
691
|
-
qa_model.skip_nontext_check = True
|
|
692
|
-
llm.setup_sub_client("qa_model", qa_model)
|
|
693
|
-
|
|
694
|
-
if args.emb_model:
|
|
695
|
-
emb_model = byzerllm.ByzerLLM()
|
|
696
|
-
emb_model.setup_default_model_name(args.emb_model)
|
|
697
|
-
emb_model.skip_nontext_check = True
|
|
698
|
-
llm.setup_sub_client("emb_model", emb_model)
|
|
699
|
-
|
|
700
|
-
if args.agentic_model:
|
|
701
|
-
agentic_model = byzerllm.ByzerLLM()
|
|
702
|
-
agentic_model.setup_default_model_name(args.agentic_model)
|
|
703
|
-
agentic_model.skip_nontext_check = True
|
|
704
|
-
llm.setup_sub_client("agentic_model", agentic_model)
|
|
705
|
-
|
|
706
|
-
if args.context_prune_model:
|
|
707
|
-
context_prune_model = byzerllm.ByzerLLM()
|
|
708
|
-
context_prune_model.setup_default_model_name(args.context_prune_model)
|
|
709
|
-
context_prune_model.skip_nontext_check = True
|
|
710
|
-
llm.setup_sub_client("context_prune_model", context_prune_model)
|
|
711
|
-
|
|
712
|
-
# 当启用hybrid_index时,检查必要的组件
|
|
713
|
-
if auto_coder_args.enable_hybrid_index:
|
|
714
|
-
if not args.emb_model and not llm.is_model_exist("emb"):
|
|
715
|
-
logger.error(
|
|
716
|
-
"When enable_hybrid_index is true, an 'emb' model must be deployed"
|
|
717
|
-
)
|
|
718
|
-
return
|
|
719
|
-
llm.setup_default_emb_model_name(args.emb_model or "emb")
|
|
720
|
-
|
|
721
|
-
if args.product_mode == "lite":
|
|
722
|
-
from autocoder import models as models_module
|
|
723
|
-
model_info = models_module.get_model_by_name(args.model)
|
|
724
|
-
llm = byzerllm.SimpleByzerLLM(default_model_name=args.model)
|
|
725
|
-
llm.deploy(
|
|
726
|
-
model_path="",
|
|
727
|
-
pretrained_model_type=model_info["model_type"],
|
|
728
|
-
udf_name=args.model,
|
|
729
|
-
infer_params={
|
|
730
|
-
"saas.base_url": model_info["base_url"],
|
|
731
|
-
"saas.api_key": model_info["api_key"],
|
|
732
|
-
"saas.model": model_info["model_name"],
|
|
733
|
-
"saas.is_reasoning": model_info["is_reasoning"],
|
|
734
|
-
"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
|
|
735
|
-
}
|
|
736
|
-
)
|
|
737
|
-
|
|
738
|
-
# Setup sub models if specified
|
|
739
|
-
if args.recall_model:
|
|
740
|
-
model_info = models_module.get_model_by_name(args.recall_model)
|
|
741
|
-
recall_model = byzerllm.SimpleByzerLLM(default_model_name=args.recall_model)
|
|
742
|
-
recall_model.deploy(
|
|
743
|
-
model_path="",
|
|
744
|
-
pretrained_model_type=model_info["model_type"],
|
|
745
|
-
udf_name=args.recall_model,
|
|
746
|
-
infer_params={
|
|
747
|
-
"saas.base_url": model_info["base_url"],
|
|
748
|
-
"saas.api_key": model_info["api_key"],
|
|
749
|
-
"saas.model": model_info["model_name"],
|
|
750
|
-
"saas.is_reasoning": model_info["is_reasoning"],
|
|
751
|
-
"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
|
|
752
|
-
}
|
|
753
|
-
)
|
|
754
|
-
llm.setup_sub_client("recall_model", recall_model)
|
|
755
|
-
|
|
756
|
-
if args.chunk_model:
|
|
757
|
-
model_info = models_module.get_model_by_name(args.chunk_model)
|
|
758
|
-
chunk_model = byzerllm.SimpleByzerLLM(default_model_name=args.chunk_model)
|
|
759
|
-
chunk_model.deploy(
|
|
760
|
-
model_path="",
|
|
761
|
-
pretrained_model_type=model_info["model_type"],
|
|
762
|
-
udf_name=args.chunk_model,
|
|
763
|
-
infer_params={
|
|
764
|
-
"saas.base_url": model_info["base_url"],
|
|
765
|
-
"saas.api_key": model_info["api_key"],
|
|
766
|
-
"saas.model": model_info["model_name"],
|
|
767
|
-
"saas.is_reasoning": model_info["is_reasoning"],
|
|
768
|
-
"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
|
|
769
|
-
}
|
|
770
|
-
)
|
|
771
|
-
llm.setup_sub_client("chunk_model", chunk_model)
|
|
772
|
-
|
|
773
|
-
if args.qa_model:
|
|
774
|
-
model_info = models_module.get_model_by_name(args.qa_model)
|
|
775
|
-
qa_model = byzerllm.SimpleByzerLLM(default_model_name=args.qa_model)
|
|
776
|
-
qa_model.deploy(
|
|
777
|
-
model_path="",
|
|
778
|
-
pretrained_model_type=model_info["model_type"],
|
|
779
|
-
udf_name=args.qa_model,
|
|
780
|
-
infer_params={
|
|
781
|
-
"saas.base_url": model_info["base_url"],
|
|
782
|
-
"saas.api_key": model_info["api_key"],
|
|
783
|
-
"saas.model": model_info["model_name"],
|
|
784
|
-
"saas.is_reasoning": model_info["is_reasoning"],
|
|
785
|
-
"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
|
|
786
|
-
}
|
|
787
|
-
)
|
|
788
|
-
llm.setup_sub_client("qa_model", qa_model)
|
|
789
|
-
|
|
790
|
-
if args.emb_model:
|
|
791
|
-
model_info = models_module.get_model_by_name(args.emb_model)
|
|
792
|
-
emb_model = byzerllm.SimpleByzerLLM(default_model_name=args.emb_model)
|
|
793
|
-
emb_model.deploy(
|
|
794
|
-
model_path="",
|
|
795
|
-
pretrained_model_type=model_info["model_type"],
|
|
796
|
-
udf_name=args.emb_model,
|
|
797
|
-
infer_params={
|
|
798
|
-
"saas.base_url": model_info["base_url"],
|
|
799
|
-
"saas.api_key": model_info["api_key"],
|
|
800
|
-
"saas.model": model_info["model_name"],
|
|
801
|
-
"saas.is_reasoning": False,
|
|
802
|
-
"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
|
|
803
|
-
}
|
|
804
|
-
)
|
|
805
|
-
llm.setup_sub_client("emb_model", emb_model)
|
|
806
|
-
|
|
807
|
-
if args.agentic_model:
|
|
808
|
-
model_info = models_module.get_model_by_name(args.agentic_model)
|
|
809
|
-
agentic_model = byzerllm.SimpleByzerLLM(default_model_name=args.agentic_model)
|
|
810
|
-
agentic_model.deploy(
|
|
811
|
-
model_path="",
|
|
812
|
-
pretrained_model_type=model_info["model_type"],
|
|
813
|
-
udf_name=args.agentic_model,
|
|
814
|
-
infer_params={
|
|
815
|
-
"saas.base_url": model_info["base_url"],
|
|
816
|
-
"saas.api_key": model_info["api_key"],
|
|
817
|
-
"saas.model": model_info["model_name"],
|
|
818
|
-
"saas.is_reasoning": model_info["is_reasoning"],
|
|
819
|
-
"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
|
|
820
|
-
}
|
|
821
|
-
)
|
|
822
|
-
llm.setup_sub_client("agentic_model", agentic_model)
|
|
823
|
-
|
|
824
|
-
if args.context_prune_model:
|
|
825
|
-
model_info = models_module.get_model_by_name(args.context_prune_model)
|
|
826
|
-
context_prune_model = byzerllm.SimpleByzerLLM(default_model_name=args.context_prune_model)
|
|
827
|
-
context_prune_model.deploy(
|
|
828
|
-
model_path="",
|
|
829
|
-
pretrained_model_type=model_info["model_type"],
|
|
830
|
-
udf_name=args.context_prune_model,
|
|
831
|
-
infer_params={
|
|
832
|
-
"saas.base_url": model_info["base_url"],
|
|
833
|
-
"saas.api_key": model_info["api_key"],
|
|
834
|
-
"saas.model": model_info["model_name"],
|
|
835
|
-
"saas.is_reasoning": model_info["is_reasoning"],
|
|
836
|
-
"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
|
|
837
|
-
}
|
|
838
|
-
)
|
|
839
|
-
llm.setup_sub_client("context_prune_model", context_prune_model)
|
|
840
|
-
|
|
841
|
-
-        if args.enable_hybrid_index:
-            if not args.emb_model:
-                raise Exception("When enable_hybrid_index is true, an 'emb' model must be specified")
-
-        if server_args.doc_dir:
-            auto_coder_args.rag_build_name = generate_unique_name_from_path(server_args.doc_dir)
-            if args.agentic:
-                rag = AgenticRAG(llm=llm, args=auto_coder_args, path=server_args.doc_dir, tokenizer_path=server_args.tokenizer_path)
-            else:
-                rag = LongContextRAG(llm=llm, args=auto_coder_args, path=server_args.doc_dir, tokenizer_path=server_args.tokenizer_path)
-        else:
-            raise Exception("doc_dir is required")
-
-        llm_wrapper = LLWrapper(llm=llm, rag=rag)
-        # Save service info
-        service_info = RAGServiceInfo(
-            host=server_args.host or "127.0.0.1",
-            port=server_args.port,
-            model=args.model,
-            _pid=os.getpid(),
-            _timestamp=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
-            args={k: v for k, v in vars(args).items() if not k.startswith("_")}
-        )
-        try:
-            service_info.save()
-        except Exception as e:
-            logger.warning(f"Failed to save service info: {str(e)}")
-
-        # Start FileMonitor if monitor_mode is enabled and source_dir is provided
-        if server_args.doc_dir:
-            try:
-                # Use singleton pattern to get/create monitor instance
-                # FileMonitor ensures only one instance runs per root_dir
-                monitor = FileMonitor(server_args.doc_dir)
-                if not monitor.is_running():
-                    # TODO: Register specific callbacks here if needed in the future
-                    # Example: monitor.register(os.path.join(args.source_dir, "specific_file.py"), my_callback)
-                    monitor.start()
-                    logger.info(f"File monitor started for directory: {server_args.doc_dir}")
-                else:
-                    # Log if monitor was already running (e.g., started by another part of the app)
-                    # Check if the existing monitor's root matches the current request
-                    if monitor.root_dir == os.path.abspath(server_args.doc_dir):
-                        logger.info(f"File monitor already running for directory: {monitor.root_dir}")
-                    else:
-                        logger.warning(f"File monitor is running for a different directory ({monitor.root_dir}), cannot start a new one for {args.source_dir}.")
-
-                logger.info(f"Getting rules for {server_args.doc_dir}")
-                _ = get_rules(server_args.doc_dir)
-
-            except ValueError as ve: # Catch specific error if root_dir is invalid during init
-                logger.error(f"Failed to initialize file monitor for {args.source_dir}: {ve}")
-            except ImportError as ie: # Catch if watchfiles is not installed
-                logger.error(f"Failed to start file monitor: {ie}")
-            except Exception as e:
-                logger.error(f"An unexpected error occurred while starting file monitor for {args.source_dir}: {e}")
-
-        serve(llm=llm_wrapper, args=server_args)
elif args.command == "build_hybrid_index":
|
|
900
|
-
auto_coder_args = AutoCoderArgs(
|
|
901
|
-
**{
|
|
902
|
-
arg: getattr(args, arg)
|
|
903
|
-
for arg in vars(AutoCoderArgs())
|
|
904
|
-
if hasattr(args, arg)
|
|
905
|
-
}
|
|
906
|
-
)
|
|
907
|
-
|
|
908
|
-
# Generate unique name for RAG build if doc_dir exists
|
|
909
|
-
if args.doc_dir:
|
|
910
|
-
auto_coder_args.rag_build_name = generate_unique_name_from_path(args.doc_dir)
|
|
911
|
-
logger.info(f"Generated RAG build name: {auto_coder_args.rag_build_name}")
|
|
912
|
-
|
|
913
|
-
auto_coder_args.enable_hybrid_index = True
|
|
914
|
-
auto_coder_args.rag_type = "simple"
|
|
915
|
-
|
|
916
|
-
if args.on_ray:
|
|
917
|
-
|
|
918
|
-
try:
|
|
919
|
-
from byzerllm.apps.byzer_storage.simple_api import ByzerStorage
|
|
920
|
-
|
|
921
|
-
storage = ByzerStorage("byzerai_store", "rag", "files")
|
|
922
|
-
storage.retrieval.cluster_info("byzerai_store")
|
|
923
|
-
except Exception as e:
|
|
924
|
-
logger.error(
|
|
925
|
-
"When enable_hybrid_index is true, ByzerStorage must be started"
|
|
926
|
-
)
|
|
927
|
-
logger.error("Please run 'byzerllm storage start' first")
|
|
928
|
-
return
|
|
929
|
-
|
|
930
|
-
llm = byzerllm.ByzerLLM()
|
|
931
|
-
llm.setup_default_model_name(args.model)
|
|
932
|
-
|
|
933
|
-
# 当启用hybrid_index时,检查必要的组件
|
|
934
|
-
if auto_coder_args.enable_hybrid_index:
|
|
935
|
-
if not llm.is_model_exist("emb"):
|
|
936
|
-
logger.error(
|
|
937
|
-
"When enable_hybrid_index is true, an 'emb' model must be deployed"
|
|
938
|
-
)
|
|
939
|
-
return
|
|
940
|
-
llm.setup_default_emb_model_name("emb")
|
|
941
|
-
else:
|
|
942
|
-
from autocoder import models as models_module
|
|
943
|
-
model_info = models_module.get_model_by_name(args.model)
|
|
944
|
-
llm = byzerllm.SimpleByzerLLM(default_model_name=args.model)
|
|
945
|
-
llm.deploy(
|
|
946
|
-
model_path="",
|
|
947
|
-
pretrained_model_type=model_info["model_type"],
|
|
948
|
-
udf_name=args.model,
|
|
949
|
-
infer_params={
|
|
950
|
-
"saas.base_url": model_info["base_url"],
|
|
951
|
-
"saas.api_key": model_info["api_key"],
|
|
952
|
-
"saas.model": model_info["model_name"],
|
|
953
|
-
"saas.is_reasoning": model_info["is_reasoning"],
|
|
954
|
-
"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
|
|
955
|
-
}
|
|
956
|
-
)
|
|
957
|
-
|
|
958
|
-
model_info = models_module.get_model_by_name(args.emb_model)
|
|
959
|
-
emb_model = byzerllm.SimpleByzerLLM(default_model_name=args.emb_model)
|
|
960
|
-
emb_model.deploy(
|
|
961
|
-
model_path="",
|
|
962
|
-
pretrained_model_type=model_info["model_type"],
|
|
963
|
-
udf_name=args.emb_model,
|
|
964
|
-
infer_params={
|
|
965
|
-
"saas.base_url": model_info["base_url"],
|
|
966
|
-
"saas.api_key": model_info["api_key"],
|
|
967
|
-
"saas.model": model_info["model_name"],
|
|
968
|
-
"saas.is_reasoning": False,
|
|
969
|
-
"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
|
|
970
|
-
}
|
|
971
|
-
)
|
|
972
|
-
llm.setup_sub_client("emb_model", emb_model)
|
|
973
|
-
|
|
974
|
-
rag = RAGFactory.get_rag(
|
|
975
|
-
llm=llm,
|
|
976
|
-
args=auto_coder_args,
|
|
977
|
-
path=args.doc_dir,
|
|
978
|
-
tokenizer_path=args.tokenizer_path,
|
|
979
|
-
)
|
|
980
|
-
|
|
981
|
-
if hasattr(rag.document_retriever, "cacher"):
|
|
982
|
-
rag.document_retriever.cacher.build_cache()
|
|
983
|
-
else:
|
|
984
|
-
logger.error(
|
|
985
|
-
"The document retriever does not support hybrid index building"
|
|
986
|
-
)
|
|
987
|
-
try:
|
|
988
|
-
monitor = FileMonitor(args.doc_dir)
|
|
989
|
-
monitor.stop()
|
|
990
|
-
except Exception as e:
|
|
991
|
-
logger.warning(f"Failed to stop file monitor: {e}")
|
|
992
|
-
|
|
993
|
-
elif args.command == "tools":
|
|
994
|
-
if args.tool == "count":
|
|
995
|
-
# auto-coder.rag tools count --tokenizer_path /Users/allwefantasy/Downloads/tokenizer.json --file /Users/allwefantasy/data/yum/schema/schema.xlsx
|
|
996
|
-
count_tokens(args.tokenizer_path, args.file)
|
|
997
|
-
elif args.tool == "recall":
|
|
998
|
-
from .common.recall_validation import validate_recall
|
|
999
|
-
from autocoder import models as models_module
|
|
1000
|
-
|
|
1001
|
-
# Handle lite/pro flags
|
|
1002
|
-
if args.lite:
|
|
1003
|
-
args.product_mode = "lite"
|
|
1004
|
-
elif args.pro:
|
|
1005
|
-
args.product_mode = "pro"
|
|
1006
|
-
|
|
1007
|
-
if args.product_mode == "pro":
|
|
1008
|
-
llm = byzerllm.ByzerLLM.from_default_model(args.model)
|
|
1009
|
-
else: # lite mode
|
|
1010
|
-
model_info = models_module.get_model_by_name(args.model)
|
|
1011
|
-
llm = byzerllm.SimpleByzerLLM(default_model_name=args.model)
|
|
1012
|
-
llm.deploy(
|
|
1013
|
-
model_path="",
|
|
1014
|
-
pretrained_model_type=model_info["model_type"],
|
|
1015
|
-
udf_name=args.model,
|
|
1016
|
-
infer_params={
|
|
1017
|
-
"saas.base_url": model_info["base_url"],
|
|
1018
|
-
"saas.api_key": model_info["api_key"],
|
|
1019
|
-
"saas.model": model_info["model_name"],
|
|
1020
|
-
"saas.is_reasoning": model_info["is_reasoning"],
|
|
1021
|
-
"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
|
|
1022
|
-
}
|
|
1023
|
-
)
|
|
1024
|
-
|
|
1025
|
-
content = None if not args.content else [args.content]
|
|
1026
|
-
result = validate_recall(llm, content=content, query=args.query)
|
|
1027
|
-
print(f"Recall Validation Result:\n{result}")
|
|
1028
|
-
|
|
1029
|
-
elif args.tool == "chunk":
|
|
1030
|
-
from .common.chunk_validation import validate_chunk
|
|
1031
|
-
from autocoder import models as models_module
|
|
1032
|
-
|
|
1033
|
-
if args.lite:
|
|
1034
|
-
args.product_mode = "lite"
|
|
1035
|
-
elif args.pro:
|
|
1036
|
-
args.product_mode = "pro"
|
|
1037
|
-
|
|
1038
|
-
if args.product_mode == "pro":
|
|
1039
|
-
llm = byzerllm.ByzerLLM.from_default_model(args.model)
|
|
1040
|
-
else: # lite mode
|
|
1041
|
-
model_info = models_module.get_model_by_name(args.model)
|
|
1042
|
-
llm = byzerllm.SimpleByzerLLM(default_model_name=args.model)
|
|
1043
|
-
llm.deploy(
|
|
1044
|
-
model_path="",
|
|
1045
|
-
pretrained_model_type=model_info["model_type"],
|
|
1046
|
-
udf_name=args.model,
|
|
1047
|
-
infer_params={
|
|
1048
|
-
"saas.base_url": model_info["base_url"],
|
|
1049
|
-
"saas.api_key": model_info["api_key"],
|
|
1050
|
-
"saas.model": model_info["model_name"],
|
|
1051
|
-
"saas.is_reasoning": model_info["is_reasoning"],
|
|
1052
|
-
"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
|
|
1053
|
-
}
|
|
1054
|
-
)
|
|
1055
|
-
|
|
1056
|
-
content = None if not args.content else [args.content]
|
|
1057
|
-
result = validate_chunk(llm, content=content, query=args.query)
|
|
1058
|
-
print(f"Chunk Model Validation Result:\n{result}")
|
|
1059
|
-
|
|
1060
|
-
|
|
1061
|
-
-def count_tokens(tokenizer_path: str, file_path: str):
-    from autocoder.rag.variable_holder import VariableHolder
-    from tokenizers import Tokenizer
-    VariableHolder.TOKENIZER_PATH = tokenizer_path
-    VariableHolder.TOKENIZER_MODEL = Tokenizer.from_file(tokenizer_path)
-    token_counter = TokenCounter(tokenizer_path)
-    source_codes = process_file_local(file_path)
-
-    console = Console()
-    table = Table(title="Token Count Results")
-    table.add_column("File", style="cyan")
-    table.add_column("Characters", justify="right", style="magenta")
-    table.add_column("Tokens", justify="right", style="green")
-
-    total_chars = 0
-    total_tokens = 0
-
-    for source_code in source_codes:
-        content = source_code.source_code
-        chars = len(content)
-        tokens = token_counter.count_tokens(content)
+import logging

-
-        total_tokens += tokens
+logging.getLogger("ppocr").setLevel(logging.WARNING)

-
+from autocoder.rag.terminal.bootstrap import run_cli

-    table.add_row("Total", str(total_chars), str(total_tokens), style="bold")

-
+def main(input_args=None):
+    """Main entry point."""
+    run_cli(input_args)


 if __name__ == "__main__":
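Net effect of this diff: the old main() body above (the argparse setup plus the benchmark/serve/build_hybrid_index/tools branches) is removed from auto_coder_rag.py, and the module shrinks to a thin shim that quiets the ppocr logger and forwards everything to autocoder.rag.terminal.bootstrap.run_cli. A minimal sketch of how the new entry point would be driven is shown below; the explicit argument list (and the "serve --doc_dir" flags in particular) is only an assumption carried over from the removed parser, since the accepted options now live inside the autocoder.rag.terminal module rather than in this file.

    # Sketch only; assumes run_cli() falls back to sys.argv when given None,
    # mirroring the removed parser.parse_args(input_args) behaviour.
    from autocoder.auto_coder_rag import main

    main(None)  # behaves like invoking the auto-coder.rag console script

    # Hypothetical programmatic call; "serve" and "--doc_dir" come from the old
    # argparse setup shown above and may differ in the new terminal module.
    main(["serve", "--doc_dir", "./docs"])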