autobyteus 1.2.1__py3-none-any.whl → 1.2.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- autobyteus/agent/agent.py +15 -5
- autobyteus/agent/bootstrap_steps/__init__.py +1 -3
- autobyteus/agent/bootstrap_steps/agent_bootstrapper.py +3 -59
- autobyteus/agent/bootstrap_steps/base_bootstrap_step.py +1 -4
- autobyteus/agent/bootstrap_steps/mcp_server_prewarming_step.py +1 -3
- autobyteus/agent/bootstrap_steps/system_prompt_processing_step.py +16 -13
- autobyteus/agent/bootstrap_steps/workspace_context_initialization_step.py +2 -4
- autobyteus/agent/context/agent_config.py +43 -20
- autobyteus/agent/context/agent_context.py +23 -18
- autobyteus/agent/context/agent_runtime_state.py +19 -19
- autobyteus/agent/events/__init__.py +16 -1
- autobyteus/agent/events/agent_events.py +43 -3
- autobyteus/agent/events/agent_input_event_queue_manager.py +79 -26
- autobyteus/agent/events/event_store.py +57 -0
- autobyteus/agent/events/notifiers.py +69 -59
- autobyteus/agent/events/worker_event_dispatcher.py +21 -64
- autobyteus/agent/factory/agent_factory.py +52 -0
- autobyteus/agent/handlers/__init__.py +2 -0
- autobyteus/agent/handlers/approved_tool_invocation_event_handler.py +51 -34
- autobyteus/agent/handlers/bootstrap_event_handler.py +155 -0
- autobyteus/agent/handlers/inter_agent_message_event_handler.py +10 -0
- autobyteus/agent/handlers/lifecycle_event_logger.py +19 -11
- autobyteus/agent/handlers/llm_complete_response_received_event_handler.py +10 -15
- autobyteus/agent/handlers/llm_user_message_ready_event_handler.py +188 -48
- autobyteus/agent/handlers/tool_execution_approval_event_handler.py +0 -10
- autobyteus/agent/handlers/tool_invocation_request_event_handler.py +53 -48
- autobyteus/agent/handlers/tool_result_event_handler.py +7 -8
- autobyteus/agent/handlers/user_input_message_event_handler.py +10 -3
- autobyteus/agent/input_processor/memory_ingest_input_processor.py +40 -0
- autobyteus/agent/lifecycle/__init__.py +12 -0
- autobyteus/agent/lifecycle/base_processor.py +109 -0
- autobyteus/agent/lifecycle/events.py +35 -0
- autobyteus/agent/lifecycle/processor_definition.py +36 -0
- autobyteus/agent/lifecycle/processor_registry.py +106 -0
- autobyteus/agent/llm_request_assembler.py +98 -0
- autobyteus/agent/llm_response_processor/__init__.py +1 -8
- autobyteus/agent/message/context_file_type.py +1 -1
- autobyteus/agent/runtime/agent_runtime.py +29 -21
- autobyteus/agent/runtime/agent_worker.py +98 -19
- autobyteus/agent/shutdown_steps/__init__.py +2 -0
- autobyteus/agent/shutdown_steps/agent_shutdown_orchestrator.py +2 -0
- autobyteus/agent/shutdown_steps/tool_cleanup_step.py +58 -0
- autobyteus/agent/status/__init__.py +14 -0
- autobyteus/agent/status/manager.py +93 -0
- autobyteus/agent/status/status_deriver.py +96 -0
- autobyteus/agent/{phases/phase_enum.py → status/status_enum.py} +16 -16
- autobyteus/agent/status/status_update_utils.py +73 -0
- autobyteus/agent/streaming/__init__.py +52 -5
- autobyteus/agent/streaming/adapters/__init__.py +18 -0
- autobyteus/agent/streaming/adapters/invocation_adapter.py +184 -0
- autobyteus/agent/streaming/adapters/tool_call_parsing.py +163 -0
- autobyteus/agent/streaming/adapters/tool_syntax_registry.py +67 -0
- autobyteus/agent/streaming/agent_event_stream.py +3 -183
- autobyteus/agent/streaming/api_tool_call/__init__.py +16 -0
- autobyteus/agent/streaming/api_tool_call/file_content_streamer.py +56 -0
- autobyteus/agent/streaming/api_tool_call/json_string_field_extractor.py +175 -0
- autobyteus/agent/streaming/api_tool_call_streaming_response_handler.py +4 -0
- autobyteus/agent/streaming/events/__init__.py +6 -0
- autobyteus/agent/streaming/events/stream_event_payloads.py +284 -0
- autobyteus/agent/streaming/events/stream_events.py +141 -0
- autobyteus/agent/streaming/handlers/__init__.py +15 -0
- autobyteus/agent/streaming/handlers/api_tool_call_streaming_response_handler.py +303 -0
- autobyteus/agent/streaming/handlers/parsing_streaming_response_handler.py +107 -0
- autobyteus/agent/streaming/handlers/pass_through_streaming_response_handler.py +107 -0
- autobyteus/agent/streaming/handlers/streaming_handler_factory.py +177 -0
- autobyteus/agent/streaming/handlers/streaming_response_handler.py +58 -0
- autobyteus/agent/streaming/parser/__init__.py +61 -0
- autobyteus/agent/streaming/parser/event_emitter.py +181 -0
- autobyteus/agent/streaming/parser/events.py +4 -0
- autobyteus/agent/streaming/parser/invocation_adapter.py +4 -0
- autobyteus/agent/streaming/parser/json_parsing_strategies/__init__.py +19 -0
- autobyteus/agent/streaming/parser/json_parsing_strategies/base.py +32 -0
- autobyteus/agent/streaming/parser/json_parsing_strategies/default.py +34 -0
- autobyteus/agent/streaming/parser/json_parsing_strategies/gemini.py +31 -0
- autobyteus/agent/streaming/parser/json_parsing_strategies/openai.py +64 -0
- autobyteus/agent/streaming/parser/json_parsing_strategies/registry.py +75 -0
- autobyteus/agent/streaming/parser/parser_context.py +227 -0
- autobyteus/agent/streaming/parser/parser_factory.py +132 -0
- autobyteus/agent/streaming/parser/sentinel_format.py +7 -0
- autobyteus/agent/streaming/parser/state_factory.py +62 -0
- autobyteus/agent/streaming/parser/states/__init__.py +1 -0
- autobyteus/agent/streaming/parser/states/base_state.py +60 -0
- autobyteus/agent/streaming/parser/states/custom_xml_tag_run_bash_parsing_state.py +38 -0
- autobyteus/agent/streaming/parser/states/custom_xml_tag_write_file_parsing_state.py +55 -0
- autobyteus/agent/streaming/parser/states/delimited_content_state.py +146 -0
- autobyteus/agent/streaming/parser/states/json_initialization_state.py +144 -0
- autobyteus/agent/streaming/parser/states/json_tool_parsing_state.py +137 -0
- autobyteus/agent/streaming/parser/states/sentinel_content_state.py +30 -0
- autobyteus/agent/streaming/parser/states/sentinel_initialization_state.py +117 -0
- autobyteus/agent/streaming/parser/states/text_state.py +78 -0
- autobyteus/agent/streaming/parser/states/xml_patch_file_tool_parsing_state.py +328 -0
- autobyteus/agent/streaming/parser/states/xml_run_bash_tool_parsing_state.py +129 -0
- autobyteus/agent/streaming/parser/states/xml_tag_initialization_state.py +151 -0
- autobyteus/agent/streaming/parser/states/xml_tool_parsing_state.py +63 -0
- autobyteus/agent/streaming/parser/states/xml_write_file_tool_parsing_state.py +343 -0
- autobyteus/agent/streaming/parser/strategies/__init__.py +17 -0
- autobyteus/agent/streaming/parser/strategies/base.py +24 -0
- autobyteus/agent/streaming/parser/strategies/json_tool_strategy.py +26 -0
- autobyteus/agent/streaming/parser/strategies/registry.py +28 -0
- autobyteus/agent/streaming/parser/strategies/sentinel_strategy.py +23 -0
- autobyteus/agent/streaming/parser/strategies/xml_tag_strategy.py +21 -0
- autobyteus/agent/streaming/parser/stream_scanner.py +167 -0
- autobyteus/agent/streaming/parser/streaming_parser.py +212 -0
- autobyteus/agent/streaming/parser/tool_call_parsing.py +4 -0
- autobyteus/agent/streaming/parser/tool_constants.py +7 -0
- autobyteus/agent/streaming/parser/tool_syntax_registry.py +4 -0
- autobyteus/agent/streaming/parser/xml_tool_parsing_state_registry.py +55 -0
- autobyteus/agent/streaming/parsing_streaming_response_handler.py +4 -0
- autobyteus/agent/streaming/pass_through_streaming_response_handler.py +4 -0
- autobyteus/agent/streaming/queue_streamer.py +3 -57
- autobyteus/agent/streaming/segments/__init__.py +5 -0
- autobyteus/agent/streaming/segments/segment_events.py +81 -0
- autobyteus/agent/streaming/stream_event_payloads.py +2 -223
- autobyteus/agent/streaming/stream_events.py +3 -140
- autobyteus/agent/streaming/streaming_handler_factory.py +4 -0
- autobyteus/agent/streaming/streaming_response_handler.py +4 -0
- autobyteus/agent/streaming/streams/__init__.py +5 -0
- autobyteus/agent/streaming/streams/agent_event_stream.py +197 -0
- autobyteus/agent/streaming/utils/__init__.py +5 -0
- autobyteus/agent/streaming/utils/queue_streamer.py +59 -0
- autobyteus/agent/system_prompt_processor/__init__.py +2 -0
- autobyteus/agent/system_prompt_processor/available_skills_processor.py +96 -0
- autobyteus/agent/system_prompt_processor/base_processor.py +1 -1
- autobyteus/agent/system_prompt_processor/processor_meta.py +15 -2
- autobyteus/agent/system_prompt_processor/tool_manifest_injector_processor.py +39 -58
- autobyteus/agent/token_budget.py +56 -0
- autobyteus/agent/tool_execution_result_processor/memory_ingest_tool_result_processor.py +29 -0
- autobyteus/agent/tool_invocation.py +16 -40
- autobyteus/agent/tool_invocation_preprocessor/__init__.py +9 -0
- autobyteus/agent/tool_invocation_preprocessor/base_preprocessor.py +45 -0
- autobyteus/agent/tool_invocation_preprocessor/processor_definition.py +15 -0
- autobyteus/agent/tool_invocation_preprocessor/processor_meta.py +33 -0
- autobyteus/agent/tool_invocation_preprocessor/processor_registry.py +60 -0
- autobyteus/agent/utils/wait_for_idle.py +12 -14
- autobyteus/agent/workspace/base_workspace.py +6 -27
- autobyteus/agent_team/agent_team.py +3 -3
- autobyteus/agent_team/agent_team_builder.py +1 -41
- autobyteus/agent_team/bootstrap_steps/__init__.py +0 -4
- autobyteus/agent_team/bootstrap_steps/agent_configuration_preparation_step.py +8 -18
- autobyteus/agent_team/bootstrap_steps/agent_team_bootstrapper.py +4 -16
- autobyteus/agent_team/bootstrap_steps/base_agent_team_bootstrap_step.py +1 -2
- autobyteus/agent_team/bootstrap_steps/coordinator_initialization_step.py +1 -2
- autobyteus/agent_team/bootstrap_steps/task_notifier_initialization_step.py +1 -2
- autobyteus/agent_team/bootstrap_steps/team_context_initialization_step.py +4 -4
- autobyteus/agent_team/context/agent_team_config.py +6 -3
- autobyteus/agent_team/context/agent_team_context.py +25 -3
- autobyteus/agent_team/context/agent_team_runtime_state.py +9 -6
- autobyteus/agent_team/events/__init__.py +11 -0
- autobyteus/agent_team/events/agent_team_event_dispatcher.py +22 -9
- autobyteus/agent_team/events/agent_team_events.py +16 -0
- autobyteus/agent_team/events/event_store.py +57 -0
- autobyteus/agent_team/factory/agent_team_factory.py +8 -0
- autobyteus/agent_team/handlers/inter_agent_message_request_event_handler.py +18 -2
- autobyteus/agent_team/handlers/lifecycle_agent_team_event_handler.py +21 -5
- autobyteus/agent_team/handlers/process_user_message_event_handler.py +17 -8
- autobyteus/agent_team/handlers/tool_approval_team_event_handler.py +19 -4
- autobyteus/agent_team/runtime/agent_team_runtime.py +41 -10
- autobyteus/agent_team/runtime/agent_team_worker.py +69 -5
- autobyteus/agent_team/status/__init__.py +14 -0
- autobyteus/agent_team/status/agent_team_status.py +18 -0
- autobyteus/agent_team/status/agent_team_status_manager.py +33 -0
- autobyteus/agent_team/status/status_deriver.py +62 -0
- autobyteus/agent_team/status/status_update_utils.py +42 -0
- autobyteus/agent_team/streaming/__init__.py +2 -2
- autobyteus/agent_team/streaming/agent_team_event_notifier.py +6 -6
- autobyteus/agent_team/streaming/agent_team_stream_event_payloads.py +4 -4
- autobyteus/agent_team/streaming/agent_team_stream_events.py +3 -3
- autobyteus/agent_team/system_prompt_processor/__init__.py +6 -0
- autobyteus/agent_team/system_prompt_processor/team_manifest_injector_processor.py +76 -0
- autobyteus/agent_team/task_notification/task_notification_mode.py +19 -0
- autobyteus/agent_team/utils/wait_for_idle.py +4 -4
- autobyteus/cli/agent_cli.py +18 -10
- autobyteus/cli/agent_team_tui/app.py +14 -11
- autobyteus/cli/agent_team_tui/state.py +13 -15
- autobyteus/cli/agent_team_tui/widgets/agent_list_sidebar.py +15 -15
- autobyteus/cli/agent_team_tui/widgets/focus_pane.py +143 -36
- autobyteus/cli/agent_team_tui/widgets/renderables.py +1 -1
- autobyteus/cli/agent_team_tui/widgets/shared.py +25 -25
- autobyteus/cli/cli_display.py +193 -44
- autobyteus/cli/workflow_tui/app.py +9 -10
- autobyteus/cli/workflow_tui/state.py +14 -16
- autobyteus/cli/workflow_tui/widgets/agent_list_sidebar.py +15 -15
- autobyteus/cli/workflow_tui/widgets/focus_pane.py +137 -35
- autobyteus/cli/workflow_tui/widgets/renderables.py +1 -1
- autobyteus/cli/workflow_tui/widgets/shared.py +25 -25
- autobyteus/clients/autobyteus_client.py +94 -1
- autobyteus/events/event_types.py +11 -18
- autobyteus/llm/api/autobyteus_llm.py +33 -29
- autobyteus/llm/api/claude_llm.py +142 -36
- autobyteus/llm/api/gemini_llm.py +163 -59
- autobyteus/llm/api/grok_llm.py +1 -1
- autobyteus/llm/api/minimax_llm.py +26 -0
- autobyteus/llm/api/mistral_llm.py +113 -87
- autobyteus/llm/api/ollama_llm.py +9 -42
- autobyteus/llm/api/openai_compatible_llm.py +127 -91
- autobyteus/llm/api/openai_llm.py +3 -3
- autobyteus/llm/api/openai_responses_llm.py +324 -0
- autobyteus/llm/api/zhipu_llm.py +21 -2
- autobyteus/llm/autobyteus_provider.py +70 -60
- autobyteus/llm/base_llm.py +85 -81
- autobyteus/llm/converters/__init__.py +14 -0
- autobyteus/llm/converters/anthropic_tool_call_converter.py +37 -0
- autobyteus/llm/converters/gemini_tool_call_converter.py +57 -0
- autobyteus/llm/converters/mistral_tool_call_converter.py +37 -0
- autobyteus/llm/converters/openai_tool_call_converter.py +38 -0
- autobyteus/llm/extensions/base_extension.py +6 -12
- autobyteus/llm/extensions/token_usage_tracking_extension.py +45 -18
- autobyteus/llm/llm_factory.py +282 -204
- autobyteus/llm/lmstudio_provider.py +60 -49
- autobyteus/llm/models.py +35 -2
- autobyteus/llm/ollama_provider.py +60 -49
- autobyteus/llm/ollama_provider_resolver.py +0 -1
- autobyteus/llm/prompt_renderers/__init__.py +19 -0
- autobyteus/llm/prompt_renderers/anthropic_prompt_renderer.py +104 -0
- autobyteus/llm/prompt_renderers/autobyteus_prompt_renderer.py +19 -0
- autobyteus/llm/prompt_renderers/base_prompt_renderer.py +10 -0
- autobyteus/llm/prompt_renderers/gemini_prompt_renderer.py +63 -0
- autobyteus/llm/prompt_renderers/mistral_prompt_renderer.py +87 -0
- autobyteus/llm/prompt_renderers/ollama_prompt_renderer.py +51 -0
- autobyteus/llm/prompt_renderers/openai_chat_renderer.py +97 -0
- autobyteus/llm/prompt_renderers/openai_responses_renderer.py +101 -0
- autobyteus/llm/providers.py +1 -3
- autobyteus/llm/token_counter/claude_token_counter.py +56 -25
- autobyteus/llm/token_counter/mistral_token_counter.py +12 -8
- autobyteus/llm/token_counter/openai_token_counter.py +24 -5
- autobyteus/llm/token_counter/token_counter_factory.py +12 -5
- autobyteus/llm/utils/llm_config.py +6 -12
- autobyteus/llm/utils/media_payload_formatter.py +27 -20
- autobyteus/llm/utils/messages.py +55 -3
- autobyteus/llm/utils/response_types.py +3 -0
- autobyteus/llm/utils/tool_call_delta.py +31 -0
- autobyteus/memory/__init__.py +32 -0
- autobyteus/memory/active_transcript.py +69 -0
- autobyteus/memory/compaction/__init__.py +9 -0
- autobyteus/memory/compaction/compaction_result.py +8 -0
- autobyteus/memory/compaction/compactor.py +89 -0
- autobyteus/memory/compaction/summarizer.py +11 -0
- autobyteus/memory/compaction_snapshot_builder.py +84 -0
- autobyteus/memory/memory_manager.py +183 -0
- autobyteus/memory/models/__init__.py +14 -0
- autobyteus/memory/models/episodic_item.py +41 -0
- autobyteus/memory/models/memory_types.py +7 -0
- autobyteus/memory/models/raw_trace_item.py +79 -0
- autobyteus/memory/models/semantic_item.py +41 -0
- autobyteus/memory/models/tool_interaction.py +20 -0
- autobyteus/memory/policies/__init__.py +5 -0
- autobyteus/memory/policies/compaction_policy.py +16 -0
- autobyteus/memory/retrieval/__init__.py +7 -0
- autobyteus/memory/retrieval/memory_bundle.py +11 -0
- autobyteus/memory/retrieval/retriever.py +13 -0
- autobyteus/memory/store/__init__.py +7 -0
- autobyteus/memory/store/base_store.py +14 -0
- autobyteus/memory/store/file_store.py +98 -0
- autobyteus/memory/tool_interaction_builder.py +46 -0
- autobyteus/memory/turn_tracker.py +9 -0
- autobyteus/multimedia/audio/api/autobyteus_audio_client.py +19 -5
- autobyteus/multimedia/audio/api/gemini_audio_client.py +108 -16
- autobyteus/multimedia/audio/audio_client_factory.py +47 -9
- autobyteus/multimedia/audio/audio_model.py +2 -1
- autobyteus/multimedia/image/api/autobyteus_image_client.py +19 -5
- autobyteus/multimedia/image/api/gemini_image_client.py +38 -17
- autobyteus/multimedia/image/api/openai_image_client.py +125 -43
- autobyteus/multimedia/image/autobyteus_image_provider.py +2 -1
- autobyteus/multimedia/image/image_client_factory.py +47 -15
- autobyteus/multimedia/image/image_model.py +5 -2
- autobyteus/multimedia/providers.py +3 -2
- autobyteus/skills/loader.py +71 -0
- autobyteus/skills/model.py +11 -0
- autobyteus/skills/registry.py +70 -0
- autobyteus/task_management/tools/todo_tools/add_todo.py +2 -2
- autobyteus/task_management/tools/todo_tools/create_todo_list.py +2 -2
- autobyteus/task_management/tools/todo_tools/update_todo_status.py +2 -2
- autobyteus/tools/__init__.py +34 -47
- autobyteus/tools/base_tool.py +7 -0
- autobyteus/tools/file/__init__.py +2 -6
- autobyteus/tools/file/patch_file.py +149 -0
- autobyteus/tools/file/read_file.py +36 -5
- autobyteus/tools/file/write_file.py +4 -1
- autobyteus/tools/functional_tool.py +43 -6
- autobyteus/tools/mcp/__init__.py +2 -0
- autobyteus/tools/mcp/config_service.py +5 -1
- autobyteus/tools/mcp/server/__init__.py +2 -0
- autobyteus/tools/mcp/server/http_managed_mcp_server.py +1 -1
- autobyteus/tools/mcp/server/websocket_managed_mcp_server.py +141 -0
- autobyteus/tools/mcp/server_instance_manager.py +8 -1
- autobyteus/tools/mcp/types.py +61 -0
- autobyteus/tools/multimedia/audio_tools.py +70 -17
- autobyteus/tools/multimedia/download_media_tool.py +18 -4
- autobyteus/tools/multimedia/image_tools.py +246 -62
- autobyteus/tools/operation_executor/journal_manager.py +107 -0
- autobyteus/tools/operation_executor/operation_event_buffer.py +57 -0
- autobyteus/tools/operation_executor/operation_event_producer.py +29 -0
- autobyteus/tools/operation_executor/operation_executor.py +58 -0
- autobyteus/tools/registry/tool_definition.py +43 -2
- autobyteus/tools/skill/load_skill.py +50 -0
- autobyteus/tools/terminal/__init__.py +45 -0
- autobyteus/tools/terminal/ansi_utils.py +32 -0
- autobyteus/tools/terminal/background_process_manager.py +233 -0
- autobyteus/tools/terminal/output_buffer.py +105 -0
- autobyteus/tools/terminal/prompt_detector.py +63 -0
- autobyteus/tools/terminal/pty_session.py +241 -0
- autobyteus/tools/terminal/session_factory.py +20 -0
- autobyteus/tools/terminal/terminal_session_manager.py +226 -0
- autobyteus/tools/terminal/tools/__init__.py +13 -0
- autobyteus/tools/terminal/tools/get_process_output.py +81 -0
- autobyteus/tools/terminal/tools/run_bash.py +109 -0
- autobyteus/tools/terminal/tools/start_background_process.py +104 -0
- autobyteus/tools/terminal/tools/stop_background_process.py +67 -0
- autobyteus/tools/terminal/types.py +54 -0
- autobyteus/tools/terminal/wsl_tmux_session.py +221 -0
- autobyteus/tools/terminal/wsl_utils.py +156 -0
- autobyteus/tools/transaction_management/backup_handler.py +48 -0
- autobyteus/tools/transaction_management/operation_lifecycle_manager.py +62 -0
- autobyteus/tools/usage/__init__.py +1 -2
- autobyteus/tools/usage/formatters/__init__.py +17 -1
- autobyteus/tools/usage/formatters/base_formatter.py +8 -0
- autobyteus/tools/usage/formatters/default_xml_schema_formatter.py +2 -2
- autobyteus/tools/usage/formatters/mistral_json_schema_formatter.py +18 -0
- autobyteus/tools/usage/formatters/patch_file_xml_example_formatter.py +64 -0
- autobyteus/tools/usage/formatters/patch_file_xml_schema_formatter.py +31 -0
- autobyteus/tools/usage/formatters/run_bash_xml_example_formatter.py +32 -0
- autobyteus/tools/usage/formatters/run_bash_xml_schema_formatter.py +36 -0
- autobyteus/tools/usage/formatters/write_file_xml_example_formatter.py +53 -0
- autobyteus/tools/usage/formatters/write_file_xml_schema_formatter.py +31 -0
- autobyteus/tools/usage/providers/tool_manifest_provider.py +10 -10
- autobyteus/tools/usage/registries/__init__.py +1 -3
- autobyteus/tools/usage/registries/tool_formatting_registry.py +115 -8
- autobyteus/tools/usage/tool_schema_provider.py +51 -0
- autobyteus/tools/web/__init__.py +4 -0
- autobyteus/tools/web/read_url_tool.py +80 -0
- autobyteus/utils/diff_utils.py +271 -0
- autobyteus/utils/download_utils.py +109 -0
- autobyteus/utils/file_utils.py +57 -2
- autobyteus/utils/gemini_helper.py +56 -0
- autobyteus/utils/gemini_model_mapping.py +71 -0
- autobyteus/utils/llm_output_formatter.py +75 -0
- autobyteus/utils/tool_call_format.py +36 -0
- autobyteus/workflow/agentic_workflow.py +3 -3
- autobyteus/workflow/bootstrap_steps/agent_tool_injection_step.py +2 -2
- autobyteus/workflow/bootstrap_steps/base_workflow_bootstrap_step.py +2 -2
- autobyteus/workflow/bootstrap_steps/coordinator_initialization_step.py +2 -2
- autobyteus/workflow/bootstrap_steps/coordinator_prompt_preparation_step.py +3 -9
- autobyteus/workflow/bootstrap_steps/workflow_bootstrapper.py +6 -6
- autobyteus/workflow/bootstrap_steps/workflow_runtime_queue_initialization_step.py +2 -2
- autobyteus/workflow/context/workflow_context.py +3 -3
- autobyteus/workflow/context/workflow_runtime_state.py +5 -5
- autobyteus/workflow/events/workflow_event_dispatcher.py +5 -5
- autobyteus/workflow/handlers/lifecycle_workflow_event_handler.py +3 -3
- autobyteus/workflow/handlers/process_user_message_event_handler.py +5 -5
- autobyteus/workflow/handlers/tool_approval_workflow_event_handler.py +2 -2
- autobyteus/workflow/runtime/workflow_runtime.py +8 -8
- autobyteus/workflow/runtime/workflow_worker.py +3 -3
- autobyteus/workflow/status/__init__.py +11 -0
- autobyteus/workflow/status/workflow_status.py +19 -0
- autobyteus/workflow/status/workflow_status_manager.py +48 -0
- autobyteus/workflow/streaming/__init__.py +2 -2
- autobyteus/workflow/streaming/workflow_event_notifier.py +7 -7
- autobyteus/workflow/streaming/workflow_stream_event_payloads.py +4 -4
- autobyteus/workflow/streaming/workflow_stream_events.py +3 -3
- autobyteus/workflow/utils/wait_for_idle.py +4 -4
- autobyteus-1.2.3.dist-info/METADATA +293 -0
- autobyteus-1.2.3.dist-info/RECORD +600 -0
- {autobyteus-1.2.1.dist-info → autobyteus-1.2.3.dist-info}/WHEEL +1 -1
- {autobyteus-1.2.1.dist-info → autobyteus-1.2.3.dist-info}/top_level.txt +0 -1
- autobyteus/agent/bootstrap_steps/agent_runtime_queue_initialization_step.py +0 -57
- autobyteus/agent/hooks/__init__.py +0 -16
- autobyteus/agent/hooks/base_phase_hook.py +0 -78
- autobyteus/agent/hooks/hook_definition.py +0 -36
- autobyteus/agent/hooks/hook_meta.py +0 -37
- autobyteus/agent/hooks/hook_registry.py +0 -106
- autobyteus/agent/llm_response_processor/provider_aware_tool_usage_processor.py +0 -103
- autobyteus/agent/phases/__init__.py +0 -18
- autobyteus/agent/phases/discover.py +0 -53
- autobyteus/agent/phases/manager.py +0 -265
- autobyteus/agent/phases/transition_decorator.py +0 -40
- autobyteus/agent/phases/transition_info.py +0 -33
- autobyteus/agent/remote_agent.py +0 -244
- autobyteus/agent/workspace/workspace_definition.py +0 -36
- autobyteus/agent/workspace/workspace_meta.py +0 -37
- autobyteus/agent/workspace/workspace_registry.py +0 -72
- autobyteus/agent_team/bootstrap_steps/agent_team_runtime_queue_initialization_step.py +0 -25
- autobyteus/agent_team/bootstrap_steps/coordinator_prompt_preparation_step.py +0 -85
- autobyteus/agent_team/phases/__init__.py +0 -11
- autobyteus/agent_team/phases/agent_team_operational_phase.py +0 -19
- autobyteus/agent_team/phases/agent_team_phase_manager.py +0 -48
- autobyteus/llm/api/bedrock_llm.py +0 -92
- autobyteus/llm/api/groq_llm.py +0 -94
- autobyteus/llm/api/nvidia_llm.py +0 -108
- autobyteus/llm/utils/token_pricing_config.py +0 -87
- autobyteus/rpc/__init__.py +0 -73
- autobyteus/rpc/client/__init__.py +0 -17
- autobyteus/rpc/client/abstract_client_connection.py +0 -124
- autobyteus/rpc/client/client_connection_manager.py +0 -153
- autobyteus/rpc/client/sse_client_connection.py +0 -306
- autobyteus/rpc/client/stdio_client_connection.py +0 -280
- autobyteus/rpc/config/__init__.py +0 -13
- autobyteus/rpc/config/agent_server_config.py +0 -153
- autobyteus/rpc/config/agent_server_registry.py +0 -152
- autobyteus/rpc/hosting.py +0 -244
- autobyteus/rpc/protocol.py +0 -244
- autobyteus/rpc/server/__init__.py +0 -20
- autobyteus/rpc/server/agent_server_endpoint.py +0 -181
- autobyteus/rpc/server/base_method_handler.py +0 -40
- autobyteus/rpc/server/method_handlers.py +0 -259
- autobyteus/rpc/server/sse_server_handler.py +0 -182
- autobyteus/rpc/server/stdio_server_handler.py +0 -151
- autobyteus/rpc/server_main.py +0 -198
- autobyteus/rpc/transport_type.py +0 -13
- autobyteus/tools/bash/__init__.py +0 -2
- autobyteus/tools/bash/bash_executor.py +0 -100
- autobyteus/tools/browser/__init__.py +0 -2
- autobyteus/tools/browser/session_aware/browser_session_aware_navigate_to.py +0 -75
- autobyteus/tools/browser/session_aware/browser_session_aware_tool.py +0 -30
- autobyteus/tools/browser/session_aware/browser_session_aware_web_element_trigger.py +0 -154
- autobyteus/tools/browser/session_aware/browser_session_aware_webpage_reader.py +0 -89
- autobyteus/tools/browser/session_aware/browser_session_aware_webpage_screenshot_taker.py +0 -107
- autobyteus/tools/browser/session_aware/factory/browser_session_aware_web_element_trigger_factory.py +0 -14
- autobyteus/tools/browser/session_aware/factory/browser_session_aware_webpage_reader_factory.py +0 -26
- autobyteus/tools/browser/session_aware/factory/browser_session_aware_webpage_screenshot_taker_factory.py +0 -14
- autobyteus/tools/browser/session_aware/shared_browser_session.py +0 -11
- autobyteus/tools/browser/session_aware/shared_browser_session_manager.py +0 -25
- autobyteus/tools/browser/session_aware/web_element_action.py +0 -20
- autobyteus/tools/browser/standalone/__init__.py +0 -6
- autobyteus/tools/browser/standalone/factory/__init__.py +0 -0
- autobyteus/tools/browser/standalone/factory/webpage_reader_factory.py +0 -25
- autobyteus/tools/browser/standalone/factory/webpage_screenshot_taker_factory.py +0 -14
- autobyteus/tools/browser/standalone/navigate_to.py +0 -84
- autobyteus/tools/browser/standalone/web_page_pdf_generator.py +0 -101
- autobyteus/tools/browser/standalone/webpage_image_downloader.py +0 -169
- autobyteus/tools/browser/standalone/webpage_reader.py +0 -105
- autobyteus/tools/browser/standalone/webpage_screenshot_taker.py +0 -105
- autobyteus/tools/file/edit_file.py +0 -200
- autobyteus/tools/file/list_directory.py +0 -168
- autobyteus/tools/file/search_files.py +0 -188
- autobyteus/tools/timer.py +0 -175
- autobyteus/tools/usage/parsers/__init__.py +0 -22
- autobyteus/tools/usage/parsers/_json_extractor.py +0 -99
- autobyteus/tools/usage/parsers/_string_decoders.py +0 -18
- autobyteus/tools/usage/parsers/anthropic_xml_tool_usage_parser.py +0 -10
- autobyteus/tools/usage/parsers/base_parser.py +0 -41
- autobyteus/tools/usage/parsers/default_json_tool_usage_parser.py +0 -83
- autobyteus/tools/usage/parsers/default_xml_tool_usage_parser.py +0 -316
- autobyteus/tools/usage/parsers/exceptions.py +0 -13
- autobyteus/tools/usage/parsers/gemini_json_tool_usage_parser.py +0 -77
- autobyteus/tools/usage/parsers/openai_json_tool_usage_parser.py +0 -149
- autobyteus/tools/usage/parsers/provider_aware_tool_usage_parser.py +0 -59
- autobyteus/tools/usage/registries/tool_usage_parser_registry.py +0 -62
- autobyteus/workflow/phases/__init__.py +0 -11
- autobyteus/workflow/phases/workflow_operational_phase.py +0 -19
- autobyteus/workflow/phases/workflow_phase_manager.py +0 -48
- autobyteus-1.2.1.dist-info/METADATA +0 -205
- autobyteus-1.2.1.dist-info/RECORD +0 -511
- examples/__init__.py +0 -1
- examples/agent_team/__init__.py +0 -1
- examples/discover_phase_transitions.py +0 -104
- examples/run_agentic_software_engineer.py +0 -239
- examples/run_browser_agent.py +0 -262
- examples/run_google_slides_agent.py +0 -287
- examples/run_mcp_browser_client.py +0 -174
- examples/run_mcp_google_slides_client.py +0 -270
- examples/run_mcp_list_tools.py +0 -189
- examples/run_poem_writer.py +0 -284
- examples/run_sqlite_agent.py +0 -295
- /autobyteus/{tools/browser/session_aware → skills}/__init__.py +0 -0
- /autobyteus/tools/{browser/session_aware/factory → skill}/__init__.py +0 -0
- {autobyteus-1.2.1.dist-info → autobyteus-1.2.3.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import logging
|
|
3
|
+
from typing import Dict, List, Any, Optional
|
|
4
|
+
|
|
5
|
+
from ollama import Image
|
|
6
|
+
|
|
7
|
+
from autobyteus.llm.prompt_renderers.base_prompt_renderer import BasePromptRenderer
|
|
8
|
+
from autobyteus.llm.utils.messages import Message, MessageRole, ToolCallPayload, ToolResultPayload
|
|
9
|
+
from autobyteus.llm.utils.media_payload_formatter import media_source_to_base64
|
|
10
|
+
|
|
11
|
+
logger = logging.getLogger(__name__)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class OllamaPromptRenderer(BasePromptRenderer):
    """Renders internal ``Message`` objects into the dict format the Ollama client expects."""

    async def render(self, messages: List[Message]) -> List[Dict[str, Any]]:
        """Convert *messages* into Ollama chat dicts.

        Tool payloads are flattened into plain text because Ollama's chat API
        has no native tool-call representation; image URLs are downloaded and
        base64-encoded concurrently. Image failures are logged and skipped so
        a bad URL never aborts the whole prompt.
        """
        rendered: List[Dict[str, Any]] = []
        for message in messages:
            role_value = message.role.value
            text = message.content or ""

            if message.tool_payload or message.role == MessageRole.TOOL:
                # Flatten the tool payload to text. Tool results go back to the
                # model as "user" turns; tool calls stay on the assistant side.
                text = _format_tool_payload(message) or ""
                if message.role == MessageRole.TOOL:
                    role_value = MessageRole.USER.value
                else:
                    role_value = MessageRole.ASSISTANT.value

            entry: Dict[str, Any] = {"role": role_value, "content": text}

            if message.image_urls:
                try:
                    # Encode all images for this message concurrently.
                    encoded = await asyncio.gather(
                        *(media_source_to_base64(url) for url in message.image_urls)
                    )
                    if encoded:
                        entry["images"] = [Image(value=data) for data in encoded]
                except Exception as exc:
                    logger.error("Error processing images for Ollama, skipping them. Error: %s", exc)

            rendered.append(entry)
        return rendered
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _format_tool_payload(message: Message) -> Optional[str]:
    """Flatten a message's tool payload into a plain-text marker string.

    Returns one "[TOOL_CALL] ..." line per call for call payloads, a single
    "[TOOL_ERROR]"/"[TOOL_RESULT]" line for result payloads, and ``None``
    when the payload is absent or of an unrecognized type.
    """
    payload = message.tool_payload

    if isinstance(payload, ToolCallPayload):
        parts = []
        for call in payload.tool_calls:
            parts.append(f"[TOOL_CALL] {call.name} {call.arguments}")
        return "\n".join(parts)

    if isinstance(payload, ToolResultPayload):
        if payload.tool_error:
            return f"[TOOL_ERROR] {payload.tool_name} {payload.tool_error}"
        return f"[TOOL_RESULT] {payload.tool_name} {payload.tool_result}"

    return None
|
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import json
|
|
3
|
+
import logging
|
|
4
|
+
from typing import List, Dict, Any
|
|
5
|
+
|
|
6
|
+
from autobyteus.llm.prompt_renderers.base_prompt_renderer import BasePromptRenderer
|
|
7
|
+
from autobyteus.llm.utils.media_payload_formatter import (
|
|
8
|
+
media_source_to_base64,
|
|
9
|
+
create_data_uri,
|
|
10
|
+
get_mime_type,
|
|
11
|
+
is_valid_media_path,
|
|
12
|
+
)
|
|
13
|
+
from autobyteus.llm.utils.messages import Message, ToolCallPayload, ToolResultPayload
|
|
14
|
+
|
|
15
|
+
logger = logging.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class OpenAIChatRenderer(BasePromptRenderer):
|
|
19
|
+
async def render(self, messages: List[Message]) -> List[Dict[str, Any]]:
|
|
20
|
+
rendered: List[Dict[str, Any]] = []
|
|
21
|
+
|
|
22
|
+
for msg in messages:
|
|
23
|
+
content: Any = msg.content
|
|
24
|
+
if msg.image_urls or msg.audio_urls or msg.video_urls:
|
|
25
|
+
content_parts: List[Dict[str, Any]] = []
|
|
26
|
+
if msg.content:
|
|
27
|
+
content_parts.append({"type": "text", "text": msg.content})
|
|
28
|
+
|
|
29
|
+
image_tasks = []
|
|
30
|
+
for url in msg.image_urls:
|
|
31
|
+
image_tasks.append(media_source_to_base64(url))
|
|
32
|
+
|
|
33
|
+
try:
|
|
34
|
+
base64_images = await asyncio.gather(*image_tasks)
|
|
35
|
+
for i, b64_image in enumerate(base64_images):
|
|
36
|
+
original_url = msg.image_urls[i]
|
|
37
|
+
mime_type = (
|
|
38
|
+
get_mime_type(original_url)
|
|
39
|
+
if is_valid_media_path(original_url)
|
|
40
|
+
else "image/jpeg"
|
|
41
|
+
)
|
|
42
|
+
content_parts.append(create_data_uri(mime_type, b64_image))
|
|
43
|
+
except Exception as e:
|
|
44
|
+
logger.error("Error processing one or more images: %s", e)
|
|
45
|
+
|
|
46
|
+
if msg.audio_urls:
|
|
47
|
+
logger.warning("OpenAI compatible layer does not yet support audio; skipping.")
|
|
48
|
+
if msg.video_urls:
|
|
49
|
+
logger.warning("OpenAI compatible layer does not yet support video; skipping.")
|
|
50
|
+
|
|
51
|
+
content = content_parts
|
|
52
|
+
|
|
53
|
+
if isinstance(msg.tool_payload, ToolCallPayload):
|
|
54
|
+
tool_calls = [
|
|
55
|
+
{
|
|
56
|
+
"id": call.id,
|
|
57
|
+
"type": "function",
|
|
58
|
+
"function": {
|
|
59
|
+
"name": call.name,
|
|
60
|
+
"arguments": json.dumps(call.arguments, ensure_ascii=True),
|
|
61
|
+
},
|
|
62
|
+
}
|
|
63
|
+
for call in msg.tool_payload.tool_calls
|
|
64
|
+
]
|
|
65
|
+
rendered.append(
|
|
66
|
+
{
|
|
67
|
+
"role": "assistant",
|
|
68
|
+
"content": content,
|
|
69
|
+
"tool_calls": tool_calls,
|
|
70
|
+
}
|
|
71
|
+
)
|
|
72
|
+
continue
|
|
73
|
+
|
|
74
|
+
if isinstance(msg.tool_payload, ToolResultPayload):
|
|
75
|
+
result_text = _format_tool_result(msg.tool_payload)
|
|
76
|
+
rendered.append(
|
|
77
|
+
{
|
|
78
|
+
"role": "tool",
|
|
79
|
+
"tool_call_id": msg.tool_payload.tool_call_id,
|
|
80
|
+
"content": result_text,
|
|
81
|
+
}
|
|
82
|
+
)
|
|
83
|
+
continue
|
|
84
|
+
|
|
85
|
+
rendered.append({"role": msg.role.value, "content": content})
|
|
86
|
+
|
|
87
|
+
return rendered
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
def _format_tool_result(payload: ToolResultPayload) -> str:
|
|
91
|
+
if payload.tool_error:
|
|
92
|
+
return f"Error: {payload.tool_error}"
|
|
93
|
+
if payload.tool_result is None:
|
|
94
|
+
return ""
|
|
95
|
+
if isinstance(payload.tool_result, (dict, list)):
|
|
96
|
+
return json.dumps(payload.tool_result, ensure_ascii=True)
|
|
97
|
+
return str(payload.tool_result)
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import json
|
|
3
|
+
import logging
|
|
4
|
+
from typing import List, Dict, Any
|
|
5
|
+
|
|
6
|
+
from autobyteus.llm.prompt_renderers.base_prompt_renderer import BasePromptRenderer
|
|
7
|
+
from autobyteus.llm.utils.media_payload_formatter import (
|
|
8
|
+
media_source_to_base64,
|
|
9
|
+
create_data_uri,
|
|
10
|
+
get_mime_type,
|
|
11
|
+
is_valid_media_path,
|
|
12
|
+
)
|
|
13
|
+
from autobyteus.llm.utils.messages import Message, ToolCallPayload, ToolResultPayload
|
|
14
|
+
|
|
15
|
+
logger = logging.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class OpenAIResponsesRenderer(BasePromptRenderer):
|
|
19
|
+
async def render(self, messages: List[Message]) -> List[Dict[str, Any]]:
|
|
20
|
+
rendered: List[Dict[str, Any]] = []
|
|
21
|
+
|
|
22
|
+
for msg in messages:
|
|
23
|
+
if isinstance(msg.tool_payload, ToolCallPayload):
|
|
24
|
+
rendered.append(
|
|
25
|
+
{
|
|
26
|
+
"type": "message",
|
|
27
|
+
"role": "assistant",
|
|
28
|
+
"content": _format_tool_calls(msg.tool_payload),
|
|
29
|
+
}
|
|
30
|
+
)
|
|
31
|
+
continue
|
|
32
|
+
|
|
33
|
+
if isinstance(msg.tool_payload, ToolResultPayload):
|
|
34
|
+
rendered.append(
|
|
35
|
+
{
|
|
36
|
+
"type": "message",
|
|
37
|
+
"role": "user",
|
|
38
|
+
"content": _format_tool_result(msg.tool_payload),
|
|
39
|
+
}
|
|
40
|
+
)
|
|
41
|
+
continue
|
|
42
|
+
|
|
43
|
+
if msg.image_urls or msg.audio_urls or msg.video_urls:
|
|
44
|
+
content_parts: List[Dict[str, Any]] = []
|
|
45
|
+
if msg.content:
|
|
46
|
+
content_parts.append({"type": "input_text", "text": msg.content})
|
|
47
|
+
|
|
48
|
+
image_tasks = []
|
|
49
|
+
for url in msg.image_urls:
|
|
50
|
+
image_tasks.append(media_source_to_base64(url))
|
|
51
|
+
|
|
52
|
+
try:
|
|
53
|
+
base64_images = await asyncio.gather(*image_tasks)
|
|
54
|
+
for i, b64_image in enumerate(base64_images):
|
|
55
|
+
original_url = msg.image_urls[i]
|
|
56
|
+
mime_type = (
|
|
57
|
+
get_mime_type(original_url)
|
|
58
|
+
if is_valid_media_path(original_url)
|
|
59
|
+
else "image/jpeg"
|
|
60
|
+
)
|
|
61
|
+
data_uri = create_data_uri(mime_type, b64_image)["image_url"]["url"]
|
|
62
|
+
content_parts.append(
|
|
63
|
+
{"type": "input_image", "image_url": data_uri, "detail": "auto"}
|
|
64
|
+
)
|
|
65
|
+
except Exception as e:
|
|
66
|
+
logger.error("Error processing one or more images: %s", e)
|
|
67
|
+
|
|
68
|
+
if msg.audio_urls:
|
|
69
|
+
logger.warning("OpenAI Responses input does not yet support audio; skipping.")
|
|
70
|
+
if msg.video_urls:
|
|
71
|
+
logger.warning("OpenAI Responses input does not yet support video; skipping.")
|
|
72
|
+
|
|
73
|
+
rendered.append(
|
|
74
|
+
{"type": "message", "role": msg.role.value, "content": content_parts}
|
|
75
|
+
)
|
|
76
|
+
else:
|
|
77
|
+
rendered.append(
|
|
78
|
+
{"type": "message", "role": msg.role.value, "content": msg.content or ""}
|
|
79
|
+
)
|
|
80
|
+
|
|
81
|
+
return rendered
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def _format_tool_calls(payload: ToolCallPayload) -> str:
|
|
85
|
+
lines = []
|
|
86
|
+
for call in payload.tool_calls:
|
|
87
|
+
args = json.dumps(call.arguments, ensure_ascii=True)
|
|
88
|
+
lines.append(f"[TOOL_CALL] {call.name} {args}")
|
|
89
|
+
return "\n".join(lines)
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def _format_tool_result(payload: ToolResultPayload) -> str:
|
|
93
|
+
if payload.tool_error:
|
|
94
|
+
return f"[TOOL_ERROR] {payload.tool_name} {payload.tool_error}"
|
|
95
|
+
if payload.tool_result is None:
|
|
96
|
+
return f"[TOOL_RESULT] {payload.tool_name}"
|
|
97
|
+
if isinstance(payload.tool_result, (dict, list)):
|
|
98
|
+
result_text = json.dumps(payload.tool_result, ensure_ascii=True)
|
|
99
|
+
else:
|
|
100
|
+
result_text = str(payload.tool_result)
|
|
101
|
+
return f"[TOOL_RESULT] {payload.tool_name} {result_text}"
|
autobyteus/llm/providers.py
CHANGED
|
@@ -4,10 +4,7 @@ class LLMProvider(Enum):
|
|
|
4
4
|
OPENAI = "OPENAI"
|
|
5
5
|
ANTHROPIC = "ANTHROPIC"
|
|
6
6
|
MISTRAL = "MISTRAL"
|
|
7
|
-
GROQ = "GROQ"
|
|
8
7
|
GEMINI = "GEMINI"
|
|
9
|
-
NVIDIA = "NVIDIA"
|
|
10
|
-
PERPLEXITY = "PERPLEXITY"
|
|
11
8
|
OLLAMA = "OLLAMA"
|
|
12
9
|
DEEPSEEK = "DEEPSEEK"
|
|
13
10
|
GROK = "GROK"
|
|
@@ -16,3 +13,4 @@ class LLMProvider(Enum):
|
|
|
16
13
|
QWEN = "QWEN"
|
|
17
14
|
LMSTUDIO = "LMSTUDIO"
|
|
18
15
|
ZHIPU = "ZHIPU"
|
|
16
|
+
MINIMAX = "MINIMAX"
|
|
@@ -1,8 +1,9 @@
|
|
|
1
1
|
import anthropic
|
|
2
|
-
|
|
2
|
+
import json
|
|
3
|
+
from typing import List, TYPE_CHECKING, Dict, Any, Optional, Tuple
|
|
3
4
|
from autobyteus.llm.token_counter.base_token_counter import BaseTokenCounter
|
|
4
5
|
from autobyteus.llm.models import LLMModel
|
|
5
|
-
from autobyteus.llm.utils.messages import Message
|
|
6
|
+
from autobyteus.llm.utils.messages import Message, MessageRole
|
|
6
7
|
|
|
7
8
|
if TYPE_CHECKING:
|
|
8
9
|
from autobyteus.llm.base_llm import BaseLLM
|
|
@@ -14,23 +15,47 @@ class ClaudeTokenCounter(BaseTokenCounter):
|
|
|
14
15
|
|
|
15
16
|
def __init__(self, model: LLMModel, llm: 'BaseLLM' = None):
|
|
16
17
|
super().__init__(model, llm)
|
|
17
|
-
self.client = anthropic.
|
|
18
|
+
self.client = anthropic.Anthropic()
|
|
18
19
|
|
|
19
|
-
def
|
|
20
|
+
def convert_to_anthropic_messages(self, messages: List[Message]) -> Tuple[Optional[str], List[Dict[str, Any]]]:
|
|
20
21
|
"""
|
|
21
|
-
Convert messages to the
|
|
22
|
+
Convert messages to the format required for Claude token counting.
|
|
22
23
|
|
|
23
24
|
Args:
|
|
24
25
|
messages (List[Message]): The list of input messages.
|
|
25
26
|
|
|
26
27
|
Returns:
|
|
27
|
-
List[str]:
|
|
28
|
+
Tuple[Optional[str], List[Dict[str, Any]]]: System prompt (if any) and message payloads.
|
|
28
29
|
"""
|
|
29
|
-
|
|
30
|
+
system_parts: List[str] = []
|
|
31
|
+
processed_messages: List[Dict[str, Any]] = []
|
|
30
32
|
for message in messages:
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
33
|
+
content = self._extract_message_text(message)
|
|
34
|
+
if message.role == MessageRole.SYSTEM:
|
|
35
|
+
system_parts.append(content)
|
|
36
|
+
continue
|
|
37
|
+
role = "assistant" if message.role == MessageRole.ASSISTANT else "user"
|
|
38
|
+
processed_messages.append({"role": role, "content": content})
|
|
39
|
+
system = "\n".join(system_parts) if system_parts else None
|
|
40
|
+
return system, processed_messages
|
|
41
|
+
|
|
42
|
+
def _extract_message_text(self, message: Message) -> str:
|
|
43
|
+
if message.content is not None:
|
|
44
|
+
return message.content
|
|
45
|
+
if message.tool_payload is None:
|
|
46
|
+
raise ValueError("Message content is None and no tool payload is available.")
|
|
47
|
+
payload = message.to_dict().get("tool_payload")
|
|
48
|
+
return json.dumps(payload, sort_keys=True)
|
|
49
|
+
|
|
50
|
+
def _count_tokens(self, messages: List[Dict[str, Any]], system: Optional[str] = None) -> int:
|
|
51
|
+
kwargs: Dict[str, Any] = {
|
|
52
|
+
"model": self.model.value,
|
|
53
|
+
"messages": messages,
|
|
54
|
+
}
|
|
55
|
+
if system:
|
|
56
|
+
kwargs["system"] = system
|
|
57
|
+
response = self.client.messages.count_tokens(**kwargs)
|
|
58
|
+
return response.input_tokens
|
|
34
59
|
|
|
35
60
|
def count_input_tokens(self, messages: List[Message]) -> int:
|
|
36
61
|
"""
|
|
@@ -45,17 +70,16 @@ class ClaudeTokenCounter(BaseTokenCounter):
|
|
|
45
70
|
if not messages:
|
|
46
71
|
return 0
|
|
47
72
|
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
total_tokens
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
return total_tokens
|
|
73
|
+
try:
|
|
74
|
+
system, processed_messages = self.convert_to_anthropic_messages(messages)
|
|
75
|
+
if not processed_messages:
|
|
76
|
+
dummy = [{"role": "user", "content": " "}]
|
|
77
|
+
total_tokens = self._count_tokens(dummy, system=system)
|
|
78
|
+
prompt_tokens = self._count_tokens(dummy)
|
|
79
|
+
return max(0, total_tokens - prompt_tokens)
|
|
80
|
+
return self._count_tokens(processed_messages, system=system)
|
|
81
|
+
except Exception as e:
|
|
82
|
+
raise ValueError(f"Failed to count tokens for messages: {str(e)}")
|
|
59
83
|
|
|
60
84
|
def count_output_tokens(self, message: Message) -> int:
|
|
61
85
|
"""
|
|
@@ -67,11 +91,18 @@ class ClaudeTokenCounter(BaseTokenCounter):
|
|
|
67
91
|
Returns:
|
|
68
92
|
int: The number of output tokens.
|
|
69
93
|
"""
|
|
70
|
-
if not message
|
|
94
|
+
if not message:
|
|
71
95
|
return 0
|
|
72
|
-
|
|
96
|
+
|
|
73
97
|
try:
|
|
74
|
-
|
|
75
|
-
|
|
98
|
+
content = self._extract_message_text(message)
|
|
99
|
+
if content == "":
|
|
100
|
+
return 0
|
|
101
|
+
if message.role == MessageRole.ASSISTANT:
|
|
102
|
+
dummy_user = Message(role=MessageRole.USER, content=" ")
|
|
103
|
+
total_tokens = self.count_input_tokens([dummy_user, message])
|
|
104
|
+
prompt_tokens = self.count_input_tokens([dummy_user])
|
|
105
|
+
return max(0, total_tokens - prompt_tokens)
|
|
106
|
+
return self.count_input_tokens([message])
|
|
76
107
|
except Exception as e:
|
|
77
108
|
raise ValueError(f"Failed to count output tokens: {str(e)}")
|
|
@@ -95,7 +95,6 @@ class MistralTokenCounter(BaseTokenCounter):
|
|
|
95
95
|
def count_output_tokens(self, message: Message) -> int:
|
|
96
96
|
"""
|
|
97
97
|
Count the number of tokens in the output message using Mistral's tokenizer.
|
|
98
|
-
This implementation subtracts the token count of previous messages from the total tokens to isolate the output tokens.
|
|
99
98
|
|
|
100
99
|
Args:
|
|
101
100
|
message (Message): The output message.
|
|
@@ -106,10 +105,15 @@ class MistralTokenCounter(BaseTokenCounter):
|
|
|
106
105
|
Raises:
|
|
107
106
|
Exception: If token counting fails for any reason.
|
|
108
107
|
"""
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
108
|
+
if not message:
|
|
109
|
+
return 0
|
|
110
|
+
if message.role == MessageRole.ASSISTANT:
|
|
111
|
+
if not message.content:
|
|
112
|
+
return 0
|
|
113
|
+
# Mistral requires conversations to start with user/system.
|
|
114
|
+
# Estimate output tokens by subtracting a dummy prompt prefix.
|
|
115
|
+
dummy_user = Message(role=MessageRole.USER, content=" ")
|
|
116
|
+
total_tokens = self.count_input_tokens([dummy_user, message])
|
|
117
|
+
prompt_tokens = self.count_input_tokens([dummy_user])
|
|
118
|
+
return max(0, total_tokens - prompt_tokens)
|
|
119
|
+
return self.count_input_tokens([message])
|
|
@@ -1,3 +1,4 @@
|
|
|
1
|
+
import logging
|
|
1
2
|
import tiktoken
|
|
2
3
|
from typing import List, TYPE_CHECKING
|
|
3
4
|
from autobyteus.llm.token_counter.base_token_counter import BaseTokenCounter
|
|
@@ -7,6 +8,8 @@ from autobyteus.llm.utils.messages import Message
|
|
|
7
8
|
if TYPE_CHECKING:
|
|
8
9
|
from autobyteus.llm.base_llm import BaseLLM
|
|
9
10
|
|
|
11
|
+
logger = logging.getLogger(__name__)
|
|
12
|
+
|
|
10
13
|
class OpenAITokenCounter(BaseTokenCounter):
|
|
11
14
|
"""
|
|
12
15
|
A token counter implementation for OpenAI models using tiktoken.
|
|
@@ -17,8 +20,24 @@ class OpenAITokenCounter(BaseTokenCounter):
|
|
|
17
20
|
try:
|
|
18
21
|
self.encoding = tiktoken.encoding_for_model(model.value)
|
|
19
22
|
except Exception:
|
|
20
|
-
#
|
|
21
|
-
|
|
23
|
+
# If the specific model is unknown, fall back to the widely available
|
|
24
|
+
# cl100k_base encoding. tiktoken bundles this file; it loads locally
|
|
25
|
+
# without needing network access.
|
|
26
|
+
try:
|
|
27
|
+
logger.warning(
|
|
28
|
+
"tiktoken encoding_for_model failed for '%s'; falling back to cl100k_base (approximate token counts).",
|
|
29
|
+
model.value,
|
|
30
|
+
)
|
|
31
|
+
self.encoding = tiktoken.get_encoding("cl100k_base")
|
|
32
|
+
except Exception:
|
|
33
|
+
# As a last resort (e.g., stripped-down wheels), degrade gracefully
|
|
34
|
+
# with a naive whitespace encoder so tests can still execute offline.
|
|
35
|
+
logger.warning(
|
|
36
|
+
"tiktoken cl100k_base unavailable; using whitespace token counting for model '%s' (very approximate).",
|
|
37
|
+
model.value,
|
|
38
|
+
)
|
|
39
|
+
self.encoding = None
|
|
40
|
+
self._encode = self.encoding.encode if self.encoding else (lambda text: text.split() if text else [])
|
|
22
41
|
|
|
23
42
|
def convert_to_internal_format(self, messages: List[Message]) -> List[str]:
|
|
24
43
|
"""
|
|
@@ -51,7 +70,7 @@ class OpenAITokenCounter(BaseTokenCounter):
|
|
|
51
70
|
processed_messages = self.convert_to_internal_format(messages)
|
|
52
71
|
total_tokens = 0
|
|
53
72
|
for processed_message in processed_messages:
|
|
54
|
-
total_tokens += len(self.
|
|
73
|
+
total_tokens += len(self._encode(processed_message))
|
|
55
74
|
return total_tokens
|
|
56
75
|
|
|
57
76
|
def count_output_tokens(self, message: Message) -> int:
|
|
@@ -67,7 +86,7 @@ class OpenAITokenCounter(BaseTokenCounter):
|
|
|
67
86
|
if not message.content:
|
|
68
87
|
return 0
|
|
69
88
|
processed_message = f"<im_start>{message.role.value}\n{message.content}\n<im_end>"
|
|
70
|
-
return len(self.
|
|
89
|
+
return len(self._encode(processed_message))
|
|
71
90
|
|
|
72
91
|
def count_tokens(self, text: str) -> int:
|
|
73
92
|
"""
|
|
@@ -81,4 +100,4 @@ class OpenAITokenCounter(BaseTokenCounter):
|
|
|
81
100
|
"""
|
|
82
101
|
if not text:
|
|
83
102
|
return 0
|
|
84
|
-
return len(self.
|
|
103
|
+
return len(self._encode(text))
|
|
@@ -1,4 +1,6 @@
|
|
|
1
|
-
from typing import TYPE_CHECKING
|
|
1
|
+
from typing import TYPE_CHECKING, Optional
|
|
2
|
+
import logging
|
|
3
|
+
|
|
2
4
|
from autobyteus.llm.token_counter.openai_token_counter import OpenAITokenCounter
|
|
3
5
|
from autobyteus.llm.token_counter.claude_token_counter import ClaudeTokenCounter
|
|
4
6
|
from autobyteus.llm.token_counter.mistral_token_counter import MistralTokenCounter
|
|
@@ -12,7 +14,9 @@ from autobyteus.llm.providers import LLMProvider
|
|
|
12
14
|
if TYPE_CHECKING:
|
|
13
15
|
from autobyteus.llm.base_llm import BaseLLM
|
|
14
16
|
|
|
15
|
-
|
|
17
|
+
logger = logging.getLogger(__name__)
|
|
18
|
+
|
|
19
|
+
def get_token_counter(model: LLMModel, llm: 'BaseLLM') -> Optional[BaseTokenCounter]:
|
|
16
20
|
"""
|
|
17
21
|
Return the appropriate token counter implementation based on the model.
|
|
18
22
|
|
|
@@ -21,7 +25,8 @@ def get_token_counter(model: LLMModel, llm: 'BaseLLM') -> BaseTokenCounter:
|
|
|
21
25
|
llm (BaseLLM): The LLM instance.
|
|
22
26
|
|
|
23
27
|
Returns:
|
|
24
|
-
BaseTokenCounter: An instance of a token counter specific to the model
|
|
28
|
+
Optional[BaseTokenCounter]: An instance of a token counter specific to the model,
|
|
29
|
+
or None if no token counter is available for the provider.
|
|
25
30
|
"""
|
|
26
31
|
if model.provider == LLMProvider.OPENAI:
|
|
27
32
|
return OpenAITokenCounter(model, llm)
|
|
@@ -46,5 +51,7 @@ def get_token_counter(model: LLMModel, llm: 'BaseLLM') -> BaseTokenCounter:
|
|
|
46
51
|
elif model.provider == LLMProvider.ZHIPU:
|
|
47
52
|
return ZhipuTokenCounter(model, llm)
|
|
48
53
|
else:
|
|
49
|
-
# For
|
|
50
|
-
|
|
54
|
+
# For providers without a specialized counter, return None and log a warning
|
|
55
|
+
logger.info(f"No token counter available for provider {model.provider.value}. Token usage tracking will be disabled.")
|
|
56
|
+
return None
|
|
57
|
+
|
|
@@ -54,8 +54,9 @@ class LLMConfig:
|
|
|
54
54
|
top_p: Optional[float] = None
|
|
55
55
|
frequency_penalty: Optional[float] = None
|
|
56
56
|
presence_penalty: Optional[float] = None
|
|
57
|
+
compaction_ratio: Optional[float] = None
|
|
58
|
+
safety_margin_tokens: Optional[int] = None
|
|
57
59
|
stop_sequences: Optional[List] = None
|
|
58
|
-
uses_max_completion_tokens: bool = False
|
|
59
60
|
extra_params: Dict[str, Any] = field(default_factory=dict)
|
|
60
61
|
pricing_config: TokenPricingConfig = field(default_factory=TokenPricingConfig)
|
|
61
62
|
|
|
@@ -107,7 +108,7 @@ class LLMConfig:
|
|
|
107
108
|
known_fields = {
|
|
108
109
|
'rate_limit', 'token_limit', 'system_message', 'temperature',
|
|
109
110
|
'max_tokens', 'top_p', 'frequency_penalty', 'presence_penalty',
|
|
110
|
-
'
|
|
111
|
+
'compaction_ratio', 'safety_margin_tokens', 'stop_sequences', 'extra_params',
|
|
111
112
|
'pricing_config'
|
|
112
113
|
}
|
|
113
114
|
|
|
@@ -122,8 +123,9 @@ class LLMConfig:
|
|
|
122
123
|
top_p=init_kwargs.get('top_p'),
|
|
123
124
|
frequency_penalty=init_kwargs.get('frequency_penalty'),
|
|
124
125
|
presence_penalty=init_kwargs.get('presence_penalty'),
|
|
126
|
+
compaction_ratio=init_kwargs.get('compaction_ratio'),
|
|
127
|
+
safety_margin_tokens=init_kwargs.get('safety_margin_tokens'),
|
|
125
128
|
stop_sequences=init_kwargs.get('stop_sequences'),
|
|
126
|
-
uses_max_completion_tokens=init_kwargs.get('uses_max_completion_tokens', False),
|
|
127
129
|
extra_params=init_kwargs.get('extra_params', {}),
|
|
128
130
|
pricing_config=pricing_config_data
|
|
129
131
|
)
|
|
@@ -174,16 +176,8 @@ class LLMConfig:
|
|
|
174
176
|
for f_info in fields(override_config):
|
|
175
177
|
override_value = getattr(override_config, f_info.name)
|
|
176
178
|
|
|
177
|
-
# Special handling for booleans where we want to merge if it's not the default
|
|
178
|
-
# For `uses_max_completion_tokens`, the default is False, so `if override_value:` is fine
|
|
179
|
-
is_boolean_field = f_info.type == bool
|
|
180
|
-
|
|
181
|
-
# Standard check for None, but also merge if it's a non-default boolean
|
|
182
179
|
if override_value is not None:
|
|
183
|
-
|
|
184
|
-
if is_boolean_field and override_value is False and getattr(self, f_info.name) is True:
|
|
185
|
-
setattr(self, f_info.name, override_value)
|
|
186
|
-
elif f_info.name == 'pricing_config':
|
|
180
|
+
if f_info.name == 'pricing_config':
|
|
187
181
|
if not isinstance(self.pricing_config, TokenPricingConfig):
|
|
188
182
|
self.pricing_config = TokenPricingConfig()
|
|
189
183
|
|
|
@@ -27,8 +27,8 @@ logger.warning(
|
|
|
27
27
|
def get_mime_type(file_path: str) -> str:
|
|
28
28
|
"""Determine MIME type of file."""
|
|
29
29
|
mime_type, _ = mimetypes.guess_type(file_path)
|
|
30
|
-
if not mime_type
|
|
31
|
-
return '
|
|
30
|
+
if not mime_type:
|
|
31
|
+
return 'application/octet-stream' # generic fallback
|
|
32
32
|
return mime_type
|
|
33
33
|
|
|
34
34
|
|
|
@@ -44,9 +44,16 @@ def is_base64(s: str) -> bool:
|
|
|
44
44
|
return False
|
|
45
45
|
|
|
46
46
|
|
|
47
|
-
def
|
|
48
|
-
"""Check if path exists and has a valid
|
|
49
|
-
valid_extensions = {
|
|
47
|
+
def is_valid_media_path(path: str) -> bool:
|
|
48
|
+
"""Check if path exists and has a valid media extension."""
|
|
49
|
+
valid_extensions = {
|
|
50
|
+
# Images
|
|
51
|
+
".jpg", ".jpeg", ".png", ".gif", ".webp",
|
|
52
|
+
# Audio
|
|
53
|
+
".mp3", ".wav", ".ogg", ".aac", ".flac",
|
|
54
|
+
# Video
|
|
55
|
+
".mp4", ".mpeg", ".mov", ".avi", ".webm", ".mkv"
|
|
56
|
+
}
|
|
50
57
|
try:
|
|
51
58
|
file_path = Path(path)
|
|
52
59
|
return file_path.is_file() and file_path.suffix.lower() in valid_extensions
|
|
@@ -64,36 +71,36 @@ def create_data_uri(mime_type: str, base64_data: str) -> Dict:
|
|
|
64
71
|
}
|
|
65
72
|
|
|
66
73
|
def file_to_base64(path: str) -> str:
|
|
67
|
-
"""Reads
|
|
74
|
+
"""Reads a file from a local path and returns it as a base64 encoded string."""
|
|
68
75
|
try:
|
|
69
|
-
with open(path, "rb") as
|
|
70
|
-
return base64.b64encode(
|
|
76
|
+
with open(path, "rb") as f:
|
|
77
|
+
return base64.b64encode(f.read()).decode("utf-8")
|
|
71
78
|
except Exception as e:
|
|
72
|
-
logger.error(f"Failed to read and encode
|
|
79
|
+
logger.error(f"Failed to read and encode file at {path}: {e}")
|
|
73
80
|
raise
|
|
74
81
|
|
|
75
82
|
async def url_to_base64(url: str) -> str:
|
|
76
|
-
"""Downloads
|
|
83
|
+
"""Downloads content from a URL and returns it as a base64 encoded string."""
|
|
77
84
|
try:
|
|
78
85
|
response = await _http_client.get(url)
|
|
79
86
|
response.raise_for_status()
|
|
80
87
|
return base64.b64encode(response.content).decode("utf-8")
|
|
81
88
|
except httpx.HTTPError as e:
|
|
82
|
-
logger.error(f"Failed to download
|
|
89
|
+
logger.error(f"Failed to download from URL {url}: {e}")
|
|
83
90
|
raise
|
|
84
91
|
|
|
85
|
-
async def
|
|
92
|
+
async def media_source_to_base64(media_source: str) -> str:
|
|
86
93
|
"""
|
|
87
|
-
Orchestrator function that converts
|
|
94
|
+
Orchestrator function that converts a media source (file path, URL, or existing base64)
|
|
88
95
|
into a base64 encoded string by delegating to specialized functions.
|
|
89
96
|
"""
|
|
90
|
-
if
|
|
91
|
-
return file_to_base64(
|
|
97
|
+
if is_valid_media_path(media_source):
|
|
98
|
+
return file_to_base64(media_source)
|
|
92
99
|
|
|
93
|
-
if
|
|
94
|
-
return await url_to_base64(
|
|
100
|
+
if media_source.startswith(("http://", "https://")):
|
|
101
|
+
return await url_to_base64(media_source)
|
|
95
102
|
|
|
96
|
-
if is_base64(
|
|
97
|
-
return
|
|
103
|
+
if is_base64(media_source):
|
|
104
|
+
return media_source
|
|
98
105
|
|
|
99
|
-
raise ValueError(f"Invalid
|
|
106
|
+
raise ValueError(f"Invalid media source: not a valid file path, URL, or base64 string.")
|