autobyteus 1.2.1-py3-none-any.whl → 1.2.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- autobyteus/agent/agent.py +15 -5
- autobyteus/agent/bootstrap_steps/__init__.py +1 -3
- autobyteus/agent/bootstrap_steps/agent_bootstrapper.py +3 -59
- autobyteus/agent/bootstrap_steps/base_bootstrap_step.py +1 -4
- autobyteus/agent/bootstrap_steps/mcp_server_prewarming_step.py +1 -3
- autobyteus/agent/bootstrap_steps/system_prompt_processing_step.py +16 -13
- autobyteus/agent/bootstrap_steps/workspace_context_initialization_step.py +2 -4
- autobyteus/agent/context/agent_config.py +43 -20
- autobyteus/agent/context/agent_context.py +23 -18
- autobyteus/agent/context/agent_runtime_state.py +19 -19
- autobyteus/agent/events/__init__.py +16 -1
- autobyteus/agent/events/agent_events.py +43 -3
- autobyteus/agent/events/agent_input_event_queue_manager.py +79 -26
- autobyteus/agent/events/event_store.py +57 -0
- autobyteus/agent/events/notifiers.py +69 -59
- autobyteus/agent/events/worker_event_dispatcher.py +21 -64
- autobyteus/agent/factory/agent_factory.py +52 -0
- autobyteus/agent/handlers/__init__.py +2 -0
- autobyteus/agent/handlers/approved_tool_invocation_event_handler.py +51 -34
- autobyteus/agent/handlers/bootstrap_event_handler.py +155 -0
- autobyteus/agent/handlers/inter_agent_message_event_handler.py +10 -0
- autobyteus/agent/handlers/lifecycle_event_logger.py +19 -11
- autobyteus/agent/handlers/llm_complete_response_received_event_handler.py +10 -15
- autobyteus/agent/handlers/llm_user_message_ready_event_handler.py +188 -48
- autobyteus/agent/handlers/tool_execution_approval_event_handler.py +0 -10
- autobyteus/agent/handlers/tool_invocation_request_event_handler.py +53 -48
- autobyteus/agent/handlers/tool_result_event_handler.py +7 -8
- autobyteus/agent/handlers/user_input_message_event_handler.py +10 -3
- autobyteus/agent/input_processor/memory_ingest_input_processor.py +40 -0
- autobyteus/agent/lifecycle/__init__.py +12 -0
- autobyteus/agent/lifecycle/base_processor.py +109 -0
- autobyteus/agent/lifecycle/events.py +35 -0
- autobyteus/agent/lifecycle/processor_definition.py +36 -0
- autobyteus/agent/lifecycle/processor_registry.py +106 -0
- autobyteus/agent/llm_request_assembler.py +98 -0
- autobyteus/agent/llm_response_processor/__init__.py +1 -8
- autobyteus/agent/message/context_file_type.py +1 -1
- autobyteus/agent/runtime/agent_runtime.py +29 -21
- autobyteus/agent/runtime/agent_worker.py +98 -19
- autobyteus/agent/shutdown_steps/__init__.py +2 -0
- autobyteus/agent/shutdown_steps/agent_shutdown_orchestrator.py +2 -0
- autobyteus/agent/shutdown_steps/tool_cleanup_step.py +58 -0
- autobyteus/agent/status/__init__.py +14 -0
- autobyteus/agent/status/manager.py +93 -0
- autobyteus/agent/status/status_deriver.py +96 -0
- autobyteus/agent/{phases/phase_enum.py → status/status_enum.py} +16 -16
- autobyteus/agent/status/status_update_utils.py +73 -0
- autobyteus/agent/streaming/__init__.py +52 -5
- autobyteus/agent/streaming/adapters/__init__.py +18 -0
- autobyteus/agent/streaming/adapters/invocation_adapter.py +184 -0
- autobyteus/agent/streaming/adapters/tool_call_parsing.py +163 -0
- autobyteus/agent/streaming/adapters/tool_syntax_registry.py +67 -0
- autobyteus/agent/streaming/agent_event_stream.py +3 -183
- autobyteus/agent/streaming/api_tool_call/__init__.py +16 -0
- autobyteus/agent/streaming/api_tool_call/file_content_streamer.py +56 -0
- autobyteus/agent/streaming/api_tool_call/json_string_field_extractor.py +175 -0
- autobyteus/agent/streaming/api_tool_call_streaming_response_handler.py +4 -0
- autobyteus/agent/streaming/events/__init__.py +6 -0
- autobyteus/agent/streaming/events/stream_event_payloads.py +284 -0
- autobyteus/agent/streaming/events/stream_events.py +141 -0
- autobyteus/agent/streaming/handlers/__init__.py +15 -0
- autobyteus/agent/streaming/handlers/api_tool_call_streaming_response_handler.py +303 -0
- autobyteus/agent/streaming/handlers/parsing_streaming_response_handler.py +107 -0
- autobyteus/agent/streaming/handlers/pass_through_streaming_response_handler.py +107 -0
- autobyteus/agent/streaming/handlers/streaming_handler_factory.py +177 -0
- autobyteus/agent/streaming/handlers/streaming_response_handler.py +58 -0
- autobyteus/agent/streaming/parser/__init__.py +61 -0
- autobyteus/agent/streaming/parser/event_emitter.py +181 -0
- autobyteus/agent/streaming/parser/events.py +4 -0
- autobyteus/agent/streaming/parser/invocation_adapter.py +4 -0
- autobyteus/agent/streaming/parser/json_parsing_strategies/__init__.py +19 -0
- autobyteus/agent/streaming/parser/json_parsing_strategies/base.py +32 -0
- autobyteus/agent/streaming/parser/json_parsing_strategies/default.py +34 -0
- autobyteus/agent/streaming/parser/json_parsing_strategies/gemini.py +31 -0
- autobyteus/agent/streaming/parser/json_parsing_strategies/openai.py +64 -0
- autobyteus/agent/streaming/parser/json_parsing_strategies/registry.py +75 -0
- autobyteus/agent/streaming/parser/parser_context.py +227 -0
- autobyteus/agent/streaming/parser/parser_factory.py +132 -0
- autobyteus/agent/streaming/parser/sentinel_format.py +7 -0
- autobyteus/agent/streaming/parser/state_factory.py +62 -0
- autobyteus/agent/streaming/parser/states/__init__.py +1 -0
- autobyteus/agent/streaming/parser/states/base_state.py +60 -0
- autobyteus/agent/streaming/parser/states/custom_xml_tag_run_bash_parsing_state.py +38 -0
- autobyteus/agent/streaming/parser/states/custom_xml_tag_write_file_parsing_state.py +55 -0
- autobyteus/agent/streaming/parser/states/delimited_content_state.py +146 -0
- autobyteus/agent/streaming/parser/states/json_initialization_state.py +144 -0
- autobyteus/agent/streaming/parser/states/json_tool_parsing_state.py +137 -0
- autobyteus/agent/streaming/parser/states/sentinel_content_state.py +30 -0
- autobyteus/agent/streaming/parser/states/sentinel_initialization_state.py +117 -0
- autobyteus/agent/streaming/parser/states/text_state.py +78 -0
- autobyteus/agent/streaming/parser/states/xml_patch_file_tool_parsing_state.py +328 -0
- autobyteus/agent/streaming/parser/states/xml_run_bash_tool_parsing_state.py +129 -0
- autobyteus/agent/streaming/parser/states/xml_tag_initialization_state.py +151 -0
- autobyteus/agent/streaming/parser/states/xml_tool_parsing_state.py +63 -0
- autobyteus/agent/streaming/parser/states/xml_write_file_tool_parsing_state.py +343 -0
- autobyteus/agent/streaming/parser/strategies/__init__.py +17 -0
- autobyteus/agent/streaming/parser/strategies/base.py +24 -0
- autobyteus/agent/streaming/parser/strategies/json_tool_strategy.py +26 -0
- autobyteus/agent/streaming/parser/strategies/registry.py +28 -0
- autobyteus/agent/streaming/parser/strategies/sentinel_strategy.py +23 -0
- autobyteus/agent/streaming/parser/strategies/xml_tag_strategy.py +21 -0
- autobyteus/agent/streaming/parser/stream_scanner.py +167 -0
- autobyteus/agent/streaming/parser/streaming_parser.py +212 -0
- autobyteus/agent/streaming/parser/tool_call_parsing.py +4 -0
- autobyteus/agent/streaming/parser/tool_constants.py +7 -0
- autobyteus/agent/streaming/parser/tool_syntax_registry.py +4 -0
- autobyteus/agent/streaming/parser/xml_tool_parsing_state_registry.py +55 -0
- autobyteus/agent/streaming/parsing_streaming_response_handler.py +4 -0
- autobyteus/agent/streaming/pass_through_streaming_response_handler.py +4 -0
- autobyteus/agent/streaming/queue_streamer.py +3 -57
- autobyteus/agent/streaming/segments/__init__.py +5 -0
- autobyteus/agent/streaming/segments/segment_events.py +81 -0
- autobyteus/agent/streaming/stream_event_payloads.py +2 -223
- autobyteus/agent/streaming/stream_events.py +3 -140
- autobyteus/agent/streaming/streaming_handler_factory.py +4 -0
- autobyteus/agent/streaming/streaming_response_handler.py +4 -0
- autobyteus/agent/streaming/streams/__init__.py +5 -0
- autobyteus/agent/streaming/streams/agent_event_stream.py +197 -0
- autobyteus/agent/streaming/utils/__init__.py +5 -0
- autobyteus/agent/streaming/utils/queue_streamer.py +59 -0
- autobyteus/agent/system_prompt_processor/__init__.py +2 -0
- autobyteus/agent/system_prompt_processor/available_skills_processor.py +96 -0
- autobyteus/agent/system_prompt_processor/base_processor.py +1 -1
- autobyteus/agent/system_prompt_processor/processor_meta.py +15 -2
- autobyteus/agent/system_prompt_processor/tool_manifest_injector_processor.py +39 -58
- autobyteus/agent/token_budget.py +56 -0
- autobyteus/agent/tool_execution_result_processor/memory_ingest_tool_result_processor.py +29 -0
- autobyteus/agent/tool_invocation.py +16 -40
- autobyteus/agent/tool_invocation_preprocessor/__init__.py +9 -0
- autobyteus/agent/tool_invocation_preprocessor/base_preprocessor.py +45 -0
- autobyteus/agent/tool_invocation_preprocessor/processor_definition.py +15 -0
- autobyteus/agent/tool_invocation_preprocessor/processor_meta.py +33 -0
- autobyteus/agent/tool_invocation_preprocessor/processor_registry.py +60 -0
- autobyteus/agent/utils/wait_for_idle.py +12 -14
- autobyteus/agent/workspace/base_workspace.py +6 -27
- autobyteus/agent_team/agent_team.py +3 -3
- autobyteus/agent_team/agent_team_builder.py +1 -41
- autobyteus/agent_team/bootstrap_steps/__init__.py +0 -4
- autobyteus/agent_team/bootstrap_steps/agent_configuration_preparation_step.py +8 -18
- autobyteus/agent_team/bootstrap_steps/agent_team_bootstrapper.py +4 -16
- autobyteus/agent_team/bootstrap_steps/base_agent_team_bootstrap_step.py +1 -2
- autobyteus/agent_team/bootstrap_steps/coordinator_initialization_step.py +1 -2
- autobyteus/agent_team/bootstrap_steps/task_notifier_initialization_step.py +1 -2
- autobyteus/agent_team/bootstrap_steps/team_context_initialization_step.py +4 -4
- autobyteus/agent_team/context/agent_team_config.py +6 -3
- autobyteus/agent_team/context/agent_team_context.py +25 -3
- autobyteus/agent_team/context/agent_team_runtime_state.py +9 -6
- autobyteus/agent_team/events/__init__.py +11 -0
- autobyteus/agent_team/events/agent_team_event_dispatcher.py +22 -9
- autobyteus/agent_team/events/agent_team_events.py +16 -0
- autobyteus/agent_team/events/event_store.py +57 -0
- autobyteus/agent_team/factory/agent_team_factory.py +8 -0
- autobyteus/agent_team/handlers/inter_agent_message_request_event_handler.py +18 -2
- autobyteus/agent_team/handlers/lifecycle_agent_team_event_handler.py +21 -5
- autobyteus/agent_team/handlers/process_user_message_event_handler.py +17 -8
- autobyteus/agent_team/handlers/tool_approval_team_event_handler.py +19 -4
- autobyteus/agent_team/runtime/agent_team_runtime.py +41 -10
- autobyteus/agent_team/runtime/agent_team_worker.py +69 -5
- autobyteus/agent_team/status/__init__.py +14 -0
- autobyteus/agent_team/status/agent_team_status.py +18 -0
- autobyteus/agent_team/status/agent_team_status_manager.py +33 -0
- autobyteus/agent_team/status/status_deriver.py +62 -0
- autobyteus/agent_team/status/status_update_utils.py +42 -0
- autobyteus/agent_team/streaming/__init__.py +2 -2
- autobyteus/agent_team/streaming/agent_team_event_notifier.py +6 -6
- autobyteus/agent_team/streaming/agent_team_stream_event_payloads.py +4 -4
- autobyteus/agent_team/streaming/agent_team_stream_events.py +3 -3
- autobyteus/agent_team/system_prompt_processor/__init__.py +6 -0
- autobyteus/agent_team/system_prompt_processor/team_manifest_injector_processor.py +76 -0
- autobyteus/agent_team/task_notification/task_notification_mode.py +19 -0
- autobyteus/agent_team/utils/wait_for_idle.py +4 -4
- autobyteus/cli/agent_cli.py +18 -10
- autobyteus/cli/agent_team_tui/app.py +14 -11
- autobyteus/cli/agent_team_tui/state.py +13 -15
- autobyteus/cli/agent_team_tui/widgets/agent_list_sidebar.py +15 -15
- autobyteus/cli/agent_team_tui/widgets/focus_pane.py +143 -36
- autobyteus/cli/agent_team_tui/widgets/renderables.py +1 -1
- autobyteus/cli/agent_team_tui/widgets/shared.py +25 -25
- autobyteus/cli/cli_display.py +193 -44
- autobyteus/cli/workflow_tui/app.py +9 -10
- autobyteus/cli/workflow_tui/state.py +14 -16
- autobyteus/cli/workflow_tui/widgets/agent_list_sidebar.py +15 -15
- autobyteus/cli/workflow_tui/widgets/focus_pane.py +137 -35
- autobyteus/cli/workflow_tui/widgets/renderables.py +1 -1
- autobyteus/cli/workflow_tui/widgets/shared.py +25 -25
- autobyteus/clients/autobyteus_client.py +94 -1
- autobyteus/events/event_types.py +11 -18
- autobyteus/llm/api/autobyteus_llm.py +33 -29
- autobyteus/llm/api/claude_llm.py +142 -36
- autobyteus/llm/api/gemini_llm.py +163 -59
- autobyteus/llm/api/grok_llm.py +1 -1
- autobyteus/llm/api/minimax_llm.py +26 -0
- autobyteus/llm/api/mistral_llm.py +113 -87
- autobyteus/llm/api/ollama_llm.py +9 -42
- autobyteus/llm/api/openai_compatible_llm.py +127 -91
- autobyteus/llm/api/openai_llm.py +3 -3
- autobyteus/llm/api/openai_responses_llm.py +324 -0
- autobyteus/llm/api/zhipu_llm.py +21 -2
- autobyteus/llm/autobyteus_provider.py +70 -60
- autobyteus/llm/base_llm.py +85 -81
- autobyteus/llm/converters/__init__.py +14 -0
- autobyteus/llm/converters/anthropic_tool_call_converter.py +37 -0
- autobyteus/llm/converters/gemini_tool_call_converter.py +57 -0
- autobyteus/llm/converters/mistral_tool_call_converter.py +37 -0
- autobyteus/llm/converters/openai_tool_call_converter.py +38 -0
- autobyteus/llm/extensions/base_extension.py +6 -12
- autobyteus/llm/extensions/token_usage_tracking_extension.py +45 -18
- autobyteus/llm/llm_factory.py +282 -204
- autobyteus/llm/lmstudio_provider.py +60 -49
- autobyteus/llm/models.py +35 -2
- autobyteus/llm/ollama_provider.py +60 -49
- autobyteus/llm/ollama_provider_resolver.py +0 -1
- autobyteus/llm/prompt_renderers/__init__.py +19 -0
- autobyteus/llm/prompt_renderers/anthropic_prompt_renderer.py +104 -0
- autobyteus/llm/prompt_renderers/autobyteus_prompt_renderer.py +19 -0
- autobyteus/llm/prompt_renderers/base_prompt_renderer.py +10 -0
- autobyteus/llm/prompt_renderers/gemini_prompt_renderer.py +63 -0
- autobyteus/llm/prompt_renderers/mistral_prompt_renderer.py +87 -0
- autobyteus/llm/prompt_renderers/ollama_prompt_renderer.py +51 -0
- autobyteus/llm/prompt_renderers/openai_chat_renderer.py +97 -0
- autobyteus/llm/prompt_renderers/openai_responses_renderer.py +101 -0
- autobyteus/llm/providers.py +1 -3
- autobyteus/llm/token_counter/claude_token_counter.py +56 -25
- autobyteus/llm/token_counter/mistral_token_counter.py +12 -8
- autobyteus/llm/token_counter/openai_token_counter.py +24 -5
- autobyteus/llm/token_counter/token_counter_factory.py +12 -5
- autobyteus/llm/utils/llm_config.py +6 -12
- autobyteus/llm/utils/media_payload_formatter.py +27 -20
- autobyteus/llm/utils/messages.py +55 -3
- autobyteus/llm/utils/response_types.py +3 -0
- autobyteus/llm/utils/tool_call_delta.py +31 -0
- autobyteus/memory/__init__.py +32 -0
- autobyteus/memory/active_transcript.py +69 -0
- autobyteus/memory/compaction/__init__.py +9 -0
- autobyteus/memory/compaction/compaction_result.py +8 -0
- autobyteus/memory/compaction/compactor.py +89 -0
- autobyteus/memory/compaction/summarizer.py +11 -0
- autobyteus/memory/compaction_snapshot_builder.py +84 -0
- autobyteus/memory/memory_manager.py +183 -0
- autobyteus/memory/models/__init__.py +14 -0
- autobyteus/memory/models/episodic_item.py +41 -0
- autobyteus/memory/models/memory_types.py +7 -0
- autobyteus/memory/models/raw_trace_item.py +79 -0
- autobyteus/memory/models/semantic_item.py +41 -0
- autobyteus/memory/models/tool_interaction.py +20 -0
- autobyteus/memory/policies/__init__.py +5 -0
- autobyteus/memory/policies/compaction_policy.py +16 -0
- autobyteus/memory/retrieval/__init__.py +7 -0
- autobyteus/memory/retrieval/memory_bundle.py +11 -0
- autobyteus/memory/retrieval/retriever.py +13 -0
- autobyteus/memory/store/__init__.py +7 -0
- autobyteus/memory/store/base_store.py +14 -0
- autobyteus/memory/store/file_store.py +98 -0
- autobyteus/memory/tool_interaction_builder.py +46 -0
- autobyteus/memory/turn_tracker.py +9 -0
- autobyteus/multimedia/audio/api/autobyteus_audio_client.py +19 -5
- autobyteus/multimedia/audio/api/gemini_audio_client.py +108 -16
- autobyteus/multimedia/audio/audio_client_factory.py +47 -9
- autobyteus/multimedia/audio/audio_model.py +2 -1
- autobyteus/multimedia/image/api/autobyteus_image_client.py +19 -5
- autobyteus/multimedia/image/api/gemini_image_client.py +38 -17
- autobyteus/multimedia/image/api/openai_image_client.py +125 -43
- autobyteus/multimedia/image/autobyteus_image_provider.py +2 -1
- autobyteus/multimedia/image/image_client_factory.py +47 -15
- autobyteus/multimedia/image/image_model.py +5 -2
- autobyteus/multimedia/providers.py +3 -2
- autobyteus/skills/loader.py +71 -0
- autobyteus/skills/model.py +11 -0
- autobyteus/skills/registry.py +70 -0
- autobyteus/task_management/tools/todo_tools/add_todo.py +2 -2
- autobyteus/task_management/tools/todo_tools/create_todo_list.py +2 -2
- autobyteus/task_management/tools/todo_tools/update_todo_status.py +2 -2
- autobyteus/tools/__init__.py +34 -47
- autobyteus/tools/base_tool.py +7 -0
- autobyteus/tools/file/__init__.py +2 -6
- autobyteus/tools/file/patch_file.py +149 -0
- autobyteus/tools/file/read_file.py +36 -5
- autobyteus/tools/file/write_file.py +4 -1
- autobyteus/tools/functional_tool.py +43 -6
- autobyteus/tools/mcp/__init__.py +2 -0
- autobyteus/tools/mcp/config_service.py +5 -1
- autobyteus/tools/mcp/server/__init__.py +2 -0
- autobyteus/tools/mcp/server/http_managed_mcp_server.py +1 -1
- autobyteus/tools/mcp/server/websocket_managed_mcp_server.py +141 -0
- autobyteus/tools/mcp/server_instance_manager.py +8 -1
- autobyteus/tools/mcp/types.py +61 -0
- autobyteus/tools/multimedia/audio_tools.py +70 -17
- autobyteus/tools/multimedia/download_media_tool.py +18 -4
- autobyteus/tools/multimedia/image_tools.py +246 -62
- autobyteus/tools/operation_executor/journal_manager.py +107 -0
- autobyteus/tools/operation_executor/operation_event_buffer.py +57 -0
- autobyteus/tools/operation_executor/operation_event_producer.py +29 -0
- autobyteus/tools/operation_executor/operation_executor.py +58 -0
- autobyteus/tools/registry/tool_definition.py +43 -2
- autobyteus/tools/skill/load_skill.py +50 -0
- autobyteus/tools/terminal/__init__.py +45 -0
- autobyteus/tools/terminal/ansi_utils.py +32 -0
- autobyteus/tools/terminal/background_process_manager.py +233 -0
- autobyteus/tools/terminal/output_buffer.py +105 -0
- autobyteus/tools/terminal/prompt_detector.py +63 -0
- autobyteus/tools/terminal/pty_session.py +241 -0
- autobyteus/tools/terminal/session_factory.py +20 -0
- autobyteus/tools/terminal/terminal_session_manager.py +226 -0
- autobyteus/tools/terminal/tools/__init__.py +13 -0
- autobyteus/tools/terminal/tools/get_process_output.py +81 -0
- autobyteus/tools/terminal/tools/run_bash.py +109 -0
- autobyteus/tools/terminal/tools/start_background_process.py +104 -0
- autobyteus/tools/terminal/tools/stop_background_process.py +67 -0
- autobyteus/tools/terminal/types.py +54 -0
- autobyteus/tools/terminal/wsl_tmux_session.py +221 -0
- autobyteus/tools/terminal/wsl_utils.py +156 -0
- autobyteus/tools/transaction_management/backup_handler.py +48 -0
- autobyteus/tools/transaction_management/operation_lifecycle_manager.py +62 -0
- autobyteus/tools/usage/__init__.py +1 -2
- autobyteus/tools/usage/formatters/__init__.py +17 -1
- autobyteus/tools/usage/formatters/base_formatter.py +8 -0
- autobyteus/tools/usage/formatters/default_xml_schema_formatter.py +2 -2
- autobyteus/tools/usage/formatters/mistral_json_schema_formatter.py +18 -0
- autobyteus/tools/usage/formatters/patch_file_xml_example_formatter.py +64 -0
- autobyteus/tools/usage/formatters/patch_file_xml_schema_formatter.py +31 -0
- autobyteus/tools/usage/formatters/run_bash_xml_example_formatter.py +32 -0
- autobyteus/tools/usage/formatters/run_bash_xml_schema_formatter.py +36 -0
- autobyteus/tools/usage/formatters/write_file_xml_example_formatter.py +53 -0
- autobyteus/tools/usage/formatters/write_file_xml_schema_formatter.py +31 -0
- autobyteus/tools/usage/providers/tool_manifest_provider.py +10 -10
- autobyteus/tools/usage/registries/__init__.py +1 -3
- autobyteus/tools/usage/registries/tool_formatting_registry.py +115 -8
- autobyteus/tools/usage/tool_schema_provider.py +51 -0
- autobyteus/tools/web/__init__.py +4 -0
- autobyteus/tools/web/read_url_tool.py +80 -0
- autobyteus/utils/diff_utils.py +271 -0
- autobyteus/utils/download_utils.py +109 -0
- autobyteus/utils/file_utils.py +57 -2
- autobyteus/utils/gemini_helper.py +56 -0
- autobyteus/utils/gemini_model_mapping.py +71 -0
- autobyteus/utils/llm_output_formatter.py +75 -0
- autobyteus/utils/tool_call_format.py +36 -0
- autobyteus/workflow/agentic_workflow.py +3 -3
- autobyteus/workflow/bootstrap_steps/agent_tool_injection_step.py +2 -2
- autobyteus/workflow/bootstrap_steps/base_workflow_bootstrap_step.py +2 -2
- autobyteus/workflow/bootstrap_steps/coordinator_initialization_step.py +2 -2
- autobyteus/workflow/bootstrap_steps/coordinator_prompt_preparation_step.py +3 -9
- autobyteus/workflow/bootstrap_steps/workflow_bootstrapper.py +6 -6
- autobyteus/workflow/bootstrap_steps/workflow_runtime_queue_initialization_step.py +2 -2
- autobyteus/workflow/context/workflow_context.py +3 -3
- autobyteus/workflow/context/workflow_runtime_state.py +5 -5
- autobyteus/workflow/events/workflow_event_dispatcher.py +5 -5
- autobyteus/workflow/handlers/lifecycle_workflow_event_handler.py +3 -3
- autobyteus/workflow/handlers/process_user_message_event_handler.py +5 -5
- autobyteus/workflow/handlers/tool_approval_workflow_event_handler.py +2 -2
- autobyteus/workflow/runtime/workflow_runtime.py +8 -8
- autobyteus/workflow/runtime/workflow_worker.py +3 -3
- autobyteus/workflow/status/__init__.py +11 -0
- autobyteus/workflow/status/workflow_status.py +19 -0
- autobyteus/workflow/status/workflow_status_manager.py +48 -0
- autobyteus/workflow/streaming/__init__.py +2 -2
- autobyteus/workflow/streaming/workflow_event_notifier.py +7 -7
- autobyteus/workflow/streaming/workflow_stream_event_payloads.py +4 -4
- autobyteus/workflow/streaming/workflow_stream_events.py +3 -3
- autobyteus/workflow/utils/wait_for_idle.py +4 -4
- autobyteus-1.2.3.dist-info/METADATA +293 -0
- autobyteus-1.2.3.dist-info/RECORD +600 -0
- {autobyteus-1.2.1.dist-info → autobyteus-1.2.3.dist-info}/WHEEL +1 -1
- {autobyteus-1.2.1.dist-info → autobyteus-1.2.3.dist-info}/top_level.txt +0 -1
- autobyteus/agent/bootstrap_steps/agent_runtime_queue_initialization_step.py +0 -57
- autobyteus/agent/hooks/__init__.py +0 -16
- autobyteus/agent/hooks/base_phase_hook.py +0 -78
- autobyteus/agent/hooks/hook_definition.py +0 -36
- autobyteus/agent/hooks/hook_meta.py +0 -37
- autobyteus/agent/hooks/hook_registry.py +0 -106
- autobyteus/agent/llm_response_processor/provider_aware_tool_usage_processor.py +0 -103
- autobyteus/agent/phases/__init__.py +0 -18
- autobyteus/agent/phases/discover.py +0 -53
- autobyteus/agent/phases/manager.py +0 -265
- autobyteus/agent/phases/transition_decorator.py +0 -40
- autobyteus/agent/phases/transition_info.py +0 -33
- autobyteus/agent/remote_agent.py +0 -244
- autobyteus/agent/workspace/workspace_definition.py +0 -36
- autobyteus/agent/workspace/workspace_meta.py +0 -37
- autobyteus/agent/workspace/workspace_registry.py +0 -72
- autobyteus/agent_team/bootstrap_steps/agent_team_runtime_queue_initialization_step.py +0 -25
- autobyteus/agent_team/bootstrap_steps/coordinator_prompt_preparation_step.py +0 -85
- autobyteus/agent_team/phases/__init__.py +0 -11
- autobyteus/agent_team/phases/agent_team_operational_phase.py +0 -19
- autobyteus/agent_team/phases/agent_team_phase_manager.py +0 -48
- autobyteus/llm/api/bedrock_llm.py +0 -92
- autobyteus/llm/api/groq_llm.py +0 -94
- autobyteus/llm/api/nvidia_llm.py +0 -108
- autobyteus/llm/utils/token_pricing_config.py +0 -87
- autobyteus/rpc/__init__.py +0 -73
- autobyteus/rpc/client/__init__.py +0 -17
- autobyteus/rpc/client/abstract_client_connection.py +0 -124
- autobyteus/rpc/client/client_connection_manager.py +0 -153
- autobyteus/rpc/client/sse_client_connection.py +0 -306
- autobyteus/rpc/client/stdio_client_connection.py +0 -280
- autobyteus/rpc/config/__init__.py +0 -13
- autobyteus/rpc/config/agent_server_config.py +0 -153
- autobyteus/rpc/config/agent_server_registry.py +0 -152
- autobyteus/rpc/hosting.py +0 -244
- autobyteus/rpc/protocol.py +0 -244
- autobyteus/rpc/server/__init__.py +0 -20
- autobyteus/rpc/server/agent_server_endpoint.py +0 -181
- autobyteus/rpc/server/base_method_handler.py +0 -40
- autobyteus/rpc/server/method_handlers.py +0 -259
- autobyteus/rpc/server/sse_server_handler.py +0 -182
- autobyteus/rpc/server/stdio_server_handler.py +0 -151
- autobyteus/rpc/server_main.py +0 -198
- autobyteus/rpc/transport_type.py +0 -13
- autobyteus/tools/bash/__init__.py +0 -2
- autobyteus/tools/bash/bash_executor.py +0 -100
- autobyteus/tools/browser/__init__.py +0 -2
- autobyteus/tools/browser/session_aware/browser_session_aware_navigate_to.py +0 -75
- autobyteus/tools/browser/session_aware/browser_session_aware_tool.py +0 -30
- autobyteus/tools/browser/session_aware/browser_session_aware_web_element_trigger.py +0 -154
- autobyteus/tools/browser/session_aware/browser_session_aware_webpage_reader.py +0 -89
- autobyteus/tools/browser/session_aware/browser_session_aware_webpage_screenshot_taker.py +0 -107
- autobyteus/tools/browser/session_aware/factory/browser_session_aware_web_element_trigger_factory.py +0 -14
- autobyteus/tools/browser/session_aware/factory/browser_session_aware_webpage_reader_factory.py +0 -26
- autobyteus/tools/browser/session_aware/factory/browser_session_aware_webpage_screenshot_taker_factory.py +0 -14
- autobyteus/tools/browser/session_aware/shared_browser_session.py +0 -11
- autobyteus/tools/browser/session_aware/shared_browser_session_manager.py +0 -25
- autobyteus/tools/browser/session_aware/web_element_action.py +0 -20
- autobyteus/tools/browser/standalone/__init__.py +0 -6
- autobyteus/tools/browser/standalone/factory/__init__.py +0 -0
- autobyteus/tools/browser/standalone/factory/webpage_reader_factory.py +0 -25
- autobyteus/tools/browser/standalone/factory/webpage_screenshot_taker_factory.py +0 -14
- autobyteus/tools/browser/standalone/navigate_to.py +0 -84
- autobyteus/tools/browser/standalone/web_page_pdf_generator.py +0 -101
- autobyteus/tools/browser/standalone/webpage_image_downloader.py +0 -169
- autobyteus/tools/browser/standalone/webpage_reader.py +0 -105
- autobyteus/tools/browser/standalone/webpage_screenshot_taker.py +0 -105
- autobyteus/tools/file/edit_file.py +0 -200
- autobyteus/tools/file/list_directory.py +0 -168
- autobyteus/tools/file/search_files.py +0 -188
- autobyteus/tools/timer.py +0 -175
- autobyteus/tools/usage/parsers/__init__.py +0 -22
- autobyteus/tools/usage/parsers/_json_extractor.py +0 -99
- autobyteus/tools/usage/parsers/_string_decoders.py +0 -18
- autobyteus/tools/usage/parsers/anthropic_xml_tool_usage_parser.py +0 -10
- autobyteus/tools/usage/parsers/base_parser.py +0 -41
- autobyteus/tools/usage/parsers/default_json_tool_usage_parser.py +0 -83
- autobyteus/tools/usage/parsers/default_xml_tool_usage_parser.py +0 -316
- autobyteus/tools/usage/parsers/exceptions.py +0 -13
- autobyteus/tools/usage/parsers/gemini_json_tool_usage_parser.py +0 -77
- autobyteus/tools/usage/parsers/openai_json_tool_usage_parser.py +0 -149
- autobyteus/tools/usage/parsers/provider_aware_tool_usage_parser.py +0 -59
- autobyteus/tools/usage/registries/tool_usage_parser_registry.py +0 -62
- autobyteus/workflow/phases/__init__.py +0 -11
- autobyteus/workflow/phases/workflow_operational_phase.py +0 -19
- autobyteus/workflow/phases/workflow_phase_manager.py +0 -48
- autobyteus-1.2.1.dist-info/METADATA +0 -205
- autobyteus-1.2.1.dist-info/RECORD +0 -511
- examples/__init__.py +0 -1
- examples/agent_team/__init__.py +0 -1
- examples/discover_phase_transitions.py +0 -104
- examples/run_agentic_software_engineer.py +0 -239
- examples/run_browser_agent.py +0 -262
- examples/run_google_slides_agent.py +0 -287
- examples/run_mcp_browser_client.py +0 -174
- examples/run_mcp_google_slides_client.py +0 -270
- examples/run_mcp_list_tools.py +0 -189
- examples/run_poem_writer.py +0 -284
- examples/run_sqlite_agent.py +0 -295
- /autobyteus/{tools/browser/session_aware → skills}/__init__.py +0 -0
- /autobyteus/tools/{browser/session_aware/factory → skill}/__init__.py +0 -0
- {autobyteus-1.2.1.dist-info → autobyteus-1.2.3.dist-info}/licenses/LICENSE +0 -0

autobyteus/agent/workspace/workspace_registry.py
DELETED
@@ -1,72 +0,0 @@
-"""
-This module provides a central registry for agent workspace types.
-"""
-import logging
-from typing import Dict, Any, Optional, List, TYPE_CHECKING
-from autobyteus.utils.singleton import SingletonMeta
-from .workspace_definition import WorkspaceDefinition
-from .workspace_config import WorkspaceConfig
-
-if TYPE_CHECKING:
-    from .base_workspace import BaseAgentWorkspace
-
-logger = logging.getLogger(__name__)
-
-class WorkspaceRegistry(metaclass=SingletonMeta):
-    """
-    A singleton registry for WorkspaceDefinition objects. Workspaces are
-    typically auto-registered via WorkspaceMeta.
-    """
-    def __init__(self):
-        self._definitions: Dict[str, WorkspaceDefinition] = {}
-        logger.info("Core WorkspaceRegistry initialized.")
-
-    def register(self, definition: WorkspaceDefinition):
-        """Registers a workspace definition."""
-        if not isinstance(definition, WorkspaceDefinition):
-            raise TypeError("Can only register WorkspaceDefinition objects.")
-        if definition.workspace_type_name in self._definitions:
-            logger.warning(f"Overwriting workspace definition for type '{definition.workspace_type_name}'.")
-        self._definitions[definition.workspace_type_name] = definition
-
-    def get_definition(self, workspace_type_name: str) -> Optional[WorkspaceDefinition]:
-        """Retrieves a workspace definition by its unique type name."""
-        return self._definitions.get(workspace_type_name)
-
-    def get_all_definitions(self) -> List[WorkspaceDefinition]:
-        """Returns a list of all registered workspace definitions."""
-        return list(self._definitions.values())
-
-    def create_workspace(self, workspace_type_name: str, config: WorkspaceConfig) -> 'BaseAgentWorkspace':
-        """
-        Creates an instance of a workspace.
-
-        Args:
-            workspace_type_name (str): The unique type name of the workspace to create.
-            config (WorkspaceConfig): The configuration object for the workspace.
-
-        Returns:
-            An instance of a BaseAgentWorkspace subclass.
-
-        Raises:
-            ValueError: If the type is unknown or parameters are invalid.
-        """
-        definition = self.get_definition(workspace_type_name)
-        if not definition:
-            raise ValueError(f"Unknown workspace type: '{workspace_type_name}'")
-
-        is_valid, errors = definition.config_schema.validate_config(config.to_dict())
-        if not is_valid:
-            error_str = ", ".join(errors)
-            raise ValueError(f"Invalid parameters for workspace type '{workspace_type_name}': {error_str}")
-
-        try:
-            workspace_class = definition.workspace_class
-            instance = workspace_class(config=config)
-            logger.info(f"Successfully created instance of workspace type '{workspace_type_name}'.")
-            return instance
-        except Exception as e:
-            logger.error(f"Failed to instantiate workspace class '{definition.workspace_class.__name__}': {e}", exc_info=True)
-            raise RuntimeError(f"Workspace instantiation failed for type '{workspace_type_name}': {e}") from e
-
-default_workspace_registry = WorkspaceRegistry()

autobyteus/agent_team/bootstrap_steps/agent_team_runtime_queue_initialization_step.py
DELETED
@@ -1,25 +0,0 @@
-# file: autobyteus/autobyteus/agent_team/bootstrap_steps/agent_team_runtime_queue_initialization_step.py
-import logging
-from typing import TYPE_CHECKING
-
-from autobyteus.agent_team.bootstrap_steps.base_agent_team_bootstrap_step import BaseAgentTeamBootstrapStep
-from autobyteus.agent_team.events.agent_team_input_event_queue_manager import AgentTeamInputEventQueueManager
-
-if TYPE_CHECKING:
-    from autobyteus.agent_team.context.agent_team_context import AgentTeamContext
-    from autobyteus.agent_team.phases.agent_team_phase_manager import AgentTeamPhaseManager
-
-logger = logging.getLogger(__name__)
-
-class AgentTeamRuntimeQueueInitializationStep(BaseAgentTeamBootstrapStep):
-    """Bootstrap step for initializing the agent team's runtime event queues."""
-    async def execute(self, context: 'AgentTeamContext', phase_manager: 'AgentTeamPhaseManager') -> bool:
-        team_id = context.team_id
-        logger.info(f"Team '{team_id}': Executing AgentTeamRuntimeQueueInitializationStep.")
-        try:
-            context.state.input_event_queues = AgentTeamInputEventQueueManager()
-            logger.info(f"Team '{team_id}': AgentTeamInputEventQueueManager initialized.")
-            return True
-        except Exception as e:
-            logger.error(f"Team '{team_id}': Critical failure during queue initialization: {e}", exc_info=True)
-            return False

autobyteus/agent_team/bootstrap_steps/coordinator_prompt_preparation_step.py
DELETED
@@ -1,85 +0,0 @@
-# file: autobyteus/autobyteus/agent_team/bootstrap_steps/coordinator_prompt_preparation_step.py
-import logging
-from typing import TYPE_CHECKING, List
-
-from autobyteus.agent_team.bootstrap_steps.base_agent_team_bootstrap_step import BaseAgentTeamBootstrapStep
-from autobyteus.agent.context import AgentConfig
-from autobyteus.agent_team.context import AgentTeamConfig
-
-if TYPE_CHECKING:
-    from autobyteus.agent_team.context.agent_team_context import AgentTeamContext
-    from autobyteus.agent_team.phases.agent_team_phase_manager import AgentTeamPhaseManager
-
-logger = logging.getLogger(__name__)
-
-class CoordinatorPromptPreparationStep(BaseAgentTeamBootstrapStep):
-    """
-    Bootstrap step to finalize the coordinator's system prompt by injecting a
-    dynamically generated team manifest into a user-defined prompt template.
-    The user is expected to provide a `system_prompt` in the coordinator's
-    AgentConfig with a `{{team}}` placeholder.
-    """
-    async def execute(self, context: 'AgentTeamContext', phase_manager: 'AgentTeamPhaseManager') -> bool:
-        team_id = context.team_id
-        logger.info(f"Team '{team_id}': Executing CoordinatorPromptPreparationStep.")
-        try:
-            coordinator_node_config_wrapper = context.config.coordinator_node
-
-            # The coordinator must be an agent with a defined config.
-            if not isinstance(coordinator_node_config_wrapper.node_definition, AgentConfig):
-                logger.error(f"Team '{team_id}': Coordinator node '{coordinator_node_config_wrapper.name}' is not defined by an AgentConfig. Cannot prepare prompt.")
-                return False
-
-            coordinator_agent_config: AgentConfig = coordinator_node_config_wrapper.node_definition
-
-            # Start with the user's provided prompt template.
-            prompt_template = coordinator_agent_config.system_prompt
-            if not prompt_template:
-                logger.warning(f"Team '{team_id}': Coordinator '{coordinator_agent_config.name}' has no system_prompt defined. No prompt will be applied.")
-                context.state.prepared_coordinator_prompt = ""
-                return True
-
-            team_manifest = self._generate_team_manifest(context)
-
-            # Inject the manifest into the template.
-            if "{{team}}" in prompt_template:
-                final_prompt = prompt_template.replace("{{team}}", team_manifest)
-                logger.debug(f"Team '{team_id}': Injected team manifest into coordinator's system prompt.")
-            else:
-                final_prompt = prompt_template
-                logger.warning(f"Team '{team_id}': The coordinator's system prompt does not contain a '{{team}}' placeholder. The team manifest will not be injected.")
-
-            # Store the finalized prompt in the state for the AgentToolInjectionStep to use.
-            context.state.prepared_coordinator_prompt = final_prompt
-
-            logger.info(f"Team '{team_id}': Coordinator prompt prepared successfully and stored in state.")
-            return True
-        except Exception as e:
-            logger.error(f"Team '{team_id}': Failed to prepare coordinator prompt: {e}", exc_info=True)
-            return False
-
-    def _generate_team_manifest(self, context: 'AgentTeamContext') -> str:
-        """Generates a string manifest of all non-coordinator team members."""
-        prompt_parts: List[str] = []
-        coordinator_node = context.config.coordinator_node
-        member_nodes = {node for node in context.config.nodes if node != coordinator_node}
-
-        if not member_nodes:
-            return "You are working alone. You have no team members to delegate to."
-
-        # Sort for deterministic prompt generation
-        for node in sorted(list(member_nodes), key=lambda n: n.name):
-            node_def = node.node_definition
-            description = "No description available."
-
-            # --- THE FIX ---
-            # Use the 'description' for an AgentConfig and the 'role' for an AgentTeamConfig (sub-team).
-            if isinstance(node_def, AgentConfig):
-                description = node_def.description
-            elif isinstance(node_def, AgentTeamConfig):
-                # A sub-team's role is its most concise and relevant description for a parent coordinator.
-                description = node_def.role or node_def.description
-
-            prompt_parts.append(f"- name: {node.name}\n description: {description}")
-
-        return "\n".join(prompt_parts)

autobyteus/agent_team/phases/__init__.py
DELETED
@@ -1,11 +0,0 @@
-# file: autobyteus/autobyteus/agent_team/phases/__init__.py
-"""
-This package contains components for defining and managing agent team operational phases.
-"""
-from autobyteus.agent_team.phases.agent_team_operational_phase import AgentTeamOperationalPhase
-from autobyteus.agent_team.phases.agent_team_phase_manager import AgentTeamPhaseManager
-
-__all__ = [
-    "AgentTeamOperationalPhase",
-    "AgentTeamPhaseManager",
-]

autobyteus/agent_team/phases/agent_team_operational_phase.py
DELETED
@@ -1,19 +0,0 @@
-# file: autobyteus/autobyteus/agent_team/phases/agent_team_operational_phase.py
-from enum import Enum
-
-class AgentTeamOperationalPhase(str, Enum):
-    """Defines the operational phases of an AgentTeam."""
-    UNINITIALIZED = "uninitialized"
-    BOOTSTRAPPING = "bootstrapping"
-    IDLE = "idle"
-    PROCESSING = "processing"
-    SHUTTING_DOWN = "shutting_down"
-    SHUTDOWN_COMPLETE = "shutdown_complete"
-    ERROR = "error"
-
-    def is_terminal(self) -> bool:
-        """Checks if the phase is a terminal state."""
-        return self in [AgentTeamOperationalPhase.SHUTDOWN_COMPLETE, AgentTeamOperationalPhase.ERROR]
-
-    def __str__(self) -> str:
-        return self.value

autobyteus/agent_team/phases/agent_team_phase_manager.py
DELETED
@@ -1,48 +0,0 @@
-# file: autobyteus/autobyteus/agent_team/phases/agent_team_phase_manager.py
-import logging
-from typing import TYPE_CHECKING, Optional
-
-from autobyteus.agent_team.phases.agent_team_operational_phase import AgentTeamOperationalPhase
-
-if TYPE_CHECKING:
-    from autobyteus.agent_team.context.agent_team_context import AgentTeamContext
-    from autobyteus.agent_team.streaming.agent_team_event_notifier import AgentTeamExternalEventNotifier
-
-logger = logging.getLogger(__name__)
-
-class AgentTeamPhaseManager:
-    """Manages the operational phase of an agent team."""
-    def __init__(self, context: 'AgentTeamContext', notifier: 'AgentTeamExternalEventNotifier'):
-        self.context = context
-        self.notifier = notifier
-        self.context.state.current_phase = AgentTeamOperationalPhase.UNINITIALIZED
-        logger.debug(f"AgentTeamPhaseManager initialized for team '{context.team_id}'.")
-
-    async def _transition_phase(self, new_phase: AgentTeamOperationalPhase, extra_data: Optional[dict] = None):
-        old_phase = self.context.state.current_phase
-        if old_phase == new_phase:
-            return
-        logger.info(f"Team '{self.context.team_id}' transitioning from {old_phase.value} to {new_phase.value}.")
-        self.context.state.current_phase = new_phase
-        self.notifier.notify_phase_change(new_phase, old_phase, extra_data)
-
-    async def notify_bootstrapping_started(self):
-        await self._transition_phase(AgentTeamOperationalPhase.BOOTSTRAPPING)
-
-    async def notify_initialization_complete(self):
-        await self._transition_phase(AgentTeamOperationalPhase.IDLE)
-
-    async def notify_processing_started(self):
-        await self._transition_phase(AgentTeamOperationalPhase.PROCESSING)
-
-    async def notify_processing_complete_and_idle(self):
-        await self._transition_phase(AgentTeamOperationalPhase.IDLE)
-
-    async def notify_error_occurred(self, error_message: str, error_details: Optional[str] = None):
-        await self._transition_phase(AgentTeamOperationalPhase.ERROR, {"error_message": error_message, "error_details": error_details})
-
-    async def notify_shutdown_initiated(self):
-        await self._transition_phase(AgentTeamOperationalPhase.SHUTTING_DOWN)
-
-    async def notify_final_shutdown_complete(self):
-        await self._transition_phase(AgentTeamOperationalPhase.SHUTDOWN_COMPLETE)

autobyteus/llm/api/bedrock_llm.py
DELETED
@@ -1,92 +0,0 @@
-from typing import Dict, Optional, List, AsyncGenerator
-import boto3
-import json
-import os
-from botocore.exceptions import ClientError
-from autobyteus.llm.models import LLMModel
-from autobyteus.llm.base_llm import BaseLLM
-from autobyteus.llm.utils.llm_config import LLMConfig
-from autobyteus.llm.utils.messages import MessageRole, Message
-from autobyteus.llm.utils.token_usage import TokenUsage
-from autobyteus.llm.utils.response_types import CompleteResponse, ChunkResponse
-from autobyteus.llm.user_message import LLMUserMessage
-
-class BedrockLLM(BaseLLM):
-    def __init__(self, model: LLMModel = None, llm_config: LLMConfig = None):
-        if model is None:
-            model = LLMModel.BEDROCK_CLAUDE_3_5_SONNET_API
-        if llm_config is None:
-            llm_config = LLMConfig()
-
-        super().__init__(model=model, llm_config=llm_config)
-        self.client = self.initialize()
-
-    @classmethod
-    def initialize(cls):
-        aws_access_key = os.environ.get("AWS_ACCESS_KEY_ID")
-        aws_secret_key = os.environ.get("AWS_SECRET_ACCESS_KEY")
-        region = os.environ.get("AWS_REGION", "us-east-1")
-
-        if not (aws_access_key and aws_secret_key):
-            raise ValueError(
-                "AWS credentials not found. Please set AWS_ACCESS_KEY_ID and "
-                "AWS_SECRET_ACCESS_KEY environment variables."
-            )
-
-        try:
-            return boto3.client(
-                service_name='bedrock-runtime',
-                region_name=region,
-                aws_access_key_id=aws_access_key,
-                aws_secret_access_key=aws_secret_key
-            )
-        except Exception as e:
-            raise ValueError(f"Failed to initialize Bedrock client: {str(e)}")
-
-    async def _send_user_message_to_llm(self, user_message: LLMUserMessage, **kwargs) -> CompleteResponse:
-        self.add_user_message(user_message)
-
-        # NOTE: This implementation does not yet support multimodal inputs for Bedrock.
-        # It will only send the text content.
-
-        request_body = json.dumps({
-            "anthropic_version": "bedrock-2023-05-31",
-            "max_tokens": 1000,
-            "temperature": 0,
-            "messages": [msg.to_dict() for msg in self.messages if msg.role != MessageRole.SYSTEM],
-            "system": self.system_message if self.system_message else ""
-        })
-
-        try:
-            response = self.client.invoke_model(
-                modelId=self.model.value,
-                body=request_body
-            )
-            response_body = json.loads(response['body'].read())
-            assistant_message = response_body['content'][0]['text']
-            self.add_assistant_message(assistant_message)
-
-            token_usage = TokenUsage(
-                prompt_tokens=0,
-                completion_tokens=0,
-                total_tokens=0
-            )
-
-            return CompleteResponse(
-                content=assistant_message,
-                usage=token_usage
-            )
-        except ClientError as e:
-            error_code = e.response['Error']['Code']
-            error_message = e.response['Error']['Message']
-            raise ValueError(f"Bedrock API error: {error_code} - {error_message}")
-        except Exception as e:
-            raise ValueError(f"Error in Bedrock API call: {str(e)}")
-
-    async def _stream_user_message_to_llm(self, user_message: LLMUserMessage, **kwargs) -> AsyncGenerator[ChunkResponse, None]:
-        # Placeholder for future implementation
-        response = await self._send_user_message_to_llm(user_message, **kwargs)
-        yield ChunkResponse(content=response.content, is_complete=True, usage=response.usage)
-
-    async def cleanup(self):
-        await super().cleanup()

autobyteus/llm/api/groq_llm.py
DELETED
@@ -1,94 +0,0 @@
-from typing import Dict, Optional, List, AsyncGenerator
-import logging
-import os
-from autobyteus.llm.models import LLMModel
-from autobyteus.llm.base_llm import BaseLLM
-from autobyteus.llm.utils.llm_config import LLMConfig
-from autobyteus.llm.utils.messages import MessageRole, Message
-from autobyteus.llm.utils.token_usage import TokenUsage
-from autobyteus.llm.utils.response_types import CompleteResponse, ChunkResponse
-from autobyteus.llm.user_message import LLMUserMessage
-
-logger = logging.getLogger(__name__)
-
-class GroqLLM(BaseLLM):
-    def __init__(self, model: LLMModel = None, llm_config: LLMConfig = None):
-        # Provide defaults if not specified
-        if model is None:
-            model = LLMModel.LLAMA_3_1_70B_VERSATILE_API
-        if llm_config is None:
-            llm_config = LLMConfig()
-
-        super().__init__(model=model, llm_config=llm_config)
-        self.client = self.initialize()
-
-    @classmethod
-    def initialize(cls):
-        groq_api_key = os.environ.get("GROQ_API_KEY")
-        if not groq_api_key:
-            raise ValueError(
-                "GROQ_API_KEY environment variable is not set. "
-                "Please set this variable in your environment."
-            )
-        try:
-            # Initialize Groq client here
-            # Placeholder for actual initialization
-            return "GroqClientInitialized"
-        except Exception as e:
-            raise ValueError(f"Failed to initialize Groq client: {str(e)}")
-
-    async def _send_user_message_to_llm(self, user_message: LLMUserMessage, **kwargs) -> CompleteResponse:
-        self.add_user_message(user_message)
-        try:
-            # Placeholder for sending message to Groq API
-            assistant_message = "Response from Groq API"
-            self.add_assistant_message(assistant_message)
-
-            token_usage = TokenUsage(
-                prompt_tokens=0,
-                completion_tokens=0,
-                total_tokens=0
-            )
-
-            return CompleteResponse(
-                content=assistant_message,
-                usage=token_usage
-            )
-        except Exception as e:
-            logger.error(f"Error in Groq API call: {str(e)}")
-            raise ValueError(f"Error in Groq API call: {str(e)}")
-
-    async def _stream_user_message_to_llm(
-        self, user_message: LLMUserMessage, **kwargs
-    ) -> AsyncGenerator[ChunkResponse, None]:
-        self.add_user_message(user_message)
-        complete_response = ""
-        try:
-            # Placeholder for streaming from Groq API
-            tokens = ["Response ", "streamed ", "from ", "Groq ", "API."]
-            for token in tokens:
-                complete_response += token
-                yield ChunkResponse(
-                    content=token,
-                    is_complete=False
-                )
-
-            token_usage = TokenUsage(
-                prompt_tokens=0,
-                completion_tokens=0,
-                total_tokens=0
-            )
-
-            yield ChunkResponse(
-                content="",
-                is_complete=True,
-                usage=token_usage
-            )
-
-            self.add_assistant_message(complete_response)
-        except Exception as e:
-            logger.error(f"Error in Groq API streaming: {str(e)}")
-            raise ValueError(f"Error in Groq API streaming: {str(e)}")
-
-    async def cleanup(self):
-        await super().cleanup()

autobyteus/llm/api/nvidia_llm.py
DELETED
@@ -1,108 +0,0 @@
-import logging
-from typing import Dict, Optional, List, AsyncGenerator
-from openai import OpenAI
-import os
-from autobyteus.llm.models import LLMModel
-from autobyteus.llm.base_llm import BaseLLM
-from autobyteus.llm.utils.llm_config import LLMConfig
-from autobyteus.llm.utils.messages import MessageRole, Message
-from autobyteus.llm.utils.token_usage import TokenUsage
-from autobyteus.llm.utils.response_types import CompleteResponse, ChunkResponse
-from autobyteus.llm.user_message import LLMUserMessage
-
-logger = logging.getLogger(__name__)
-
-class NvidiaLLM(BaseLLM):
-    def __init__(self, model: LLMModel = None, llm_config: LLMConfig = None):
-        # Provide defaults if not specified
-        if model is None:
-            model = LLMModel.NVIDIA_LLAMA_3_1_NEMOTRON_70B_INSTRUCT_API
-        if llm_config is None:
-            llm_config = LLMConfig()
-
-        super().__init__(model=model, llm_config=llm_config)
-        self.client = self.initialize()
-
-    @classmethod
-    def initialize(cls):
-        nvidia_api_key = os.environ.get("NVIDIA_API_KEY")
-        if not nvidia_api_key:
-            raise ValueError(
-                "NVIDIA_API_KEY environment variable is not set. "
-                "Please set this variable in your environment."
-            )
-        try:
-            return OpenAI(
-                base_url="https://integrate.api.nvidia.com/v1",
-                api_key=nvidia_api_key
-            )
-        except Exception as e:
-            raise ValueError(f"Failed to initialize Nvidia client: {str(e)}")
-
-    async def _send_user_message_to_llm(self, user_message: LLMUserMessage, **kwargs) -> CompleteResponse:
-        self.add_user_message(user_message)
-        try:
-            completion = self.client.chat.completions.create(
-                model=self.model.value,
-                messages=[msg.to_dict() for msg in self.messages],
-                temperature=0,
-                top_p=1,
-                max_tokens=1024,
-                stream=False
-            )
-            assistant_message = completion.choices[0].message.content
-            self.add_assistant_message(assistant_message)
-
-            token_usage = TokenUsage(
-                prompt_tokens=0,
-                completion_tokens=0,
-                total_tokens=0
-            )
-
-            return CompleteResponse(
-                content=assistant_message,
-                usage=token_usage
-            )
-        except Exception as e:
-            raise ValueError(f"Error in Nvidia API call: {str(e)}")
-
-    async def _stream_user_message_to_llm(self, user_message: LLMUserMessage, **kwargs) -> AsyncGenerator[ChunkResponse, None]:
-        self.add_user_message(user_message)
-        complete_response = ""
-        try:
-            completion = self.client.chat.completions.create(
-                model=self.model.value,
-                messages=[msg.to_dict() for msg in self.messages],
-                temperature=0,
-                top_p=1,
-                max_tokens=1024,
-                stream=True
-            )
-
-            for chunk in completion:
-                if chunk.choices[0].delta.content is not None:
-                    token = chunk.choices[0].delta.content
-                    complete_response += token
-                    yield ChunkResponse(
-                        content=token,
-                        is_complete=False
-                    )
-
-            token_usage = TokenUsage(
-                prompt_tokens=0,
-                completion_tokens=0,
-                total_tokens=0
-            )
-
-            yield ChunkResponse(
-                content="",
-                is_complete=True,
-                usage=token_usage
-            )
-
-            self.add_assistant_message(complete_response)
-        except Exception as e:
-            raise ValueError(f"Error in Nvidia API streaming call: {str(e)}")
-
-    async def cleanup(self):
-        await super().cleanup()

autobyteus/llm/utils/token_pricing_config.py
DELETED
@@ -1,87 +0,0 @@
-
-from dataclasses import dataclass
-from autobyteus.llm.models import LLMModel
-
-@dataclass
-class TokenPricingConfig:
-    """
-    Represents the pricing configuration for input and output tokens.
-    Prices are in USD per million tokens.
-    """
-    input_token_pricing: float  # USD per million tokens
-    output_token_pricing: float  # USD per million tokens
-
-class TokenPricingConfigRegistry:
-    """
-    Registry for token pricing configurations of different LLM models.
-    Uses model names (enum values) as keys for simplicity.
-    """
-    _prices = {
-        # ChatGPT models
-        LLMModel.GPT_4o_API.value: TokenPricingConfig(2.50, 10.00),
-        LLMModel.o1_API.value: TokenPricingConfig(15.00, 60.00),
-        LLMModel.o1_MINI_API.value: TokenPricingConfig(3.00, 12.00),
-        LLMModel.CHATGPT_4O_LATEST_API.value: TokenPricingConfig(2.50, 10.00),
-        LLMModel.GPT_3_5_TURBO_API.value: TokenPricingConfig(1.50, 2.00),
-
-        # Mistral models
-        LLMModel.MISTRAL_SMALL_API.value: TokenPricingConfig(0.20, 0.60),
-        LLMModel.MISTRAL_MEDIUM_API.value: TokenPricingConfig(0.20, 0.60),
-        LLMModel.MISTRAL_LARGE_API.value: TokenPricingConfig(2.00, 6.00),
-
-        # Groq models
-        LLMModel.GEMMA_2_9B_IT_API.value: TokenPricingConfig(0.0000005, 0.0000005),
-        LLMModel.GEMMA_7B_IT_API.value: TokenPricingConfig(0.0000007, 0.0000007),
-        LLMModel.LLAMA_3_1_405B_REASONING_API.value: TokenPricingConfig(0.00002, 0.00002),
-        LLMModel.LLAMA_3_1_70B_VERSATILE_API.value: TokenPricingConfig(0.00001, 0.00001),
-        LLMModel.LLAMA_3_1_8B_INSTANT_API.value: TokenPricingConfig(0.0000005, 0.0000005),
-        LLMModel.LLAMA3_70B_8192_API.value: TokenPricingConfig(0.00001, 0.00001),
-        LLMModel.LLAMA3_8B_8192_API.value: TokenPricingConfig(0.0000005, 0.0000005),
-        LLMModel.MIXTRAL_8X7B_32768_API.value: TokenPricingConfig(0.000001, 0.000001),
-
-        # Gemini models
-        LLMModel.GEMINI_1_0_PRO_API.value: TokenPricingConfig(0.00000025, 0.0000005),
-        LLMModel.GEMINI_1_5_PRO_API.value: TokenPricingConfig(0.0000005, 0.000001),
-        LLMModel.GEMINI_1_5_PRO_EXPERIMENTAL_API.value: TokenPricingConfig(0.0000005, 0.000001),
-        LLMModel.GEMINI_1_5_FLASH_API.value: TokenPricingConfig(0.0000001, 0.0000002),
-        LLMModel.GEMMA_2_2B_API.value: TokenPricingConfig(0.0000001, 0.0000002),
-        LLMModel.GEMMA_2_9B_API.value: TokenPricingConfig(0.0000001, 0.0000002),
-        LLMModel.GEMMA_2_27B_API.value: TokenPricingConfig(0.0000001, 0.0000002),
-
-        # Claude models
-        LLMModel.CLAUDE_3_HAIKU_API.value: TokenPricingConfig(0.25, 1.25),
-        LLMModel.CLAUDE_3_OPUS_API.value: TokenPricingConfig(15.00, 75.00),
-        LLMModel.CLAUDE_3_5_SONNET_API.value: TokenPricingConfig(3.00, 15.00),
-        LLMModel.CLAUDE_3_SONNET_API.value: TokenPricingConfig(3.00, 15.00),
-        LLMModel.BEDROCK_CLAUDE_3_5_SONNET_API.value: TokenPricingConfig(3.00, 15.00),
-
-        # Perplexity models
-        LLMModel.LLAMA_3_1_SONAR_LARGE_128K_ONLINE_API.value: TokenPricingConfig(0.000001, 0.000001),
-        LLMModel.LLAMA_3_1_SONAR_SMALL_128K_ONLINE_API.value: TokenPricingConfig(0.0000005, 0.0000005),
-        LLMModel.LLAMA_3_1_SONAR_LARGE_128K_CHAT_API.value: TokenPricingConfig(0.000001, 0.000001),
-        LLMModel.LLAMA_3_1_SONAR_SMALL_128K_CHAT_API.value: TokenPricingConfig(0.0000005, 0.0000005),
-        LLMModel.LLAMA_3_1_8B_INSTRUCT_API.value: TokenPricingConfig(0.0000005, 0.0000005),
-        LLMModel.LLAMA_3_1_70B_INSTRUCT_API.value: TokenPricingConfig(0.00001, 0.00001),
-        LLMModel.GEMMA_2_27B_IT_API.value: TokenPricingConfig(0.000001, 0.000001),
-        LLMModel.NEMOTRON_4_340B_INSTRUCT_API.value: TokenPricingConfig(0.00002, 0.00002),
-        LLMModel.MIXTRAL_8X7B_INSTRUCT_API.value: TokenPricingConfig(0.000001, 0.000001),
-
-        # DeepSeek models
-        LLMModel.DEEPSEEK_CHAT_API.value: TokenPricingConfig(0.14, 0.28),
-
-        # NVIDIA models
-        LLMModel.NVIDIA_LLAMA_3_1_NEMOTRON_70B_INSTRUCT_API.value: TokenPricingConfig(0.00002, 0.00002),
-    }
-
-    @classmethod
-    def get_pricing(cls, model_name: str) -> TokenPricingConfig:
-        """
-        Get the token pricing configuration for a given model name.
-
-        Args:
-            model_name (str): The name of the model to get pricing for.
-
-        Returns:
-            TokenPricingConfig: The token pricing configuration for the model.
-        """
-        return cls._prices.get(model_name, TokenPricingConfig(0, 0))