autobyteus 1.1.4__py3-none-any.whl → 1.1.5__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- autobyteus/agent/context/__init__.py +4 -2
- autobyteus/agent/context/agent_config.py +0 -4
- autobyteus/agent/context/agent_context_registry.py +73 -0
- autobyteus/agent/events/notifiers.py +4 -0
- autobyteus/agent/handlers/inter_agent_message_event_handler.py +7 -2
- autobyteus/agent/handlers/llm_complete_response_received_event_handler.py +19 -19
- autobyteus/agent/handlers/user_input_message_event_handler.py +15 -0
- autobyteus/agent/message/send_message_to.py +29 -23
- autobyteus/agent/runtime/agent_runtime.py +10 -2
- autobyteus/agent/sender_type.py +15 -0
- autobyteus/agent/streaming/agent_event_stream.py +6 -0
- autobyteus/agent/streaming/stream_event_payloads.py +12 -0
- autobyteus/agent/streaming/stream_events.py +3 -0
- autobyteus/agent/system_prompt_processor/tool_manifest_injector_processor.py +7 -4
- autobyteus/agent_team/__init__.py +1 -0
- autobyteus/agent_team/agent_team.py +93 -0
- autobyteus/agent_team/agent_team_builder.py +184 -0
- autobyteus/agent_team/base_agent_team.py +86 -0
- autobyteus/agent_team/bootstrap_steps/__init__.py +24 -0
- autobyteus/agent_team/bootstrap_steps/agent_configuration_preparation_step.py +73 -0
- autobyteus/agent_team/bootstrap_steps/agent_team_bootstrapper.py +54 -0
- autobyteus/agent_team/bootstrap_steps/agent_team_runtime_queue_initialization_step.py +25 -0
- autobyteus/agent_team/bootstrap_steps/base_agent_team_bootstrap_step.py +23 -0
- autobyteus/agent_team/bootstrap_steps/coordinator_initialization_step.py +41 -0
- autobyteus/agent_team/bootstrap_steps/coordinator_prompt_preparation_step.py +85 -0
- autobyteus/agent_team/bootstrap_steps/task_notifier_initialization_step.py +51 -0
- autobyteus/agent_team/bootstrap_steps/team_context_initialization_step.py +45 -0
- autobyteus/agent_team/context/__init__.py +17 -0
- autobyteus/agent_team/context/agent_team_config.py +33 -0
- autobyteus/agent_team/context/agent_team_context.py +61 -0
- autobyteus/agent_team/context/agent_team_runtime_state.py +56 -0
- autobyteus/agent_team/context/team_manager.py +147 -0
- autobyteus/agent_team/context/team_node_config.py +76 -0
- autobyteus/agent_team/events/__init__.py +29 -0
- autobyteus/agent_team/events/agent_team_event_dispatcher.py +39 -0
- autobyteus/agent_team/events/agent_team_events.py +53 -0
- autobyteus/agent_team/events/agent_team_input_event_queue_manager.py +21 -0
- autobyteus/agent_team/exceptions.py +8 -0
- autobyteus/agent_team/factory/__init__.py +9 -0
- autobyteus/agent_team/factory/agent_team_factory.py +99 -0
- autobyteus/agent_team/handlers/__init__.py +19 -0
- autobyteus/agent_team/handlers/agent_team_event_handler_registry.py +23 -0
- autobyteus/agent_team/handlers/base_agent_team_event_handler.py +16 -0
- autobyteus/agent_team/handlers/inter_agent_message_request_event_handler.py +61 -0
- autobyteus/agent_team/handlers/lifecycle_agent_team_event_handler.py +27 -0
- autobyteus/agent_team/handlers/process_user_message_event_handler.py +46 -0
- autobyteus/agent_team/handlers/tool_approval_team_event_handler.py +48 -0
- autobyteus/agent_team/phases/__init__.py +11 -0
- autobyteus/agent_team/phases/agent_team_operational_phase.py +19 -0
- autobyteus/agent_team/phases/agent_team_phase_manager.py +48 -0
- autobyteus/agent_team/runtime/__init__.py +13 -0
- autobyteus/agent_team/runtime/agent_team_runtime.py +82 -0
- autobyteus/agent_team/runtime/agent_team_worker.py +117 -0
- autobyteus/agent_team/shutdown_steps/__init__.py +17 -0
- autobyteus/agent_team/shutdown_steps/agent_team_shutdown_orchestrator.py +35 -0
- autobyteus/agent_team/shutdown_steps/agent_team_shutdown_step.py +42 -0
- autobyteus/agent_team/shutdown_steps/base_agent_team_shutdown_step.py +16 -0
- autobyteus/agent_team/shutdown_steps/bridge_cleanup_step.py +28 -0
- autobyteus/agent_team/shutdown_steps/sub_team_shutdown_step.py +41 -0
- autobyteus/agent_team/streaming/__init__.py +26 -0
- autobyteus/agent_team/streaming/agent_event_bridge.py +48 -0
- autobyteus/agent_team/streaming/agent_event_multiplexer.py +70 -0
- autobyteus/agent_team/streaming/agent_team_event_notifier.py +64 -0
- autobyteus/agent_team/streaming/agent_team_event_stream.py +33 -0
- autobyteus/agent_team/streaming/agent_team_stream_event_payloads.py +32 -0
- autobyteus/agent_team/streaming/agent_team_stream_events.py +56 -0
- autobyteus/agent_team/streaming/team_event_bridge.py +50 -0
- autobyteus/agent_team/task_notification/__init__.py +11 -0
- autobyteus/agent_team/task_notification/system_event_driven_agent_task_notifier.py +164 -0
- autobyteus/agent_team/task_notification/task_notification_mode.py +24 -0
- autobyteus/agent_team/utils/__init__.py +9 -0
- autobyteus/agent_team/utils/wait_for_idle.py +46 -0
- autobyteus/cli/agent_team_tui/__init__.py +4 -0
- autobyteus/cli/agent_team_tui/app.py +210 -0
- autobyteus/cli/agent_team_tui/state.py +180 -0
- autobyteus/cli/agent_team_tui/widgets/__init__.py +6 -0
- autobyteus/cli/agent_team_tui/widgets/agent_list_sidebar.py +149 -0
- autobyteus/cli/agent_team_tui/widgets/focus_pane.py +320 -0
- autobyteus/cli/agent_team_tui/widgets/logo.py +20 -0
- autobyteus/cli/agent_team_tui/widgets/renderables.py +77 -0
- autobyteus/cli/agent_team_tui/widgets/shared.py +60 -0
- autobyteus/cli/agent_team_tui/widgets/status_bar.py +14 -0
- autobyteus/cli/agent_team_tui/widgets/task_board_panel.py +82 -0
- autobyteus/events/event_types.py +7 -2
- autobyteus/llm/api/autobyteus_llm.py +11 -12
- autobyteus/llm/api/lmstudio_llm.py +10 -13
- autobyteus/llm/api/ollama_llm.py +8 -13
- autobyteus/llm/autobyteus_provider.py +73 -46
- autobyteus/llm/llm_factory.py +102 -140
- autobyteus/llm/lmstudio_provider.py +63 -48
- autobyteus/llm/models.py +83 -53
- autobyteus/llm/ollama_provider.py +69 -61
- autobyteus/llm/ollama_provider_resolver.py +1 -0
- autobyteus/llm/providers.py +13 -13
- autobyteus/llm/runtimes.py +11 -0
- autobyteus/task_management/__init__.py +43 -0
- autobyteus/task_management/base_task_board.py +68 -0
- autobyteus/task_management/converters/__init__.py +11 -0
- autobyteus/task_management/converters/task_board_converter.py +64 -0
- autobyteus/task_management/converters/task_plan_converter.py +48 -0
- autobyteus/task_management/deliverable.py +16 -0
- autobyteus/task_management/deliverables/__init__.py +8 -0
- autobyteus/task_management/deliverables/file_deliverable.py +15 -0
- autobyteus/task_management/events.py +27 -0
- autobyteus/task_management/in_memory_task_board.py +126 -0
- autobyteus/task_management/schemas/__init__.py +15 -0
- autobyteus/task_management/schemas/deliverable_schema.py +13 -0
- autobyteus/task_management/schemas/plan_definition.py +35 -0
- autobyteus/task_management/schemas/task_status_report.py +27 -0
- autobyteus/task_management/task_plan.py +110 -0
- autobyteus/task_management/tools/__init__.py +14 -0
- autobyteus/task_management/tools/get_task_board_status.py +68 -0
- autobyteus/task_management/tools/publish_task_plan.py +113 -0
- autobyteus/task_management/tools/update_task_status.py +135 -0
- autobyteus/tools/bash/bash_executor.py +59 -14
- autobyteus/tools/mcp/config_service.py +63 -58
- autobyteus/tools/mcp/server/http_managed_mcp_server.py +14 -2
- autobyteus/tools/mcp/server/stdio_managed_mcp_server.py +14 -2
- autobyteus/tools/mcp/server_instance_manager.py +30 -4
- autobyteus/tools/mcp/tool_registrar.py +103 -50
- autobyteus/tools/parameter_schema.py +17 -11
- autobyteus/tools/registry/tool_definition.py +24 -29
- autobyteus/tools/tool_category.py +1 -0
- autobyteus/tools/usage/formatters/default_json_example_formatter.py +78 -3
- autobyteus/tools/usage/formatters/default_xml_example_formatter.py +23 -3
- autobyteus/tools/usage/formatters/gemini_json_example_formatter.py +6 -0
- autobyteus/tools/usage/formatters/google_json_example_formatter.py +7 -0
- autobyteus/tools/usage/formatters/openai_json_example_formatter.py +6 -4
- autobyteus/tools/usage/parsers/gemini_json_tool_usage_parser.py +23 -7
- autobyteus/tools/usage/parsers/provider_aware_tool_usage_parser.py +14 -25
- autobyteus/tools/usage/providers/__init__.py +2 -12
- autobyteus/tools/usage/providers/tool_manifest_provider.py +36 -29
- autobyteus/tools/usage/registries/__init__.py +7 -12
- autobyteus/tools/usage/registries/tool_formatter_pair.py +15 -0
- autobyteus/tools/usage/registries/tool_formatting_registry.py +58 -0
- autobyteus/tools/usage/registries/tool_usage_parser_registry.py +55 -0
- {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/METADATA +3 -3
- {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/RECORD +146 -72
- examples/agent_team/__init__.py +1 -0
- examples/run_browser_agent.py +17 -15
- examples/run_google_slides_agent.py +17 -16
- examples/run_poem_writer.py +22 -12
- examples/run_sqlite_agent.py +17 -15
- autobyteus/tools/mcp/call_handlers/__init__.py +0 -16
- autobyteus/tools/mcp/call_handlers/base_handler.py +0 -40
- autobyteus/tools/mcp/call_handlers/stdio_handler.py +0 -76
- autobyteus/tools/mcp/call_handlers/streamable_http_handler.py +0 -55
- autobyteus/tools/usage/providers/json_example_provider.py +0 -32
- autobyteus/tools/usage/providers/json_schema_provider.py +0 -35
- autobyteus/tools/usage/providers/json_tool_usage_parser_provider.py +0 -28
- autobyteus/tools/usage/providers/xml_example_provider.py +0 -28
- autobyteus/tools/usage/providers/xml_schema_provider.py +0 -29
- autobyteus/tools/usage/providers/xml_tool_usage_parser_provider.py +0 -26
- autobyteus/tools/usage/registries/json_example_formatter_registry.py +0 -51
- autobyteus/tools/usage/registries/json_schema_formatter_registry.py +0 -51
- autobyteus/tools/usage/registries/json_tool_usage_parser_registry.py +0 -42
- autobyteus/tools/usage/registries/xml_example_formatter_registry.py +0 -30
- autobyteus/tools/usage/registries/xml_schema_formatter_registry.py +0 -33
- autobyteus/tools/usage/registries/xml_tool_usage_parser_registry.py +0 -30
- examples/workflow/__init__.py +0 -1
- examples/workflow/run_basic_research_workflow.py +0 -189
- examples/workflow/run_code_review_workflow.py +0 -269
- examples/workflow/run_debate_workflow.py +0 -212
- examples/workflow/run_workflow_with_tui.py +0 -153
- {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/WHEEL +0 -0
- {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/licenses/LICENSE +0 -0
- {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/top_level.txt +0 -0
autobyteus/cli/agent_team_tui/widgets/task_board_panel.py
ADDED
@@ -0,0 +1,82 @@
+import logging
+from typing import List, Optional, Dict, Union
+
+from rich.table import Table
+from rich.panel import Panel
+from rich.text import Text
+from textual.widgets import Static
+
+from autobyteus.task_management.task_plan import Task
+from autobyteus.task_management.base_task_board import TaskStatus
+from .shared import TASK_STATUS_ICONS, LOG_ICON
+
+logger = logging.getLogger(__name__)
+
+class TaskBoardPanel(Static):
+    """A widget to display the team's task board."""
+
+    def __init__(self, tasks: Optional[List[Task]], statuses: Dict[str, TaskStatus], team_name: str, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self.tasks = tasks or []
+        self.statuses = statuses or {}
+        self.team_name = team_name
+
+    def compose(self) -> None:
+        if not self.tasks:
+            yield Static(Panel("No task plan has been published yet.", title="Task Board", border_style="yellow", title_align="left"))
+            return
+
+        table = Table(
+            expand=True,
+            show_header=True,
+            header_style="bold magenta",
+            show_lines=True
+        )
+        table.add_column("ID", justify="left", style="cyan", no_wrap=True, min_width=10)
+        table.add_column("Name", style="white", min_width=15)
+        table.add_column("Status", justify="left", style="white")
+        table.add_column("Assigned To", justify="center", style="green")
+        table.add_column("Deliverables", justify="left", style="cyan", min_width=30)
+        table.add_column("Depends On", justify="center", style="dim")
+
+        # Create a name-to-ID map to resolve dependency names
+        id_to_name_map = {task.task_id: task.task_name for task in self.tasks}
+
+        # Sort tasks by name for consistent ordering
+        sorted_tasks = sorted(self.tasks, key=lambda t: t.task_name)
+
+        for task in sorted_tasks:
+            task_status = self.statuses.get(task.task_id, TaskStatus.NOT_STARTED)
+            status_icon = TASK_STATUS_ICONS.get(task_status, "❓")
+            status_text = f"{status_icon} {task_status.value.upper().replace('_', ' ')}"
+
+            status_style = "default"
+            if task_status == TaskStatus.COMPLETED:
+                status_style = "strike dim green"
+            elif task_status == TaskStatus.FAILED:
+                status_style = "bold red"
+
+            # Create a renderable for the deliverables column
+            deliverables_renderable: Union[str, Text] = "N/A"
+            if task.file_deliverables:
+                text = Text()
+                for i, d in enumerate(task.file_deliverables):
+                    if i > 0:
+                        text.append("\n")  # Add a newline for spacing between deliverables
+                    text.append(f"{LOG_ICON} {d.file_path}\n", style="bold")
+                    text.append(f" └─ {d.summary}", style="dim")
+                deliverables_renderable = text
+
+            # Resolve dependency IDs to names for display
+            dep_names = [id_to_name_map.get(dep_id, dep_id) for dep_id in task.dependencies]
+
+            table.add_row(
+                task.task_id,
+                task.task_name,
+                Text(status_text, style=status_style),
+                task.assignee_name or "N/A",
+                deliverables_renderable,
+                ", ".join(dep_names)
+            )
+
+        yield Static(Panel(table, title="Task Board", border_style="blue", title_align="left"))
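
For orientation, a minimal, hypothetical sketch of mounting this panel in a Textual app. The Task constructor and its field names are assumed from the attributes the panel reads above, not taken from package documentation:

from textual.app import App, ComposeResult

from autobyteus.cli.agent_team_tui.widgets.task_board_panel import TaskBoardPanel
from autobyteus.task_management.base_task_board import TaskStatus
from autobyteus.task_management.task_plan import Task


class TaskBoardDemo(App):
    """Illustrative harness, not part of the release."""

    def compose(self) -> ComposeResult:
        # Assumed Task constructor; field names mirror the attributes TaskBoardPanel reads.
        tasks = [Task(task_id="t-1", task_name="draft_outline", assignee_name="writer", dependencies=[])]
        statuses = {"t-1": TaskStatus.COMPLETED}
        yield TaskBoardPanel(tasks=tasks, statuses=statuses, team_name="demo_team")


if __name__ == "__main__":
    TaskBoardDemo().run()
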
autobyteus/events/event_types.py
CHANGED
@@ -34,6 +34,7 @@ class EventType(Enum):
     AGENT_DATA_ASSISTANT_COMPLETE_RESPONSE = "agent_data_assistant_complete_response"
     AGENT_DATA_TOOL_LOG = "agent_data_tool_log"
     AGENT_DATA_TOOL_LOG_STREAM_END = "agent_data_tool_log_stream_end"
+    AGENT_DATA_SYSTEM_TASK_NOTIFICATION_RECEIVED = "agent_data_system_task_notification_received" # NEW
 
     # --- Agent Requests for External Interaction ---
     AGENT_REQUEST_TOOL_INVOCATION_APPROVAL = "agent_request_tool_invocation_approval"
@@ -42,8 +43,12 @@ class EventType(Enum):
     # --- Agent Errors (not necessarily phase changes, e.g., error during output generation) ---
     AGENT_ERROR_OUTPUT_GENERATION = "agent_error_output_generation"
 
-    # ---
-
+    # --- Agent Team Events ---
+    TEAM_STREAM_EVENT = "team_stream_event" # For unified agent team event stream
+
+    # --- Task Board Events ---
+    TASK_BOARD_PLAN_PUBLISHED = "task_board_plan_published"
+    TASK_BOARD_STATUS_UPDATED = "task_board_status_updated"
 
     def __str__(self):
        return self.value
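
A quick sanity check on the new members, using only the __str__ behaviour visible in this hunk:

# The new enum members stringify to their wire values.
from autobyteus.events.event_types import EventType

assert str(EventType.TEAM_STREAM_EVENT) == "team_stream_event"
assert str(EventType.TASK_BOARD_PLAN_PUBLISHED) == "task_board_plan_published"
assert str(EventType.TASK_BOARD_STATUS_UPDATED) == "task_board_status_updated"
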
autobyteus/llm/api/autobyteus_llm.py
CHANGED
@@ -11,18 +11,17 @@ import uuid
 logger = logging.getLogger(__name__)
 
 class AutobyteusLLM(BaseLLM):
-    def __init__(self, model: LLMModel
-        #
-        if model
-
-
-        if llm_config is None:
-            llm_config = LLMConfig()
-
+    def __init__(self, model: LLMModel, llm_config: LLMConfig):
+        # The host URL is now passed via the model object.
+        if not model.host_url:
+            raise ValueError("AutobyteusLLM requires a host_url to be set in its LLMModel object.")
+
         super().__init__(model=model, llm_config=llm_config)
-
+
+        # Instantiate the client with the specific host for this model.
+        self.client = AutobyteusClient(server_url=self.model.host_url)
         self.conversation_id = str(uuid.uuid4())
-        logger.info(f"AutobyteusLLM initialized
+        logger.info(f"AutobyteusLLM initialized for model '{self.model.model_identifier}' with conversation ID: {self.conversation_id}")
 
     async def _send_user_message_to_llm(
         self,
@@ -34,7 +33,7 @@ class AutobyteusLLM(BaseLLM):
         try:
             response = await self.client.send_message(
                 conversation_id=self.conversation_id,
-                model_name=self.model.name,
+                model_name=self.model.name, # Use `name` as it's the original model name for the API
                 user_message=user_message,
                 image_urls=image_urls
             )
@@ -70,7 +69,7 @@ class AutobyteusLLM(BaseLLM):
         try:
             async for chunk in self.client.stream_message(
                 conversation_id=self.conversation_id,
-                model_name=self.model.name,
+                model_name=self.model.name, # Use `name` for the API call
                 user_message=user_message,
                 image_urls=image_urls
             ):
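
The constructor now takes its connection details from the model object. A hedged sketch of direct construction, mirroring the LLMModel(...) call that autobyteus_provider.py makes later in this diff; every value below is a placeholder and the provider string is an assumption:

from autobyteus.llm.api.autobyteus_llm import AutobyteusLLM
from autobyteus.llm.models import LLMModel
from autobyteus.llm.providers import LLMProvider
from autobyteus.llm.runtimes import LLMRuntime
from autobyteus.llm.utils.llm_config import LLMConfig

# Placeholder model definition; field names follow the registration code in this release.
model = LLMModel(
    name="example-model",
    value="example-model",
    provider=LLMProvider("openai"),      # assumed to be a valid provider value
    llm_class=AutobyteusLLM,
    canonical_name="example-model",
    runtime=LLMRuntime.AUTOBYTEUS,
    host_url="https://localhost:8000",   # required; a missing host_url now raises ValueError
    default_config=LLMConfig(),
)
llm = AutobyteusLLM(model=model, llm_config=LLMConfig())
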
autobyteus/llm/api/lmstudio_llm.py
CHANGED
@@ -9,29 +9,26 @@ logger = logging.getLogger(__name__)
 class LMStudioLLM(OpenAICompatibleLLM):
     """
     LLM class for models served by a local LM Studio instance.
-
-
-    It expects the LM Studio server to be running at the address specified by the `LMSTUDIO_HOST`
-    environment variable, or at `http://localhost:1234` by default.
-
-    Note: The LM Studio server does not require a real API key. A dummy key "lm-studio" is used
-    by default. If you need to use a different key, you can set the `LMSTUDIO_API_KEY`
-    environment variable.
+    This class is now decoupled from environment variables and receives its connection
+    details from the LLMModel object.
     """
-    DEFAULT_LMSTUDIO_HOST = 'http://localhost:1234'
 
     def __init__(self, model: LLMModel, llm_config: LLMConfig):
-
-
+        if not model.host_url:
+            raise ValueError("LMStudioLLM requires a host_url to be set in its LLMModel object.")
+
+        base_url = f"{model.host_url}/v1"
 
+        # The API key is often not needed for LM Studio, but we allow it to be set via env var.
+        # It defaults to a dummy value if not set.
         super().__init__(
             model=model,
             llm_config=llm_config,
             api_key_env_var="LMSTUDIO_API_KEY",
             base_url=base_url,
-            api_key_default="lm-studio"
+            api_key_default="lm-studio" # Dummy key for LM Studio
         )
-        logger.info(f"LMStudioLLM initialized
+        logger.info(f"LMStudioLLM initialized for model '{model.model_identifier}' with base URL: {base_url}")
 
     async def cleanup(self):
         await super().cleanup()
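
The OpenAI-compatible base URL is now derived from the model's host_url rather than from an environment variable. For the default LM Studio address mentioned in the removed docstring, the derivation works out as follows (illustrative only):

# Illustrative derivation of the base URL used by LMStudioLLM.
host_url = "http://localhost:1234"   # typical LM Studio address (placeholder)
base_url = f"{host_url}/v1"
print(base_url)                      # -> http://localhost:1234/v1
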
autobyteus/llm/api/ollama_llm.py
CHANGED
@@ -14,22 +14,17 @@ import os
 logger = logging.getLogger(__name__)
 
 class OllamaLLM(BaseLLM):
-
-
-
-
-
+    def __init__(self, model: LLMModel, llm_config: LLMConfig):
+        # The host URL is now passed via the model object, decoupling from environment variables here.
+        if not model.host_url:
+            raise ValueError("OllamaLLM requires a host_url to be set in its LLMModel object.")
+
+        logger.info(f"Initializing OllamaLLM for model '{model.name}' with host: {model.host_url}")
 
-        self.client = AsyncClient(host=
+        self.client = AsyncClient(host=model.host_url)
 
-        # Provide defaults if not specified
-        if model is None:
-            model = LLMModel.OLLAMA_LLAMA_3_2
-        if llm_config is None:
-            llm_config = LLMConfig()
-
         super().__init__(model=model, llm_config=llm_config)
-        logger.info(f"OllamaLLM initialized with model: {self.model}")
+        logger.info(f"OllamaLLM initialized with model: {self.model.model_identifier}")
 
     async def _send_user_message_to_llm(self, user_message: str, image_urls: Optional[List[str]] = None, **kwargs) -> CompleteResponse:
         self.add_user_message(user_message)
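
Since the implicit defaults (OLLAMA_LLAMA_3_2 and an empty LLMConfig) were removed, callers must now pass both arguments explicitly. A hedged helper sketch; the host value in the comment is a placeholder:

from autobyteus.llm.api.ollama_llm import OllamaLLM
from autobyteus.llm.models import LLMModel
from autobyteus.llm.utils.llm_config import LLMConfig

def make_ollama_llm(model: LLMModel) -> OllamaLLM:
    # model.host_url must point at the Ollama server (e.g. "http://localhost:11434"),
    # otherwise the new constructor raises ValueError.
    return OllamaLLM(model=model, llm_config=LLMConfig())
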
autobyteus/llm/autobyteus_provider.py
CHANGED
@@ -1,8 +1,9 @@
 from autobyteus.llm.api.autobyteus_llm import AutobyteusLLM
 from autobyteus.llm.models import LLMModel
 from autobyteus.llm.providers import LLMProvider
+from autobyteus.llm.runtimes import LLMRuntime
 from autobyteus.llm.utils.llm_config import LLMConfig, TokenPricingConfig
-from typing import Dict, Any, TYPE_CHECKING
+from typing import Dict, Any, TYPE_CHECKING, List, Optional
 import os
 import logging
 from urllib.parse import urlparse
@@ -15,63 +16,89 @@ logger = logging.getLogger(__name__)
 
 class AutobyteusModelProvider:
     DEFAULT_SERVER_URL = 'https://localhost:8000'
-
+
+    @staticmethod
+    def _get_hosts() -> List[str]:
+        """Gets Autobyteus LLM server hosts from env vars, supporting a comma-separated list."""
+        hosts_str = os.getenv('AUTOBYTEUS_LLM_SERVER_HOSTS')
+        if hosts_str:
+            return [host.strip() for host in hosts_str.split(',')]
+
+        legacy_host = os.getenv('AUTOBYTEUS_LLM_SERVER_URL')
+        if legacy_host:
+            return [legacy_host]
+
+        return [AutobyteusModelProvider.DEFAULT_SERVER_URL]
 
     @staticmethod
     def discover_and_register():
-        """Discover and register Autobyteus models
+        """Discover and register Autobyteus models from all configured hosts."""
         try:
-            from autobyteus.llm.llm_factory import LLMFactory
-
-            client = None
-            try:
-                client = AutobyteusClient()
-                response = client.get_available_models_sync()
-            except Exception as e:
-                logger.error(f"Client initialization failed: {str(e)}")
-                return
-            finally:
-                if client:
-                    client.sync_client.close()
+            from autobyteus.llm.llm_factory import LLMFactory
 
-
-
+            hosts = AutobyteusModelProvider._get_hosts()
+            total_registered_count = 0
 
-
-
-
-
+            for host_url in hosts:
+                if not AutobyteusModelProvider.is_valid_url(host_url):
+                    logger.error(f"Invalid Autobyteus host URL: {host_url}, skipping.")
+                    continue
+
+                logger.info(f"Discovering Autobyteus models from host: {host_url}")
+                client = None
                 try:
-
-
-
-                    continue
-
-                    llm_config = AutobyteusModelProvider._parse_llm_config(model_info["config"])
-                    if not llm_config:
-                        continue
-
-                    llm_model = LLMModel(
-                        name=model_info["name"],
-                        value=model_info["value"],
-                        provider=LLMProvider(model_info["provider"]), # Convert string to enum
-                        llm_class=AutobyteusLLM,
-                        canonical_name=model_info["canonical_name"], # Add canonical_name
-                        default_config=llm_config
-                    )
-
-                    LLMFactory.register_model(llm_model)
-                    registered_count += 1
-                    logger.debug(f"Registered model: {model_info['name']} with canonical name: {model_info['canonical_name']}")
-
+                    # Instantiate client for this specific host
+                    client = AutobyteusClient(server_url=host_url)
+                    response = client.get_available_models_sync()
                 except Exception as e:
-                    logger.
+                    logger.warning(f"Could not connect or fetch models from Autobyteus server at {host_url}: {e}")
+                    continue
+                finally:
+                    if client:
+                        client.sync_client.close()
+
+                if not AutobyteusModelProvider._validate_server_response(response):
                     continue
+
+                models = response.get('models', [])
+                host_registered_count = 0
+                for model_info in models:
+                    try:
+                        validation_result = AutobyteusModelProvider._validate_model_info(model_info)
+                        if not validation_result["valid"]:
+                            logger.warning(validation_result["message"])
+                            continue
+
+                        llm_config = AutobyteusModelProvider._parse_llm_config(model_info["config"])
+                        if not llm_config:
+                            continue
 
-
+                        llm_model = LLMModel(
+                            name=model_info["name"],
+                            value=model_info["value"],
+                            provider=LLMProvider(model_info["provider"]),
+                            llm_class=AutobyteusLLM,
+                            canonical_name=model_info["canonical_name"],
+                            runtime=LLMRuntime.AUTOBYTEUS,
+                            host_url=host_url,
+                            default_config=llm_config
+                        )
+
+                        LLMFactory.register_model(llm_model)
+                        host_registered_count += 1
+
+                    except Exception as e:
+                        logger.error(f"Failed to register Autobyteus model '{model_info.get('name')}' from {host_url}: {e}")
+
+                if host_registered_count > 0:
+                    logger.info(f"Registered {host_registered_count} models from Autobyteus host {host_url}")
+                total_registered_count += host_registered_count
+
+            if total_registered_count > 0:
+                logger.info(f"Finished Autobyteus discovery. Total models registered: {total_registered_count}")
 
         except Exception as e:
-            logger.error(f"
+            logger.error(f"An unexpected error occurred during Autobyteus model discovery: {e}", exc_info=True)
 
     @staticmethod
     def _validate_server_response(response: Dict[str, Any]) -> bool: