autobyteus 1.1.4__py3-none-any.whl → 1.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- autobyteus/agent/context/__init__.py +4 -2
- autobyteus/agent/context/agent_config.py +0 -4
- autobyteus/agent/context/agent_context_registry.py +73 -0
- autobyteus/agent/events/notifiers.py +4 -0
- autobyteus/agent/handlers/inter_agent_message_event_handler.py +7 -2
- autobyteus/agent/handlers/llm_complete_response_received_event_handler.py +19 -19
- autobyteus/agent/handlers/user_input_message_event_handler.py +15 -0
- autobyteus/agent/message/send_message_to.py +29 -23
- autobyteus/agent/runtime/agent_runtime.py +10 -2
- autobyteus/agent/sender_type.py +15 -0
- autobyteus/agent/streaming/agent_event_stream.py +6 -0
- autobyteus/agent/streaming/stream_event_payloads.py +12 -0
- autobyteus/agent/streaming/stream_events.py +3 -0
- autobyteus/agent/system_prompt_processor/tool_manifest_injector_processor.py +7 -4
- autobyteus/agent_team/__init__.py +1 -0
- autobyteus/agent_team/agent_team.py +93 -0
- autobyteus/agent_team/agent_team_builder.py +184 -0
- autobyteus/agent_team/base_agent_team.py +86 -0
- autobyteus/agent_team/bootstrap_steps/__init__.py +24 -0
- autobyteus/agent_team/bootstrap_steps/agent_configuration_preparation_step.py +73 -0
- autobyteus/agent_team/bootstrap_steps/agent_team_bootstrapper.py +54 -0
- autobyteus/agent_team/bootstrap_steps/agent_team_runtime_queue_initialization_step.py +25 -0
- autobyteus/agent_team/bootstrap_steps/base_agent_team_bootstrap_step.py +23 -0
- autobyteus/agent_team/bootstrap_steps/coordinator_initialization_step.py +41 -0
- autobyteus/agent_team/bootstrap_steps/coordinator_prompt_preparation_step.py +85 -0
- autobyteus/agent_team/bootstrap_steps/task_notifier_initialization_step.py +51 -0
- autobyteus/agent_team/bootstrap_steps/team_context_initialization_step.py +45 -0
- autobyteus/agent_team/context/__init__.py +17 -0
- autobyteus/agent_team/context/agent_team_config.py +33 -0
- autobyteus/agent_team/context/agent_team_context.py +61 -0
- autobyteus/agent_team/context/agent_team_runtime_state.py +56 -0
- autobyteus/agent_team/context/team_manager.py +147 -0
- autobyteus/agent_team/context/team_node_config.py +76 -0
- autobyteus/agent_team/events/__init__.py +29 -0
- autobyteus/agent_team/events/agent_team_event_dispatcher.py +39 -0
- autobyteus/agent_team/events/agent_team_events.py +53 -0
- autobyteus/agent_team/events/agent_team_input_event_queue_manager.py +21 -0
- autobyteus/agent_team/exceptions.py +8 -0
- autobyteus/agent_team/factory/__init__.py +9 -0
- autobyteus/agent_team/factory/agent_team_factory.py +99 -0
- autobyteus/agent_team/handlers/__init__.py +19 -0
- autobyteus/agent_team/handlers/agent_team_event_handler_registry.py +23 -0
- autobyteus/agent_team/handlers/base_agent_team_event_handler.py +16 -0
- autobyteus/agent_team/handlers/inter_agent_message_request_event_handler.py +61 -0
- autobyteus/agent_team/handlers/lifecycle_agent_team_event_handler.py +27 -0
- autobyteus/agent_team/handlers/process_user_message_event_handler.py +46 -0
- autobyteus/agent_team/handlers/tool_approval_team_event_handler.py +48 -0
- autobyteus/agent_team/phases/__init__.py +11 -0
- autobyteus/agent_team/phases/agent_team_operational_phase.py +19 -0
- autobyteus/agent_team/phases/agent_team_phase_manager.py +48 -0
- autobyteus/agent_team/runtime/__init__.py +13 -0
- autobyteus/agent_team/runtime/agent_team_runtime.py +82 -0
- autobyteus/agent_team/runtime/agent_team_worker.py +117 -0
- autobyteus/agent_team/shutdown_steps/__init__.py +17 -0
- autobyteus/agent_team/shutdown_steps/agent_team_shutdown_orchestrator.py +35 -0
- autobyteus/agent_team/shutdown_steps/agent_team_shutdown_step.py +42 -0
- autobyteus/agent_team/shutdown_steps/base_agent_team_shutdown_step.py +16 -0
- autobyteus/agent_team/shutdown_steps/bridge_cleanup_step.py +28 -0
- autobyteus/agent_team/shutdown_steps/sub_team_shutdown_step.py +41 -0
- autobyteus/agent_team/streaming/__init__.py +26 -0
- autobyteus/agent_team/streaming/agent_event_bridge.py +48 -0
- autobyteus/agent_team/streaming/agent_event_multiplexer.py +70 -0
- autobyteus/agent_team/streaming/agent_team_event_notifier.py +64 -0
- autobyteus/agent_team/streaming/agent_team_event_stream.py +33 -0
- autobyteus/agent_team/streaming/agent_team_stream_event_payloads.py +32 -0
- autobyteus/agent_team/streaming/agent_team_stream_events.py +56 -0
- autobyteus/agent_team/streaming/team_event_bridge.py +50 -0
- autobyteus/agent_team/task_notification/__init__.py +11 -0
- autobyteus/agent_team/task_notification/system_event_driven_agent_task_notifier.py +164 -0
- autobyteus/agent_team/task_notification/task_notification_mode.py +24 -0
- autobyteus/agent_team/utils/__init__.py +9 -0
- autobyteus/agent_team/utils/wait_for_idle.py +46 -0
- autobyteus/cli/agent_team_tui/__init__.py +4 -0
- autobyteus/cli/agent_team_tui/app.py +210 -0
- autobyteus/cli/agent_team_tui/state.py +180 -0
- autobyteus/cli/agent_team_tui/widgets/__init__.py +6 -0
- autobyteus/cli/agent_team_tui/widgets/agent_list_sidebar.py +149 -0
- autobyteus/cli/agent_team_tui/widgets/focus_pane.py +320 -0
- autobyteus/cli/agent_team_tui/widgets/logo.py +20 -0
- autobyteus/cli/agent_team_tui/widgets/renderables.py +77 -0
- autobyteus/cli/agent_team_tui/widgets/shared.py +60 -0
- autobyteus/cli/agent_team_tui/widgets/status_bar.py +14 -0
- autobyteus/cli/agent_team_tui/widgets/task_board_panel.py +82 -0
- autobyteus/events/event_types.py +7 -2
- autobyteus/llm/api/autobyteus_llm.py +11 -12
- autobyteus/llm/api/lmstudio_llm.py +10 -13
- autobyteus/llm/api/ollama_llm.py +8 -13
- autobyteus/llm/autobyteus_provider.py +73 -46
- autobyteus/llm/llm_factory.py +102 -140
- autobyteus/llm/lmstudio_provider.py +63 -48
- autobyteus/llm/models.py +83 -53
- autobyteus/llm/ollama_provider.py +69 -61
- autobyteus/llm/ollama_provider_resolver.py +1 -0
- autobyteus/llm/providers.py +13 -13
- autobyteus/llm/runtimes.py +11 -0
- autobyteus/task_management/__init__.py +43 -0
- autobyteus/task_management/base_task_board.py +68 -0
- autobyteus/task_management/converters/__init__.py +11 -0
- autobyteus/task_management/converters/task_board_converter.py +64 -0
- autobyteus/task_management/converters/task_plan_converter.py +48 -0
- autobyteus/task_management/deliverable.py +16 -0
- autobyteus/task_management/deliverables/__init__.py +8 -0
- autobyteus/task_management/deliverables/file_deliverable.py +15 -0
- autobyteus/task_management/events.py +27 -0
- autobyteus/task_management/in_memory_task_board.py +126 -0
- autobyteus/task_management/schemas/__init__.py +15 -0
- autobyteus/task_management/schemas/deliverable_schema.py +13 -0
- autobyteus/task_management/schemas/plan_definition.py +35 -0
- autobyteus/task_management/schemas/task_status_report.py +27 -0
- autobyteus/task_management/task_plan.py +110 -0
- autobyteus/task_management/tools/__init__.py +14 -0
- autobyteus/task_management/tools/get_task_board_status.py +68 -0
- autobyteus/task_management/tools/publish_task_plan.py +113 -0
- autobyteus/task_management/tools/update_task_status.py +135 -0
- autobyteus/tools/bash/bash_executor.py +59 -14
- autobyteus/tools/mcp/config_service.py +63 -58
- autobyteus/tools/mcp/server/http_managed_mcp_server.py +14 -2
- autobyteus/tools/mcp/server/stdio_managed_mcp_server.py +14 -2
- autobyteus/tools/mcp/server_instance_manager.py +30 -4
- autobyteus/tools/mcp/tool_registrar.py +103 -50
- autobyteus/tools/parameter_schema.py +17 -11
- autobyteus/tools/registry/tool_definition.py +24 -29
- autobyteus/tools/tool_category.py +1 -0
- autobyteus/tools/usage/formatters/default_json_example_formatter.py +78 -3
- autobyteus/tools/usage/formatters/default_xml_example_formatter.py +23 -3
- autobyteus/tools/usage/formatters/gemini_json_example_formatter.py +6 -0
- autobyteus/tools/usage/formatters/google_json_example_formatter.py +7 -0
- autobyteus/tools/usage/formatters/openai_json_example_formatter.py +6 -4
- autobyteus/tools/usage/parsers/gemini_json_tool_usage_parser.py +23 -7
- autobyteus/tools/usage/parsers/provider_aware_tool_usage_parser.py +14 -25
- autobyteus/tools/usage/providers/__init__.py +2 -12
- autobyteus/tools/usage/providers/tool_manifest_provider.py +36 -29
- autobyteus/tools/usage/registries/__init__.py +7 -12
- autobyteus/tools/usage/registries/tool_formatter_pair.py +15 -0
- autobyteus/tools/usage/registries/tool_formatting_registry.py +58 -0
- autobyteus/tools/usage/registries/tool_usage_parser_registry.py +55 -0
- {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/METADATA +3 -3
- {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/RECORD +146 -72
- examples/agent_team/__init__.py +1 -0
- examples/run_browser_agent.py +17 -15
- examples/run_google_slides_agent.py +17 -16
- examples/run_poem_writer.py +22 -12
- examples/run_sqlite_agent.py +17 -15
- autobyteus/tools/mcp/call_handlers/__init__.py +0 -16
- autobyteus/tools/mcp/call_handlers/base_handler.py +0 -40
- autobyteus/tools/mcp/call_handlers/stdio_handler.py +0 -76
- autobyteus/tools/mcp/call_handlers/streamable_http_handler.py +0 -55
- autobyteus/tools/usage/providers/json_example_provider.py +0 -32
- autobyteus/tools/usage/providers/json_schema_provider.py +0 -35
- autobyteus/tools/usage/providers/json_tool_usage_parser_provider.py +0 -28
- autobyteus/tools/usage/providers/xml_example_provider.py +0 -28
- autobyteus/tools/usage/providers/xml_schema_provider.py +0 -29
- autobyteus/tools/usage/providers/xml_tool_usage_parser_provider.py +0 -26
- autobyteus/tools/usage/registries/json_example_formatter_registry.py +0 -51
- autobyteus/tools/usage/registries/json_schema_formatter_registry.py +0 -51
- autobyteus/tools/usage/registries/json_tool_usage_parser_registry.py +0 -42
- autobyteus/tools/usage/registries/xml_example_formatter_registry.py +0 -30
- autobyteus/tools/usage/registries/xml_schema_formatter_registry.py +0 -33
- autobyteus/tools/usage/registries/xml_tool_usage_parser_registry.py +0 -30
- examples/workflow/__init__.py +0 -1
- examples/workflow/run_basic_research_workflow.py +0 -189
- examples/workflow/run_code_review_workflow.py +0 -269
- examples/workflow/run_debate_workflow.py +0 -212
- examples/workflow/run_workflow_with_tui.py +0 -153
- {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/WHEEL +0 -0
- {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/licenses/LICENSE +0 -0
- {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/top_level.txt +0 -0
autobyteus/llm/models.py
CHANGED
@@ -1,8 +1,10 @@
 import logging
 from typing import TYPE_CHECKING, Type, Optional, List, Iterator
 from dataclasses import dataclass
+from urllib.parse import urlparse
 
 from autobyteus.llm.providers import LLMProvider
+from autobyteus.llm.runtimes import LLMRuntime
 from autobyteus.llm.utils.llm_config import LLMConfig
 
 if TYPE_CHECKING:
@@ -13,9 +15,14 @@ logger = logging.getLogger(__name__)
 
 @dataclass
 class ModelInfo:
-    """A
-
+    """A detailed public data structure for model information."""
+    model_identifier: str
+    display_name: str
+    value: str
     canonical_name: str
+    provider: str
+    runtime: str
+    host_url: Optional[str] = None
 
 @dataclass
 class ProviderModelGroup:
@@ -33,62 +40,56 @@ class LLMModelMeta(type):
         Allows iteration over LLMModel instances (e.g., `for model in LLMModel:`).
         Ensures that the LLMFactory has initialized and registered all models.
         """
-        # Import LLMFactory locally to prevent circular import issues at module load time.
         from autobyteus.llm.llm_factory import LLMFactory
         LLMFactory.ensure_initialized()
+
+        # Iterates over all registered models from the factory's internal dictionary
+        for model in LLMFactory._models_by_identifier.values():
+            yield model
 
-
-        yield from models
-
-    def __getitem__(cls, name_or_value: str) -> 'LLMModel':
+    def __getitem__(cls, name_or_identifier: str) -> 'LLMModel':
         """
-        Allows dictionary-like access to LLMModel instances by name
-
-        Search is performed by name first, then by value.
+        Allows dictionary-like access to LLMModel instances by name or identifier.
+        If a non-unique name is provided that matches multiple models, it raises an error.
         """
-        # Import LLMFactory locally to prevent circular import issues at module load time.
         from autobyteus.llm.llm_factory import LLMFactory
-        LLMFactory
+        factory = LLMFactory()
+        factory.ensure_initialized()
 
-        #
-
-
-
+        # First, try a direct lookup by unique model_identifier
+        model = factory._models_by_identifier.get(name_or_identifier)
+        if model:
+            return model
+
+        # If not found, search by name (which might not be unique)
+        found_models = [m for m in factory._models_by_identifier.values() if m.name == name_or_identifier]
+
+        if len(found_models) == 1:
+            return found_models[0]
 
-
-
-
-
+        if len(found_models) > 1:
+            # Ambiguous name, guide the user to provide a unique identifier
+            identifiers = [m.model_identifier for m in found_models]
+            raise KeyError(
+                f"Model name '{name_or_identifier}' is ambiguous and exists on multiple runtimes. "
+                f"Please use one of the unique model identifiers: {identifiers}"
+            )
 
-        #
-        available_models =
-        raise KeyError(f"Model '{
+        # If not found by identifier or name, raise KeyError
+        available_models = list(factory._models_by_identifier.keys())
+        raise KeyError(f"Model '{name_or_identifier}' not found. Available models are: {available_models}")
 
     def __len__(cls) -> int:
         """
         Allows getting the number of registered models (e.g., `len(LLMModel)`).
         """
-        # Import LLMFactory locally.
         from autobyteus.llm.llm_factory import LLMFactory
         LLMFactory.ensure_initialized()
-
-        count = 0
-        for models in LLMFactory._models_by_provider.values():
-            count += len(models)
-        return count
+        return len(LLMFactory._models_by_identifier)
 
 class LLMModel(metaclass=LLMModelMeta):
     """
-    Represents a single model's metadata
-    - name (str): A human-readable label, e.g. "gpt-4o"
-    - value (str): A unique identifier used in code or APIs, e.g. "gpt-4o"
-    - canonical_name (str): A shorter, standardized reference name for prompts, e.g. "gpt-4o" or "claude-3.7"
-    - provider (LLMProvider): The provider enum
-    - llm_class (Type[BaseLLM]): Which Python class to instantiate
-    - default_config (LLMConfig): Default configuration (token limit, etc.)
-
-    Each model also exposes a create_llm() method to instantiate the underlying class.
-    Supports Enum-like access via `LLMModel['MODEL_NAME']` and iteration `for model in LLMModel:`.
+    Represents a single model's metadata and connection properties.
     """
 
     def __init__(
@@ -98,7 +99,9 @@ class LLMModel(metaclass=LLMModelMeta):
         provider: LLMProvider,
         llm_class: Type["BaseLLM"],
         canonical_name: str,
-        default_config: Optional[LLMConfig] = None
+        default_config: Optional[LLMConfig] = None,
+        runtime: LLMRuntime = LLMRuntime.API,
+        host_url: Optional[str] = None
     ):
         self._name = name
         self._value = value
@@ -106,13 +109,32 @@ class LLMModel(metaclass=LLMModelMeta):
         self.provider = provider
         self.llm_class = llm_class
         self.default_config = default_config if default_config else LLMConfig()
+        self.runtime = runtime
+        self.host_url = host_url
+        self._model_identifier = self._generate_identifier()
+
+    def _generate_identifier(self) -> str:
+        """Generates the globally unique model identifier."""
+        if self.runtime == LLMRuntime.API:
+            return self.name
+
+        if not self.host_url:
+            raise ValueError(f"host_url is required for runtime '{self.runtime.value}' on model '{self.name}'")
+
+        try:
+            parsed_url = urlparse(self.host_url)
+            host_and_port = parsed_url.netloc
+            return f"{self.name}:{self.runtime.value.lower()}@{host_and_port}"
+        except Exception as e:
+            logger.error(f"Failed to parse host_url '{self.host_url}' for identifier generation: {e}")
+            # Fallback to a simpler, but still likely unique, identifier
+            return f"{self.name}:{self.runtime.value.lower()}@{self.host_url}"
 
     @property
     def name(self) -> str:
         """
-
-
-        Example: "gpt-4o"
+        The model's original name as expected by the runtime's API.
+        Example: "llama3:latest", "gpt-4o"
         """
         return self._name
 
@@ -120,16 +142,24 @@ class LLMModel(metaclass=LLMModelMeta):
     def value(self) -> str:
         """
         The underlying unique identifier for this model (e.g. an API model string).
-        Example: "gpt-4o"
+        Often the same as `name`. Example: "gpt-4o"
         """
         return self._value
+
+    @property
+    def model_identifier(self) -> str:
+        """
+        A globally unique, dynamically generated identifier for use within the system.
+        Example: "llama3:latest:ollama@localhost:11434"
+        """
+        return self._model_identifier
 
     @property
     def canonical_name(self) -> str:
         """
         A standardized, shorter reference name for this model.
         Useful for prompt engineering and cross-referencing similar models.
-        Example: "gpt-4o"
+        Example: "gpt-4o", "llama3"
        """
         return self._canonical_name
 
@@ -145,16 +175,16 @@ class LLMModel(metaclass=LLMModelMeta):
         Returns:
             BaseLLM: An instance of the LLM.
         """
-        config_to_use =
-
+        config_to_use = self.default_config
+        if llm_config:
+            # Create a copy to avoid modifying the default config
+            config_to_use = LLMConfig.from_dict(self.default_config.to_dict())
+            config_to_use.merge_with(llm_config)
+
         return self.llm_class(model=self, llm_config=config_to_use)
 
     def __repr__(self):
         return (
-            f"LLMModel(
-            f"
-            f"provider='{self.provider.name}', llm_class='{self.llm_class.__name__}')"
+            f"LLMModel(identifier='{self.model_identifier}', name='{self.name}', "
+            f"provider='{self.provider.name}', runtime='{self.runtime.name}')"
        )
-
-    # __class_getitem__ is now handled by the metaclass LLMModelMeta's __getitem__
-    # No need to define it here anymore.
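Taken together, the models.py changes let a model be addressed either by its runtime-local name or by the new globally unique identifier. A minimal sketch of the resulting lookup behavior, based only on the diff above (the model names and the Ollama host shown are hypothetical and depend on what is actually registered):

```python
from autobyteus.llm.models import LLMModel

# Iterate all registered models; each now carries runtime and host information.
for model in LLMModel:
    print(model.model_identifier, model.provider.name, model.runtime.name)

# Unambiguous lookups still work by plain name (API models use name as identifier).
gpt = LLMModel["gpt-4o"]

# If "llama3:latest" is served by more than one host, the bare name raises
# KeyError and the message lists the unique identifiers to use instead, e.g.:
llama = LLMModel["llama3:latest:ollama@localhost:11434"]
llm = llama.create_llm()  # instantiates the model's llm_class with merged config
```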
autobyteus/llm/ollama_provider.py
CHANGED
@@ -1,9 +1,10 @@
 from autobyteus.llm.models import LLMModel
 from autobyteus.llm.api.ollama_llm import OllamaLLM
 from autobyteus.llm.providers import LLMProvider
+from autobyteus.llm.runtimes import LLMRuntime
 from autobyteus.llm.utils.llm_config import LLMConfig, TokenPricingConfig
 from autobyteus.llm.ollama_provider_resolver import OllamaProviderResolver
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, List
 import os
 import logging
 from ollama import Client
@@ -17,7 +18,22 @@ logger = logging.getLogger(__name__)
 
 class OllamaModelProvider:
     DEFAULT_OLLAMA_HOST = 'http://localhost:11434'
-    CONNECTION_TIMEOUT = 5.0
+    CONNECTION_TIMEOUT = 5.0
+
+    @staticmethod
+    def _get_hosts() -> List[str]:
+        """Gets Ollama hosts from env vars, supporting comma-separated list."""
+        # New multi-host variable
+        hosts_str = os.getenv('OLLAMA_HOSTS')
+        if hosts_str:
+            return [host.strip() for host in hosts_str.split(',')]
+
+        # Legacy single-host variable for backward compatibility
+        legacy_host = os.getenv('DEFAULT_OLLAMA_HOST')
+        if legacy_host:
+            return [legacy_host]
+
+        return [OllamaModelProvider.DEFAULT_OLLAMA_HOST]
 
     @staticmethod
     def is_valid_url(url: str) -> bool:
@@ -31,73 +47,65 @@ class OllamaModelProvider:
     @staticmethod
     def discover_and_register():
         """
-        Discovers all models
-        and registers them directly using LLMFactory.
-
-        Handles various connection and operational errors gracefully to prevent
-        application crashes when Ollama is unavailable.
+        Discovers all models from all configured Ollama hosts and registers them.
         """
         try:
-            from autobyteus.llm.llm_factory import LLMFactory
-
-
-
-
-
-
-
-
-
-
-
-
-            logger.warning(f"Could not connect to Ollama server at {ollama_host}. "
-                           f"Please ensure Ollama is running. Error: {str(e)}")
-            return
-        except httpx.TimeoutException as e:
-            logger.warning(f"Connection to Ollama server timed out. "
-                           f"Please check if the server is responsive. Error: {str(e)}")
-            return
-        except httpx.HTTPError as e:
-            logger.warning(f"HTTP error occurred while connecting to Ollama: {str(e)}")
-            return
-
-        try:
-            models = response['models']
-        except (KeyError, TypeError) as e:
-            logger.error(f"Unexpected response format from Ollama server: {str(e)}")
-            return
-
-        registered_count = 0
-        for model_info in models:
+            from autobyteus.llm.llm_factory import LLMFactory
+
+            hosts = OllamaModelProvider._get_hosts()
+            total_registered_count = 0
+
+            for host_url in hosts:
+                if not OllamaModelProvider.is_valid_url(host_url):
+                    logger.error(f"Invalid Ollama host URL provided: '{host_url}', skipping.")
+                    continue
+
+                logger.info(f"Discovering Ollama models from host: {host_url}")
+                client = Client(host=host_url)
+
                try:
+                    response = client.list()
+                    models = response.get('models', [])
+                except httpx.ConnectError:
+                    logger.warning(f"Could not connect to Ollama server at {host_url}. Please ensure it's running.")
+                    continue
+                except Exception as e:
+                    logger.error(f"Failed to fetch models from {host_url}: {e}")
+                    continue
+
+                host_registered_count = 0
+                for model_info in models:
                    model_name = model_info.get('model')
                    if not model_name:
                        continue
 
-
-
+                    try:
+                        provider = OllamaProviderResolver.resolve(model_name)
 
-
-
-
-
-
-
-
-
-
-
+                        llm_model = LLMModel(
+                            name=model_name,
+                            value=model_name,
+                            provider=provider,
+                            llm_class=OllamaLLM,
+                            canonical_name=model_name,
+                            runtime=LLMRuntime.OLLAMA,
+                            host_url=host_url,
+                            default_config=LLMConfig(
+                                pricing_config=TokenPricingConfig(0.0, 0.0)  # Local models are free
+                            )
                        )
-
-
-
-
-
-
-
-
+                        LLMFactory.register_model(llm_model)
+                        host_registered_count += 1
+                    except Exception as e:
+                        logger.warning(f"Failed to register model '{model_name}' from host {host_url}: {e}")
+
+                if host_registered_count > 0:
+                    logger.info(f"Registered {host_registered_count} models from Ollama host {host_url}")
+                total_registered_count += host_registered_count
 
+            if total_registered_count > 0:
+                logger.info(f"Finished Ollama discovery. Total models registered: {total_registered_count}")
+
        except Exception as e:
-            logger.error(f"
-
+            logger.error(f"An unexpected error occurred during Ollama model discovery: {e}")
+
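Based on the new `_get_hosts()`, discovery can fan out across several Ollama servers. A sketch of how a deployment might configure this (the host URLs are hypothetical; `OLLAMA_HOSTS` takes precedence over the legacy `DEFAULT_OLLAMA_HOST` variable):

```python
import os
from autobyteus.llm.ollama_provider import OllamaModelProvider

# Comma-separated list of Ollama servers; whitespace around entries is stripped.
os.environ["OLLAMA_HOSTS"] = "http://localhost:11434, http://gpu-box:11434"

# Registers every model found on every reachable host. Invalid or unreachable
# hosts are logged and skipped rather than aborting the whole discovery pass.
OllamaModelProvider.discover_and_register()
```

Each registered model gets `runtime=LLMRuntime.OLLAMA` and the originating `host_url`, which is what makes the per-host `model_identifier` values unique.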
autobyteus/llm/ollama_provider_resolver.py
CHANGED
@@ -13,6 +13,7 @@ class OllamaProviderResolver:
     # A mapping from keywords to providers. The list is ordered to handle
     # potential overlaps, though current keywords are distinct.
     KEYWORD_PROVIDER_MAP = [
+        (['gpt'], LLMProvider.OPENAI),
         (['gemma', 'gemini'], LLMProvider.GEMINI),
         (['llama'], LLMProvider.GROQ),
         (['mistral'], LLMProvider.MISTRAL),
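The resolver maps an Ollama model name to a logical provider via an ordered keyword scan; the new `gpt` entry means locally served GPT-named models now resolve to `LLMProvider.OPENAI`. A standalone sketch of the first-match scan this table implies (the fallback value is an assumption, since the resolver's default is not shown in this diff):

```python
# Illustrative re-implementation of the keyword scan; the real logic lives in
# autobyteus/llm/ollama_provider_resolver.py.
KEYWORD_PROVIDER_MAP = [
    (["gpt"], "OPENAI"),
    (["gemma", "gemini"], "GEMINI"),
    (["llama"], "GROQ"),
    (["mistral"], "MISTRAL"),
]

def resolve(model_name: str) -> str:
    lowered = model_name.lower()
    for keywords, provider in KEYWORD_PROVIDER_MAP:
        if any(keyword in lowered for keyword in keywords):
            return provider
    return "OLLAMA"  # assumed fallback; not visible in this diff

print(resolve("gpt-4o-mini"))    # OPENAI
print(resolve("llama3:latest"))  # GROQ
```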
autobyteus/llm/providers.py
CHANGED
@@ -1,16 +1,16 @@
 from enum import Enum
 
 class LLMProvider(Enum):
-    OPENAI = "
-    ANTHROPIC = "
-    MISTRAL = "
-    GROQ = "
-    GEMINI = "
-    NVIDIA = "
-    PERPLEXITY = "
-    OLLAMA = "
-    DEEPSEEK = "
-    GROK = "
-    AUTOBYTEUS = "
-    KIMI = "
-    LMSTUDIO = "
+    OPENAI = "OPENAI"
+    ANTHROPIC = "ANTHROPIC"
+    MISTRAL = "MISTRAL"
+    GROQ = "GROQ"
+    GEMINI = "GEMINI"
+    NVIDIA = "NVIDIA"
+    PERPLEXITY = "PERPLEXITY"
+    OLLAMA = "OLLAMA"
+    DEEPSEEK = "DEEPSEEK"
+    GROK = "GROK"
+    AUTOBYTEUS = "AUTOBYTEUS"
+    KIMI = "KIMI"
+    LMSTUDIO = "LMSTUDIO"
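Every `LLMProvider` value is now the uppercase member name (the previous string values are truncated in this rendering, so only the new form is certain). A quick illustration of the consequence for value-based lookups:

```python
from autobyteus.llm.providers import LLMProvider

# Name and value now coincide, so both access styles resolve to the same member.
assert LLMProvider["OPENAI"] is LLMProvider("OPENAI")
assert LLMProvider.OPENAI.value == "OPENAI"

# Any provider strings persisted under the pre-1.1.5 values (not visible in
# this rendering) would need migrating to the new uppercase values.
```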
autobyteus/llm/runtimes.py
ADDED
@@ -0,0 +1,11 @@
+from enum import Enum
+
+class LLMRuntime(Enum):
+    """
+    Represents the serving layer or environment where an LLM model is executed.
+    This is distinct from the LLMProvider, which is the creator of the model.
+    """
+    API = "api"
+    OLLAMA = "ollama"
+    LMSTUDIO = "lmstudio"
+    AUTOBYTEUS = "autobyteus"
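The new enum separates who made a model (`LLMProvider`) from where it is served (`LLMRuntime`), so the same weights can appear under several runtimes. A small sketch of the distinction (the pairing shown is hypothetical):

```python
from autobyteus.llm.providers import LLMProvider
from autobyteus.llm.runtimes import LLMRuntime

# A Llama-family model served by a local Ollama instance: the provider comes
# from the resolver's 'llama' keyword mapping, the runtime from the serving layer.
provider = LLMProvider.GROQ
runtime = LLMRuntime.OLLAMA

print(runtime.value)  # "ollama", the lowercase token used in model identifiers
```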
autobyteus/task_management/__init__.py
ADDED
@@ -0,0 +1,43 @@
+# file: autobyteus/autobyteus/task_management/__init__.py
+"""
+This package defines components for task management and state tracking,
+including task plans and live task boards. It is designed to be a general-purpose
+module usable by various components, such as agents or agent teams.
+"""
+from .task_plan import TaskPlan, Task
+from .schemas import (TaskPlanDefinitionSchema, TaskDefinitionSchema, TaskStatusReportSchema,
+                      TaskStatusReportItemSchema, FileDeliverableSchema)
+from .base_task_board import BaseTaskBoard, TaskStatus
+from .in_memory_task_board import InMemoryTaskBoard
+from .deliverable import FileDeliverable
+from .tools import GetTaskBoardStatus, PublishTaskPlan, UpdateTaskStatus
+from .converters import TaskBoardConverter, TaskPlanConverter
+from .events import BaseTaskBoardEvent, TaskPlanPublishedEvent, TaskStatusUpdatedEvent
+
+# For convenience, we can alias InMemoryTaskBoard as the default TaskBoard.
+# This allows other parts of the code to import `TaskBoard` without needing
+# to know the specific implementation being used by default.
+TaskBoard = InMemoryTaskBoard
+
+__all__ = [
+    "TaskPlan",
+    "Task",
+    "TaskPlanDefinitionSchema",
+    "TaskDefinitionSchema",
+    "TaskStatusReportSchema",
+    "TaskStatusReportItemSchema",
+    "FileDeliverableSchema",
+    "BaseTaskBoard",
+    "TaskStatus",
+    "InMemoryTaskBoard",
+    "TaskBoard",  # Exposing the alias
+    "FileDeliverable",
+    "GetTaskBoardStatus",
+    "PublishTaskPlan",
+    "UpdateTaskStatus",
+    "TaskBoardConverter",
+    "TaskPlanConverter",
+    "BaseTaskBoardEvent",
+    "TaskPlanPublishedEvent",
+    "TaskStatusUpdatedEvent",
+]
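A sketch of how a consumer might use the package's default alias, assuming `InMemoryTaskBoard` keeps the base constructor signature shown below in `base_task_board.py` (the team id is hypothetical):

```python
from autobyteus.task_management import TaskBoard, TaskStatus

# TaskBoard is an alias for InMemoryTaskBoard, so callers stay decoupled
# from the concrete implementation chosen as the default.
board = TaskBoard(team_id="team-demo")
print(TaskStatus.COMPLETED.is_terminal())  # True
print(TaskStatus.BLOCKED.is_terminal())    # False
```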
autobyteus/task_management/base_task_board.py
ADDED
@@ -0,0 +1,68 @@
+# file: autobyteus/autobyteus/task_management/base_task_board.py
+"""
+Defines the abstract interface for a TaskBoard and its related enums.
+"""
+import logging
+from abc import ABC, abstractmethod
+from enum import Enum
+from typing import Dict, Any, List, Optional
+
+from autobyteus.events.event_emitter import EventEmitter
+from .task_plan import Task, TaskPlan
+
+logger = logging.getLogger(__name__)
+
+class TaskStatus(str, Enum):
+    """Enumerates the possible lifecycle states of a task on the TaskBoard."""
+    NOT_STARTED = "not_started"
+    IN_PROGRESS = "in_progress"
+    COMPLETED = "completed"
+    BLOCKED = "blocked"
+    FAILED = "failed"
+
+    def is_terminal(self) -> bool:
+        """Returns True if the status is a final state."""
+        return self in {TaskStatus.COMPLETED, TaskStatus.FAILED}
+
+class BaseTaskBoard(ABC, EventEmitter):
+    """
+    Abstract base class for a TaskBoard.
+
+    This class defines the contract for any component that manages the live state
+    of a TaskPlan. Implementations could be in-memory, database-backed, or
+    connected to external services like JIRA. It inherits from EventEmitter to
+    broadcast state changes.
+    """
+
+    def __init__(self, team_id: str):
+        EventEmitter.__init__(self)
+        self.team_id = team_id
+        logger.debug(f"BaseTaskBoard initialized for team '{self.team_id}'.")
+
+    @abstractmethod
+    def load_task_plan(self, plan: TaskPlan) -> bool:
+        """
+        Loads a new plan onto the board, resetting its state.
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def update_task_status(self, task_id: str, status: TaskStatus, agent_name: str) -> bool:
+        """
+        Updates the status of a specific task.
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def get_status_overview(self) -> Dict[str, Any]:
+        """
+        Returns a serializable dictionary of the board's current state.
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def get_next_runnable_tasks(self) -> List[Task]:
+        """
+        Calculates which tasks can be executed now based on dependencies and statuses.
+        """
+        raise NotImplementedError
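Any backend (in-memory, database-backed, JIRA) implements the same four methods. A minimal illustrative subclass, independent of the shipped `InMemoryTaskBoard`; the `Task` attributes used (`task_id`, `dependencies`) match those referenced by the converter below, and the `"task_statuses"` key mirrors what `TaskBoardConverter` reads:

```python
from typing import Any, Dict, List
from autobyteus.task_management.base_task_board import BaseTaskBoard, TaskStatus
from autobyteus.task_management.task_plan import Task, TaskPlan

class TinyTaskBoard(BaseTaskBoard):
    """Sketch only; InMemoryTaskBoard is the real default implementation."""

    def load_task_plan(self, plan: TaskPlan) -> bool:
        self.current_plan = plan
        self._statuses = {t.task_id: TaskStatus.NOT_STARTED for t in plan.tasks}
        return True

    def update_task_status(self, task_id: str, status: TaskStatus, agent_name: str) -> bool:
        if task_id not in self._statuses:
            return False
        self._statuses[task_id] = status
        return True

    def get_status_overview(self) -> Dict[str, Any]:
        # Same shape the converter expects: a "task_statuses" mapping.
        return {"task_statuses": dict(self._statuses)}

    def get_next_runnable_tasks(self) -> List[Task]:
        # A task is runnable when unstarted and all its dependencies completed.
        return [
            t for t in self.current_plan.tasks
            if self._statuses[t.task_id] == TaskStatus.NOT_STARTED
            and all(self._statuses.get(d) == TaskStatus.COMPLETED for d in t.dependencies)
        ]
```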
autobyteus/task_management/converters/__init__.py
ADDED
@@ -0,0 +1,11 @@
+# file: autobyteus/autobyteus/task_management/converters/__init__.py
+"""
+Exposes the public converters for the task management module.
+"""
+from .task_board_converter import TaskBoardConverter
+from .task_plan_converter import TaskPlanConverter
+
+__all__ = [
+    "TaskBoardConverter",
+    "TaskPlanConverter",
+]
autobyteus/task_management/converters/task_board_converter.py
ADDED
@@ -0,0 +1,64 @@
+# file: autobyteus/autobyteus/task_management/converters/task_board_converter.py
+"""
+Contains converters for translating internal task management objects into
+LLM-friendly Pydantic schemas.
+"""
+import logging
+from typing import Optional
+
+from autobyteus.task_management.base_task_board import BaseTaskBoard
+from autobyteus.task_management.schemas import TaskStatusReportSchema, TaskStatusReportItemSchema
+
+logger = logging.getLogger(__name__)
+
+class TaskBoardConverter:
+    """A converter to transform TaskBoard state into LLM-friendly schemas."""
+
+    @staticmethod
+    def to_schema(task_board: BaseTaskBoard) -> Optional[TaskStatusReportSchema]:
+        """
+        Converts the current state of a TaskBoard into a TaskStatusReportSchema.
+
+        Args:
+            task_board: The task board instance to convert.
+
+        Returns:
+            A TaskStatusReportSchema object if a plan is loaded, otherwise None.
+        """
+        internal_status = task_board.get_status_overview()
+        plan = task_board.current_plan
+
+        if not plan:
+            logger.debug(f"TaskBoard for team '{task_board.team_id}' has no plan loaded. Cannot generate report.")
+            return None
+
+        # --- Conversion to LLM-Friendly Format ---
+
+        # 1. Create maps for easy lookup
+        id_to_name_map = {task.task_id: task.task_name for task in plan.tasks}
+
+        # 2. Build the list of LLM-friendly task items
+        report_items = []
+        for task in plan.tasks:
+            # Convert dependency IDs back to names. This is safe because the plan
+            # should have been hydrated already.
+            dep_names = [id_to_name_map[dep_id] for dep_id in task.dependencies]
+
+            report_item = TaskStatusReportItemSchema(
+                task_name=task.task_name,
+                assignee_name=task.assignee_name,
+                description=task.description,
+                dependencies=dep_names,
+                status=internal_status["task_statuses"].get(task.task_id),
+                file_deliverables=task.file_deliverables
+            )
+            report_items.append(report_item)
+
+        # 3. Assemble the final report object
+        status_report = TaskStatusReportSchema(
+            overall_goal=plan.overall_goal,
+            tasks=report_items
+        )
+
+        logger.debug(f"Successfully converted TaskBoard state to TaskStatusReportSchema for team '{task_board.team_id}'.")
+        return status_report