autobyteus 1.1.0__py3-none-any.whl → 1.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95)
  1. autobyteus/agent/bootstrap_steps/agent_bootstrapper.py +1 -1
  2. autobyteus/agent/bootstrap_steps/agent_runtime_queue_initialization_step.py +1 -1
  3. autobyteus/agent/bootstrap_steps/base_bootstrap_step.py +1 -1
  4. autobyteus/agent/bootstrap_steps/system_prompt_processing_step.py +1 -1
  5. autobyteus/agent/bootstrap_steps/workspace_context_initialization_step.py +1 -1
  6. autobyteus/agent/context/__init__.py +0 -5
  7. autobyteus/agent/context/agent_config.py +6 -2
  8. autobyteus/agent/context/agent_context.py +2 -5
  9. autobyteus/agent/context/agent_phase_manager.py +105 -5
  10. autobyteus/agent/context/agent_runtime_state.py +2 -2
  11. autobyteus/agent/context/phases.py +2 -0
  12. autobyteus/agent/events/__init__.py +0 -11
  13. autobyteus/agent/events/agent_events.py +0 -37
  14. autobyteus/agent/events/notifiers.py +25 -7
  15. autobyteus/agent/events/worker_event_dispatcher.py +1 -1
  16. autobyteus/agent/factory/agent_factory.py +6 -2
  17. autobyteus/agent/group/agent_group.py +16 -7
  18. autobyteus/agent/handlers/approved_tool_invocation_event_handler.py +28 -14
  19. autobyteus/agent/handlers/lifecycle_event_logger.py +1 -1
  20. autobyteus/agent/handlers/llm_complete_response_received_event_handler.py +4 -2
  21. autobyteus/agent/handlers/tool_invocation_request_event_handler.py +40 -15
  22. autobyteus/agent/handlers/tool_result_event_handler.py +12 -7
  23. autobyteus/agent/hooks/__init__.py +7 -0
  24. autobyteus/agent/hooks/base_phase_hook.py +11 -2
  25. autobyteus/agent/hooks/hook_definition.py +36 -0
  26. autobyteus/agent/hooks/hook_meta.py +37 -0
  27. autobyteus/agent/hooks/hook_registry.py +118 -0
  28. autobyteus/agent/input_processor/base_user_input_processor.py +6 -3
  29. autobyteus/agent/input_processor/passthrough_input_processor.py +2 -1
  30. autobyteus/agent/input_processor/processor_meta.py +1 -1
  31. autobyteus/agent/input_processor/processor_registry.py +19 -0
  32. autobyteus/agent/llm_response_processor/base_processor.py +6 -3
  33. autobyteus/agent/llm_response_processor/processor_meta.py +1 -1
  34. autobyteus/agent/llm_response_processor/processor_registry.py +19 -0
  35. autobyteus/agent/llm_response_processor/provider_aware_tool_usage_processor.py +2 -1
  36. autobyteus/agent/message/context_file_type.py +2 -3
  37. autobyteus/agent/phases/__init__.py +18 -0
  38. autobyteus/agent/phases/discover.py +52 -0
  39. autobyteus/agent/phases/manager.py +265 -0
  40. autobyteus/agent/phases/phase_enum.py +49 -0
  41. autobyteus/agent/phases/transition_decorator.py +40 -0
  42. autobyteus/agent/phases/transition_info.py +33 -0
  43. autobyteus/agent/remote_agent.py +1 -1
  44. autobyteus/agent/runtime/agent_runtime.py +4 -6
  45. autobyteus/agent/runtime/agent_worker.py +1 -1
  46. autobyteus/agent/streaming/agent_event_stream.py +58 -5
  47. autobyteus/agent/streaming/stream_event_payloads.py +24 -13
  48. autobyteus/agent/streaming/stream_events.py +14 -11
  49. autobyteus/agent/system_prompt_processor/base_processor.py +6 -3
  50. autobyteus/agent/system_prompt_processor/processor_meta.py +1 -1
  51. autobyteus/agent/system_prompt_processor/tool_manifest_injector_processor.py +45 -31
  52. autobyteus/agent/tool_invocation.py +29 -3
  53. autobyteus/agent/utils/wait_for_idle.py +1 -1
  54. autobyteus/agent/workspace/__init__.py +2 -0
  55. autobyteus/agent/workspace/base_workspace.py +33 -11
  56. autobyteus/agent/workspace/workspace_config.py +160 -0
  57. autobyteus/agent/workspace/workspace_definition.py +36 -0
  58. autobyteus/agent/workspace/workspace_meta.py +37 -0
  59. autobyteus/agent/workspace/workspace_registry.py +72 -0
  60. autobyteus/cli/__init__.py +4 -3
  61. autobyteus/cli/agent_cli.py +25 -207
  62. autobyteus/cli/cli_display.py +205 -0
  63. autobyteus/events/event_manager.py +2 -1
  64. autobyteus/events/event_types.py +3 -1
  65. autobyteus/llm/api/autobyteus_llm.py +2 -12
  66. autobyteus/llm/api/deepseek_llm.py +5 -5
  67. autobyteus/llm/api/grok_llm.py +5 -5
  68. autobyteus/llm/api/mistral_llm.py +4 -4
  69. autobyteus/llm/api/ollama_llm.py +2 -2
  70. autobyteus/llm/extensions/token_usage_tracking_extension.py +11 -1
  71. autobyteus/llm/llm_factory.py +106 -42
  72. autobyteus/llm/models.py +25 -29
  73. autobyteus/llm/ollama_provider.py +6 -2
  74. autobyteus/llm/ollama_provider_resolver.py +44 -0
  75. autobyteus/tools/__init__.py +2 -0
  76. autobyteus/tools/base_tool.py +7 -1
  77. autobyteus/tools/functional_tool.py +20 -5
  78. autobyteus/tools/mcp/call_handlers/stdio_handler.py +15 -1
  79. autobyteus/tools/mcp/config_service.py +106 -127
  80. autobyteus/tools/mcp/registrar.py +247 -59
  81. autobyteus/tools/mcp/types.py +5 -3
  82. autobyteus/tools/registry/tool_definition.py +8 -1
  83. autobyteus/tools/registry/tool_registry.py +18 -0
  84. autobyteus/tools/tool_category.py +11 -0
  85. autobyteus/tools/tool_meta.py +3 -1
  86. autobyteus/tools/tool_state.py +20 -0
  87. autobyteus/tools/usage/parsers/default_json_tool_usage_parser.py +3 -3
  88. autobyteus/tools/usage/parsers/default_xml_tool_usage_parser.py +2 -1
  89. autobyteus/tools/usage/parsers/gemini_json_tool_usage_parser.py +17 -19
  90. autobyteus/tools/usage/parsers/openai_json_tool_usage_parser.py +126 -77
  91. {autobyteus-1.1.0.dist-info → autobyteus-1.1.1.dist-info}/METADATA +11 -11
  92. {autobyteus-1.1.0.dist-info → autobyteus-1.1.1.dist-info}/RECORD +95 -78
  93. {autobyteus-1.1.0.dist-info → autobyteus-1.1.1.dist-info}/WHEEL +0 -0
  94. {autobyteus-1.1.0.dist-info → autobyteus-1.1.1.dist-info}/licenses/LICENSE +0 -0
  95. {autobyteus-1.1.0.dist-info → autobyteus-1.1.1.dist-info}/top_level.txt +0 -0
autobyteus/llm/llm_factory.py CHANGED
@@ -3,7 +3,7 @@ import logging
 import inspect
 
 from autobyteus.llm.autobyteus_provider import AutobyteusModelProvider
-from autobyteus.llm.models import LLMModel
+from autobyteus.llm.models import LLMModel, ModelInfo, ProviderModelGroup
 from autobyteus.llm.providers import LLMProvider
 from autobyteus.llm.utils.llm_config import LLMConfig, TokenPricingConfig
 from autobyteus.llm.base_llm import BaseLLM
@@ -36,21 +36,6 @@ class LLMFactory(metaclass=SingletonMeta):
             LLMFactory._initialize_registry()
             LLMFactory._initialized = True
 
-    @staticmethod
-    def _clear_model_class_attributes():
-        """
-        Clear all LLMModel instances that were set as class attributes on the LLMModel class.
-        This is necessary for reinitialization to avoid 'model already exists' errors.
-        """
-        # Get all attributes of the LLMModel class
-        for attr_name in list(vars(LLMModel).keys()):
-            attr_value = getattr(LLMModel, attr_name)
-            # Check if the attribute is an instance of LLMModel
-            if isinstance(attr_value, LLMModel):
-                logger.debug(f"Removing LLMModel class attribute: {attr_name}")
-                # Delete the attribute to avoid 'model already exists' errors during reinitialization
-                delattr(LLMModel, attr_name)
-
     @staticmethod
     def reinitialize():
         """
@@ -66,9 +51,6 @@ class LLMFactory(metaclass=SingletonMeta):
         try:
             logger.info("Reinitializing LLM model registry...")
 
-            # Clear all LLMModel instances set as class attributes
-            LLMFactory._clear_model_class_attributes()
-
             # Reset the initialized flag
             LLMFactory._initialized = False
 
@@ -94,7 +76,7 @@ class LLMFactory(metaclass=SingletonMeta):
         supported_models = [
             # OPENAI Provider Models
             LLMModel(
-                name="GPT_4o_API",
+                name="gpt-4o",
                 value="gpt-4o",
                 provider=LLMProvider.OPENAI,
                 llm_class=OpenAILLM,
@@ -106,7 +88,7 @@ class LLMFactory(metaclass=SingletonMeta):
                 )
             ),
             LLMModel(
-                name="o3_API",
+                name="o3",
                 value="o3",
                 provider=LLMProvider.OPENAI,
                 llm_class=OpenAILLM,
@@ -116,7 +98,7 @@ class LLMFactory(metaclass=SingletonMeta):
                 )
             ),
             LLMModel(
-                name="o4_MINI_API",
+                name="o4-mini",
                 value="o4-mini",
                 provider=LLMProvider.OPENAI,
                 llm_class=OpenAILLM,
@@ -127,7 +109,7 @@ class LLMFactory(metaclass=SingletonMeta):
             ),
             # MISTRAL Provider Models
             LLMModel(
-                name="MISTRAL_LARGE_API",
+                name="mistral-large",
                 value="mistral-large-latest",
                 provider=LLMProvider.MISTRAL,
                 llm_class=MistralLLM,
@@ -138,28 +120,48 @@ class LLMFactory(metaclass=SingletonMeta):
             ),
             # ANTHROPIC Provider Models
             LLMModel(
-                name="CLAUDE_3_7_SONNET_API",
-                value="claude-3-7-sonnet-20250219",
+                name="claude-4-opus",
+                value="claude-opus-4-20250514",
+                provider=LLMProvider.ANTHROPIC,
+                llm_class=ClaudeLLM,
+                canonical_name="claude-4-opus",
+                default_config=LLMConfig(
+                    pricing_config=TokenPricingConfig(15.00, 75.00)
+                )
+            ),
+            LLMModel(
+                name="bedrock-claude-4-opus",
+                value="anthropic.claude-opus-4-20250514-v1:0",
                 provider=LLMProvider.ANTHROPIC,
                 llm_class=ClaudeLLM,
-                canonical_name="claude-3.7",
+                canonical_name="claude-4-opus",
+                default_config=LLMConfig(
+                    pricing_config=TokenPricingConfig(15.00, 75.00)
+                )
+            ),
+            LLMModel(
+                name="claude-4-sonnet",
+                value="claude-sonnet-4-20250514",
+                provider=LLMProvider.ANTHROPIC,
+                llm_class=ClaudeLLM,
+                canonical_name="claude-4-sonnet",
                 default_config=LLMConfig(
                     pricing_config=TokenPricingConfig(3.00, 15.00)
                 )
             ),
             LLMModel(
-                name="BEDROCK_CLAUDE_3_7_SONNET_API",
-                value="anthropic.claude-3-7-sonnet-20250219-v1:0",
+                name="bedrock-claude-4-sonnet",
+                value="anthropic.claude-sonnet-4-20250514-v1:0",
                 provider=LLMProvider.ANTHROPIC,
                 llm_class=ClaudeLLM,
-                canonical_name="claude-3.7",
+                canonical_name="claude-4-sonnet",
                 default_config=LLMConfig(
                     pricing_config=TokenPricingConfig(3.00, 15.00)
                 )
             ),
             # DEEPSEEK Provider Models
             LLMModel(
-                name="DEEPSEEK_CHAT_API",
+                name="deepseek-chat",
                 value="deepseek-chat",
                 provider=LLMProvider.DEEPSEEK,
                 llm_class=DeepSeekLLM,
@@ -172,7 +174,7 @@ class LLMFactory(metaclass=SingletonMeta):
             ),
             # Adding deepseek-reasoner support
             LLMModel(
-                name="DEEPSEEK_REASONER_API",
+                name="deepseek-reasoner",
                 value="deepseek-reasoner",
                 provider=LLMProvider.DEEPSEEK,
                 llm_class=DeepSeekLLM,
@@ -185,28 +187,48 @@ class LLMFactory(metaclass=SingletonMeta):
             ),
             # GEMINI Provider Models
             LLMModel(
-                name="GEMINI_1_5_PRO_API",
-                value="gemini-1-5-pro",
+                name="gemini-2.5-pro",
+                value="gemini-2.5-pro",
+                provider=LLMProvider.GEMINI,
+                llm_class=OpenAILLM,
+                canonical_name="gemini-2.5-pro",
+                default_config=LLMConfig(
+                    pricing_config=TokenPricingConfig(2.50, 10.00)
+                )
+            ),
+            LLMModel(
+                name="gemini-2.5-flash",
+                value="gemini-2.5-flash",
                 provider=LLMProvider.GEMINI,
                 llm_class=OpenAILLM,
-                canonical_name="gemini-1.5-pro",
+                canonical_name="gemini-2.5-flash",
                 default_config=LLMConfig(
-                    pricing_config=TokenPricingConfig(1.25, 5.00)
+                    pricing_config=TokenPricingConfig(0.15, 0.60)
                 )
             ),
             LLMModel(
-                name="GEMINI_1_5_FLASH_API",
-                value="gemini-1-5-flash",
+                name="gemini-2.0-flash",
+                value="gemini-2.0-flash",
                 provider=LLMProvider.GEMINI,
                 llm_class=OpenAILLM,
-                canonical_name="gemini-1.5-flash",
+                canonical_name="gemini-2.0-flash",
+                default_config=LLMConfig(
+                    pricing_config=TokenPricingConfig(0.1, 0.40)
+                )
+            ),
+            LLMModel(
+                name="gemini-2.0-flash-lite",
+                value="gemini-2.0-flash-lite",
+                provider=LLMProvider.GEMINI,
+                llm_class=OpenAILLM,
+                canonical_name="gemini-2.0-flash-lite",
                 default_config=LLMConfig(
                     pricing_config=TokenPricingConfig(0.075, 0.30)
                 )
             ),
             # GROK Provider Models
             LLMModel(
-                name="GROK_2_1212_API",
+                name="grok-2-1212",
                 value="grok-2-1212",
                 provider=LLMProvider.GROK,
                 llm_class=GrokLLM,
@@ -228,7 +250,18 @@ class LLMFactory(metaclass=SingletonMeta):
     def register_model(model: LLMModel):
         """
         Register a new LLM model, storing it under its provider category.
+        If a model with the same name already exists, it will be replaced.
         """
+        # Using a flat list of all models to check for existing model by name
+        all_models = [m for models in LLMFactory._models_by_provider.values() for m in models]
+
+        for existing_model in all_models:
+            if existing_model.name == model.name:
+                logger.warning(f"Model with name '{model.name}' is being redefined.")
+                # Remove the old model from its provider list
+                LLMFactory._models_by_provider[existing_model.provider].remove(existing_model)
+                break
+
         models = LLMFactory._models_by_provider.setdefault(model.provider, [])
         models.append(model)
 
@@ -238,7 +271,7 @@ class LLMFactory(metaclass=SingletonMeta):
         Create an LLM instance for the specified model identifier.
 
         Args:
-            model_identifier (str): The model name or value to create an instance for.
+            model_identifier (str): The model name to create an instance for.
             llm_config (Optional[LLMConfig]): Configuration for the LLM. If None,
                 the model's default configuration is used.
 
@@ -251,7 +284,7 @@ class LLMFactory(metaclass=SingletonMeta):
         LLMFactory.ensure_initialized()
         for models in LLMFactory._models_by_provider.values():
             for model_instance in models:
-                if model_instance.value == model_identifier or model_instance.name == model_identifier:
+                if model_instance.name == model_identifier:
                     return model_instance.create_llm(llm_config)
         raise ValueError(f"Unsupported model: {model_identifier}")
 
@@ -296,7 +329,7 @@ class LLMFactory(metaclass=SingletonMeta):
         Get the canonical name for a model by its name.
 
         Args:
-            model_name (str): The model name (e.g., "GPT_4o_API")
+            model_name (str): The model name (e.g., "gpt_4o")
 
         Returns:
             Optional[str]: The canonical name if found, None otherwise
@@ -308,4 +341,35 @@ class LLMFactory(metaclass=SingletonMeta):
                 return model_instance.canonical_name
         return None
 
+    @staticmethod
+    def get_models_grouped_by_provider() -> List[ProviderModelGroup]:
+        """
+        Returns a list of all providers, each with a list of its available models,
+        sorted by provider name and model name. Providers with no models are included
+        with an empty model list.
+        """
+        LLMFactory.ensure_initialized()
+        result: List[ProviderModelGroup] = []
+        # Sort all providers from the enum by name for consistent order
+        all_providers_sorted = sorted(list(LLMProvider), key=lambda p: p.name)
+
+        for provider in all_providers_sorted:
+            # Get models for the current provider, defaults to [] if none are registered
+            models = LLMFactory._models_by_provider.get(provider, [])
+
+            # Sort the models for this provider by name
+            sorted_models = sorted(models, key=lambda model: model.name)
+
+            model_infos = [
+                ModelInfo(name=model.name, canonical_name=model.canonical_name)
+                for model in sorted_models
+            ]
+
+            result.append(ProviderModelGroup(
+                provider=provider.name,
+                models=model_infos
+            ))
+
+        return result
+
 default_llm_factory = LLMFactory()
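
Taken together, the llm_factory.py changes replace the old class-attribute registry with the factory's per-provider lists: create_llm now matches on the hyphenated name only, register_model replaces a same-named entry instead of relying on attribute collisions, and get_models_grouped_by_provider is a new enumeration API. A minimal usage sketch against the new surface (model name taken from the diff; runtime behavior assumed, not verified):

```python
from autobyteus.llm.llm_factory import LLMFactory, default_llm_factory

# Names are now lowercase and hyphenated; the old "GPT_4o_API" style no longer resolves.
llm = default_llm_factory.create_llm("gpt-4o")

# New in 1.1.1: list every provider with its registered models, sorted by name.
for group in LLMFactory.get_models_grouped_by_provider():
    print(group.provider, [m.name for m in group.models])
```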
autobyteus/llm/models.py CHANGED
@@ -1,5 +1,6 @@
 import logging
 from typing import TYPE_CHECKING, Type, Optional, List, Iterator
+from dataclasses import dataclass
 
 from autobyteus.llm.providers import LLMProvider
 from autobyteus.llm.utils.llm_config import LLMConfig
@@ -10,6 +11,18 @@ if TYPE_CHECKING:
 
 logger = logging.getLogger(__name__)
 
+@dataclass
+class ModelInfo:
+    """A simple data structure for essential model information."""
+    name: str
+    canonical_name: str
+
+@dataclass
+class ProviderModelGroup:
+    """A data structure to group models by their provider."""
+    provider: str
+    models: List[ModelInfo]
+
 class LLMModelMeta(type):
     """
     Metaclass for LLMModel to make it iterable and support item access like Enums.
@@ -24,15 +37,12 @@ class LLMModelMeta(type):
         from autobyteus.llm.llm_factory import LLMFactory
         LLMFactory.ensure_initialized()
 
-        for attr_name in dir(cls):
-            if not attr_name.startswith('_'): # Skip private/dunder attributes
-                attr_value = getattr(cls, attr_name)
-                if isinstance(attr_value, cls): # Check if it's an LLMModel instance
-                    yield attr_value
+        for models in LLMFactory._models_by_provider.values():
+            yield from models
 
     def __getitem__(cls, name_or_value: str) -> 'LLMModel':
         """
-        Allows dictionary-like access to LLMModel instances by name (e.g., 'GPT_4o_API')
+        Allows dictionary-like access to LLMModel instances by name (e.g., 'gpt-4o')
         or by value (e.g., 'gpt-4o').
         Search is performed by name first, then by value.
         """
@@ -40,13 +50,12 @@ class LLMModelMeta(type):
         from autobyteus.llm.llm_factory import LLMFactory
         LLMFactory.ensure_initialized()
 
-        # 1. Try to find by name first (e.g., LLMModel['GPT_4o_API'])
-        if hasattr(cls, name_or_value):
-            attribute = getattr(cls, name_or_value)
-            if isinstance(attribute, cls):
-                return attribute
+        # 1. Try to find by name first
+        for model in cls:
+            if model.name == name_or_value:
+                return model
 
-        # 2. If not found by name, iterate and find by value (e.g., LLMModel['gpt-4o'])
+        # 2. If not found by name, iterate and find by value
         for model in cls:
             if model.value == name_or_value:
                 return model
@@ -64,17 +73,14 @@ class LLMModelMeta(type):
         LLMFactory.ensure_initialized()
 
         count = 0
-        for attr_name in dir(cls):
-            if not attr_name.startswith('_'):
-                attr_value = getattr(cls, attr_name)
-                if isinstance(attr_value, cls):
-                    count += 1
+        for models in LLMFactory._models_by_provider.values():
+            count += len(models)
         return count
 
 class LLMModel(metaclass=LLMModelMeta):
     """
     Represents a single model's metadata:
-    - name (str): A human-readable label, e.g. "GPT-4 Official"
+    - name (str): A human-readable label, e.g. "gpt-4o"
     - value (str): A unique identifier used in code or APIs, e.g. "gpt-4o"
     - canonical_name (str): A shorter, standardized reference name for prompts, e.g. "gpt-4o" or "claude-3.7"
     - provider (LLMProvider): The provider enum
@@ -94,12 +100,6 @@ class LLMModel(metaclass=LLMModelMeta):
         canonical_name: str,
         default_config: Optional[LLMConfig] = None
     ):
-        # Validate name doesn't already exist as a class attribute
-        if hasattr(LLMModel, name):
-            existing_model = getattr(LLMModel, name)
-            if isinstance(existing_model, LLMModel):
-                logger.warning(f"Model with name '{name}' is being redefined. This is expected during reinitialization.")
-
         self._name = name
         self._value = value
         self._canonical_name = canonical_name
@@ -107,16 +107,12 @@ class LLMModel(metaclass=LLMModelMeta):
         self.llm_class = llm_class
         self.default_config = default_config if default_config else LLMConfig()
 
-        # Set this instance as a class attribute, making LLMModel.MODEL_NAME available.
-        logger.debug(f"Setting LLMModel class attribute: {name}")
-        setattr(LLMModel, name, self)
-
     @property
     def name(self) -> str:
         """
         A friendly or descriptive name for this model (could appear in UI).
         This is the key used for `LLMModel['MODEL_NAME']` access.
-        Example: "GPT_4o_API"
+        Example: "gpt-4o"
        """
        return self._name
 
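With the metaclass rewritten, enum-style access on LLMModel keeps working but is backed by LLMFactory._models_by_provider instead of dynamically-set class attributes. A short sketch of the access patterns the new metaclass supports, per the __iter__, __getitem__, and __len__ hunks above (assumes the registry initializes in your environment):

```python
from autobyteus.llm.models import LLMModel

# Iteration and len() now walk the factory's per-provider registry.
print(f"{len(LLMModel)} models registered")
for model in LLMModel:
    print(model.name, model.canonical_name)

# __getitem__ searches by name first, then by value.
gpt4o = LLMModel["gpt-4o"]
```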
autobyteus/llm/ollama_provider.py CHANGED
@@ -2,6 +2,7 @@ from autobyteus.llm.models import LLMModel
 from autobyteus.llm.api.ollama_llm import OllamaLLM
 from autobyteus.llm.providers import LLMProvider
 from autobyteus.llm.utils.llm_config import LLMConfig, TokenPricingConfig
+from autobyteus.llm.ollama_provider_resolver import OllamaProviderResolver
 from typing import TYPE_CHECKING
 import os
 import logging
@@ -39,7 +40,7 @@ class OllamaModelProvider:
         try:
             from autobyteus.llm.llm_factory import LLMFactory  # Local import to avoid circular dependency
 
-            ollama_host = os.getenv('OLLAMA_HOST', OllamaLLM.DEFAULT_OLLAMA_HOST)
+            ollama_host = os.getenv('DEFAULT_OLLAMA_HOST', OllamaLLM.DEFAULT_OLLAMA_HOST)
 
             if not OllamaModelProvider.is_valid_url(ollama_host):
                 logger.error(f"Invalid Ollama host URL: {ollama_host}")
@@ -73,11 +74,14 @@ class OllamaModelProvider:
                 model_name = model_info.get('model')
                 if not model_name:
                     continue
+
+                # Determine the provider based on the model name
+                provider = OllamaProviderResolver.resolve(model_name)
 
                 llm_model = LLMModel(
                     name=model_name,
                     value=model_name,
-                    provider=LLMProvider.OLLAMA,
+                    provider=provider,
                     llm_class=OllamaLLM,
                     canonical_name=model_name,  # Use model_name as the canonical_name
                     default_config=LLMConfig(
autobyteus/llm/ollama_provider_resolver.py ADDED
@@ -0,0 +1,44 @@
+from autobyteus.llm.providers import LLMProvider
+import logging
+
+logger = logging.getLogger(__name__)
+
+class OllamaProviderResolver:
+    """
+    A utility class to resolve the correct LLMProvider for Ollama models
+    based on keywords in their names. This helps attribute models to their
+    original creators (e.g., Google for 'gemma').
+    """
+
+    # A mapping from keywords to providers. The list is ordered to handle
+    # potential overlaps, though current keywords are distinct.
+    KEYWORD_PROVIDER_MAP = [
+        (['gemma', 'gemini'], LLMProvider.GEMINI),
+        (['llama'], LLMProvider.GROQ),
+        (['mistral'], LLMProvider.MISTRAL),
+        (['deepseek'], LLMProvider.DEEPSEEK),
+    ]
+
+    @staticmethod
+    def resolve(model_name: str) -> LLMProvider:
+        """
+        Resolves the LLMProvider for a given model name from Ollama.
+        It checks for keywords in the model name and returns the corresponding
+        provider. If no specific provider is found, it defaults to OLLAMA.
+
+        Args:
+            model_name (str): The name of the model discovered from Ollama (e.g., 'gemma:7b').
+
+        Returns:
+            LLMProvider: The resolved provider for the model.
+        """
+        lower_model_name = model_name.lower()
+
+        for keywords, provider in OllamaProviderResolver.KEYWORD_PROVIDER_MAP:
+            for keyword in keywords:
+                if keyword in lower_model_name:
+                    logger.debug(f"Resolved provider for model '{model_name}' to '{provider.name}' based on keyword '{keyword}'.")
+                    return provider
+
+        logger.debug(f"Model '{model_name}' did not match any specific provider keywords. Defaulting to OLLAMA provider.")
+        return LLMProvider.OLLAMA
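
A quick sketch of how the new resolver classifies discovered Ollama models (the model tags below are illustrative); note that the keyword map attributes 'llama' models to GROQ, and anything unmatched falls back to the generic OLLAMA provider:

```python
from autobyteus.llm.providers import LLMProvider
from autobyteus.llm.ollama_provider_resolver import OllamaProviderResolver

assert OllamaProviderResolver.resolve("gemma:7b") is LLMProvider.GEMINI
assert OllamaProviderResolver.resolve("deepseek-r1:14b") is LLMProvider.DEEPSEEK
assert OllamaProviderResolver.resolve("llama3:8b") is LLMProvider.GROQ
# No keyword match: default to the generic OLLAMA provider.
assert OllamaProviderResolver.resolve("qwen2.5:7b") is LLMProvider.OLLAMA
```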
autobyteus/tools/__init__.py CHANGED
@@ -10,6 +10,7 @@ from .base_tool import BaseTool
 from .functional_tool import tool # The @tool decorator
 from .parameter_schema import ParameterSchema, ParameterDefinition, ParameterType
 from .tool_config import ToolConfig # Configuration data object, primarily for class-based tools
+from .tool_category import ToolCategory
 
 # --- Re-export specific tools for easier access ---
 
@@ -47,6 +48,7 @@ __all__ = [
     "ParameterDefinition",
     "ParameterType",
     "ToolConfig",
+    "ToolCategory",
 
     # Re-exported functional tool instances
     "ask_user_input",
autobyteus/tools/base_tool.py CHANGED
@@ -9,10 +9,13 @@ from autobyteus.events.event_emitter import EventEmitter
 from autobyteus.events.event_types import EventType
 
 from .tool_meta import ToolMeta
+from .tool_state import ToolState
+
 if TYPE_CHECKING:
     from autobyteus.agent.context import AgentContext
     from autobyteus.tools.parameter_schema import ParameterSchema
     from autobyteus.tools.tool_config import ToolConfig
+    from .tool_state import ToolState
 
 logger = logging.getLogger('autobyteus')
 
@@ -25,7 +28,10 @@ class BaseTool(ABC, EventEmitter, metaclass=ToolMeta):
         self.agent_id: Optional[str] = None
         # The config is stored primarily for potential use by subclasses or future base features.
         self._config = config
-        logger.debug(f"BaseTool instance initializing for potential class {self.__class__.__name__}")
+        # Add a dedicated state dictionary for the tool instance
+        # CHANGED: Use ToolState class for explicit state management.
+        self.tool_state: 'ToolState' = ToolState()
+        logger.debug(f"BaseTool instance initializing for potential class {self.__class__.__name__}. tool_state initialized.")
 
     @classmethod
     def get_name(cls) -> str:
autobyteus/tools/functional_tool.py CHANGED
@@ -8,6 +8,7 @@ from autobyteus.tools.base_tool import BaseTool
 from autobyteus.tools.parameter_schema import ParameterSchema, ParameterDefinition, ParameterType
 from autobyteus.tools.tool_config import ToolConfig
 from autobyteus.tools.registry import default_tool_registry, ToolDefinition
+from autobyteus.tools.tool_category import ToolCategory
 
 if TYPE_CHECKING:
     from autobyteus.agent.context import AgentContext
@@ -28,15 +29,20 @@ class FunctionalTool(BaseTool):
                  config_schema: Optional[ParameterSchema],
                  is_async: bool,
                  expects_context: bool,
+                 expects_tool_state: bool,
                  func_param_names: TypingList[str],
                  instantiation_config: Optional[Dict[str, Any]] = None):
         super().__init__(config=ToolConfig(params=instantiation_config) if instantiation_config else None)
         self._original_func = original_func
         self._is_async = is_async
         self._expects_context = expects_context
+        self._expects_tool_state = expects_tool_state
         self._func_param_names = func_param_names
         self._instantiation_config = instantiation_config or {}
 
+        # This instance has its own state dictionary, inherited from BaseTool's __init__
+        # self.tool_state: Dict[str, Any] = {} # This is now handled by super().__init__()
+
         # Override instance methods to provide specific schema info
         self.get_name = lambda: name
         self.get_description = lambda: description
@@ -65,6 +71,9 @@ class FunctionalTool(BaseTool):
 
         if self._expects_context:
             call_args['context'] = context
+
+        if self._expects_tool_state:
+            call_args['tool_state'] = self.tool_state
 
         if self._is_async:
             return await self._original_func(**call_args)
@@ -143,15 +152,19 @@ def _get_parameter_type_from_hint(py_type: Any, param_name: str) -> Tuple[Parame
         logger.warning(f"Unmapped type hint {py_type} (actual_type: {actual_type}) for param '{param_name}'. Defaulting to ParameterType.STRING.")
         return ParameterType.STRING, None
 
-def _parse_signature(sig: inspect.Signature, tool_name: str) -> Tuple[TypingList[str], bool, ParameterSchema]:
+def _parse_signature(sig: inspect.Signature, tool_name: str) -> Tuple[TypingList[str], bool, bool, ParameterSchema]:
     func_param_names = []
     expects_context = False
+    expects_tool_state = False
     generated_arg_schema = ParameterSchema()
 
     for param_name, param_obj in sig.parameters.items():
         if param_name == "context":
             expects_context = True
-            continue
+            continue
+        if param_name == "tool_state":
+            expects_tool_state = True
+            continue
 
         func_param_names.append(param_name)
@@ -177,7 +190,7 @@ def _parse_signature(sig: inspect.Signature, tool_name: str) -> Tuple[TypingList
         )
         generated_arg_schema.add_parameter(schema_param)
 
-    return func_param_names, expects_context, generated_arg_schema
+    return func_param_names, expects_context, expects_tool_state, generated_arg_schema
 
 # --- The refactored @tool decorator ---
 
@@ -196,7 +209,7 @@ def tool(
 
     sig = inspect.signature(func)
     is_async = inspect.iscoroutinefunction(func)
-    func_param_names, expects_context, gen_arg_schema = _parse_signature(sig, tool_name)
+    func_param_names, expects_context, expects_tool_state, gen_arg_schema = _parse_signature(sig, tool_name)
 
     final_arg_schema = argument_schema if argument_schema is not None else gen_arg_schema
 
@@ -209,6 +222,7 @@ def tool(
         config_schema=config_schema,
         is_async=is_async,
         expects_context=expects_context,
+        expects_tool_state=expects_tool_state,
         func_param_names=func_param_names,
         instantiation_config=inst_config.params if inst_config else None
     )
@@ -221,7 +235,8 @@ def tool(
         argument_schema=final_arg_schema,
         config_schema=config_schema,
         custom_factory=factory,
-        tool_class=None
+        tool_class=None,
+        category=ToolCategory.LOCAL
     )
     default_tool_registry.register_tool(tool_def)
 
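For tool authors, the net effect of the functional_tool.py changes is a reserved tool_state parameter: like context, it is stripped from the generated argument schema and injected at call time with the instance's ToolState from BaseTool. A minimal sketch, assuming bare @tool decoration works as before and that ToolState is dict-like (the diff describes it as "a dedicated state dictionary"):

```python
from autobyteus.tools import tool

@tool
async def count_invocations(tool_state) -> str:
    """Counts how many times this tool instance has been called."""
    # 'tool_state' is injected by FunctionalTool; it never appears in the
    # schema shown to the LLM, so a model cannot supply it as an argument.
    tool_state["calls"] = tool_state.get("calls", 0) + 1
    return f"invoked {tool_state['calls']} time(s)"
```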
autobyteus/tools/mcp/call_handlers/stdio_handler.py CHANGED
@@ -1,5 +1,6 @@
 # file: autobyteus/autobyteus/tools/mcp/call_handlers/stdio_handler.py
 import logging
+import asyncio
 from typing import Dict, Any, cast, TYPE_CHECKING
 
 from .base_handler import McpCallHandler
@@ -11,6 +12,9 @@ if TYPE_CHECKING:
 
 logger = logging.getLogger(__name__)
 
+# A default timeout for STDIO subprocesses to prevent indefinite hangs.
+DEFAULT_STDIO_TIMEOUT = 30  # seconds
+
 class StdioMcpCallHandler(McpCallHandler):
     """Handles MCP tool calls over a stateless STDIO transport."""
 
@@ -23,6 +27,7 @@ class StdioMcpCallHandler(McpCallHandler):
         """
         Creates a new subprocess, establishes a session, and executes the
         requested tool call. It handles 'list_tools' as a special case.
+        Includes a timeout to prevent hanging on unresponsive subprocesses.
         """
         logger.debug(f"Handling STDIO call to tool '{remote_tool_name}' on server '{config.server_id}'.")
 
@@ -39,7 +44,8 @@ class StdioMcpCallHandler(McpCallHandler):
             cwd=stdio_config.cwd
         )
 
-        try:
+        async def _perform_call():
+            """Inner function to be wrapped by the timeout."""
             # The stdio_client context manager provides the read/write streams.
             async with stdio_client(mcp_lib_stdio_params) as (read_stream, write_stream):
                 # The ClientSession is its own context manager that handles initialization.
@@ -54,6 +60,14 @@ class StdioMcpCallHandler(McpCallHandler):
 
                 logger.debug(f"STDIO call to tool '{remote_tool_name}' on server '{config.server_id}' completed.")
                 return result
+
+        try:
+            return await asyncio.wait_for(_perform_call(), timeout=DEFAULT_STDIO_TIMEOUT)
+        except asyncio.TimeoutError:
+            error_message = (f"MCP call to '{remote_tool_name}' on server '{config.server_id}' timed out "
+                             f"after {DEFAULT_STDIO_TIMEOUT} seconds. The subprocess may have hung.")
+            logger.error(error_message)
+            raise RuntimeError(error_message) from None
         except Exception as e:
             logger.error(
                 f"An error occurred during STDIO tool call to '{remote_tool_name}' on server '{config.server_id}': {e}",