autobyteus 1.1.4__py3-none-any.whl → 1.1.5__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in the public registry. It is provided for informational purposes only.
Files changed (167)
  1. autobyteus/agent/context/__init__.py +4 -2
  2. autobyteus/agent/context/agent_config.py +0 -4
  3. autobyteus/agent/context/agent_context_registry.py +73 -0
  4. autobyteus/agent/events/notifiers.py +4 -0
  5. autobyteus/agent/handlers/inter_agent_message_event_handler.py +7 -2
  6. autobyteus/agent/handlers/llm_complete_response_received_event_handler.py +19 -19
  7. autobyteus/agent/handlers/user_input_message_event_handler.py +15 -0
  8. autobyteus/agent/message/send_message_to.py +29 -23
  9. autobyteus/agent/runtime/agent_runtime.py +10 -2
  10. autobyteus/agent/sender_type.py +15 -0
  11. autobyteus/agent/streaming/agent_event_stream.py +6 -0
  12. autobyteus/agent/streaming/stream_event_payloads.py +12 -0
  13. autobyteus/agent/streaming/stream_events.py +3 -0
  14. autobyteus/agent/system_prompt_processor/tool_manifest_injector_processor.py +7 -4
  15. autobyteus/agent_team/__init__.py +1 -0
  16. autobyteus/agent_team/agent_team.py +93 -0
  17. autobyteus/agent_team/agent_team_builder.py +184 -0
  18. autobyteus/agent_team/base_agent_team.py +86 -0
  19. autobyteus/agent_team/bootstrap_steps/__init__.py +24 -0
  20. autobyteus/agent_team/bootstrap_steps/agent_configuration_preparation_step.py +73 -0
  21. autobyteus/agent_team/bootstrap_steps/agent_team_bootstrapper.py +54 -0
  22. autobyteus/agent_team/bootstrap_steps/agent_team_runtime_queue_initialization_step.py +25 -0
  23. autobyteus/agent_team/bootstrap_steps/base_agent_team_bootstrap_step.py +23 -0
  24. autobyteus/agent_team/bootstrap_steps/coordinator_initialization_step.py +41 -0
  25. autobyteus/agent_team/bootstrap_steps/coordinator_prompt_preparation_step.py +85 -0
  26. autobyteus/agent_team/bootstrap_steps/task_notifier_initialization_step.py +51 -0
  27. autobyteus/agent_team/bootstrap_steps/team_context_initialization_step.py +45 -0
  28. autobyteus/agent_team/context/__init__.py +17 -0
  29. autobyteus/agent_team/context/agent_team_config.py +33 -0
  30. autobyteus/agent_team/context/agent_team_context.py +61 -0
  31. autobyteus/agent_team/context/agent_team_runtime_state.py +56 -0
  32. autobyteus/agent_team/context/team_manager.py +147 -0
  33. autobyteus/agent_team/context/team_node_config.py +76 -0
  34. autobyteus/agent_team/events/__init__.py +29 -0
  35. autobyteus/agent_team/events/agent_team_event_dispatcher.py +39 -0
  36. autobyteus/agent_team/events/agent_team_events.py +53 -0
  37. autobyteus/agent_team/events/agent_team_input_event_queue_manager.py +21 -0
  38. autobyteus/agent_team/exceptions.py +8 -0
  39. autobyteus/agent_team/factory/__init__.py +9 -0
  40. autobyteus/agent_team/factory/agent_team_factory.py +99 -0
  41. autobyteus/agent_team/handlers/__init__.py +19 -0
  42. autobyteus/agent_team/handlers/agent_team_event_handler_registry.py +23 -0
  43. autobyteus/agent_team/handlers/base_agent_team_event_handler.py +16 -0
  44. autobyteus/agent_team/handlers/inter_agent_message_request_event_handler.py +61 -0
  45. autobyteus/agent_team/handlers/lifecycle_agent_team_event_handler.py +27 -0
  46. autobyteus/agent_team/handlers/process_user_message_event_handler.py +46 -0
  47. autobyteus/agent_team/handlers/tool_approval_team_event_handler.py +48 -0
  48. autobyteus/agent_team/phases/__init__.py +11 -0
  49. autobyteus/agent_team/phases/agent_team_operational_phase.py +19 -0
  50. autobyteus/agent_team/phases/agent_team_phase_manager.py +48 -0
  51. autobyteus/agent_team/runtime/__init__.py +13 -0
  52. autobyteus/agent_team/runtime/agent_team_runtime.py +82 -0
  53. autobyteus/agent_team/runtime/agent_team_worker.py +117 -0
  54. autobyteus/agent_team/shutdown_steps/__init__.py +17 -0
  55. autobyteus/agent_team/shutdown_steps/agent_team_shutdown_orchestrator.py +35 -0
  56. autobyteus/agent_team/shutdown_steps/agent_team_shutdown_step.py +42 -0
  57. autobyteus/agent_team/shutdown_steps/base_agent_team_shutdown_step.py +16 -0
  58. autobyteus/agent_team/shutdown_steps/bridge_cleanup_step.py +28 -0
  59. autobyteus/agent_team/shutdown_steps/sub_team_shutdown_step.py +41 -0
  60. autobyteus/agent_team/streaming/__init__.py +26 -0
  61. autobyteus/agent_team/streaming/agent_event_bridge.py +48 -0
  62. autobyteus/agent_team/streaming/agent_event_multiplexer.py +70 -0
  63. autobyteus/agent_team/streaming/agent_team_event_notifier.py +64 -0
  64. autobyteus/agent_team/streaming/agent_team_event_stream.py +33 -0
  65. autobyteus/agent_team/streaming/agent_team_stream_event_payloads.py +32 -0
  66. autobyteus/agent_team/streaming/agent_team_stream_events.py +56 -0
  67. autobyteus/agent_team/streaming/team_event_bridge.py +50 -0
  68. autobyteus/agent_team/task_notification/__init__.py +11 -0
  69. autobyteus/agent_team/task_notification/system_event_driven_agent_task_notifier.py +164 -0
  70. autobyteus/agent_team/task_notification/task_notification_mode.py +24 -0
  71. autobyteus/agent_team/utils/__init__.py +9 -0
  72. autobyteus/agent_team/utils/wait_for_idle.py +46 -0
  73. autobyteus/cli/agent_team_tui/__init__.py +4 -0
  74. autobyteus/cli/agent_team_tui/app.py +210 -0
  75. autobyteus/cli/agent_team_tui/state.py +180 -0
  76. autobyteus/cli/agent_team_tui/widgets/__init__.py +6 -0
  77. autobyteus/cli/agent_team_tui/widgets/agent_list_sidebar.py +149 -0
  78. autobyteus/cli/agent_team_tui/widgets/focus_pane.py +320 -0
  79. autobyteus/cli/agent_team_tui/widgets/logo.py +20 -0
  80. autobyteus/cli/agent_team_tui/widgets/renderables.py +77 -0
  81. autobyteus/cli/agent_team_tui/widgets/shared.py +60 -0
  82. autobyteus/cli/agent_team_tui/widgets/status_bar.py +14 -0
  83. autobyteus/cli/agent_team_tui/widgets/task_board_panel.py +82 -0
  84. autobyteus/events/event_types.py +7 -2
  85. autobyteus/llm/api/autobyteus_llm.py +11 -12
  86. autobyteus/llm/api/lmstudio_llm.py +10 -13
  87. autobyteus/llm/api/ollama_llm.py +8 -13
  88. autobyteus/llm/autobyteus_provider.py +73 -46
  89. autobyteus/llm/llm_factory.py +102 -140
  90. autobyteus/llm/lmstudio_provider.py +63 -48
  91. autobyteus/llm/models.py +83 -53
  92. autobyteus/llm/ollama_provider.py +69 -61
  93. autobyteus/llm/ollama_provider_resolver.py +1 -0
  94. autobyteus/llm/providers.py +13 -13
  95. autobyteus/llm/runtimes.py +11 -0
  96. autobyteus/task_management/__init__.py +43 -0
  97. autobyteus/task_management/base_task_board.py +68 -0
  98. autobyteus/task_management/converters/__init__.py +11 -0
  99. autobyteus/task_management/converters/task_board_converter.py +64 -0
  100. autobyteus/task_management/converters/task_plan_converter.py +48 -0
  101. autobyteus/task_management/deliverable.py +16 -0
  102. autobyteus/task_management/deliverables/__init__.py +8 -0
  103. autobyteus/task_management/deliverables/file_deliverable.py +15 -0
  104. autobyteus/task_management/events.py +27 -0
  105. autobyteus/task_management/in_memory_task_board.py +126 -0
  106. autobyteus/task_management/schemas/__init__.py +15 -0
  107. autobyteus/task_management/schemas/deliverable_schema.py +13 -0
  108. autobyteus/task_management/schemas/plan_definition.py +35 -0
  109. autobyteus/task_management/schemas/task_status_report.py +27 -0
  110. autobyteus/task_management/task_plan.py +110 -0
  111. autobyteus/task_management/tools/__init__.py +14 -0
  112. autobyteus/task_management/tools/get_task_board_status.py +68 -0
  113. autobyteus/task_management/tools/publish_task_plan.py +113 -0
  114. autobyteus/task_management/tools/update_task_status.py +135 -0
  115. autobyteus/tools/bash/bash_executor.py +59 -14
  116. autobyteus/tools/mcp/config_service.py +63 -58
  117. autobyteus/tools/mcp/server/http_managed_mcp_server.py +14 -2
  118. autobyteus/tools/mcp/server/stdio_managed_mcp_server.py +14 -2
  119. autobyteus/tools/mcp/server_instance_manager.py +30 -4
  120. autobyteus/tools/mcp/tool_registrar.py +103 -50
  121. autobyteus/tools/parameter_schema.py +17 -11
  122. autobyteus/tools/registry/tool_definition.py +24 -29
  123. autobyteus/tools/tool_category.py +1 -0
  124. autobyteus/tools/usage/formatters/default_json_example_formatter.py +78 -3
  125. autobyteus/tools/usage/formatters/default_xml_example_formatter.py +23 -3
  126. autobyteus/tools/usage/formatters/gemini_json_example_formatter.py +6 -0
  127. autobyteus/tools/usage/formatters/google_json_example_formatter.py +7 -0
  128. autobyteus/tools/usage/formatters/openai_json_example_formatter.py +6 -4
  129. autobyteus/tools/usage/parsers/gemini_json_tool_usage_parser.py +23 -7
  130. autobyteus/tools/usage/parsers/provider_aware_tool_usage_parser.py +14 -25
  131. autobyteus/tools/usage/providers/__init__.py +2 -12
  132. autobyteus/tools/usage/providers/tool_manifest_provider.py +36 -29
  133. autobyteus/tools/usage/registries/__init__.py +7 -12
  134. autobyteus/tools/usage/registries/tool_formatter_pair.py +15 -0
  135. autobyteus/tools/usage/registries/tool_formatting_registry.py +58 -0
  136. autobyteus/tools/usage/registries/tool_usage_parser_registry.py +55 -0
  137. {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/METADATA +3 -3
  138. {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/RECORD +146 -72
  139. examples/agent_team/__init__.py +1 -0
  140. examples/run_browser_agent.py +17 -15
  141. examples/run_google_slides_agent.py +17 -16
  142. examples/run_poem_writer.py +22 -12
  143. examples/run_sqlite_agent.py +17 -15
  144. autobyteus/tools/mcp/call_handlers/__init__.py +0 -16
  145. autobyteus/tools/mcp/call_handlers/base_handler.py +0 -40
  146. autobyteus/tools/mcp/call_handlers/stdio_handler.py +0 -76
  147. autobyteus/tools/mcp/call_handlers/streamable_http_handler.py +0 -55
  148. autobyteus/tools/usage/providers/json_example_provider.py +0 -32
  149. autobyteus/tools/usage/providers/json_schema_provider.py +0 -35
  150. autobyteus/tools/usage/providers/json_tool_usage_parser_provider.py +0 -28
  151. autobyteus/tools/usage/providers/xml_example_provider.py +0 -28
  152. autobyteus/tools/usage/providers/xml_schema_provider.py +0 -29
  153. autobyteus/tools/usage/providers/xml_tool_usage_parser_provider.py +0 -26
  154. autobyteus/tools/usage/registries/json_example_formatter_registry.py +0 -51
  155. autobyteus/tools/usage/registries/json_schema_formatter_registry.py +0 -51
  156. autobyteus/tools/usage/registries/json_tool_usage_parser_registry.py +0 -42
  157. autobyteus/tools/usage/registries/xml_example_formatter_registry.py +0 -30
  158. autobyteus/tools/usage/registries/xml_schema_formatter_registry.py +0 -33
  159. autobyteus/tools/usage/registries/xml_tool_usage_parser_registry.py +0 -30
  160. examples/workflow/__init__.py +0 -1
  161. examples/workflow/run_basic_research_workflow.py +0 -189
  162. examples/workflow/run_code_review_workflow.py +0 -269
  163. examples/workflow/run_debate_workflow.py +0 -212
  164. examples/workflow/run_workflow_with_tui.py +0 -153
  165. {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/WHEEL +0 -0
  166. {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/licenses/LICENSE +0 -0
  167. {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/top_level.txt +0 -0
autobyteus/llm/llm_factory.py

@@ -5,13 +5,13 @@ import inspect
  from autobyteus.llm.autobyteus_provider import AutobyteusModelProvider
  from autobyteus.llm.models import LLMModel, ModelInfo, ProviderModelGroup
  from autobyteus.llm.providers import LLMProvider
+ from autobyteus.llm.runtimes import LLMRuntime
  from autobyteus.llm.utils.llm_config import LLMConfig, TokenPricingConfig
  from autobyteus.llm.base_llm import BaseLLM

  from autobyteus.llm.api.claude_llm import ClaudeLLM
  from autobyteus.llm.api.mistral_llm import MistralLLM
  from autobyteus.llm.api.openai_llm import OpenAILLM
- from autobyteus.llm.api.ollama_llm import OllamaLLM
  from autobyteus.llm.api.deepseek_llm import DeepSeekLLM
  from autobyteus.llm.api.grok_llm import GrokLLM
  from autobyteus.llm.api.kimi_llm import KimiLLM
@@ -23,58 +23,30 @@ logger = logging.getLogger(__name__)

  class LLMFactory(metaclass=SingletonMeta):
      _models_by_provider: Dict[LLMProvider, List[LLMModel]] = {}
+     _models_by_identifier: Dict[str, LLMModel] = {}
      _initialized = False

-     @staticmethod
-     def register(model: LLMModel):
-         LLMFactory.register_model(model)
-
      @staticmethod
      def ensure_initialized():
-         """
-         Ensures the factory is initialized before use.
-         """
+         """Ensures the factory is initialized before use."""
          if not LLMFactory._initialized:
              LLMFactory._initialize_registry()
              LLMFactory._initialized = True

      @staticmethod
      def reinitialize():
-         """
-         Reinitializes the model registry by resetting the initialization state
-         and reinitializing the registry.
-
-         This is useful when new provider API keys are configured and
-         we need to discover models that might be available with the new keys.
-
-         Returns:
-             bool: True if reinitialization was successful, False otherwise.
-         """
-         try:
-             logger.info("Reinitializing LLM model registry...")
-
-             # Reset the initialized flag
-             LLMFactory._initialized = False
-
-             # Clear existing models registry
-             LLMFactory._models_by_provider = {}
-
-             # Reinitialize the registry
-             LLMFactory.ensure_initialized()
-
-             logger.info("LLM model registry reinitialized successfully")
-             return True
-         except Exception as e:
-             logger.error(f"Failed to reinitialize LLM model registry: {str(e)}")
-             return False
+         """Reinitializes the model registry."""
+         logger.info("Reinitializing LLM model registry...")
+         LLMFactory._initialized = False
+         LLMFactory._models_by_provider.clear()
+         LLMFactory._models_by_identifier.clear()
+         LLMFactory.ensure_initialized()
+         logger.info("LLM model registry reinitialized successfully.")

      @staticmethod
      def _initialize_registry():
-         """
-         Initialize the registry with supported models, discover plugins,
-         organize models by provider, and assign models as attributes on LLMModel.
-         """
-         # Organize supported models by provider sections
+         """Initializes the registry with built-in models and discovers runtime models."""
+         # Hardcoded direct-API models. Runtime defaults to API.
          supported_models = [
              # OPENAI Provider Models
              LLMModel(
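Usage sketch (not part of the diff): in 1.1.5, reinitialize() simply clears both registries and re-runs discovery, and no longer returns a bool. A minimal call might look like the following; the environment variable name is illustrative, not taken from this diff:

    import os
    from autobyteus.llm.llm_factory import LLMFactory

    # Illustrative assumption: a newly configured provider key that the next
    # discovery pass should pick up.
    os.environ["OPENAI_API_KEY"] = "your-key-here"

    # Clears _models_by_provider and _models_by_identifier, then re-runs
    # _initialize_registry() via ensure_initialized().
    LLMFactory.reinitialize()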
@@ -306,134 +278,124 @@ class LLMFactory(metaclass=SingletonMeta):
          for model in supported_models:
              LLMFactory.register_model(model)

+         # Discover models from runtimes
          OllamaModelProvider.discover_and_register()
-         AutobyteusModelProvider.discover_and_register()
          LMStudioModelProvider.discover_and_register()
+         AutobyteusModelProvider.discover_and_register()

      @staticmethod
      def register_model(model: LLMModel):
-         """
-         Register a new LLM model, storing it under its provider category.
-         If a model with the same name already exists, it will be replaced.
-         """
-         # Using a flat list of all models to check for existing model by name
-         all_models = [m for models in LLMFactory._models_by_provider.values() for m in models]
-
-         for existing_model in all_models:
-             if existing_model.name == model.name:
-                 logger.warning(f"Model with name '{model.name}' is being redefined.")
-                 # Remove the old model from its provider list
-                 LLMFactory._models_by_provider[existing_model.provider].remove(existing_model)
-                 break
+         """Registers a new LLM model."""
+         identifier = model.model_identifier
+         if identifier in LLMFactory._models_by_identifier:
+             logger.debug(f"Redefining model with identifier '{identifier}'.")
+             # Remove old model from provider group to replace it
+             old_model = LLMFactory._models_by_identifier[identifier]
+             if old_model.provider in LLMFactory._models_by_provider:
+                 # This check is needed because a model might be in _models_by_identifier but not yet in _models_by_provider if re-registering
+                 if old_model in LLMFactory._models_by_provider[old_model.provider]:
+                     LLMFactory._models_by_provider[old_model.provider].remove(old_model)

-         models = LLMFactory._models_by_provider.setdefault(model.provider, [])
-         models.append(model)
+         LLMFactory._models_by_identifier[identifier] = model
+         LLMFactory._models_by_provider.setdefault(model.provider, []).append(model)

      @staticmethod
      def create_llm(model_identifier: str, llm_config: Optional[LLMConfig] = None) -> BaseLLM:
          """
-         Create an LLM instance for the specified model identifier.
-
-         Args:
-             model_identifier (str): The model name to create an instance for.
-             llm_config (Optional[LLMConfig]): Configuration for the LLM. If None,
-                 the model's default configuration is used.
-
-         Returns:
-             BaseLLM: An instance of the LLM.
-
-         Raises:
-             ValueError: If the model is not supported.
+         Creates an LLM instance for the specified unique model identifier.
+         Raises an error if the identifier is not found or if a non-unique name is provided.
          """
          LLMFactory.ensure_initialized()
-         for models in LLMFactory._models_by_provider.values():
-             for model_instance in models:
-                 if model_instance.name == model_identifier:
-                     return model_instance.create_llm(llm_config)
-         raise ValueError(f"Unsupported model: {model_identifier}")
+
+         # First, try a direct lookup by the unique model_identifier
+         model = LLMFactory._models_by_identifier.get(model_identifier)
+         if model:
+             return model.create_llm(llm_config)

-     @staticmethod
-     def get_all_models() -> List[str]:
-         """
-         Returns a list of all registered model values.
-         """
-         LLMFactory.ensure_initialized()
-         all_models = []
-         for models in LLMFactory._models_by_provider.values():
-             all_models.extend(model.name for model in models)
-         return all_models
+         # If not found, check if the user provided a non-unique name by mistake
+         found_by_name = [m for m in LLMFactory._models_by_identifier.values() if m.name == model_identifier]
+         if len(found_by_name) > 1:
+             identifiers = [m.model_identifier for m in found_by_name]
+             raise ValueError(
+                 f"The model name '{model_identifier}' is ambiguous. Please use one of the unique "
+                 f"model identifiers: {identifiers}"
+             )
+
+         raise ValueError(f"Model with identifier '{model_identifier}' not found.")

-     @staticmethod
-     def get_all_providers() -> Set[LLMProvider]:
-         """
-         Returns a set of all available LLM providers.
-         """
-         LLMFactory.ensure_initialized()
-         return set(LLMProvider)
+     # --- New Public API ---

      @staticmethod
-     def get_models_by_provider(provider: LLMProvider) -> List[str]:
-         """
-         Returns a list of all model values for a specific provider.
-         """
+     def list_available_models() -> List[ModelInfo]:
+         """Returns a list of all available models with their detailed info."""
          LLMFactory.ensure_initialized()
-         return [model.value for model in LLMFactory._models_by_provider.get(provider, [])]
+         models = sorted(LLMFactory._models_by_identifier.values(), key=lambda m: m.model_identifier)
+         return [
+             ModelInfo(
+                 model_identifier=m.model_identifier,
+                 display_name=m.name,
+                 value=m.value,
+                 canonical_name=m.canonical_name,
+                 provider=m.provider.value,
+                 runtime=m.runtime.value,
+                 host_url=m.host_url
+             )
+             for m in models
+         ]

      @staticmethod
-     def get_models_for_provider(provider: LLMProvider) -> List[LLMModel]:
-         """
-         Returns a list of LLMModel instances for a specific provider.
-         """
+     def list_models_by_provider(provider: LLMProvider) -> List[ModelInfo]:
+         """Returns a list of available models for a specific provider."""
          LLMFactory.ensure_initialized()
-         return LLMFactory._models_by_provider.get(provider, [])
+         provider_models = sorted(
+             [m for m in LLMFactory._models_by_identifier.values() if m.provider == provider],
+             key=lambda m: m.model_identifier
+         )
+         return [
+             ModelInfo(
+                 model_identifier=m.model_identifier,
+                 display_name=m.name,
+                 value=m.value,
+                 canonical_name=m.canonical_name,
+                 provider=m.provider.value,
+                 runtime=m.runtime.value,
+                 host_url=m.host_url
+             )
+             for m in provider_models
+         ]

      @staticmethod
-     def get_canonical_name(model_name: str) -> Optional[str]:
-         """
-         Get the canonical name for a model by its name.
-
-         Args:
-             model_name (str): The model name (e.g., "gpt_4o")
-
-         Returns:
-             Optional[str]: The canonical name if found, None otherwise
-         """
+     def list_models_by_runtime(runtime: LLMRuntime) -> List[ModelInfo]:
+         """Returns a list of available models for a specific runtime."""
          LLMFactory.ensure_initialized()
-         for models in LLMFactory._models_by_provider.values():
-             for model_instance in models:
-                 if model_instance.name == model_name:
-                     return model_instance.canonical_name
-         return None
+         runtime_models = sorted(
+             [m for m in LLMFactory._models_by_identifier.values() if m.runtime == runtime],
+             key=lambda m: m.model_identifier
+         )
+         return [
+             ModelInfo(
+                 model_identifier=m.model_identifier,
+                 display_name=m.name,
+                 value=m.value,
+                 canonical_name=m.canonical_name,
+                 provider=m.provider.value,
+                 runtime=m.runtime.value,
+                 host_url=m.host_url
+             )
+             for m in runtime_models
+         ]

      @staticmethod
-     def get_models_grouped_by_provider() -> List[ProviderModelGroup]:
+     def get_canonical_name(model_identifier: str) -> Optional[str]:
          """
-         Returns a list of all providers, each with a list of its available models,
-         sorted by provider name and model name. Providers with no models are included
-         with an empty model list.
+         Retrieves the canonical name for a given model identifier.
          """
          LLMFactory.ensure_initialized()
-         result: List[ProviderModelGroup] = []
-         # Sort all providers from the enum by name for consistent order
-         all_providers_sorted = sorted(list(LLMProvider), key=lambda p: p.name)
+         model = LLMFactory._models_by_identifier.get(model_identifier)
+         if model:
+             return model.canonical_name

-         for provider in all_providers_sorted:
-             # Get models for the current provider, defaults to [] if none are registered
-             models = LLMFactory._models_by_provider.get(provider, [])
-
-             # Sort the models for this provider by name
-             sorted_models = sorted(models, key=lambda model: model.name)
-
-             model_infos = [
-                 ModelInfo(name=model.name, canonical_name=model.canonical_name)
-                 for model in sorted_models
-             ]
-
-             result.append(ProviderModelGroup(
-                 provider=provider.name,
-                 models=model_infos
-             ))
-
-         return result
+         logger.warning(f"Could not find model with identifier '{model_identifier}' to get its canonical name.")
+         return None

  default_llm_factory = LLMFactory()
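Usage sketch for the reworked public API above (not part of the diff). It assumes ModelInfo exposes the fields it is constructed with, and that LLMProvider has an OPENAI member, as implied by the "# OPENAI Provider Models" section; the model identifier string is hypothetical:

    from autobyteus.llm.llm_factory import LLMFactory
    from autobyteus.llm.providers import LLMProvider

    # Enumerate every registered model as a ModelInfo record.
    for info in LLMFactory.list_available_models():
        print(info.model_identifier, info.provider, info.runtime, info.host_url)

    # Narrow the listing to a single provider.
    openai_models = LLMFactory.list_models_by_provider(LLMProvider.OPENAI)

    # create_llm() now resolves by the unique model_identifier; a display name
    # shared by several models raises a ValueError listing the candidates.
    llm = LLMFactory.create_llm("some-unique-model-identifier")  # hypothetical identifier

Per the diff, get_all_models(), get_all_providers(), get_models_by_provider(), get_models_for_provider(), and get_models_grouped_by_provider() are removed in favour of these list_* methods.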
autobyteus/llm/lmstudio_provider.py

@@ -1,8 +1,9 @@
  from autobyteus.llm.models import LLMModel
  from autobyteus.llm.api.lmstudio_llm import LMStudioLLM
  from autobyteus.llm.providers import LLMProvider
+ from autobyteus.llm.runtimes import LLMRuntime
  from autobyteus.llm.utils.llm_config import LLMConfig, TokenPricingConfig
- from typing import TYPE_CHECKING
+ from typing import TYPE_CHECKING, List
  import os
  import logging
  from openai import OpenAI, APIConnectionError, OpenAIError
@@ -16,6 +17,19 @@ logger = logging.getLogger(__name__)
  class LMStudioModelProvider:
      DEFAULT_LMSTUDIO_HOST = 'http://localhost:1234'

+     @staticmethod
+     def _get_hosts() -> List[str]:
+         """Gets LM Studio hosts from env vars, supporting comma-separated list."""
+         hosts_str = os.getenv('LMSTUDIO_HOSTS')
+         if hosts_str:
+             return [host.strip() for host in hosts_str.split(',')]
+
+         legacy_host = os.getenv('LMSTUDIO_HOST')  # For backward compatibility
+         if legacy_host:
+             return [legacy_host]
+
+         return [LMStudioModelProvider.DEFAULT_LMSTUDIO_HOST]
+
      @staticmethod
      def is_valid_url(url: str) -> bool:
          """Validate if the provided URL is properly formatted."""
@@ -28,62 +42,63 @@ class LMStudioModelProvider:
      @staticmethod
      def discover_and_register():
          """
-         Discovers models from a local LM Studio instance and registers them with the LLMFactory.
+         Discovers models from all configured LM Studio instances and registers them.
          """
          try:
              from autobyteus.llm.llm_factory import LLMFactory

-             lmstudio_host = os.getenv('LMSTUDIO_HOST', LMStudioModelProvider.DEFAULT_LMSTUDIO_HOST)
-
-             if not LMStudioModelProvider.is_valid_url(lmstudio_host):
-                 logger.error(f"Invalid LM Studio host URL: {lmstudio_host}")
-                 return
+             hosts = LMStudioModelProvider._get_hosts()
+             total_registered_count = 0

-             base_url = f"{lmstudio_host}/v1"
-
-             # Use a dummy API key for initialization. LM Studio doesn't require one.
-             client = OpenAI(base_url=base_url, api_key="lm-studio")
+             for host_url in hosts:
+                 if not LMStudioModelProvider.is_valid_url(host_url):
+                     logger.error(f"Invalid LM Studio host URL: {host_url}, skipping.")
+                     continue

-             try:
-                 response = client.models.list()
-                 models = response.data
-             except APIConnectionError as e:
-                 logger.warning(
-                     f"Could not connect to LM Studio server at {base_url}. "
-                     "Please ensure LM Studio is running with the server started. "
-                     f"Error: {e.__cause__}"
-                 )
-                 return
-             except OpenAIError as e:
-                 logger.error(f"An error occurred while fetching models from LM Studio: {e}")
-                 return
+                 logger.info(f"Discovering LM Studio models from host: {host_url}")
+                 base_url = f"{host_url}/v1"
+                 client = OpenAI(base_url=base_url, api_key="lm-studio")  # Dummy key

-             registered_count = 0
-             for model_info in models:
-                 model_id = model_info.id
-                 if not model_id:
-                     continue
-
                  try:
-                     llm_model = LLMModel(
-                         name=model_id,
-                         value=model_id,
-                         provider=LLMProvider.LMSTUDIO,
-                         llm_class=LMStudioLLM,
-                         canonical_name=model_id,
-                         default_config=LLMConfig(
-                             rate_limit=None,  # No rate limit for local models by default
-                             token_limit=8192,  # A reasonable default
-                             pricing_config=TokenPricingConfig(0.0, 0.0)  # Local models are free
+                     response = client.models.list()
+                     models = response.data
+                 except APIConnectionError:
+                     logger.warning(f"Could not connect to LM Studio at {host_url}. Please ensure the server is running.")
+                     continue
+                 except OpenAIError as e:
+                     logger.error(f"An error occurred fetching models from LM Studio at {host_url}: {e}")
+                     continue
+
+                 host_registered_count = 0
+                 for model_info in models:
+                     model_id = model_info.id
+                     if not model_id:
+                         continue
+
+                     try:
+                         llm_model = LLMModel(
+                             name=model_id,
+                             value=model_id,
+                             provider=LLMProvider.LMSTUDIO,  # LMStudio is both provider and runtime
+                             llm_class=LMStudioLLM,
+                             canonical_name=model_id,
+                             runtime=LLMRuntime.LMSTUDIO,
+                             host_url=host_url,
+                             default_config=LLMConfig(
+                                 pricing_config=TokenPricingConfig(0.0, 0.0)  # Local models are free
+                             )
                          )
-                     )
-                     LLMFactory.register_model(llm_model)
-                     registered_count += 1
-                 except Exception as e:
-                     logger.warning(f"Failed to register LM Studio model {model_id}: {str(e)}")
+                         LLMFactory.register_model(llm_model)
+                         host_registered_count += 1
+                     except Exception as e:
+                         logger.warning(f"Failed to register LM Studio model '{model_id}' from {host_url}: {e}")
+
+                 if host_registered_count > 0:
+                     logger.info(f"Registered {host_registered_count} models from LM Studio host {host_url}")
+                     total_registered_count += host_registered_count

-             if registered_count > 0:
-                 logger.info(f"Successfully registered {registered_count} LM Studio models from {lmstudio_host}")
+             if total_registered_count > 0:
+                 logger.info(f"Finished LM Studio discovery. Total models registered: {total_registered_count}")

          except Exception as e:
-             logger.error(f"Unexpected error during LM Studio model discovery: {str(e)}")
+             logger.error(f"An unexpected error occurred during LM Studio model discovery: {e}")
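Putting the two files together, a sketch of multi-host discovery end to end (not part of the diff). Discovery runs inside LLMFactory._initialize_registry(), so setting the hosts before first use is enough; the host value is illustrative and ModelInfo field access is assumed:

    import os

    os.environ["LMSTUDIO_HOSTS"] = "http://localhost:1234"  # illustrative host

    from autobyteus.llm.llm_factory import LLMFactory
    from autobyteus.llm.runtimes import LLMRuntime

    LLMFactory.ensure_initialized()  # triggers LMStudioModelProvider.discover_and_register()

    # Each discovered model now carries the runtime and the host it was found on.
    for info in LLMFactory.list_models_by_runtime(LLMRuntime.LMSTUDIO):
        print(info.model_identifier, info.host_url)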