autobyteus 1.1.8__py3-none-any.whl → 1.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108)
  1. autobyteus/agent/bootstrap_steps/system_prompt_processing_step.py +6 -2
  2. autobyteus/agent/handlers/inter_agent_message_event_handler.py +17 -19
  3. autobyteus/agent/handlers/llm_complete_response_received_event_handler.py +6 -3
  4. autobyteus/agent/handlers/tool_result_event_handler.py +61 -18
  5. autobyteus/agent/handlers/user_input_message_event_handler.py +19 -10
  6. autobyteus/agent/hooks/base_phase_hook.py +17 -0
  7. autobyteus/agent/hooks/hook_registry.py +15 -27
  8. autobyteus/agent/input_processor/base_user_input_processor.py +17 -1
  9. autobyteus/agent/input_processor/processor_registry.py +15 -27
  10. autobyteus/agent/llm_response_processor/base_processor.py +17 -1
  11. autobyteus/agent/llm_response_processor/processor_registry.py +15 -24
  12. autobyteus/agent/llm_response_processor/provider_aware_tool_usage_processor.py +14 -0
  13. autobyteus/agent/message/agent_input_user_message.py +15 -2
  14. autobyteus/agent/message/send_message_to.py +1 -1
  15. autobyteus/agent/processor_option.py +17 -0
  16. autobyteus/agent/sender_type.py +1 -0
  17. autobyteus/agent/system_prompt_processor/base_processor.py +17 -1
  18. autobyteus/agent/system_prompt_processor/processor_registry.py +15 -27
  19. autobyteus/agent/system_prompt_processor/tool_manifest_injector_processor.py +10 -0
  20. autobyteus/agent/tool_execution_result_processor/base_processor.py +17 -1
  21. autobyteus/agent/tool_execution_result_processor/processor_registry.py +15 -1
  22. autobyteus/agent/workspace/base_workspace.py +1 -1
  23. autobyteus/agent/workspace/workspace_definition.py +1 -1
  24. autobyteus/agent_team/bootstrap_steps/team_context_initialization_step.py +1 -1
  25. autobyteus/agent_team/streaming/agent_team_stream_event_payloads.py +2 -2
  26. autobyteus/agent_team/task_notification/__init__.py +4 -0
  27. autobyteus/agent_team/task_notification/activation_policy.py +70 -0
  28. autobyteus/agent_team/task_notification/system_event_driven_agent_task_notifier.py +56 -122
  29. autobyteus/agent_team/task_notification/task_activator.py +66 -0
  30. autobyteus/cli/agent_team_tui/state.py +17 -20
  31. autobyteus/cli/agent_team_tui/widgets/focus_pane.py +1 -1
  32. autobyteus/cli/agent_team_tui/widgets/task_board_panel.py +1 -1
  33. autobyteus/events/event_types.py +2 -2
  34. autobyteus/llm/api/gemini_llm.py +45 -54
  35. autobyteus/llm/api/qwen_llm.py +25 -0
  36. autobyteus/llm/autobyteus_provider.py +8 -2
  37. autobyteus/llm/llm_factory.py +16 -0
  38. autobyteus/multimedia/audio/api/autobyteus_audio_client.py +4 -1
  39. autobyteus/multimedia/audio/api/gemini_audio_client.py +84 -153
  40. autobyteus/multimedia/audio/audio_client_factory.py +47 -22
  41. autobyteus/multimedia/audio/audio_model.py +13 -6
  42. autobyteus/multimedia/audio/autobyteus_audio_provider.py +8 -2
  43. autobyteus/multimedia/audio/base_audio_client.py +3 -1
  44. autobyteus/multimedia/image/api/autobyteus_image_client.py +12 -5
  45. autobyteus/multimedia/image/api/gemini_image_client.py +72 -130
  46. autobyteus/multimedia/image/api/openai_image_client.py +4 -2
  47. autobyteus/multimedia/image/autobyteus_image_provider.py +8 -2
  48. autobyteus/multimedia/image/base_image_client.py +6 -2
  49. autobyteus/multimedia/image/image_client_factory.py +20 -19
  50. autobyteus/multimedia/image/image_model.py +13 -6
  51. autobyteus/multimedia/providers.py +1 -0
  52. autobyteus/task_management/__init__.py +9 -10
  53. autobyteus/task_management/base_task_board.py +14 -6
  54. autobyteus/task_management/converters/__init__.py +0 -2
  55. autobyteus/task_management/converters/task_board_converter.py +7 -16
  56. autobyteus/task_management/events.py +6 -6
  57. autobyteus/task_management/in_memory_task_board.py +48 -38
  58. autobyteus/task_management/schemas/__init__.py +2 -2
  59. autobyteus/task_management/schemas/{plan_definition.py → task_definition.py} +5 -6
  60. autobyteus/task_management/schemas/task_status_report.py +0 -1
  61. autobyteus/task_management/task.py +60 -0
  62. autobyteus/task_management/tools/__init__.py +4 -2
  63. autobyteus/task_management/tools/get_my_tasks.py +80 -0
  64. autobyteus/task_management/tools/get_task_board_status.py +3 -3
  65. autobyteus/task_management/tools/publish_task.py +77 -0
  66. autobyteus/task_management/tools/publish_tasks.py +74 -0
  67. autobyteus/task_management/tools/update_task_status.py +5 -5
  68. autobyteus/tools/__init__.py +3 -1
  69. autobyteus/tools/base_tool.py +4 -4
  70. autobyteus/tools/browser/session_aware/browser_session_aware_navigate_to.py +1 -1
  71. autobyteus/tools/browser/session_aware/browser_session_aware_web_element_trigger.py +1 -1
  72. autobyteus/tools/browser/session_aware/browser_session_aware_webpage_reader.py +1 -1
  73. autobyteus/tools/browser/session_aware/browser_session_aware_webpage_screenshot_taker.py +1 -1
  74. autobyteus/tools/browser/standalone/navigate_to.py +1 -1
  75. autobyteus/tools/browser/standalone/web_page_pdf_generator.py +1 -1
  76. autobyteus/tools/browser/standalone/webpage_image_downloader.py +1 -1
  77. autobyteus/tools/browser/standalone/webpage_reader.py +1 -1
  78. autobyteus/tools/browser/standalone/webpage_screenshot_taker.py +1 -1
  79. autobyteus/tools/functional_tool.py +1 -1
  80. autobyteus/tools/google_search.py +1 -1
  81. autobyteus/tools/image_downloader.py +1 -1
  82. autobyteus/tools/mcp/factory.py +1 -1
  83. autobyteus/tools/mcp/schema_mapper.py +1 -1
  84. autobyteus/tools/mcp/tool.py +1 -1
  85. autobyteus/tools/multimedia/__init__.py +2 -0
  86. autobyteus/tools/multimedia/audio_tools.py +10 -20
  87. autobyteus/tools/multimedia/image_tools.py +21 -22
  88. autobyteus/tools/multimedia/media_reader_tool.py +117 -0
  89. autobyteus/tools/pydantic_schema_converter.py +1 -1
  90. autobyteus/tools/registry/tool_definition.py +1 -1
  91. autobyteus/tools/timer.py +1 -1
  92. autobyteus/tools/tool_meta.py +1 -1
  93. autobyteus/tools/usage/formatters/default_json_example_formatter.py +1 -1
  94. autobyteus/tools/usage/formatters/default_xml_example_formatter.py +1 -1
  95. autobyteus/tools/usage/formatters/default_xml_schema_formatter.py +59 -3
  96. autobyteus/tools/usage/formatters/gemini_json_example_formatter.py +1 -1
  97. autobyteus/tools/usage/formatters/google_json_example_formatter.py +1 -1
  98. autobyteus/tools/usage/formatters/openai_json_example_formatter.py +1 -1
  99. autobyteus/{tools → utils}/parameter_schema.py +1 -1
  100. {autobyteus-1.1.8.dist-info → autobyteus-1.1.9.dist-info}/METADATA +2 -2
  101. {autobyteus-1.1.8.dist-info → autobyteus-1.1.9.dist-info}/RECORD +105 -99
  102. examples/run_poem_writer.py +1 -1
  103. autobyteus/task_management/converters/task_plan_converter.py +0 -48
  104. autobyteus/task_management/task_plan.py +0 -110
  105. autobyteus/task_management/tools/publish_task_plan.py +0 -101
  106. {autobyteus-1.1.8.dist-info → autobyteus-1.1.9.dist-info}/WHEEL +0 -0
  107. {autobyteus-1.1.8.dist-info → autobyteus-1.1.9.dist-info}/licenses/LICENSE +0 -0
  108. {autobyteus-1.1.8.dist-info → autobyteus-1.1.9.dist-info}/top_level.txt +0 -0
--- /dev/null
+++ b/autobyteus/agent_team/task_notification/task_activator.py
@@ -0,0 +1,66 @@
+ # file: autobyteus/autobyteus/agent_team/task_notification/task_activator.py
+ """
+ Defines the component responsible for the action of activating an agent.
+ """
+ import logging
+ from typing import TYPE_CHECKING
+
+ from autobyteus.agent.message import AgentInputUserMessage
+ from autobyteus.agent_team.events import ProcessUserMessageEvent
+ from autobyteus.agent.sender_type import SenderType, TASK_NOTIFIER_SENDER_ID
+
+ if TYPE_CHECKING:
+     from autobyteus.agent_team.context.team_manager import TeamManager
+
+ logger = logging.getLogger(__name__)
+
+ class TaskActivator:
+     """
+     A component with the single responsibility of activating an agent.
+
+     Activation involves ensuring the agent is running and sending it a
+     standardized "start work" message.
+     """
+     def __init__(self, team_manager: 'TeamManager'):
+         """
+         Initializes the TaskActivator.
+
+         Args:
+             team_manager: The team's manager, used to start agents and dispatch messages.
+         """
+         if not team_manager:
+             raise ValueError("TaskActivator requires a valid TeamManager instance.")
+         self._team_manager = team_manager
+         logger.debug(f"TaskActivator initialized for team '{self._team_manager.team_id}'.")
+
+     async def activate_agent(self, agent_name: str):
+         """
+         Activates a specific agent by ensuring it is ready and sending it a
+         generic "start work" notification.
+
+         Args:
+             agent_name: The unique name of the agent to activate.
+         """
+         team_id = self._team_manager.team_id
+         try:
+             logger.info(f"Team '{team_id}': TaskActivator is activating agent '{agent_name}'.")
+
+             # This ensures the agent is started and ready to receive the message.
+             await self._team_manager.ensure_node_is_ready(agent_name)
+
+             notification_message = AgentInputUserMessage(
+                 content="You have new tasks in your queue. Please review your task list using your tools and begin your work.",
+                 sender_type=SenderType.SYSTEM,
+                 metadata={'sender_id': TASK_NOTIFIER_SENDER_ID}
+             )
+             event = ProcessUserMessageEvent(
+                 user_message=notification_message,
+                 target_agent_name=agent_name
+             )
+             await self._team_manager.dispatch_user_message_to_agent(event)
+
+             logger.info(f"Team '{team_id}': Successfully sent activation notification to '{agent_name}'.")
+
+         except Exception as e:
+             # FIXED: Removed "TaskActivator" from the log message to align with the unit test assertion.
+             logger.error(f"Team '{team_id}': Failed to activate agent '{agent_name}': {e}", exc_info=True)
--- a/autobyteus/cli/agent_team_tui/state.py
+++ b/autobyteus/cli/agent_team_tui/state.py
@@ -16,8 +16,8 @@ from autobyteus.agent.streaming.stream_event_payloads import (
  )
  from autobyteus.agent_team.streaming.agent_team_stream_events import AgentTeamStreamEvent
  from autobyteus.agent_team.streaming.agent_team_stream_event_payloads import AgentEventRebroadcastPayload, SubTeamEventRebroadcastPayload, AgentTeamPhaseTransitionData
- from autobyteus.task_management.task_plan import Task
- from autobyteus.task_management.events import TaskPlanPublishedEvent, TaskStatusUpdatedEvent
+ from autobyteus.task_management.task import Task
+ from autobyteus.task_management.events import TasksAddedEvent, TaskStatusUpdatedEvent
  from autobyteus.task_management.base_task_board import TaskStatus

  logger = logging.getLogger(__name__)
@@ -86,29 +86,26 @@ class TUIStateStore:
          self._team_event_history[parent_name].append(event)

          if event.event_source_type == "TASK_BOARD":
-             # The 'parent_name' argument holds the friendly name of the team (or sub-team)
-             # that is the context for this event. This is the key we use for UI state.
              team_name_key = parent_name
-             if isinstance(event.data, TaskPlanPublishedEvent):
-                 self._task_plans[team_name_key] = event.data.plan.tasks
-                 # Reset statuses when a new plan is published
-                 self._task_statuses[team_name_key] = {task.task_id: TaskStatus.NOT_STARTED for task in event.data.plan.tasks}
-                 logger.debug(f"TUI State: Updated task plan for '{team_name_key}' with {len(event.data.plan.tasks)} tasks.")
+             if isinstance(event.data, TasksAddedEvent):
+                 if team_name_key not in self._task_plans: self._task_plans[team_name_key] = []
+                 if team_name_key not in self._task_statuses: self._task_statuses[team_name_key] = {}
+                 self._task_plans[team_name_key].extend(event.data.tasks)
+                 for task in event.data.tasks:
+                     self._task_statuses[team_name_key][task.task_id] = TaskStatus.NOT_STARTED
+                 logger.debug(f"TUI State: Added {len(event.data.tasks)} tasks to board for '{team_name_key}'.")
+
              elif isinstance(event.data, TaskStatusUpdatedEvent):
-                 # Update status
-                 if team_name_key not in self._task_statuses:
-                     self._task_statuses[team_name_key] = {}
+                 if team_name_key not in self._task_statuses: self._task_statuses[team_name_key] = {}
                  self._task_statuses[team_name_key][event.data.task_id] = event.data.new_status
                  logger.debug(f"TUI State: Updated status for task '{event.data.task_id}' in team '{team_name_key}' to {event.data.new_status}.")

-                 # Update deliverables if they are provided in the event.
-                 if event.data.deliverables is not None:
-                     if team_name_key in self._task_plans:
-                         for task in self._task_plans[team_name_key]:
-                             if task.task_id == event.data.task_id:
-                                 task.file_deliverables = event.data.deliverables
-                                 logger.debug(f"TUI State: Synced deliverables for task '{event.data.task_id}' in team '{team_name_key}'.")
-                                 break
+                 if event.data.deliverables is not None and team_name_key in self._task_plans:
+                     for task in self._task_plans[team_name_key]:
+                         if task.task_id == event.data.task_id:
+                             task.file_deliverables = event.data.deliverables
+                             logger.debug(f"TUI State: Synced deliverables for task '{event.data.task_id}' in team '{team_name_key}'.")
+                             break
             return

          if isinstance(event.data, AgentEventRebroadcastPayload):
--- a/autobyteus/cli/agent_team_tui/widgets/focus_pane.py
+++ b/autobyteus/cli/agent_team_tui/widgets/focus_pane.py
@@ -15,7 +15,7 @@ from textual.containers import VerticalScroll, Horizontal
  from autobyteus.agent.phases import AgentOperationalPhase
  from autobyteus.agent_team.phases import AgentTeamOperationalPhase
  from autobyteus.task_management.base_task_board import TaskStatus
- from autobyteus.task_management.task_plan import Task
+ from autobyteus.task_management.task import Task
  from autobyteus.agent.streaming.stream_events import StreamEvent as AgentStreamEvent, StreamEventType as AgentStreamEventType
  from autobyteus.agent.streaming.stream_event_payloads import (
      AgentOperationalPhaseTransitionData, AssistantChunkData, AssistantCompleteResponseData,
--- a/autobyteus/cli/agent_team_tui/widgets/task_board_panel.py
+++ b/autobyteus/cli/agent_team_tui/widgets/task_board_panel.py
@@ -6,7 +6,7 @@ from rich.panel import Panel
  from rich.text import Text
  from textual.widgets import Static

- from autobyteus.task_management.task_plan import Task
+ from autobyteus.task_management.task import Task
  from autobyteus.task_management.base_task_board import TaskStatus
  from .shared import TASK_STATUS_ICONS, LOG_ICON

--- a/autobyteus/events/event_types.py
+++ b/autobyteus/events/event_types.py
@@ -47,8 +47,8 @@ class EventType(Enum):
      TEAM_STREAM_EVENT = "team_stream_event" # For unified agent team event stream

      # --- Task Board Events ---
-     TASK_BOARD_PLAN_PUBLISHED = "task_board_plan_published"
-     TASK_BOARD_STATUS_UPDATED = "task_board_status_updated"
+     TASK_BOARD_TASKS_ADDED = "task_board.tasks.added"
+     TASK_BOARD_STATUS_UPDATED = "task_board.status.updated"

      def __str__(self):
          return self.value
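
Note the member rename and the switch from snake_case values to dotted values; because __str__ returns the enum value, the dotted form is what now appears wherever these events are stringified:

    from autobyteus.events.event_types import EventType

    # The dotted value is the new wire/log form.
    assert str(EventType.TASK_BOARD_TASKS_ADDED) == "task_board.tasks.added"
    assert str(EventType.TASK_BOARD_STATUS_UPDATED) == "task_board.status.updated"
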
--- a/autobyteus/llm/api/gemini_llm.py
+++ b/autobyteus/llm/api/gemini_llm.py
@@ -1,6 +1,7 @@
  import logging
- from typing import Dict, List, AsyncGenerator, Any
- import google.generativeai as genai # CHANGED: Using the older 'google.generativeai' library
+ from typing import Dict, Optional, List, AsyncGenerator, Any
+ from google import genai
+ from google.genai import types as genai_types
  import os
  from autobyteus.llm.models import LLMModel
  from autobyteus.llm.base_llm import BaseLLM
@@ -13,66 +14,60 @@ from autobyteus.llm.user_message import LLMUserMessage
  logger = logging.getLogger(__name__)

  def _format_gemini_history(messages: List[Message]) -> List[Dict[str, Any]]:
-     """
-     Formats internal message history for the Gemini API.
-     This function remains compatible with the older library.
-     """
+     """Formats internal message history for the Gemini API."""
      history = []
-     # System message is handled separately in the model initialization
+     # System message is handled separately in the new API
      for msg in messages:
          if msg.role in [MessageRole.USER, MessageRole.ASSISTANT]:
+             # NOTE: This history conversion will need to be updated for multimodal messages
              role = 'model' if msg.role == MessageRole.ASSISTANT else 'user'
+             # The `parts` must be a list of dictionaries (Part objects), not a list of strings.
              history.append({"role": role, "parts": [{"text": msg.content}]})
      return history

  class GeminiLLM(BaseLLM):
      def __init__(self, model: LLMModel = None, llm_config: LLMConfig = None):
+         self.generation_config_dict = {
+             "response_mime_type": "text/plain",
+         }
+
          if model is None:
-             model = LLMModel['gemini-2.5-flash'] # Note: Ensure model name is compatible, e.g., 'gemini-1.5-flash-latest'
+             model = LLMModel['gemini-2.5-flash']
          if llm_config is None:
              llm_config = LLMConfig()
-
+
          super().__init__(model=model, llm_config=llm_config)
-
-         # CHANGED: Initialization flow. Configure API key and then instantiate the model.
-         self.initialize()
-
-         system_instruction = self.system_message if self.system_message else None
-
-         self.model = genai.GenerativeModel(
-             model_name=self.model.value,
-             system_instruction=system_instruction
-         )
+         self.client = self.initialize()
+         self.async_client = self.client.aio

-     @staticmethod
-     def initialize():
-         """
-         CHANGED: This method now configures the genai library with the API key
-         instead of creating a client instance.
-         """
+     @classmethod
+     def initialize(cls) -> genai.client.Client:
          api_key = os.environ.get("GEMINI_API_KEY")
          if not api_key:
              logger.error("GEMINI_API_KEY environment variable is not set.")
              raise ValueError("GEMINI_API_KEY environment variable is not set.")
          try:
-             genai.configure(api_key=api_key)
+             return genai.Client()
          except Exception as e:
-             logger.error(f"Failed to configure Gemini client: {str(e)}")
-             raise ValueError(f"Failed to configure Gemini client: {str(e)}")
+             logger.error(f"Failed to initialize Gemini client: {str(e)}")
+             raise ValueError(f"Failed to initialize Gemini client: {str(e)}")

-     def _get_generation_config(self) -> Dict[str, Any]:
-         """
-         CHANGED: Builds the generation config as a dictionary.
-         'thinking_config' is not available in the old library.
-         'system_instruction' is passed during model initialization.
-         """
-         # Basic configuration, you can expand this with temperature, top_p, etc.
-         # from self.llm_config if needed.
-         config = {
-             "response_mime_type": "text/plain",
-             # Example: "temperature": self.llm_config.temperature
-         }
-         return config
+     def _get_generation_config(self) -> genai_types.GenerateContentConfig:
+         """Builds the generation config, handling special cases like 'thinking'."""
+         config = self.generation_config_dict.copy()
+
+         thinking_config = None
+         if "flash" in self.model.value:
+             thinking_config = genai_types.ThinkingConfig(thinking_budget=0)
+
+         # System instruction is now part of the config
+         system_instruction = self.system_message if self.system_message else None
+
+         return genai_types.GenerateContentConfig(
+             **config,
+             thinking_config=thinking_config,
+             system_instruction=system_instruction
+         )

      async def _send_user_message_to_llm(self, user_message: LLMUserMessage, **kwargs) -> CompleteResponse:
          self.add_user_message(user_message)
@@ -81,20 +76,19 @@ class GeminiLLM(BaseLLM):
          history = _format_gemini_history(self.messages)
          generation_config = self._get_generation_config()

-         # CHANGED: API call now uses the model instance directly.
-         response = await self.model.generate_content_async(
+         response = await self.async_client.models.generate_content(
+             model=f"models/{self.model.value}",
              contents=history,
-             generation_config=generation_config,
+             config=generation_config,
          )

          assistant_message = response.text
          self.add_assistant_message(assistant_message)

-         # CHANGED: Token usage is extracted from 'usage_metadata'.
          token_usage = TokenUsage(
-             prompt_tokens=response.usage_metadata.prompt_token_count,
-             completion_tokens=response.usage_metadata.candidates_token_count,
-             total_tokens=response.usage_metadata.total_token_count
+             prompt_tokens=0,
+             completion_tokens=0,
+             total_tokens=0
          )

          return CompleteResponse(
@@ -113,11 +107,10 @@ class GeminiLLM(BaseLLM):
          history = _format_gemini_history(self.messages)
          generation_config = self._get_generation_config()

-         # CHANGED: API call for streaming is now part of generate_content_async.
-         response_stream = await self.model.generate_content_async(
+         response_stream = await self.async_client.models.generate_content_stream(
+             model=f"models/{self.model.value}",
              contents=history,
-             generation_config=generation_config,
-             stream=True
+             config=generation_config,
          )

          async for chunk in response_stream:
@@ -130,8 +123,6 @@ class GeminiLLM(BaseLLM):

          self.add_assistant_message(complete_response)

-         # NOTE: The old library's async stream does not easily expose token usage.
-         # Keeping it at 0, consistent with your original implementation.
          token_usage = TokenUsage(
              prompt_tokens=0,
              completion_tokens=0,
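
The hunks above migrate gemini_llm.py from the legacy google.generativeai package to the google-genai SDK: a Client object, client.aio for async calls, and a per-call GenerateContentConfig that now carries the system instruction. For reference, the bare call pattern being adopted looks roughly like this; a sketch against the SDK's public surface with an illustrative model name, not autobyteus code:

    import asyncio
    from google import genai
    from google.genai import types as genai_types

    async def main():
        # genai.Client() picks up GEMINI_API_KEY from the environment.
        client = genai.Client()
        config = genai_types.GenerateContentConfig(system_instruction="Be brief.")
        # Non-streaming call via the async client, mirroring the diff above.
        response = await client.aio.models.generate_content(
            model="models/gemini-2.5-flash",
            contents=[{"role": "user", "parts": [{"text": "Hello"}]}],
            config=config,
        )
        print(response.text)

    asyncio.run(main())
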
--- /dev/null
+++ b/autobyteus/llm/api/qwen_llm.py
@@ -0,0 +1,25 @@
+ import logging
+ from typing import Optional
+ from autobyteus.llm.models import LLMModel
+ from autobyteus.llm.utils.llm_config import LLMConfig
+ from autobyteus.llm.api.openai_compatible_llm import OpenAICompatibleLLM
+
+ logger = logging.getLogger(__name__)
+
+ class QwenLLM(OpenAICompatibleLLM):
+     def __init__(self, model: LLMModel = None, llm_config: LLMConfig = None):
+         if model is None:
+             model = LLMModel['qwen3-max-preview']
+         if llm_config is None:
+             llm_config = LLMConfig()
+
+         super().__init__(
+             model=model,
+             llm_config=llm_config,
+             api_key_env_var="DASHSCOPE_API_KEY",
+             base_url="https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
+         )
+         logger.info(f"QwenLLM initialized with model: {self.model}")
+
+     async def cleanup(self):
+         await super().cleanup()
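
QwenLLM is a thin subclass of OpenAICompatibleLLM pointed at the DashScope compatible-mode endpoint. A minimal construction sketch, assuming DASHSCOPE_API_KEY is set (the actual messaging calls go through the inherited BaseLLM interface, which this diff does not show):

    import asyncio
    from autobyteus.llm.api.qwen_llm import QwenLLM

    async def main():
        llm = QwenLLM()  # defaults to LLMModel['qwen3-max-preview']
        try:
            ...  # send messages via the inherited BaseLLM API
        finally:
            await llm.cleanup()

    asyncio.run(main())
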
--- a/autobyteus/llm/autobyteus_provider.py
+++ b/autobyteus/llm/autobyteus_provider.py
@@ -19,7 +19,9 @@ class AutobyteusModelProvider:

      @staticmethod
      def _get_hosts() -> List[str]:
-         """Gets Autobyteus LLM server hosts from env vars, supporting a comma-separated list."""
+         """
+         Gets Autobyteus LLM server hosts from env vars. Skips discovery if no host is configured.
+         """
          hosts_str = os.getenv('AUTOBYTEUS_LLM_SERVER_HOSTS')
          if hosts_str:
              return [host.strip() for host in hosts_str.split(',')]
@@ -28,7 +30,7 @@ class AutobyteusModelProvider:
          if legacy_host:
              return [legacy_host]

-         return [AutobyteusModelProvider.DEFAULT_SERVER_URL]
+         return []

      @staticmethod
      def discover_and_register():
@@ -37,6 +39,10 @@ class AutobyteusModelProvider:
          from autobyteus.llm.llm_factory import LLMFactory

          hosts = AutobyteusModelProvider._get_hosts()
+         if not hosts:
+             logger.info("No Autobyteus LLM server hosts configured. Skipping Autobyteus LLM model discovery.")
+             return
+
          total_registered_count = 0

          for host_url in hosts:
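
The practical effect: remote model discovery is now opt-in. With no host configured, _get_hosts() returns an empty list and discover_and_register() exits early instead of probing DEFAULT_SERVER_URL. A sketch of the opt-in path (the host URLs are illustrative):

    import os
    from autobyteus.llm.autobyteus_provider import AutobyteusModelProvider

    # Comma-separated host list, as parsed by _get_hosts() above.
    os.environ['AUTOBYTEUS_LLM_SERVER_HOSTS'] = "http://localhost:8000,http://10.0.0.2:8000"
    AutobyteusModelProvider.discover_and_register()
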
--- a/autobyteus/llm/llm_factory.py
+++ b/autobyteus/llm/llm_factory.py
@@ -17,6 +17,7 @@ from autobyteus.llm.api.deepseek_llm import DeepSeekLLM
  from autobyteus.llm.api.gemini_llm import GeminiLLM
  from autobyteus.llm.api.grok_llm import GrokLLM
  from autobyteus.llm.api.kimi_llm import KimiLLM
+ from autobyteus.llm.api.qwen_llm import QwenLLM
  from autobyteus.llm.ollama_provider import OllamaModelProvider
  from autobyteus.llm.lmstudio_provider import LMStudioModelProvider
  from autobyteus.utils.singleton import SingletonMeta
@@ -344,6 +345,21 @@ class LLMFactory(metaclass=SingletonMeta):
                  pricing_config=TokenPricingConfig(27.59, 27.59)
              )
          ),
+         # QWEN Provider Models
+         LLMModel(
+             name="qwen3-max",
+             value="qwen-max",
+             provider=LLMProvider.QWEN,
+             llm_class=QwenLLM,
+             canonical_name="qwen3-max",
+             default_config=LLMConfig(
+                 token_limit=262144,
+                 pricing_config=TokenPricingConfig(
+                     input_token_pricing=2.4,
+                     output_token_pricing=12.0
+                 )
+             )
+         ),
      ]
      for model in supported_models:
          LLMFactory.register_model(model)
--- a/autobyteus/multimedia/audio/api/autobyteus_audio_client.py
+++ b/autobyteus/multimedia/audio/api/autobyteus_audio_client.py
@@ -26,7 +26,8 @@ class AutobyteusAudioClient(BaseAudioClient):
      async def generate_speech(
          self,
          prompt: str,
-         generation_config: Optional[Dict[str, Any]] = None
+         generation_config: Optional[Dict[str, Any]] = None,
+         **kwargs
      ) -> SpeechGenerationResponse:
          """
          Generates speech by calling the generate_speech endpoint on the remote Autobyteus server.
@@ -36,6 +37,8 @@ class AutobyteusAudioClient(BaseAudioClient):

          model_name_for_server = self.model.name

+         # Note: The underlying autobyteus_client.generate_speech does not currently accept **kwargs.
+         # They are accepted here for interface consistency and future-proofing.
          response_data = await self.autobyteus_client.generate_speech(
              model_name=model_name_for_server,
              prompt=prompt,
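
With the widened signature, callers can pass provider-specific options uniformly even though this backend accepts and currently ignores them. A hedged sketch (the config key and extra kwarg are hypothetical):

    # `client` is an already-constructed AutobyteusAudioClient.
    response = await client.generate_speech(
        prompt="Hello, world.",
        generation_config={"voice": "default"},  # hypothetical config key
        sample_rate=24000,                       # extra kwarg: accepted, currently unused here
    )
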