bizyengine 1.2.45__py3-none-any.whl → 1.2.71__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. bizyengine/bizy_server/errno.py +21 -0
  2. bizyengine/bizy_server/server.py +130 -160
  3. bizyengine/bizy_server/utils.py +3 -0
  4. bizyengine/bizyair_extras/__init__.py +38 -31
  5. bizyengine/bizyair_extras/third_party_api/__init__.py +15 -0
  6. bizyengine/bizyair_extras/third_party_api/nodes_doubao.py +535 -0
  7. bizyengine/bizyair_extras/third_party_api/nodes_flux.py +173 -0
  8. bizyengine/bizyair_extras/third_party_api/nodes_gemini.py +403 -0
  9. bizyengine/bizyair_extras/third_party_api/nodes_gpt.py +101 -0
  10. bizyengine/bizyair_extras/third_party_api/nodes_hailuo.py +115 -0
  11. bizyengine/bizyair_extras/third_party_api/nodes_kling.py +404 -0
  12. bizyengine/bizyair_extras/third_party_api/nodes_sora.py +218 -0
  13. bizyengine/bizyair_extras/third_party_api/nodes_veo3.py +193 -0
  14. bizyengine/bizyair_extras/third_party_api/nodes_wan_api.py +198 -0
  15. bizyengine/bizyair_extras/third_party_api/trd_nodes_base.py +183 -0
  16. bizyengine/bizyair_extras/utils/aliyun_oss.py +92 -0
  17. bizyengine/bizyair_extras/utils/audio.py +88 -0
  18. bizyengine/bizybot/__init__.py +12 -0
  19. bizyengine/bizybot/client.py +774 -0
  20. bizyengine/bizybot/config.py +129 -0
  21. bizyengine/bizybot/coordinator.py +556 -0
  22. bizyengine/bizybot/exceptions.py +186 -0
  23. bizyengine/bizybot/mcp/__init__.py +3 -0
  24. bizyengine/bizybot/mcp/manager.py +520 -0
  25. bizyengine/bizybot/mcp/models.py +46 -0
  26. bizyengine/bizybot/mcp/registry.py +129 -0
  27. bizyengine/bizybot/mcp/routing.py +378 -0
  28. bizyengine/bizybot/models.py +344 -0
  29. bizyengine/core/__init__.py +1 -0
  30. bizyengine/core/commands/servers/prompt_server.py +10 -1
  31. bizyengine/core/common/client.py +8 -7
  32. bizyengine/core/common/utils.py +30 -1
  33. bizyengine/core/image_utils.py +12 -283
  34. bizyengine/misc/llm.py +32 -15
  35. bizyengine/misc/utils.py +179 -2
  36. bizyengine/version.txt +1 -1
  37. {bizyengine-1.2.45.dist-info → bizyengine-1.2.71.dist-info}/METADATA +3 -1
  38. {bizyengine-1.2.45.dist-info → bizyengine-1.2.71.dist-info}/RECORD +40 -16
  39. {bizyengine-1.2.45.dist-info → bizyengine-1.2.71.dist-info}/WHEEL +0 -0
  40. {bizyengine-1.2.45.dist-info → bizyengine-1.2.71.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,129 @@
+ """
+ Configuration management module for the MCP Coordinator.
+
+ This module provides data models and validation logic for application configuration.
+ """
+
+ from dataclasses import dataclass, field
+ from typing import Dict, List, Optional
+
+
+ @dataclass
+ class LLMConfig:
+     """Configuration for LLM client."""
+
+     api_key: str
+     base_url: str = "https://api.siliconflow.cn/v1"
+     model: str = "moonshotai/Kimi-K2-Instruct"
+     temperature: float = 0.7
+     max_tokens: Optional[int] = None
+     timeout: float = 30.0
+
+     def __post_init__(self):
+         """Validate LLM configuration after initialization."""
+         self._validate()
+
+     def _validate(self):
+         """Validate LLM configuration parameters."""
+         if not self.api_key:
+             raise ValueError("LLM API key is required")
+
+         if not self.base_url:
+             raise ValueError("LLM base URL is required")
+
+         if not self.base_url.startswith(("http://", "https://")):
+             raise ValueError("LLM base URL must start with http:// or https://")
+
+         if not self.model:
+             raise ValueError("LLM model is required")
+
+         if not isinstance(self.temperature, (int, float)) or not (
+             0.0 <= self.temperature <= 2.0
+         ):
+             raise ValueError("Temperature must be a number between 0.0 and 2.0")
+
+         if self.max_tokens is not None and (
+             not isinstance(self.max_tokens, int) or self.max_tokens <= 0
+         ):
+             raise ValueError("max_tokens must be a positive integer or None")
+
+         if not isinstance(self.timeout, (int, float)) or self.timeout <= 0:
+             raise ValueError("Timeout must be a positive number")
+
+
+ @dataclass
+ class MCPServerConfig:
+     """Configuration for a single MCP server."""
+
+     transport: Optional[str] = None
+
+     # For stdio transport
+     command: Optional[str] = None
+     args: Optional[List[str]] = None
+     env: Optional[Dict[str, str]] = None
+
+     # For HTTP transport
+     url: Optional[str] = None
+     headers: Optional[Dict[str, str]] = None
+
+     # Common settings
+     timeout: float = 30.0
+     retry_attempts: int = 3
+     retry_delay: float = 1.0
+
+     def __post_init__(self):
+         """Validate MCP server configuration after initialization."""
+         self._validate()
+
+     def _validate(self):
+         """Validate MCP server configuration parameters."""
+         if self.transport not in ["stdio", "streamable_http"]:
+             raise ValueError(f"Unsupported transport type: {self.transport}")
+
+         if self.transport == "stdio":
+             if not self.command:
+                 raise ValueError("MCP server command is required for stdio transport")
+         elif self.transport == "streamable_http":
+             if not self.url:
+                 raise ValueError(
+                     "MCP server URL is required for streamable_http transport"
+                 )
+
+             if not self.url.startswith(("http://", "https://")):
+                 raise ValueError("MCP server URL must start with http:// or https://")
+
+         if not isinstance(self.timeout, (int, float)) or self.timeout <= 0:
+             raise ValueError("Timeout must be a positive number")
+
+         if not isinstance(self.retry_attempts, int) or self.retry_attempts < 0:
+             raise ValueError("Retry attempts must be a non-negative integer")
+
+         if not isinstance(self.retry_delay, (int, float)) or self.retry_delay < 0:
+             raise ValueError("Retry delay must be a non-negative number")
+
+
+ @dataclass
+ class Config:
+     """Main application configuration."""
+
+     llm: LLMConfig
+     mcp_servers: Dict[str, MCPServerConfig] = field(default_factory=dict)
+
+     def __post_init__(self):
+         """Validate main configuration after initialization."""
+         self._validate()
+
+     def _validate(self):
+         """Validate main configuration parameters."""
+         if not isinstance(self.llm, LLMConfig):
+             raise ValueError("LLM configuration must be an LLMConfig instance")
+
+         if not isinstance(self.mcp_servers, dict):
+             raise ValueError("MCP servers must be a dictionary")
+
+         # Validate each MCP server configuration
+         for server_name, server_config in self.mcp_servers.items():
+             if not isinstance(server_config, MCPServerConfig):
+                 raise ValueError(
+                     f"MCP server '{server_name}' configuration must be an MCPServerConfig instance"
+                 )
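
The new bizyengine/bizybot/config.py above is consumed by the coordinator below. As a minimal sketch of how the three dataclasses compose (the server names, command, and URL are hypothetical placeholders, not values shipped in the package):

    from bizyengine.bizybot.config import Config, LLMConfig, MCPServerConfig

    config = Config(
        # base_url and model fall back to the siliconflow / Kimi-K2 defaults above
        llm=LLMConfig(api_key="sk-..."),
        mcp_servers={
            # stdio transport requires a command (hypothetical local server)
            "weather": MCPServerConfig(
                transport="stdio", command="python", args=["-m", "weather_mcp"]
            ),
            # streamable_http transport requires an http(s) URL (hypothetical endpoint)
            "image_tools": MCPServerConfig(
                transport="streamable_http", url="https://example.com/mcp"
            ),
        },
    )
    # Invalid values (e.g. temperature=3.0, or a stdio server without a command)
    # raise ValueError from the dataclasses' __post_init__ validation.
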
@@ -0,0 +1,556 @@
+ """
+ Main coordinator class that orchestrates LLM and MCP interactions
+ """
+
+ from typing import Any, AsyncIterator, Dict, List, Optional
+
+ from bizyengine.bizybot.client import LLMClient, ToolCall
+ from bizyengine.bizybot.config import Config
+ from bizyengine.bizybot.exceptions import (
+     CoordinatorError,
+     LLMError,
+     MCPError,
+     ValidationError,
+ )
+ from bizyengine.bizybot.mcp.manager import MCPClientManager
+ from bizyengine.bizybot.models import Conversation
+
+
+ class CoordinatorInitializationError(CoordinatorError):
+     """Coordinator initialization error"""
+
+     pass
+
+
+ class CoordinatorProcessingError(CoordinatorError):
+     """Coordinator message processing error"""
+
+     pass
+
+
+ class Coordinator:
+     """
+     Main coordinator that orchestrates interactions between LLM and MCP servers.
+
+     The coordinator is the central component that:
+     1. Manages conversation state and context
+     2. Coordinates between LLM client and MCP servers
+     3. Handles tool discovery and execution
+     4. Processes streaming responses and tool calls
+     """
+
+     def __init__(self, config: Config):
+         """
+         Initialize coordinator with configuration
+
+         Args:
+             config: Application configuration containing LLM and MCP settings
+         """
+         self.config = config
+         self.llm_client = LLMClient(config.llm)
+         self.mcp_manager = MCPClientManager()
+         self._initialized = False
+
+     async def initialize(self) -> None:
+         """
+         Initialize all coordinator components
+
+         This method:
+         1. Initializes MCP server connections
+         2. Discovers available tools from all servers
+         3. Validates the configuration
+
+         Raises:
+             CoordinatorInitializationError: If initialization fails
+         """
+         if self._initialized:
+             return
+
+         try:
+
+             # Initialize MCP connections first
+             if self.config.mcp_servers:
+                 await self.mcp_manager.initialize_servers(self.config.mcp_servers)
+
+             else:
+                 pass
+
+             self._initialized = True
+
+         except Exception as e:
+             # Cleanup any partial initialization
+             try:
+                 await self.cleanup()
+             except Exception:
+                 pass
+
+             raise CoordinatorInitializationError(f"Initialization failed: {e}")
+
+     async def process_message(
+         self,
+         message: Optional[str] = None,
+         conversation_history: Optional[List[Dict[str, Any]]] = None,
+         llm_config_override: Optional[Dict[str, Any]] = None,
+     ) -> AsyncIterator[Dict[str, Any]]:
+         """
+         Process a user message and return streaming responses (stateless mode)
+
+         This is the main entry point for message processing. It:
+         1. Creates a Conversation object from conversation_history
+         2. Adds the current user message (if provided)
+         3. Gets available tools from MCP servers
+         4. Streams LLM response with tool call handling
+
+         Args:
+             message: Current user input message (optional if conversation_history provided)
+             conversation_history: Complete conversation history in OpenAI format
+             llm_config_override: Optional LLM configuration overrides
+
+         Yields:
+             dict: Streaming response events with types:
+             - content_delta: Incremental content from LLM
+             - reasoning_delta: Reasoning content (for reasoning models)
+             - tool_calls: Tool calls requested by LLM
+             - tool_result: Results from tool execution
+             - tool_error: Tool execution errors
+             - done: Processing complete
+
+         Raises:
+             CoordinatorProcessingError: If message processing fails
+         """
+         if not self._initialized:
+             raise CoordinatorProcessingError("Coordinator not initialized")
+
+         # Validate input
+         if not message and not conversation_history:
+             raise ValidationError(
+                 "Either message or conversation_history must be provided"
+             )
+
+         # If no conversation_history was provided, create an empty list
+         if conversation_history is None:
+             conversation_history = []
+
+         try:
+             # Create a Conversation object
+             from bizyengine.bizybot.models import (
+                 Conversation,
+                 ConversationValidationError,
+             )
+
+             try:
+                 conversation = Conversation.from_openai_format(conversation_history)
+
+                 # Add the current user message
+                 if message and message.strip():
+                     conversation.add_user_message(message.strip())
+
+             except ConversationValidationError as e:
+                 raise ValidationError(f"Invalid conversation_history format: {e}")
+
+             # Get available tools from MCP servers
+             tool_schemas = self.mcp_manager.get_tools_for_llm()
+
+             # Process the conversation with streaming
+             async for event in self._process_llm_stream(
+                 conversation, tool_schemas, llm_config_override
+             ):
+                 yield event
+
+         except Exception as e:
+             yield {"type": "error", "error": str(e), "error_type": type(e).__name__}
+             raise CoordinatorProcessingError(f"Message processing failed: {e}")
+
+     async def _process_llm_stream(
+         self,
+         conversation: "Conversation",
+         tool_schemas: List[Dict[str, Any]],
+         llm_config_override: Optional[Dict[str, Any]] = None,
+     ) -> AsyncIterator[Dict[str, Any]]:
+         """
+         Process LLM streaming response and handle tool calls
+
+         This method implements the core conversation loop:
+         1. Send conversation context to LLM with available tools
+         2. Stream LLM response, handling content and tool calls
+         3. Execute any requested tool calls via MCP
+         4. Continue conversation with tool results until completion
+
+         Args:
+             conversation: Current conversation context (Conversation object)
+             tool_schemas: Available tools in OpenAI format
+             llm_config_override: Optional LLM configuration overrides
+
+         Yields:
+             dict: Streaming response events
+         """
+         try:
+             system_prompt = """# 角色和目标
+ 你是一个专业的AI图像助手。你的核心任务是精确理解用户的请求,并智能地调用合适的工具来完成 “生成新图片” 或 “从现有图片中提取对象” 或 “编辑图片” 的任务。你的决策必须准确无误。
+ # 工作流程与决策逻辑
+ 你的工作流程必须遵循以下逻辑:
+
+ 意图识别 (最重要):
+
+ 生成意图: 用户的请求是凭空创造内容吗?(例如:“画一只戴着帽子的狗”) -> 使用 text2image。
+
+ 提取意图: 用户的请求是基于一张已经存在的图片进行操作吗?(例如:“把这张图里的狗抠出来”) -> 使用 extract_image。
+
+ 处理模糊请求:
+
+ 如果用户的请求非常模糊(例如:“我想要一辆车”),你 绝不能猜测。
+
+ 你必须主动提问以澄清用户的真实意图。标准问法:“请问您是想让我生成一张新的关于‘车’的图片,还是您已经有一张图片,需要我从中提取出‘车’?”
+
+ 参数检查:
+
+ 在调用任何工具之前,必须在内心确认所有 必需 参数都已具备。
+
+ 调用 extract_image 前,必须确认你 同时拥有 用户的图片和要提取的对象名称。
+
+ 回复:在调用工具前先提示用户你即将调用工具,在拿到工具的结果之后再进行总结,并将工具结果返回给用户
+
+ # 示例
+ 示例 1: 正确使用 text2image
+ 用户: “给我画一张赛博朋克风格的东京夜景,要有很多霓虹灯。”
+
+ 你的思考: 用户的意图是“画”,是创造新图片。我应该使用 text2image。
+
+ 工具调用: print(text2image(user_prompt="赛博朋克风格的东京夜景,有很多霓虹灯", width=1024, height=1024))
+
+ 示例 2: 正确使用 extract_image
+ 用户: [上传一张家庭聚会照片] “把照片里那个穿红色裙子的小女孩提取出来。”
+
+ 你的思考: 用户提供了一张图片,并要求“提取”其中的特定人物。我应该使用 extract_image。
+
+ 工具调用: print(extract_image(image=[上传的图片数据], value="穿红色裙子的小女孩"))
+
+ 示例 3: 正确处理模糊请求
+ 用户: “帮我弄一只猫。”
+
+ 你的思考: “弄”这个词太模糊了。我不知道是生成还是提取。我必须提问。
+
+ 你的回复 (对用户): “好的!请问您是想让我为您画一只全新的猫,还是您有一张包含猫的图片,需要我帮您把猫提取出来?”
+
+ converation_history中保存的是与用户会话的历史记录,里面可能包含需要编辑的图片的url
+
+ """
+             conversation.add_system_message(system_prompt)
+             # Prepare messages for LLM
+             messages = conversation.get_openai_messages_for_llm()
+
+             # Prepare LLM parameters
+             llm_params = {}
+             if llm_config_override:
+                 llm_params.update(llm_config_override)
+
+             # Start streaming from LLM
+             stream = await self.llm_client.chat_completion(
+                 messages=messages,
+                 tools=tool_schemas if tool_schemas else None,
+                 stream=True,
+                 **llm_params,
+             )
+
+             # Process streaming response
+             accumulated_content = ""
+             accumulated_reasoning = ""
+             tool_calls_completed = []
+
+             # Reset the streaming tool-call handler state so every conversation starts clean
+             self.llm_client.clear_streaming_tool_calls()
+
+             async for chunk in stream:
+                 # Debug: Log chunk structure (using INFO for visibility)
+                 # logger.info(f"Received chunk: {chunk}")
+
+                 # Handle content deltas
+                 if chunk.get("content"):
+                     accumulated_content += chunk["content"]
+                     yield {"type": "content_delta", "content": chunk["content"]}
+
+                 # Handle reasoning content (for reasoning models)
+                 if chunk.get("reasoning_content"):
+                     accumulated_reasoning += chunk["reasoning_content"]
+                     yield {
+                         "type": "reasoning_delta",
+                         "reasoning_content": chunk["reasoning_content"],
+                     }
+
+                 # Handle tool calls - accumulate tool call data; finalized on the finish chunk
+                 tool_calls_completed = (
+                     self.llm_client.process_streaming_tool_calls_incremental(chunk)
+                 )
+
+                 # Handle completion - tool calls are finalized and handled only here
+                 if chunk.get("finish_reason"):
+
+                     # First check - finalize tool call parsing
+                     if chunk["finish_reason"] == "tool_calls":
+                         """
+                         Assemble the partial data accumulated during streaming (IDs, function names, argument fragments) into complete tool call objects
+                         Validate that the JSON argument format is correct
+                         Create ToolCall objects
+                         No actual tool functionality is executed here
+                         """
+                         if tool_calls_completed:
+
+                             yield {
+                                 "type": "tool_calls",
+                                 "tool_calls": [
+                                     tc.to_dict() for tc in tool_calls_completed
+                                 ],
+                             }
+                         # else:
+                         #     If no tool calls were completed, the current state can be read here for debugging
+                         #     If a tool command is incomplete, it can be fetched here and passed back to the LLM for a retry
+                         #     self.llm_client.get_streaming_tool_call_status()
+
+                     # Second check - execute tool calls or finish the conversation
+                     if chunk["finish_reason"] == "tool_calls" and tool_calls_completed:
+                         # Execute the tool calls and continue the conversation
+                         async for tool_event in self._execute_tool_calls_and_continue(
+                             conversation,
+                             tool_calls_completed,
+                             accumulated_content,
+                             accumulated_reasoning,
+                             tool_schemas,
+                             llm_config_override,
+                         ):
+                             yield tool_event
+                     else:
+                         # Normal completion - add the assistant message to the conversation
+                         if accumulated_content or accumulated_reasoning:
+                             conversation.add_assistant_message(
+                                 content=accumulated_content or None
+                             )
+
+                         yield {"type": "done", "finish_reason": chunk["finish_reason"]}
+                         break
+
+         except LLMError as e:
+             yield {
+                 "type": "error",
+                 "error": f"LLM error: {str(e)}",
+                 "error_type": "LLMError",
+             }
+         except Exception as e:
+             yield {
+                 "type": "error",
+                 "error": f"Streaming error: {str(e)}",
+                 "error_type": type(e).__name__,
+             }
+
+     async def _execute_tool_calls_and_continue(
+         self,
+         conversation: Conversation,
+         tool_calls: List[ToolCall],
+         assistant_content: str,
+         assistant_reasoning: str,
+         tool_schemas: List[Dict[str, Any]],
+         llm_config_override: Optional[Dict[str, Any]] = None,
+     ) -> AsyncIterator[Dict[str, Any]]:
+         """
+         Execute tool calls and continue the conversation
+
+         This method:
+         1. Adds the assistant message with tool calls to conversation
+         2. Executes all tool calls (potentially in parallel)
+         3. Adds tool results to conversation
+         4. Continues LLM conversation to generate final response
+
+         Args:
+             conversation: Current conversation
+             tool_calls: List of tool calls to execute
+             assistant_content: Assistant's content before tool calls
+             assistant_reasoning: Assistant's reasoning content
+             tool_schemas: Available tool schemas
+             llm_config_override: Optional LLM config overrides
+
+         Yields:
+             dict: Tool execution and continuation events
+         """
+         try:
+             # Add assistant message with tool calls to conversation
+             from bizyengine.bizybot.models import Message
+
+             assistant_message = Message(
+                 role="assistant",
+                 content=assistant_content if assistant_content else None,
+                 reasoning_content=assistant_reasoning if assistant_reasoning else None,
+                 tool_calls=tool_calls,
+             )
+             conversation.add_message(assistant_message)
+
+             # Execute tool calls
+             tool_call_dicts = [tc.to_dict() for tc in tool_calls]
+
+             # Execute tool calls via MCP manager (supports parallel execution)
+             tool_results = await self.mcp_manager.execute_tool_calls(tool_call_dicts)
+
+             # Process tool results and add to conversation
+             for i, result in enumerate(tool_results):
+                 tool_call = tool_calls[i]
+
+                 if result.get("success", False):
+                     # Successful tool execution
+                     tool_content = result.get("content", "")
+
+                     yield {
+                         "type": "tool_result",
+                         "tool_call_id": tool_call.id,
+                         "result": tool_content,
+                         "server_name": result.get("_mcp_server"),
+                     }
+
+                     # Add tool result to conversation - use the formatted content
+                     conversation.add_tool_result(tool_call.id, tool_content)
+
+                 else:
+                     # Tool execution error
+                     error_msg = result.get("content", "Unknown tool execution error")
+                     yield {
+                         "type": "tool_error",
+                         "tool_call_id": tool_call.id,
+                         "error": error_msg,
+                     }
+
+                     # Add error result to conversation
+                     conversation.add_tool_result(tool_call.id, f"Error: {error_msg}")
+
+             # Stream response from LLM - may include more tool calls
+             # Recursively call _process_llm_stream to handle possible multi-round tool calls
+             async for event in self._process_llm_stream(
+                 conversation, tool_schemas, llm_config_override
+             ):
+                 yield event
+
+         except MCPError as e:
+             yield {
+                 "type": "error",
+                 "error": f"Tool execution error: {str(e)}",
+                 "error_type": "MCPError",
+             }
+         except Exception as e:
+             yield {
+                 "type": "error",
+                 "error": f"Tool execution error: {str(e)}",
+                 "error_type": type(e).__name__,
+             }
+
+     async def get_available_tools(self) -> List[Dict[str, Any]]:
+         """
+         Get list of all available tools from MCP servers
+
+         Returns:
+             List of tool definitions in OpenAI format
+         """
+         if not self._initialized:
+             return []
+
+         return self.mcp_manager.get_tools_for_llm()
+
+     def get_server_status(self) -> Dict[str, Any]:
+         """
+         Get status of all MCP servers and coordinator
+
+         Returns:
+             Status dictionary with server and coordinator information
+         """
+         server_status = self.mcp_manager.get_server_status()
+
+         return {
+             "coordinator": {
+                 "initialized": self._initialized,
+                 "llm_model": self.config.llm.model,
+                 "llm_base_url": self.config.llm.base_url,
+                 "mode": "stateless",
+             },
+             "mcp_servers": server_status,
+             "conversations": {
+                 "mode": "stateless",
+                 "note": "Conversations are managed by client in stateless mode",
+             },
+         }
+
+     async def cleanup(self) -> None:
+         """
+         Cleanup all coordinator resources
+
+         This method:
+         1. Closes LLM client connections
+         2. Cleans up all MCP connections
+         3. Resets initialization status
+         """
+
+         try:
+             # Cleanup LLM client
+             if self.llm_client:
+                 await self.llm_client.close()
+
+             # Cleanup MCP manager
+             if self.mcp_manager:
+                 await self.mcp_manager.cleanup()
+
+             self._initialized = False
+
+         except Exception as e:
+             raise CoordinatorError(f"Cleanup failed: {e}")
+
+     def is_initialized(self) -> bool:
+         """Check if coordinator is initialized"""
+         return self._initialized
+
+     def format_streaming_event(self, event: dict) -> dict:
+         """
+         Format streaming event according to design specification
+
+         Ensures all streaming events follow the correct format:
+         - content_delta: {"type": "content_delta", "content": "..."}
+         - reasoning_delta: {"type": "reasoning_delta", "reasoning_content": "..."}
+         - tool_calls: {"type": "tool_calls", "tool_calls": [...]}
+         - tool_result: {"type": "tool_result", "tool_call_id": "...", "result": {...}}
+         - tool_error: {"type": "tool_error", "tool_call_id": "...", "error": "..."}
+         - done: {"type": "done", "finish_reason": "..."}
+         """
+         event_type = event.get("type")
+
+         # Validate and format based on event type
+         if event_type == "content_delta":
+             return {"type": "content_delta", "content": event.get("content", "")}
+         elif event_type == "reasoning_delta":
+             return {
+                 "type": "reasoning_delta",
+                 "reasoning_content": event.get("reasoning_content", ""),
+             }
+         elif event_type == "tool_calls":
+             return {"type": "tool_calls", "tool_calls": event.get("tool_calls", [])}
+         elif event_type == "tool_result":
+             return {
+                 "type": "tool_result",
+                 "tool_call_id": event.get("tool_call_id"),
+                 "result": event.get("result"),
+                 "server_name": event.get("server_name"),
+             }
+         elif event_type == "tool_error":
+             return {
+                 "type": "tool_error",
+                 "tool_call_id": event.get("tool_call_id"),
+                 "error": event.get("error"),
+             }
+         elif event_type == "done":
+             return {"type": "done", "finish_reason": event.get("finish_reason", "stop")}
+         elif event_type == "error":
+             return {
+                 "type": "error",
+                 "error": event.get("error"),
+                 "error_type": event.get("error_type"),
+             }
+         elif event_type == "conversation_started":
+             return {
+                 "type": "conversation_started",
+                 "conversation_id": event.get("conversation_id"),
+             }
+         else:
+             # Pass through unknown event types
+             return event
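
Taken together, a minimal, hypothetical driver for the coordinator's stateless streaming API might look like the following (the asyncio entry point, API key, and prompt are illustrative and not part of the package):

    import asyncio

    from bizyengine.bizybot.config import Config, LLMConfig
    from bizyengine.bizybot.coordinator import Coordinator


    async def main() -> None:
        coordinator = Coordinator(Config(llm=LLMConfig(api_key="sk-...")))
        await coordinator.initialize()
        try:
            # Stateless mode: pass the prior history on each call, or just a new message.
            async for event in coordinator.process_message(message="Draw a dog wearing a hat"):
                if event["type"] == "content_delta":
                    print(event["content"], end="", flush=True)
                elif event["type"] == "tool_result":
                    print(f"\n[tool {event['tool_call_id']}] {event['result']}")
                elif event["type"] in ("tool_error", "error"):
                    print(f"\n[{event['type']}] {event['error']}")
                elif event["type"] == "done":
                    break
        finally:
            await coordinator.cleanup()


    asyncio.run(main())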