myagent-ai 1.5.9 → 1.6.1

package/agents/base.py CHANGED
@@ -109,6 +109,235 @@ class BaseAgent(ABC):
             logger.error(f"{self.name} LLM 调用失败: {response.error}")
         return response
 
+    async def _call_llm_stream(self, messages, tools=None, stream_response=None, **kwargs):
+        """Call the LLM and stream tokens to the SSE response.
+
+        When stream_response is provided, content is written to the SSE stream
+        token by token. Tool-call deltas are accumulated alongside, and a
+        complete LLMResponse is returned when the stream ends.
+        """
+        if not self.llm:
+            return LLMResponse(success=False, error="LLM 未初始化")
+
+        # If no stream_response, fall back to non-streaming
+        if not stream_response:
+            return await self._call_llm(messages, tools=tools, **kwargs)
+
+        import asyncio as _asyncio
+
+        self.llm._ensure_client()
+        msg_dicts = [m.to_dict() if hasattr(m, 'to_dict') else m for m in messages]
+        request_kwargs = {
+            "model": self.llm.model,
+            "messages": msg_dicts,
+            "temperature": self.llm.temperature,
+            "max_tokens": self.llm.max_tokens,
+            "stream": True,
+        }
+        if tools:
+            request_kwargs["tools"] = tools
+            request_kwargs["tool_choice"] = "auto"
+        request_kwargs.update(kwargs)
+
+        full_text = ""
+        tool_calls_acc: Dict[int, Dict] = {}  # index -> {id, name, arguments_str}
+        finish_reason = ""
+
+        async def _write_sse(data: dict):
+            """Write one event to the SSE stream, ignoring client-disconnect errors"""
+            try:
+                await stream_response.write(
+                    ("data: " + json.dumps(data, ensure_ascii=False) + "\n\n").encode()
+                )
+            except Exception:
+                pass  # Client disconnected
+
+        try:
+            if self.llm.provider in self.llm._OPENAI_COMPATIBLE_PROVIDERS or self.llm.provider == "zhipu":
+                loop = _asyncio.get_running_loop()
+
+                def _create_stream():
+                    return self.llm._client.chat.completions.create(**request_kwargs)
+
+                stream = await loop.run_in_executor(None, _create_stream)
+
+                def _next_chunk(it):
+                    try:
+                        return next(it)
+                    except StopIteration:
+                        return None
+
+                iterator = iter(stream)
+                while True:
+                    chunk = await loop.run_in_executor(None, _next_chunk, iterator)
+                    if chunk is None:
+                        break
+                    if not chunk.choices:
+                        if hasattr(chunk, 'usage') and chunk.usage:
+                            self.llm._record_usage(
+                                {"prompt_tokens": chunk.usage.prompt_tokens,
+                                 "completion_tokens": chunk.usage.completion_tokens,
+                                 "total_tokens": chunk.usage.total_tokens},
+                                request_kwargs["model"],
+                            )
+                        continue
+
+                    delta = chunk.choices[0].delta
+                    if chunk.choices[0].finish_reason:
+                        finish_reason = chunk.choices[0].finish_reason
+
+                    # Handle content delta (stream to client)
+                    if delta.content:
+                        full_text += delta.content
+                        await _write_sse({"type": "text_delta", "content": delta.content})
+
+                    # Handle tool_call deltas (accumulate)
+                    if hasattr(delta, 'tool_calls') and delta.tool_calls:
+                        for tc_delta in delta.tool_calls:
+                            idx = tc_delta.index if hasattr(tc_delta, 'index') else 0
+                            if idx not in tool_calls_acc:
+                                tool_calls_acc[idx] = {"id": "", "name": "", "arguments": ""}
+                            if tc_delta.id:
+                                tool_calls_acc[idx]["id"] = tc_delta.id
+                            if hasattr(tc_delta, 'function') and tc_delta.function:
+                                if tc_delta.function.name:
+                                    tool_calls_acc[idx]["name"] = tc_delta.function.name
+                                if tc_delta.function.arguments:
+                                    tool_calls_acc[idx]["arguments"] += tc_delta.function.arguments
+
+                    # Handle usage in final chunk
+                    if hasattr(chunk, 'usage') and chunk.usage:
+                        self.llm._record_usage(
+                            {"prompt_tokens": chunk.usage.prompt_tokens,
+                             "completion_tokens": chunk.usage.completion_tokens,
+                             "total_tokens": chunk.usage.total_tokens},
+                            request_kwargs["model"],
+                        )
+
+            elif self.llm.provider == "anthropic":
+                loop = _asyncio.get_running_loop()
+
+                system_msg = ""
+                anth_messages = []
+                for m in messages:
+                    role = m.role if hasattr(m, 'role') else m.get("role", "user")
+                    content = m.content if hasattr(m, 'content') else m.get("content", "")
+                    if role == "system":
+                        system_msg = content
+                        continue
+                    anth_messages.append({"role": role, "content": content})
+
+                create_kwargs = {
+                    "model": self.llm.model,
+                    "messages": anth_messages,
+                    "max_tokens": self.llm.max_tokens,
+                    "stream": True,
+                }
+                if system_msg:
+                    create_kwargs["system"] = system_msg
+
+                def _create_stream():
+                    return self.llm._client.messages.create(**create_kwargs)
+
+                stream = await loop.run_in_executor(None, _create_stream)
+
+                def _next_event(it):
+                    try:
+                        return next(it)
+                    except StopIteration:
+                        return None
+
+                iterator = iter(stream)
+                while True:
+                    event = await loop.run_in_executor(None, _next_event, iterator)
+                    if event is None:
+                        break
+                    if event.type == "content_block_delta":
+                        if hasattr(event.delta, "text"):
+                            full_text += event.delta.text
+                            await _write_sse({"type": "text_delta", "content": event.delta.text})
+                    elif event.type == "message_stop":
+                        finish_reason = "stop"
+
+            elif self.llm.provider == "ollama":
+                loop = _asyncio.get_running_loop()
+                import requests as req_lib
+
+                url = f"{self.llm.base_url}/api/chat"
+                payload = {
+                    "model": self.llm.model,
+                    "messages": msg_dicts,
+                    "stream": True,
+                    "options": {
+                        "temperature": self.llm.temperature,
+                        "num_predict": self.llm.max_tokens,
+                    },
+                }
+
+                def _request():
+                    r = req_lib.post(url, json=payload, stream=True, timeout=self.llm.timeout)
+                    r.raise_for_status()
+                    return r.iter_lines()
+
+                lines_iter = await loop.run_in_executor(None, _request)
+
+                def _next_line(it):
+                    try:
+                        return next(it)
+                    except StopIteration:
+                        return None
+
+                iterator = iter(lines_iter)
+                while True:
+                    line = await loop.run_in_executor(None, _next_line, iterator)
+                    if line is None:
+                        break
+                    try:
+                        data = json.loads(line.decode('utf-8') if isinstance(line, bytes) else line)
+                        content = data.get("message", {}).get("content", "")
+                        if content:
+                            full_text += content
+                            await _write_sse({"type": "text_delta", "content": content})
+                        if data.get("done"):
+                            finish_reason = "stop"
+                            # Record usage from Ollama
+                            if data.get("prompt_eval_count"):
+                                self.llm._record_usage(
+                                    {"prompt_tokens": data.get("prompt_eval_count", 0),
+                                     "completion_tokens": data.get("eval_count", 0),
+                                     "total_tokens": data.get("prompt_eval_count", 0) + data.get("eval_count", 0)},
+                                    self.llm.model,
+                                )
+                    except Exception:
+                        continue
+            else:
+                return LLMResponse(success=False, error="未知提供商,不支持流式")
+
+            # Build tool_calls list from accumulated deltas
+            final_tool_calls = []
+            for idx in sorted(tool_calls_acc.keys()):
+                tc = tool_calls_acc[idx]
+                try:
+                    args = json.loads(tc["arguments"]) if tc["arguments"] else {}
+                except json.JSONDecodeError:
+                    args = {}
+                final_tool_calls.append({
+                    "id": tc["id"],
+                    "name": tc["name"],
+                    "arguments": args,
+                })
+
+            return LLMResponse(
+                success=True,
+                content=full_text,
+                tool_calls=final_tool_calls,
+                finish_reason=finish_reason,
+                model=request_kwargs.get("model", self.llm.model),
+            )
+        except Exception as e:
+            logger.error(f"LLM 流式调用失败: {e}")
+            return LLMResponse(success=False, error=str(e))
+
     async def _call_llm_json(self, messages: List[Message], **kwargs) -> Dict[str, Any]:
         """Call the LLM and get a JSON response"""
         if not self.llm:
@@ -416,8 +416,8 @@ class MainAgent(BaseAgent):
         system_prompt += "\n\n" + org_context
 
         # Inject the agent-specific system_prompt (if overridden)
-        agent_prompt = getattr(self, '_agent_override_prompt', None)
-        agent_path = getattr(self, '_agent_override_path', None)
+        agent_prompt = context.metadata.get("agent_override_prompt") if context.metadata else None
+        agent_path = context.metadata.get("agent_override_path") if context.metadata else None
         if agent_prompt:
             prefix = f"## 当前角色\n你当前正在扮演「{agent_path}」Agent。请严格遵循以下角色设定:\n\n"
             system_prompt += "\n\n" + prefix + agent_prompt
@@ -426,7 +426,7 @@ class MainAgent(BaseAgent):
         system_prompt += "\n\n## 对话规则\n- 绝对不要在回复开头进行自我介绍(如'你好,我是XXX'),直接回答用户的问题或执行任务\n- 不要重复问候,除非用户主动打招呼"
 
         # Execution mode: emphasize proactive execution
-        chat_mode = getattr(self, '_chat_mode', '')
+        chat_mode = (context.metadata.get("chat_mode") or '') if context.metadata else ''
         if chat_mode == 'exec':
             system_prompt += "\n\n## 执行模式 (当前激活)\n你当前处于执行模式,请务必主动使用可用工具执行操作,而不是只提供建议或反问用户。\n- 优先使用技能系统(skill)完成任务\n- 需要执行代码时,直接使用 code action 执行\n- 遇到不确定的操作,先尝试执行,失败后再调整\n- 不要反复询问用户是否要执行,直接执行并报告结果"
 
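The only contract _call_llm_stream places on stream_response is an awaitable write(bytes) method, so it can be exercised without a real aiohttp connection. A minimal sketch, assuming a hypothetical FakeStream collector, plain-dict messages, and a successful text-only call (no tool calls):

    import asyncio
    import json

    class FakeStream:
        """Hypothetical stand-in for aiohttp's StreamResponse, for testing."""
        def __init__(self):
            self.events = []

        async def write(self, raw: bytes):
            # Each SSE frame is "data: <json>\n\n"; decode and collect the payloads
            for part in raw.decode().split("\n\n"):
                if part.startswith("data: "):
                    self.events.append(json.loads(part[len("data: "):]))

    async def demo(agent):
        fake = FakeStream()
        messages = [{"role": "user", "content": "hi"}]
        # With stream_response=None this would fall back to agent._call_llm(...)
        resp = await agent._call_llm_stream(messages, stream_response=fake)
        streamed = "".join(e["content"] for e in fake.events if e.get("type") == "text_delta")
        assert resp.content == streamed  # accumulated deltas equal the final content

    # asyncio.run(demo(agent))  # requires a configured BaseAgent instance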
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "myagent-ai",
-  "version": "1.5.9",
+  "version": "1.6.1",
   "description": "本地桌面端执行型AI助手 - Open Interpreter 风格 | Local Desktop Execution-Oriented AI Assistant",
   "main": "main.py",
   "bin": {
package/web/api_server.py CHANGED
@@ -235,6 +235,7 @@ class ApiServer:
         r.add_get("/api/logs", self.handle_get_logs)
         r.add_get("/api/logs/stream", self.handle_log_stream)
         r.add_post("/api/chat", self.handle_chat)
+        r.add_post("/api/chat/stream", self.handle_chat_stream)
         r.add_get("/chat", self.handle_chat_page)
         r.add_get("/api/execution/progress", self.handle_execution_progress)
         # ── Organization management ──
@@ -393,18 +394,23 @@ class ApiServer:
         chat_mode = data.get("mode", "")  # "exec" = execution mode
         escalated = data.get("escalated", False)  # temporary escalation to local
 
-        # ── Global execution-lock check ──
+        # ── Global execution-lock check + acquire (atomic: no await between check and set) ──
         agent_cfg_early = self._read_agent_config(agent_path)
         execution_mode = agent_cfg_early.get("execution_mode", "sandbox") if agent_cfg_early else "sandbox"
         needs_lock_check = (execution_mode == "local") or escalated
-        if needs_lock_check and self._execution_lock["locked"]:
-            locked_by = self._execution_lock["locked_by"]
-            if locked_by and locked_by != agent_path:
-                return web.json_response({
-                    "error": f"该Agent当前无法以本地模式运行,因为全局锁被 {locked_by} 持有。请先释放锁或切换到沙盒模式。",
-                    "locked_by": locked_by,
-                    "locked_at": self._execution_lock.get("locked_at"),
-                }, status=423)
+        if needs_lock_check:
+            if self._execution_lock["locked"]:
+                locked_by = self._execution_lock["locked_by"]
+                if locked_by and locked_by != agent_path:
+                    return web.json_response({
+                        "error": f"该Agent当前无法以本地模式运行,因为全局锁被 {locked_by} 持有。请先释放锁或切换到沙盒模式。",
+                        "locked_by": locked_by,
+                        "locked_at": self._execution_lock.get("locked_at"),
+                    }, status=423)
+            # Acquire lock atomically (no await between check and set)
+            self._execution_lock["locked"] = True
+            self._execution_lock["locked_by"] = agent_path
+            self._execution_lock["locked_at"] = time.time()
 
         # ── Execution mode: inject task-planning context ──
         task_plan_context = ""
@@ -466,6 +472,134 @@ class ApiServer:
         except Exception as e:
             logger.error(f"Chat error: {e}", exc_info=True)
             return web.json_response({"error": str(e)}, status=500)
+        finally:
+            # Release execution lock if we acquired it
+            if needs_lock_check and self._execution_lock["locked_by"] == agent_path:
+                self._execution_lock["locked"] = False
+                self._execution_lock["locked_by"] = None
+                self._execution_lock["locked_at"] = None
+
+    async def handle_chat_stream(self, request):
+        """POST /api/chat/stream - SSE streaming chat"""
+        try:
+            data = await request.json()
+        except Exception:
+            return web.Response(text="data: " + json.dumps({"error": "invalid JSON"}) + "\n\n", content_type="text/event-stream")
+
+        message = data.get("message", "").strip()
+        if not message:
+            return web.Response(text="data: " + json.dumps({"error": "message is required"}) + "\n\n", content_type="text/event-stream")
+
+        agent_path = data.get("agent_path", data.get("agent_name", "default")) or "default"
+        raw_session_id = data.get("session_id", "") or "web_default"
+        session_id = f"{agent_path}_{raw_session_id}"
+        chat_mode = data.get("mode", "")
+        escalated = data.get("escalated", False)
+
+        # Lock check
+        agent_cfg_early = self._read_agent_config(agent_path)
+        execution_mode = agent_cfg_early.get("execution_mode", "sandbox") if agent_cfg_early else "sandbox"
+        needs_lock_check = (execution_mode == "local") or escalated
+        if needs_lock_check and self._execution_lock["locked"]:
+            locked_by = self._execution_lock["locked_by"]
+            if locked_by and locked_by != agent_path:
+                error_data = json.dumps({"error": f"执行锁被 {locked_by} 持有", "locked": True})
+                return web.Response(text="data: " + error_data + "\n\n", content_type="text/event-stream")
+
+        # Acquire execution lock if needed
+        if needs_lock_check:
+            self._execution_lock["locked"] = True
+            self._execution_lock["locked_by"] = agent_path
+            self._execution_lock["locked_at"] = time.time()
+
+        response = web.StreamResponse(
+            status=200,
+            headers={
+                "Content-Type": "text/event-stream",
+                "Cache-Control": "no-cache",
+                "Connection": "keep-alive",
+                "X-Accel-Buffering": "no",
+            }
+        )
+        await response.prepare(request)
+
+        try:
+            agent_cfg = self._read_agent_config(agent_path)
+            model_chain = self._build_model_chain(agent_cfg, agent_path)
+
+            # Build context with task-plan injection (kept SEPARATE from the user message)
+            task_plan_context = self._build_task_plan_context(agent_path, chat_mode, message)
+            clean_message, agent_system_prompt = self._build_agent_chat_context(agent_path, agent_cfg, message)
+            if task_plan_context:
+                clean_message = clean_message + task_plan_context
+
+            # Send session info first
+            await response.write(("data: " + json.dumps({"type": "session", "session_id": session_id, "agent_path": agent_path}) + "\n\n").encode())
+
+            # Use streaming LLM call
+            if model_chain and self.core.llm:
+                full_response = await self._try_model_chain_stream(
+                    model_chain, clean_message, session_id,
+                    agent_path=agent_path, agent_system_prompt=agent_system_prompt,
+                    chat_mode=chat_mode, stream_response=response,
+                )
+            else:
+                full_response = await self.core.process_message(clean_message, session_id)
+                await response.write(("data: " + json.dumps({"type": "text", "content": full_response}) + "\n\n").encode())
+
+            # Collect exec events
+            exec_events = []
+            if self.core.main_agent:
+                exec_events = self.core.main_agent.get_execution_events()
+
+            # Update task plan from response
+            if chat_mode == "exec" and self.core.llm and full_response:
+                try:
+                    await self._extract_and_update_task_plan(agent_path, full_response)
+                except Exception:
+                    pass
+
+            # Send done event
+            done_data = {"type": "done", "exec_events": exec_events}
+            await response.write(("data: " + json.dumps(done_data) + "\n\n").encode())
+
+        except Exception as e:
+            logger.error(f"Stream chat error: {e}", exc_info=True)
+            await response.write(("data: " + json.dumps({"type": "error", "error": str(e)}) + "\n\n").encode())
+        finally:
+            # Release execution lock
+            if needs_lock_check and self._execution_lock["locked_by"] == agent_path:
+                self._execution_lock["locked"] = False
+                self._execution_lock["locked_by"] = None
+                self._execution_lock["locked_at"] = None
+            try:
+                await response.write_eof()
+            except Exception:
+                pass
+
+        return response
+
+    def _build_task_plan_context(self, agent_path: str, chat_mode: str, original_message: str) -> str:
+        """Build the task-plan context (without modifying the original message)"""
+        if chat_mode != "exec":
+            return ""
+        tp = self._task_md_path(agent_path)
+        if not tp.exists():
+            # No existing task plan - tell the agent to create one
+            return "\n\n## 任务规划\n你当前处于执行模式。请先分析用户需求,制定任务计划,然后逐步执行。\n在回复开头用以下格式列出任务计划:\n```\n## 任务计划\n- [ ] 步骤1\n- [ ] 步骤2\n```\n然后开始执行第一个步骤。"
+        existing_content = tp.read_text(encoding="utf-8")
+        tasks = self._parse_task_md(existing_content)
+        if not tasks:
+            return "\n\n## 任务规划\n你当前处于执行模式。请先分析用户需求,制定任务计划,然后逐步执行。\n在回复开头用以下格式列出任务计划:\n```\n## 任务计划\n- [ ] 步骤1\n- [ ] 步骤2\n```\n然后开始执行第一个步骤。"
+        pending = [f" - [ ] {t['text']}" for t in tasks if not t['done']]
+        done = [f" - [x] {t['text']}" for t in tasks if t['done']]
+        context = "\n\n## 当前任务计划 (task.md)\n"
+        if done:
+            context += "已完成:\n" + "\n".join(done) + "\n"
+        if pending:
+            context += "待完成:\n" + "\n".join(pending) + "\n"
+        context += "\n请继续执行任务。执行完成后更新任务计划状态。在回复开头包含更新后的任务计划:\n```\n## 任务计划\n- [x] 已完成的步骤\n- [ ] 下一步骤\n```\n"
+        return context
 
     async def handle_chat_page(self, request):
         """GET /chat - redirect to the chat page"""
@@ -2201,6 +2335,266 @@ class ApiServer:
         # All models failed
         return f"⚠️ 所有模型均调用失败 (共 {len(model_chain)} 个)。最后错误: {last_error}"
 
+    async def _try_model_chain_stream(self, model_chain, message, session_id,
+                                      agent_path=None, agent_system_prompt=None,
+                                      chat_mode="", stream_response=None):
+        """Streaming version of the model-chain call; emits tokens to SSE"""
+        if not model_chain:
+            result = await self.core.process_message(message, session_id)
+            await stream_response.write(("data: " + json.dumps({"type": "text", "content": result}) + "\n\n").encode())
+            return result
+
+        llm = self.core.llm
+        full_text = ""
+
+        for i, mc in enumerate(model_chain):
+            orig = {
+                "provider": llm.provider, "model": llm.model, "base_url": llm.base_url,
+                "api_key": llm.api_key, "temperature": llm.temperature, "max_tokens": llm.max_tokens,
+            }
+            try:
+                if "provider" in mc: llm.provider = mc["provider"]
+                if "model" in mc: llm.model = mc["model"]
+                if "base_url" in mc: llm.base_url = mc["base_url"]
+                if "api_key" in mc: llm.api_key = mc["api_key"]
+                if "temperature" in mc: llm.temperature = mc["temperature"]
+                if "max_tokens" in mc: llm.max_tokens = mc["max_tokens"]
+                llm._client = None
+
+                # Pass agent context through AgentContext instead of instance attrs
+                result = await self._stream_process_message(
+                    message, session_id, stream_response,
+                    agent_path=agent_path, agent_system_prompt=agent_system_prompt,
+                    chat_mode=chat_mode,
+                )
+                if result and not result.startswith("⚠️") and not result.startswith("❌"):
+                    return result
+            except Exception as e:
+                logger.warning(f"模型 {i} 流式调用失败: {e}")
+            finally:
+                llm.provider = orig["provider"]; llm.model = orig["model"]
+                llm.base_url = orig["base_url"]; llm.api_key = orig["api_key"]
+                llm.temperature = orig["temperature"]; llm.max_tokens = orig["max_tokens"]
+
+        return full_text
+
+    async def _stream_process_message(self, user_message, session_id, stream_response,
+                                      agent_path=None, agent_system_prompt=None, chat_mode=""):
+        """Process a message with streaming LLM calls, supporting the full agent loop
+        (tool calls / action execution) with real-time streamed output.
+
+        Implements the same plan-execute-reflect loop as MainAgent._process_inner(),
+        but pushes the LLM's text response to SSE token by token.
+        """
+        if not self.core.main_agent or not self.core.llm:
+            result = await self.core.process_message(user_message, session_id)
+            await stream_response.write(("data: " + json.dumps({"type": "text", "content": result}) + "\n\n").encode())
+            return result
+
+        agent = self.core.main_agent
+        from agents.base import AgentContext
+        from core.utils import safe_json_parse, truncate_str
+        context = AgentContext(session_id=session_id, user_message=user_message)
+
+        # Set agent context through context metadata instead of instance attributes
+        context.metadata["agent_override_prompt"] = agent_system_prompt
+        context.metadata["agent_override_path"] = agent_path
+        context.metadata["chat_mode"] = chat_mode
+
+        # Clear execution events from previous runs
+        agent.clear_execution_events()
+
+        # Load memory
+        if agent.memory_agent:
+            mem_ctx = AgentContext(task_id="", session_id=session_id, user_message=user_message,
+                                   metadata={"memory_action": "get_relevant"})
+            await agent.memory_agent.process(mem_ctx)
+            if "memory_context_prompt" in mem_ctx.working_memory:
+                context.working_memory["memory_context_prompt"] = mem_ctx.working_memory["memory_context_prompt"]
+
+        # Save the ORIGINAL user message (not the one with the task plan appended)
+        if agent.memory:
+            agent.memory.add_short_term(session_id=session_id, role="user", content=user_message)
+
+        async def _write_sse(data: dict):
+            """Write SSE event, ignoring client-disconnect errors"""
+            try:
+                await stream_response.write(
+                    ("data: " + json.dumps(data, ensure_ascii=False) + "\n\n").encode()
+                )
+            except Exception:
+                pass  # Client disconnected
+
+        # Full agent loop (plan-execute-reflect) — mirrors MainAgent._process_inner
+        max_iter = agent.config.agent.max_iterations if agent.config else 30
+        final_response = ""
+        iteration = 0
+
+        while iteration < max_iter:
+            iteration += 1
+
+            # Clear any intermediate text from previous iterations on the frontend
+            if iteration > 1:
+                await _write_sse({"type": "clear_text"})
+
+            # Build messages
+            messages = agent._build_messages(context)
+            tools = agent._get_tools()
+
+            # Call LLM with streaming — tokens are pushed to SSE in real time
+            response = await agent._call_llm_stream(messages, tools=tools, stream_response=stream_response)
+            if not response.success:
+                await _write_sse({"type": "text", "content": f"⚠️ LLM调用失败: {response.error}"})
+                return f"⚠️ LLM 调用失败: {response.error}"
+
+            content = response.content or ""
+
+            # ── Check for tool calls (OpenAI function calling) ──
+            if response.tool_calls:
+                # Send tool_call event to frontend
+                agent._add_exec_event("tool_call", {
+                    "title": f"调用 {len(response.tool_calls)} 个工具",
+                    "tool_names": [tc["name"] for tc in response.tool_calls],
+                })
+                await _write_sse({"type": "exec_event", "data": {
+                    "type": "tool_call",
+                    "title": f"调用 {len(response.tool_calls)} 个工具",
+                }})
+
+                # Add assistant tool_calls message to history (OpenAI format requirement)
+                context.conversation_history.append(
+                    Message(role="assistant", content=response.content or "",
+                            tool_calls=response.tool_calls)
+                )
+
+                # Execute tool calls
+                tool_results = await agent._handle_tool_calls(response.tool_calls, context, "")
+
+                # Send tool_result events to frontend
+                for tc, result in tool_results:
+                    success = result.get("success", False)
+                    agent._add_exec_event("tool_result", {
+                        "title": f"工具结果: {tc['name']}",
+                        "tool_name": tc["name"],
+                        "success": success,
+                        "summary": truncate_str(result.get("output", result.get("error", "")), 500),
+                    })
+                    await _write_sse({"type": "exec_event", "data": {
+                        "type": "tool_result",
+                        "title": f"工具结果: {tc['name']}",
+                        "success": success,
+                    }})
+
+                # Add tool results to history
+                for tc, result in tool_results:
+                    context.conversation_history.append(
+                        Message(role="tool", content=json.dumps(result, ensure_ascii=False),
+                                tool_call_id=tc["id"], name=tc["name"])
+                    )
+                continue  # Next iteration — let the LLM process tool results
+
+            # ── Try parsing JSON action instructions ──
+            action_data = safe_json_parse(content)
+
+            if action_data and isinstance(action_data, dict):
+                # Has structured action instructions
+                if "actions" in action_data:
+                    # Execute action list
+                    results = await agent._execute_actions(action_data, context, "")
+
+                    # Relay execution events accumulated by _execute_actions
+                    # (it calls _add_exec_event internally); since events were
+                    # cleared at start, send all of them
+                    recent_events = agent.get_execution_events()
+                    for evt in recent_events:
+                        await _write_sse({"type": "exec_event", "data": evt})
+
+                    # Add assistant action message to conversation history
+                    context.conversation_history.append(Message(role="assistant", content=content))
+
+                    result_summary = agent._summarize_action_results(results)
+
+                    # Handle timeout diagnostics (same as _process_inner)
+                    has_timeout = any(r.get("timed_out") for r in results)
+                    timeout_detail = ""
+                    if has_timeout:
+                        timeout_details = []
+                        for i, r in enumerate(results, 1):
+                            if r.get("timed_out"):
+                                diag = r.get("timeout_diagnosis", {})
+                                timeout_details.append(
+                                    f"### 命令 {i} 超时诊断\n"
+                                    f"- 原因: {diag.get('diagnosis', '未知')}\n"
+                                    f"- 进展: {diag.get('progress', '未知')}\n"
+                                    f"- 是否建议重试: {'是' if diag.get('should_retry') else '否'}\n"
+                                    f"- 重试策略: {diag.get('retry_strategy', '无')}\n"
+                                )
+                        timeout_detail = "\n\n## ⏰ 超时诊断详情\n" + "\n".join(timeout_details)
+
+                    feedback_msg = f"[执行结果]\n{result_summary}\n\n请基于以上结果继续。"
+                    if timeout_detail:
+                        feedback_msg += timeout_detail + "\n\n请根据以上诊断信息决定下一步操作。"
+
+                    context.conversation_history.append(
+                        Message(role="user", content=feedback_msg)
+                    )
+
+                    # Check if all actions succeeded
+                    all_success = all(r.get("success", False) for r in results)
+                    if all_success and results:
+                        final_response = action_data.get("thought", "")
+                        if "plan" in action_data and action_data["plan"]:
+                            final_response += "\n\n已完成: " + " → ".join(action_data["plan"])
+                        break
+
+                    # Check if timeout diagnosis suggests aborting
+                    if has_timeout:
+                        should_abort = False
+                        abort_reasons = []
+                        for i, r in enumerate(results, 1):
+                            if r.get("timed_out"):
+                                diag = r.get("timeout_diagnosis", {})
+                                if diag.get("should_retry") is False:
+                                    should_abort = True
+                                    abort_reasons.append(
+                                        f"命令{i}: {diag.get('diagnosis', '不可恢复的超时')}"
+                                    )
+                        if should_abort:
+                            abort_msg = (
+                                "[系统通知] 以下命令因超时被终止,且超时诊断结果表明不应重试:\n"
+                            )
+                            for reason in abort_reasons:
+                                abort_msg += f"- {reason}\n"
+                            abort_msg += (
+                                "\n请直接以纯文本或 {\"type\": \"final_answer\", \"content\": \"...\"} "
+                                "格式回复,告知用户任务无法完成的原因和建议的替代方案。"
+                            )
+                            context.conversation_history.append(
+                                Message(role="user", content=abort_msg)
+                            )
+                            continue  # Let the LLM generate a final_answer
+
+                    continue  # Next iteration
+
+                # Single action — final_answer
+                if action_data.get("type") == "final_answer":
+                    final_response = action_data.get("content", content)
+                    # Stream the final answer text if not already streamed
+                    if final_response and final_response != content:
+                        await _write_sse({"type": "text_delta", "content": final_response})
+                    break
+
+            # ── Pure text response (no actions/tool calls) — this is the final answer ──
+            # Content was already streamed token by token via _call_llm_stream
+            final_response = content
+            break
+
+        # Save assistant response to memory
+        if agent.memory and final_response:
+            agent.memory.add_short_term(session_id=session_id, role="assistant", content=final_response)
+
+        return final_response
+
     async def handle_reload_config(self, request):
         """POST /api/config/reload - hot-reload from the config file (no restart needed)
 
@@ -3008,11 +3402,14 @@ class ApiServer:
         )
 
         # 3. Broadcast to all non-muted member agents, processing in parallel
-        responses = []
         active_members = [m for m in group.members if not m.muted]
 
         import asyncio
+        # Build a member_order map for deterministic sorting after gather
+        member_order = {m.agent_path: i for i, m in enumerate(active_members)}
+
         async def process_agent_member(member):
+            """Process a single member's response (do NOT save messages here)"""
             try:
                 agent_path = member.agent_path
                 agent_cfg = self._read_agent_config(agent_path)
@@ -3037,17 +3434,6 @@ class ApiServer:
                 avatar = agent_cfg.get("avatar_emoji", "🤖") or "🤖"
                 display_name = agent_cfg.get("name", agent_path)
 
-                # Save the agent reply to the group messages
-                agent_msg = GroupMessage(
-                    group_id=gid,
-                    sender="agent",
-                    sender_name=display_name,
-                    sender_avatar=avatar,
-                    content=response,
-                    agent_path=agent_path,
-                )
-                mgr.add_message(agent_msg)
-
                 return {
                     "ok": True,
                     "agent_path": agent_path,
         # Call all member agents in parallel
         tasks = [process_agent_member(m) for m in active_members]
         try:
-            responses = await asyncio.gather(*tasks, return_exceptions=True)
+            gather_results = await asyncio.gather(*tasks, return_exceptions=True)
         except Exception as e:
             logger.error(f"群消息广播异常: {e}")
             tp.update_task_status(task_id, "failed", last_message=f"广播异常: {str(e)}")
             return web.json_response({"error": f"群消息广播异常: {str(e)}"}, status=500)
 
-        # Handle exception results
-        final_responses = []
-        for r in responses:
+        # Handle exception results and sort by original member order
+        raw_responses = []
+        for r in gather_results:
             if isinstance(r, Exception):
-                final_responses.append({
+                raw_responses.append({
                     "ok": False, "agent_path": "unknown",
                     "name": "unknown", "avatar": "❌",
                     "response": f"异常: {str(r)}",
                 })
             else:
-                final_responses.append(r)
+                raw_responses.append(r)
+
+        # Sort by original member order to ensure deterministic message ordering
+        final_responses = sorted(
+            raw_responses,
+            key=lambda r: member_order.get(r.get("agent_path", ""), 999999)
+        )
+
+        # Save agent messages sequentially in sorted order
+        for resp in final_responses:
+            agent_msg = GroupMessage(
+                group_id=gid,
+                sender="agent",
+                sender_name=resp["name"],
+                sender_avatar=resp["avatar"],
+                content=resp["response"],
+                agent_path=resp["agent_path"],
+            )
+            mgr.add_message(agent_msg)
 
         # Update task status
         has_failure = any(not r.get("ok") for r in final_responses)
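Both chat handlers justify their lock handling with the comment "no await between check and set". That property comes from asyncio's cooperative scheduling: the event loop can only switch tasks at an await point, so a synchronous check-then-set cannot interleave with another request handler. A minimal sketch of that invariant, with hypothetical names:

    import asyncio

    lock_state = {"locked": False, "locked_by": None}

    def try_acquire(agent_path: str) -> bool:
        # No await between the check and the set, so no other coroutine
        # can run in between: check+set is effectively atomic per event loop.
        if lock_state["locked"] and lock_state["locked_by"] != agent_path:
            return False
        lock_state["locked"] = True
        lock_state["locked_by"] = agent_path
        return True

    async def handler(agent_path: str, results: list):
        results.append((agent_path, try_acquire(agent_path)))

    async def main():
        results = []
        await asyncio.gather(handler("agent_a", results), handler("agent_b", results))
        print(results)  # the first handler to run acquires; the second is refused

    asyncio.run(main())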
package/web/ui/chat.html CHANGED
@@ -3556,8 +3556,11 @@ async function sendMessage() {
 
   try {
     state.abortController = new AbortController();
-    const data = await api('/api/chat', {
+
+    // Use SSE streaming
+    const resp = await fetch('/api/chat/stream', {
       method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
       body: JSON.stringify({
         message: text,
         session_id: sessionId,
@@ -3568,34 +3571,90 @@ async function sendMessage() {
       }),
       signal: state.abortController.signal,
     });
-
-    // Add assistant response
-    state.messages.push({
-      role: 'assistant',
-      content: data.response || '(无回复)',
-      time: new Date().toISOString(),
-      exec_events: data.exec_events || [],
-    });
-
-    // Update session in list
-    const existing = state.sessions.find(s => s.id === sessionId);
+
+    if (!resp.ok) {
+      const errText = await resp.text();
+      throw new Error('HTTP ' + resp.status + ': ' + errText);
+    }
+
+    const reader = resp.body.getReader();
+    const decoder = new TextDecoder();
+    let buffer = '';
+    let fullResponse = '';
+    let msgIdx = state.messages.length;
+    let sessionIdReceived = sessionId;
+    let execEventsReceived = [];
+
+    // Add placeholder for streaming response
+    state.messages.push({ role: 'assistant', content: '', time: new Date().toISOString(), streaming: true });
+    renderMessages();
+
+    while (true) {
+      const { done, value } = await reader.read();
+      if (done) break;
+
+      buffer += decoder.decode(value, { stream: true });
+      const lines = buffer.split('\n');
+      buffer = lines.pop() || '';
+
+      for (const line of lines) {
+        if (!line.startsWith('data: ')) continue;
+        try {
+          const evt = JSON.parse(line.substring(6));
+
+          if (evt.type === 'session') {
+            sessionIdReceived = evt.session_id;
+          } else if (evt.type === 'text') {
+            fullResponse = evt.content;
+            state.messages[msgIdx].content = evt.content;
+            renderMessages();
+          } else if (evt.type === 'text_delta') {
+            // Incremental streaming token
+            fullResponse += evt.content;
+            state.messages[msgIdx].content = fullResponse;
+            renderMessages();
+          } else if (evt.type === 'clear_text') {
+            // Clear intermediate text from previous agent-loop iterations
+            fullResponse = '';
+            state.messages[msgIdx].content = '';
+            renderMessages();
+          } else if (evt.type === 'exec_event') {
+            // Real-time execution event (tool call, code exec, skill result, etc.)
+            execEventsReceived.push(evt.data);
+          } else if (evt.type === 'done') {
+            execEventsReceived = evt.exec_events || [];
+          } else if (evt.type === 'error') {
+            fullResponse = '❌ ' + evt.error;
+            state.messages[msgIdx].content = fullResponse;
+          }
+        } catch (e) { /* skip malformed */ }
+      }
+    }
+
+    // Finalize message
+    if (state.messages[msgIdx]) {
+      state.messages[msgIdx].streaming = false;
+      if (execEventsReceived.length > 0) {
+        state.messages[msgIdx].exec_events = execEventsReceived;
+      }
+      if (!state.messages[msgIdx].content) {
+        state.messages[msgIdx].content = '(无回复)';
+      }
+    }
+
+    // Update session
+    const existing = state.sessions.find(s => s.id === sessionIdReceived);
     if (existing) {
       existing.messages = (existing.messages || 0) + 2;
       existing.last = new Date().toISOString();
     } else {
-      state.sessions.splice(1, 0, {
-        id: sessionId,
-        name: formatSessionName(sessionId),
-        messages: 2,
-        last: new Date().toISOString(),
-      });
+      state.sessions.splice(1, 0, { id: sessionIdReceived, name: formatSessionName(sessionIdReceived), messages: 2, last: new Date().toISOString() });
     }
-    // Update cache
     state.agentSessions[state.activeAgent] = [...state.sessions];
     renderSessions();
 
     // Auto-play TTS if enabled (skip command execution results)
-    if (ttsManager.enabled && data.response && !data.response.match(/^\s*[✅❌⏰]\s*\[执行结果\]/m)) {
+    if (ttsManager.enabled && fullResponse && !fullResponse.match(/^\s*[✅❌⏰]\s*\[执行结果\]/m)) {
       const idx = state.messages.length - 1;
       ttsManager.speak(idx);
     }
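
The same SSE framing that chat.html parses can also be consumed outside the browser, which is handy for testing the new endpoint. A hedged sketch using requests; the host/port and payload fields are assumptions based on the handlers above, so adjust them to your deployment:

    import json
    import requests

    # Assumes the server listens locally on port 8000 (hypothetical)
    payload = {"message": "hello", "session_id": "cli_test", "agent_path": "default"}
    with requests.post("http://127.0.0.1:8000/api/chat/stream", json=payload, stream=True) as r:
        full = ""
        for line in r.iter_lines(decode_unicode=True):
            if not line or not line.startswith("data: "):
                continue
            evt = json.loads(line[len("data: "):])
            if evt.get("type") == "text_delta":
                full += evt["content"]   # incremental token
            elif evt.get("type") == "clear_text":
                full = ""                # agent loop started a new iteration
            elif evt.get("type") == "done":
                print(full)
                print("exec events:", evt.get("exec_events", []))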