autoglm-gui 1.5.1__py3-none-any.whl → 1.5.3__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (50)
  1. AutoGLM_GUI/__init__.py +1 -1
  2. AutoGLM_GUI/__main__.py +11 -2
  3. AutoGLM_GUI/adb_plus/qr_pair.py +3 -3
  4. AutoGLM_GUI/agents/__init__.py +7 -2
  5. AutoGLM_GUI/agents/factory.py +46 -6
  6. AutoGLM_GUI/agents/glm/agent.py +2 -2
  7. AutoGLM_GUI/agents/glm/async_agent.py +515 -0
  8. AutoGLM_GUI/agents/glm/parser.py +4 -2
  9. AutoGLM_GUI/agents/protocols.py +111 -1
  10. AutoGLM_GUI/agents/stream_runner.py +4 -5
  11. AutoGLM_GUI/api/__init__.py +3 -1
  12. AutoGLM_GUI/api/agents.py +78 -37
  13. AutoGLM_GUI/api/devices.py +72 -0
  14. AutoGLM_GUI/api/layered_agent.py +9 -8
  15. AutoGLM_GUI/api/mcp.py +6 -4
  16. AutoGLM_GUI/config_manager.py +38 -1
  17. AutoGLM_GUI/device_manager.py +28 -4
  18. AutoGLM_GUI/device_metadata_manager.py +174 -0
  19. AutoGLM_GUI/devices/mock_device.py +8 -1
  20. AutoGLM_GUI/phone_agent_manager.py +145 -32
  21. AutoGLM_GUI/scheduler_manager.py +6 -6
  22. AutoGLM_GUI/schemas.py +89 -0
  23. AutoGLM_GUI/scrcpy_stream.py +2 -1
  24. AutoGLM_GUI/static/assets/{about-CfwX1Cmc.js → about-DTrVqEQH.js} +1 -1
  25. AutoGLM_GUI/static/assets/{alert-dialog-CtGlN2IJ.js → alert-dialog-B2KxPLtZ.js} +1 -1
  26. AutoGLM_GUI/static/assets/chat-BkrVbc3X.js +129 -0
  27. AutoGLM_GUI/static/assets/{circle-alert-t08bEMPO.js → circle-alert-vnNxOaxv.js} +1 -1
  28. AutoGLM_GUI/static/assets/{dialog-FNwZJFwk.js → dialog-Cuw3N8_F.js} +1 -1
  29. AutoGLM_GUI/static/assets/{eye-D0UPWCWC.js → eye-JD1jbm99.js} +1 -1
  30. AutoGLM_GUI/static/assets/{history-CRo95B7i.js → history-CobYdXju.js} +1 -1
  31. AutoGLM_GUI/static/assets/{index-CTHbFvKl.js → index-BzP-Te33.js} +5 -5
  32. AutoGLM_GUI/static/assets/index-y1vOOBHH.js +1 -0
  33. AutoGLM_GUI/static/assets/label-BpCMrXj_.js +1 -0
  34. AutoGLM_GUI/static/assets/{logs-RW09DyYY.js → logs-BcsSAeol.js} +1 -1
  35. AutoGLM_GUI/static/assets/{popover--JTJrE5v.js → popover-BHbCs5Wl.js} +1 -1
  36. AutoGLM_GUI/static/assets/scheduled-tasks-WvtmRsex.js +1 -0
  37. AutoGLM_GUI/static/assets/{textarea-PRmVnWq5.js → textarea-B84jf3cE.js} +1 -1
  38. AutoGLM_GUI/static/assets/workflows-DhBpqdz_.js +1 -0
  39. AutoGLM_GUI/static/index.html +1 -1
  40. {autoglm_gui-1.5.1.dist-info → autoglm_gui-1.5.3.dist-info}/METADATA +10 -1
  41. {autoglm_gui-1.5.1.dist-info → autoglm_gui-1.5.3.dist-info}/RECORD +44 -43
  42. AutoGLM_GUI/static/assets/chat-BYa-foUI.js +0 -129
  43. AutoGLM_GUI/static/assets/index-BaLMSqd3.js +0 -1
  44. AutoGLM_GUI/static/assets/label-DJFevVmr.js +0 -1
  45. AutoGLM_GUI/static/assets/scheduled-tasks-DTRKsQXF.js +0 -1
  46. AutoGLM_GUI/static/assets/square-pen-CPK_K680.js +0 -1
  47. AutoGLM_GUI/static/assets/workflows-CdcsAoaT.js +0 -1
  48. {autoglm_gui-1.5.1.dist-info → autoglm_gui-1.5.3.dist-info}/WHEEL +0 -0
  49. {autoglm_gui-1.5.1.dist-info → autoglm_gui-1.5.3.dist-info}/entry_points.txt +0 -0
  50. {autoglm_gui-1.5.1.dist-info → autoglm_gui-1.5.3.dist-info}/licenses/LICENSE +0 -0
AutoGLM_GUI/api/__init__.py CHANGED
@@ -7,6 +7,8 @@ from contextlib import asynccontextmanager
 from importlib.resources import files
 from pathlib import Path
 
+from typing import AsyncGenerator
+
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import FileResponse
@@ -82,7 +84,7 @@ def create_app() -> FastAPI:
 
     # Define combined lifespan
     @asynccontextmanager
-    async def combined_lifespan(app: FastAPI):
+    async def combined_lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
         """Combine app startup logic with MCP lifespan."""
         # App startup
         asyncio.create_task(qr_pairing_manager.cleanup_expired_sessions())
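The added return annotation matches the usual typing for an @asynccontextmanager FastAPI lifespan. A minimal sketch of the pattern, with an illustrative app and startup/shutdown hooks rather than the package's actual wiring:

    from contextlib import asynccontextmanager
    from typing import AsyncGenerator

    from fastapi import FastAPI


    @asynccontextmanager
    async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
        # Startup work runs before the server starts accepting requests.
        print("starting up")
        yield  # The application serves requests while suspended here.
        # Shutdown work runs once the server stops.
        print("shutting down")


    app = FastAPI(lifespan=lifespan)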
AutoGLM_GUI/api/agents.py CHANGED
@@ -1,12 +1,12 @@
 """Agent lifecycle and chat routes."""
 
+import asyncio
 import json
 
 from fastapi import APIRouter, HTTPException
 from fastapi.responses import StreamingResponse
 from pydantic import ValidationError
 
-from AutoGLM_GUI.agents.events import AgentEventType
 from AutoGLM_GUI.config import AgentConfig, ModelConfig
 from AutoGLM_GUI.logger import logger
 from AutoGLM_GUI.schemas import (
@@ -154,8 +154,8 @@ def init_agent(request: InitRequest) -> dict:
 
 
 @router.post("/api/chat", response_model=ChatResponse)
-def chat(request: ChatRequest) -> ChatResponse:
-    """Send a task to the Agent and execute it.
+async def chat(request: ChatRequest) -> ChatResponse:
+    """Send a task to the Agent and execute it (AsyncAgent supported).
 
     The Agent is initialized automatically on first use; there is no need to call /api/init manually.
     """
@@ -165,15 +165,27 @@ def chat(request: ChatRequest) -> ChatResponse:
     device_id = request.device_id
     manager = PhoneAgentManager.get_instance()
 
-    # use_agent defaults to auto_initialize=True, which initializes the Agent automatically
+    acquired = False
     try:
-        with manager.use_agent(device_id, timeout=None) as agent:
-            result = agent.run(request.message)
-            steps = agent.step_count
-            agent.reset()
-            return ChatResponse(result=result, steps=steps, success=True)
+        acquired = await asyncio.to_thread(
+            manager.acquire_device, device_id, timeout=None, auto_initialize=True
+        )
+        # Use chat context with async agent
+        agent = await asyncio.to_thread(
+            manager.get_agent_with_context,
+            device_id,
+            context="chat",
+            agent_type="glm-async",
+        )
+
+        # AsyncAgent is always used for chat context
+        result = await agent.run(request.message)  # type: ignore[misc]
+
+        steps = agent.step_count
+        agent.reset()
+        return ChatResponse(result=result, steps=steps, success=True)  # type: ignore[arg-type]
+
     except AgentInitializationError as e:
-        # Configuration error or initialization failure
         logger.error(f"Failed to initialize agent for {device_id}: {e}")
         raise HTTPException(
             status_code=500,
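The conversion above follows the standard pattern of pushing blocking manager calls onto a worker thread with asyncio.to_thread so the async endpoint keeps the event loop free. A minimal sketch, with a made-up slow_acquire standing in for the manager's blocking methods:

    import asyncio
    import time


    def slow_acquire(device_id: str) -> bool:
        # Stand-in for a blocking call such as acquiring a device lock.
        time.sleep(1)
        return True


    async def handler(device_id: str) -> str:
        # Run the blocking function in the default thread pool, keeping
        # the event loop free to serve other requests in the meantime.
        acquired = await asyncio.to_thread(slow_acquire, device_id)
        return "acquired" if acquired else "busy"


    if __name__ == "__main__":
        print(asyncio.run(handler("emulator-5554")))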
@@ -186,17 +198,21 @@ def chat(request: ChatRequest) -> ChatResponse:
     except Exception as e:
         logger.exception(f"Unexpected error in chat for {device_id}")
         return ChatResponse(result=str(e), steps=0, success=False)
+    finally:
+        if acquired:
+            await asyncio.to_thread(manager.release_device, device_id)
 
 
 @router.post("/api/chat/stream")
-def chat_stream(request: ChatRequest):
+async def chat_stream(request: ChatRequest):
     """Send a task to the Agent and stream execution progress in real time (SSE, multi-device support).
 
     The Agent is initialized automatically on first use; there is no need to call /api/init manually.
+
+    The chat API uses AsyncAgent for native async streaming and immediate cancellation.
     """
     from datetime import datetime
 
-    from AutoGLM_GUI.agents.stream_runner import AgentStepStreamer
     from AutoGLM_GUI.device_manager import DeviceManager
     from AutoGLM_GUI.exceptions import AgentInitializationError, DeviceBusyError
     from AutoGLM_GUI.history_manager import history_manager
@@ -206,7 +222,7 @@ def chat_stream(request: ChatRequest):
     device_id = request.device_id
     manager = PhoneAgentManager.get_instance()
 
-    def event_generator():
+    async def event_generator():
         acquired = False
         start_time = datetime.now()
         final_message = ""
@@ -225,29 +241,42 @@
         )
 
         try:
-            acquired = manager.acquire_device(
-                device_id, timeout=0, raise_on_timeout=True, auto_initialize=True
+            # Acquire the device lock (runs in a thread pool)
+            acquired = await asyncio.to_thread(
+                manager.acquire_device,
+                device_id,
+                timeout=0,
+                raise_on_timeout=True,
+                auto_initialize=True,
             )
 
             try:
-                agent = manager.get_agent(device_id)
-                streamer = AgentStepStreamer(agent=agent, task=request.message)
-
-                with streamer.stream_context() as abort_fn:
-                    manager.register_abort_handler(device_id, abort_fn)
-
-                    for event in streamer:
+                # Get an AsyncAgent using the chat context
+                agent = await asyncio.to_thread(
+                    manager.get_agent_with_context,
+                    device_id,
+                    context="chat",
+                    agent_type="glm-async",
+                )
+
+                logger.info(f"Using AsyncAgent for device {device_id}")
+
+                # Register an async cancellation handler
+                async def cancel_handler():
+                    await agent.cancel()  # type: ignore[union-attr]
+
+                await asyncio.to_thread(
+                    manager.register_abort_handler, device_id, cancel_handler
+                )
+
+                try:
+                    # Use agent.stream() directly
+                    async for event in agent.stream(request.message):  # type: ignore[union-attr]
                         event_type = event["type"]
                         event_data_dict = event["data"]
 
-                        if (
-                            event_type == AgentEventType.STEP.value
-                            and event_data_dict.get("step") == -1
-                        ):
-                            continue
-
                         # Collect the message for each step
-                        if event_type == AgentEventType.STEP.value:
+                        if event_type == "step":
                             messages.append(
                                 MessageRecord(
                                     role="assistant",
@@ -259,19 +288,28 @@
                                 )
                             )
 
-                        if event_type == AgentEventType.DONE.value:
+                        if event_type == "done":
                             final_message = event_data_dict.get("message", "")
                             final_success = event_data_dict.get("success", False)
                             final_steps = event_data_dict.get("steps", 0)
 
-                        event_data = _create_sse_event(event_type, event_data_dict)
-
+                        # Emit the SSE event
+                        sse_event = _create_sse_event(event_type, event_data_dict)
                         yield f"event: {event_type}\n"
-                        yield f"data: {json.dumps(event_data, ensure_ascii=False)}\n\n"
+                        yield f"data: {json.dumps(sse_event, ensure_ascii=False)}\n\n"
+
+                except asyncio.CancelledError:
+                    logger.info(f"AsyncAgent task cancelled for device {device_id}")
+                    yield "event: cancelled\n"
+                    yield f"data: {json.dumps({'message': 'Task cancelled by user'})}\n\n"
+                    raise
+
+                finally:
+                    await asyncio.to_thread(manager.unregister_abort_handler, device_id)
 
             finally:
                 if acquired:
-                    manager.release_device(device_id)
+                    await asyncio.to_thread(manager.release_device, device_id)
 
         device_manager = DeviceManager.get_instance()
         serialno = device_manager.get_serial_by_device_id(device_id)
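A client-side sketch of consuming the stream above with httpx (a third-party HTTP client); the host and port are assumptions, while the path, the device_id/message fields, and the step/done/cancelled event names come from the hunks:

    import asyncio

    import httpx


    async def consume_chat_stream() -> None:
        payload = {"device_id": "emulator-5554", "message": "open settings"}
        async with httpx.AsyncClient(timeout=None) as client:
            async with client.stream(
                "POST", "http://127.0.0.1:8000/api/chat/stream", json=payload
            ) as response:
                async for line in response.aiter_lines():
                    # SSE frames arrive as "event: ..." / "data: ..." pairs
                    # separated by blank lines; a real client would parse them.
                    if line:
                        print(line)


    asyncio.run(consume_chat_stream())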
@@ -375,14 +413,15 @@ def reset_agent(request: ResetRequest) -> dict:
 
 
 @router.post("/api/chat/abort")
-def abort_chat(request: AbortRequest) -> dict:
-    """Abort an in-progress chat stream."""
+async def abort_chat(request: AbortRequest) -> dict:
+    """Abort an in-progress chat stream (AsyncAgent supported)."""
     from AutoGLM_GUI.phone_agent_manager import PhoneAgentManager
 
     device_id = request.device_id
     manager = PhoneAgentManager.get_instance()
 
-    success = manager.abort_streaming_chat(device_id)
+    # Use the async method (supports both AsyncAgent and BaseAgent)
+    success = await manager.abort_streaming_chat_async(device_id)
 
     return {
         "success": success,
@@ -413,6 +452,7 @@ def get_config_endpoint() -> ConfigResponse:
         agent_type=effective_config.agent_type,
         agent_config_params=effective_config.agent_config_params,
         default_max_steps=effective_config.default_max_steps,
+        layered_max_turns=effective_config.layered_max_turns,
         decision_base_url=effective_config.decision_base_url,
         decision_model_name=effective_config.decision_model_name,
         decision_api_key=effective_config.decision_api_key,
@@ -456,6 +496,7 @@ def save_config_endpoint(request: ConfigSaveRequest) -> dict:
         agent_type=request.agent_type,
         agent_config_params=request.agent_config_params,
         default_max_steps=request.default_max_steps,
+        layered_max_turns=request.layered_max_turns,
         decision_base_url=request.decision_base_url,
         decision_model_name=request.decision_model_name,
         decision_api_key=request.decision_api_key,
AutoGLM_GUI/api/devices.py CHANGED
@@ -15,6 +15,8 @@ from AutoGLM_GUI.logger import logger
 
 from AutoGLM_GUI.schemas import (
     DeviceListResponse,
+    DeviceNameResponse,
+    DeviceNameUpdateRequest,
     DeviceResponse,
     MdnsDeviceResponse,
     MdnsDiscoverResponse,
@@ -454,3 +456,73 @@ def remove_remote_device(
         message=message,
         error=None if success else "remove_failed",
     )
+
+
+@router.put("/api/devices/{serial}/name", response_model=DeviceNameResponse)
+def update_device_name(
+    serial: str, request: DeviceNameUpdateRequest
+) -> DeviceNameResponse:
+    """Update or clear device display name.
+
+    Args:
+        serial: Device hardware serial number
+        request: Contains display_name (str or None to clear)
+
+    Returns:
+        DeviceNameResponse with updated name or error
+    """
+    from AutoGLM_GUI.device_manager import DeviceManager
+
+    try:
+        device_manager = DeviceManager.get_instance()
+        device_manager.set_device_display_name(serial, request.display_name)
+
+        return DeviceNameResponse(
+            success=True,
+            serial=serial,
+            display_name=request.display_name,
+        )
+    except ValueError as e:
+        logger.warning(f"Failed to update device name for {serial}: {e}")
+        return DeviceNameResponse(
+            success=False,
+            serial=serial,
+            error=str(e),
+        )
+    except Exception as e:
+        logger.exception(f"Unexpected error updating device name for {serial}")
+        return DeviceNameResponse(
+            success=False,
+            serial=serial,
+            error=f"Internal error: {str(e)}",
+        )
+
+
+@router.get("/api/devices/{serial}/name", response_model=DeviceNameResponse)
+def get_device_name(serial: str) -> DeviceNameResponse:
+    """Get device display name.
+
+    Args:
+        serial: Device hardware serial number
+
+    Returns:
+        DeviceNameResponse with current display name or None if not set
+    """
+    from AutoGLM_GUI.device_manager import DeviceManager
+
+    try:
+        device_manager = DeviceManager.get_instance()
+        display_name = device_manager.get_device_display_name(serial)
+
+        return DeviceNameResponse(
+            success=True,
+            serial=serial,
+            display_name=display_name,
+        )
+    except Exception as e:
+        logger.exception(f"Unexpected error getting device name for {serial}")
+        return DeviceNameResponse(
+            success=False,
+            serial=serial,
+            error=f"Internal error: {str(e)}",
+        )
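A quick way to exercise the two new routes once the server is running; the base URL and serial are assumptions, while the paths and the display_name field come from the hunk above:

    import httpx  # any HTTP client works; httpx is used here for illustration

    BASE = "http://127.0.0.1:8000"  # assumed host and port
    serial = "ABC123456"            # example hardware serial

    # Set a custom display name (send null/None to clear it).
    r = httpx.put(f"{BASE}/api/devices/{serial}/name", json={"display_name": "Test phone"})
    print(r.json())

    # Read it back.
    r = httpx.get(f"{BASE}/api/devices/{serial}/name")
    print(r.json())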
AutoGLM_GUI/api/layered_agent.py CHANGED
@@ -7,7 +7,7 @@ a decision model for planning and autoglm-phone for execution.
 import asyncio
 import json
 import threading
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, AsyncGenerator
 
 from agents import Agent, Runner, SQLiteSession, function_tool
 
@@ -216,7 +216,7 @@ def _sync_chat(device_id: str, message: str) -> str:
         # Reset the agent to ensure a clean state
         agent.reset()
 
-        result = agent.run(message)
+        result = agent.run(message)  # type: ignore[misc]
         steps = agent.step_count
 
         # Check whether the step limit was reached
@@ -384,7 +384,7 @@ class LayeredAgentRequest(BaseModel):
 
 
 @router.post("/api/layered-agent/chat")
-async def layered_agent_chat(request: LayeredAgentRequest):
+async def layered_agent_chat(request: LayeredAgentRequest) -> StreamingResponse:
     """
     Layered agent chat API with streaming execution steps.
 
@@ -407,7 +407,7 @@ async def layered_agent_chat(request: LayeredAgentRequest):
     from AutoGLM_GUI.history_manager import history_manager
     from AutoGLM_GUI.models.history import ConversationRecord
 
-    async def event_generator():
+    async def event_generator() -> AsyncGenerator[str, None]:
         start_time = datetime.now()
         final_output = ""
         final_success = False
@@ -418,11 +418,12 @@ async def layered_agent_chat(request: LayeredAgentRequest):
         session_id = request.session_id or request.device_id or "default"
         session = _get_or_create_session(session_id)
 
-        # Run the agent with streaming and session for memory
+        effective_config = config_manager.get_effective_config()
+
         result = Runner.run_streamed(
             agent,
             request.message,
-            max_turns=50,
+            max_turns=effective_config.layered_max_turns,
             session=session,
         )
 
@@ -663,7 +664,7 @@ class AbortSessionRequest(BaseModel):
 
 
 @router.post("/api/layered-agent/abort")
-def abort_session(request: AbortSessionRequest):
+def abort_session(request: AbortSessionRequest) -> dict[str, Any]:
     """
     Abort a running layered agent session.
 
@@ -697,7 +698,7 @@ class ResetSessionRequest(BaseModel):
 
 
 @router.post("/api/layered-agent/reset")
-def reset_session(request: ResetSessionRequest):
+def reset_session(request: ResetSessionRequest) -> dict[str, Any]:
     """
     Reset/clear a session to forget conversation history.
 
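The new annotations on layered_agent_chat and its event_generator follow the usual FastAPI pattern of returning a StreamingResponse over an async generator of str chunks. A generic, self-contained sketch of that pattern (the route path and payload here are invented, not the package's actual planner/executor wiring):

    from typing import AsyncGenerator

    from fastapi import FastAPI
    from fastapi.responses import StreamingResponse

    app = FastAPI()


    @app.post("/demo/stream")
    async def demo_stream() -> StreamingResponse:
        async def event_generator() -> AsyncGenerator[str, None]:
            # Each yielded string becomes one chunk of the SSE body.
            for i in range(3):
                yield f'event: step\ndata: {{"step": {i}}}\n\n'

        return StreamingResponse(event_generator(), media_type="text/event-stream")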
AutoGLM_GUI/api/mcp.py CHANGED
@@ -1,5 +1,7 @@
 """MCP (Model Context Protocol) tools for AutoGLM-GUI."""
 
+from typing import Any
+
 from typing_extensions import TypedDict
 
 from fastmcp import FastMCP
@@ -56,12 +58,12 @@ def chat(device_id: str, message: str) -> ChatResult:
         # Reset agent before each chat to ensure clean state
         agent.reset()
 
-        result = agent.run(message)
+        result = agent.run(message)  # type: ignore[misc]
         steps = agent.step_count
 
         # Check if MCP step limit was reached
         if steps >= MCP_MAX_STEPS and result == "Max steps reached":
-            return {
+            return {  # type: ignore[return-value]
                 "result": (
                     f"已达到 MCP 最大步数限制({MCP_MAX_STEPS}步)。任务可能未完成,"
                     "建议将任务拆分为更小的子任务。"
@@ -70,7 +72,7 @@ def chat(device_id: str, message: str) -> ChatResult:
                 "success": False,
             }
 
-        return {"result": result, "steps": steps, "success": True}
+        return {"result": result, "steps": steps, "success": True}  # type: ignore[return-value]
 
     finally:
         # Restore original config
@@ -123,7 +125,7 @@ def list_devices() -> list[DeviceResponse]:
     return devices_with_agents
 
 
-def get_mcp_asgi_app():
+def get_mcp_asgi_app() -> Any:
     """
     Get the MCP server's ASGI app for mounting in FastAPI.
 
AutoGLM_GUI/config_manager.py CHANGED
@@ -23,6 +23,10 @@ from pydantic import BaseModel, field_validator
 from AutoGLM_GUI.logger import logger
 
 
+LAYERED_MAX_TURNS_DEFAULT = 50
+LAYERED_MAX_TURNS_MIN = 1
+
+
 # ==================== Config source enum ====================
 
 
@@ -53,12 +57,14 @@ class ConfigModel(BaseModel):
     api_key: str = "EMPTY"
 
     # Agent type configuration
-    agent_type: str = "glm"  # Agent type (e.g., "glm", "mai")
+    agent_type: str = "glm"  # Agent type (e.g., "glm", "mai", "glm-sync")
     agent_config_params: dict | None = None  # Agent-specific configuration
 
     # Agent execution configuration
     default_max_steps: int = 100  # Maximum number of steps per task
 
+    layered_max_turns: int = LAYERED_MAX_TURNS_DEFAULT
+
     # Decision model configuration (for the layered agent)
     decision_base_url: str | None = None
     decision_model_name: str | None = None
@@ -110,6 +116,13 @@ class ConfigModel(BaseModel):
             raise ValueError("decision_model_name cannot be empty string")
         return v.strip() if v else v
 
+    @field_validator("layered_max_turns")
+    @classmethod
+    def validate_layered_max_turns(cls, v: int) -> int:
+        if v < LAYERED_MAX_TURNS_MIN:
+            raise ValueError(f"layered_max_turns must be >= {LAYERED_MAX_TURNS_MIN}")
+        return v
+
 
 # ==================== Config layer dataclass ====================
 
@@ -126,6 +139,7 @@ class ConfigLayer:
     agent_config_params: Optional[dict] = None
     # Agent execution configuration
    default_max_steps: Optional[int] = None
+    layered_max_turns: Optional[int] = None
     # Decision model configuration
     decision_base_url: Optional[str] = None
     decision_model_name: Optional[str] = None
@@ -160,6 +174,7 @@ class ConfigLayer:
             "agent_type": self.agent_type,
             "agent_config_params": self.agent_config_params,
             "default_max_steps": self.default_max_steps,
+            "layered_max_turns": self.layered_max_turns,
             "decision_base_url": self.decision_base_url,
             "decision_model_name": self.decision_model_name,
             "decision_api_key": self.decision_api_key,
@@ -225,6 +240,7 @@ class UnifiedConfigManager:
             agent_type="glm",
             agent_config_params=None,
             default_max_steps=100,
+            layered_max_turns=LAYERED_MAX_TURNS_DEFAULT,
             decision_base_url=None,
             decision_model_name=None,
             decision_api_key=None,
@@ -248,6 +264,7 @@ class UnifiedConfigManager:
         base_url: Optional[str] = None,
         model_name: Optional[str] = None,
         api_key: Optional[str] = None,
+        layered_max_turns: Optional[int] = None,
     ) -> None:
         """
         Set CLI parameter configuration (highest priority).
@@ -256,11 +273,13 @@ class UnifiedConfigManager:
             base_url: Value from --base-url
             model_name: Value from --model
             api_key: Value from --apikey
+            layered_max_turns: Value from --layered-max-turns
         """
         self._cli_layer = ConfigLayer(
             base_url=base_url,
             model_name=model_name,
             api_key=api_key,
+            layered_max_turns=layered_max_turns,
             source=ConfigSource.CLI,
         )
         self._effective_config = None  # Clear the cache
@@ -277,6 +296,7 @@ class UnifiedConfigManager:
         - AUTOGLM_DECISION_BASE_URL
         - AUTOGLM_DECISION_MODEL_NAME
         - AUTOGLM_DECISION_API_KEY
+        - AUTOGLM_LAYERED_MAX_TURNS
         """
         base_url = os.getenv("AUTOGLM_BASE_URL")
         model_name = os.getenv("AUTOGLM_MODEL_NAME")
@@ -287,10 +307,19 @@ class UnifiedConfigManager:
         decision_model_name = os.getenv("AUTOGLM_DECISION_MODEL_NAME")
         decision_api_key = os.getenv("AUTOGLM_DECISION_API_KEY")
 
+        layered_max_turns_str = os.getenv("AUTOGLM_LAYERED_MAX_TURNS")
+        layered_max_turns = None
+        if layered_max_turns_str:
+            try:
+                layered_max_turns = int(layered_max_turns_str)
+            except ValueError:
+                logger.warning("AUTOGLM_LAYERED_MAX_TURNS must be an integer")
+
         self._env_layer = ConfigLayer(
             base_url=base_url if base_url else None,
             model_name=model_name if model_name else None,
             api_key=api_key if api_key else None,
+            layered_max_turns=layered_max_turns,
             decision_base_url=decision_base_url if decision_base_url else None,
             decision_model_name=decision_model_name if decision_model_name else None,
             decision_api_key=decision_api_key if decision_api_key else None,
@@ -352,6 +381,7 @@ class UnifiedConfigManager:
             ),  # Defaults to 'glm' for compatibility with old configs
             agent_config_params=config_data.get("agent_config_params"),
             default_max_steps=config_data.get("default_max_steps"),
+            layered_max_turns=config_data.get("layered_max_turns"),
             decision_base_url=config_data.get("decision_base_url"),
             decision_model_name=config_data.get("decision_model_name"),
             decision_api_key=config_data.get("decision_api_key"),
@@ -385,6 +415,7 @@ class UnifiedConfigManager:
         agent_type: Optional[str] = None,
         agent_config_params: Optional[dict] = None,
         default_max_steps: Optional[int] = None,
+        layered_max_turns: Optional[int] = None,
         decision_base_url: Optional[str] = None,
         decision_model_name: Optional[str] = None,
         decision_api_key: Optional[str] = None,
@@ -400,6 +431,7 @@ class UnifiedConfigManager:
             agent_type: Agent type (optional, e.g. "glm", "mai")
             agent_config_params: Agent-specific configuration parameters (optional)
             default_max_steps: Default maximum number of execution steps (optional)
+            layered_max_turns: Maximum number of turns for the layered agent (optional)
             decision_base_url: Decision model base URL (optional)
             decision_model_name: Decision model name (optional)
             decision_api_key: Decision model API key (optional)
@@ -426,6 +458,8 @@ class UnifiedConfigManager:
             new_config["agent_config_params"] = agent_config_params
         if default_max_steps is not None:
             new_config["default_max_steps"] = default_max_steps
+        if layered_max_turns is not None:
+            new_config["layered_max_turns"] = layered_max_turns
 
         # Decision model configuration
         if decision_base_url is not None:
@@ -447,6 +481,7 @@ class UnifiedConfigManager:
             "agent_type",
             "agent_config_params",
             "default_max_steps",
+            "layered_max_turns",
             "decision_base_url",
             "decision_model_name",
             "decision_api_key",
@@ -540,6 +575,7 @@ class UnifiedConfigManager:
             "decision_base_url",
             "decision_model_name",
             "decision_api_key",
+            "layered_max_turns",
         ]
 
         for key in config_keys:
@@ -708,6 +744,7 @@ class UnifiedConfigManager:
             "decision_base_url": config.decision_base_url,
             "decision_model_name": config.decision_model_name,
             "decision_api_key": config.decision_api_key,
+            "layered_max_turns": config.layered_max_turns,
         }
 
 
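Taken together, these layers resolve layered_max_turns with CLI over environment over the config file over the built-in default of 50. A simplified standalone sketch of the environment step added in the @@ -287 hunk (the real manager merges ConfigLayer objects and applies the pydantic validator rather than returning a value directly):

    import os

    LAYERED_MAX_TURNS_DEFAULT = 50
    LAYERED_MAX_TURNS_MIN = 1


    def resolve_layered_max_turns() -> int:
        # A missing or non-integer AUTOGLM_LAYERED_MAX_TURNS falls back to the
        # default here; values below the minimum are rejected, mirroring the
        # validate_layered_max_turns validator above.
        raw = os.getenv("AUTOGLM_LAYERED_MAX_TURNS")
        if not raw:
            return LAYERED_MAX_TURNS_DEFAULT
        try:
            value = int(raw)
        except ValueError:
            return LAYERED_MAX_TURNS_DEFAULT
        if value < LAYERED_MAX_TURNS_MIN:
            raise ValueError(f"layered_max_turns must be >= {LAYERED_MAX_TURNS_MIN}")
        return value


    os.environ["AUTOGLM_LAYERED_MAX_TURNS"] = "25"
    print(resolve_layered_max_turns())  # 25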
AutoGLM_GUI/device_manager.py CHANGED
@@ -89,6 +89,7 @@ class ManagedDevice:
 
     # Device metadata
     model: Optional[str] = None
+    display_name: Optional[str] = None  # User-defined custom name
 
     # Device-level state
     state: DeviceState = DeviceState.ONLINE
@@ -144,6 +145,7 @@ class ManagedDevice:
             "id": self.primary_device_id,
             "serial": self.serial,
             "model": self.model or "Unknown",
+            "display_name": self.display_name,
             "status": self.status,
             "connection_type": self.connection_type.value,
             "state": self.state.value,
@@ -178,24 +180,20 @@ def _create_managed_device(
         for d in device_infos
     ]
 
-    # Extract model (prefer device with model info)
     model = None
     for device_info in device_infos:
         if device_info.model:
             model = device_info.model
             break
 
-    # Create managed device
     managed = ManagedDevice(
         serial=serial,
         connections=connections,
         model=model,
     )
 
-    # Select primary connection
     managed.select_primary_connection()
 
-    # Set state
     managed.state = (
         DeviceState.ONLINE if managed.status == "device" else DeviceState.OFFLINE
     )
@@ -249,6 +247,10 @@ class DeviceManager:
         self._remote_devices: dict[str, "DeviceProtocol"] = {}
         self._remote_device_configs: dict[str, dict] = {}
 
+        from AutoGLM_GUI.device_metadata_manager import DeviceMetadataManager
+
+        self._metadata_manager = DeviceMetadataManager.get_instance()
+
     @classmethod
     def get_instance(cls, adb_path: str = "adb") -> DeviceManager:
         """Get singleton instance (thread-safe)."""
@@ -449,6 +451,11 @@ class DeviceManager:
         for serial in added_serials:
             device_infos = grouped_by_serial[serial]
             managed = _create_managed_device(serial, device_infos)
+
+            display_name = self._metadata_manager.get_display_name(serial)
+            if display_name:
+                managed.display_name = display_name
+
             self._devices[serial] = managed
 
         # Update reverse mapping
@@ -977,3 +984,20 @@ class DeviceManager:
         from AutoGLM_GUI.devices.adb_device import ADBDevice
 
         return ADBDevice(managed.primary_device_id)
+
+    def set_device_display_name(self, serial: str, display_name: Optional[str]) -> None:
+        """Set custom display name for device."""
+        self._metadata_manager.set_display_name(serial, display_name)
+
+        with self._devices_lock:
+            if serial in self._devices:
+                self._devices[serial].display_name = display_name
+                logger.debug(f"Updated display name in memory for {serial}")
+
+    def get_device_display_name(self, serial: str) -> Optional[str]:
+        """Get custom display name for device."""
+        with self._devices_lock:
+            if serial in self._devices and self._devices[serial].display_name:
+                return self._devices[serial].display_name
+
+        return self._metadata_manager.get_display_name(serial)
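The lookup added above prefers the in-memory ManagedDevice and only then falls back to the persisted metadata store. A standalone sketch of that precedence, with a hypothetical dict-backed store standing in for DeviceMetadataManager:

    from typing import Optional


    class FakeMetadataStore:
        # Hypothetical stand-in for DeviceMetadataManager's persistence layer.
        def __init__(self) -> None:
            self._names: dict[str, str] = {}

        def set_display_name(self, serial: str, name: Optional[str]) -> None:
            if name is None:
                self._names.pop(serial, None)
            else:
                self._names[serial] = name

        def get_display_name(self, serial: str) -> Optional[str]:
            return self._names.get(serial)


    def resolve_display_name(
        in_memory: dict[str, Optional[str]], store: FakeMetadataStore, serial: str
    ) -> Optional[str]:
        # Prefer the live device object, then fall back to persisted metadata.
        if in_memory.get(serial):
            return in_memory[serial]
        return store.get_display_name(serial)


    store = FakeMetadataStore()
    store.set_display_name("ABC123456", "Test phone")
    print(resolve_display_name({}, store, "ABC123456"))  # Test phone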