autoglm-gui 1.4.0__py3-none-any.whl → 1.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- AutoGLM_GUI/__main__.py +0 -4
- AutoGLM_GUI/adb_plus/qr_pair.py +8 -8
- AutoGLM_GUI/agents/__init__.py +20 -0
- AutoGLM_GUI/agents/factory.py +160 -0
- AutoGLM_GUI/agents/mai_adapter.py +627 -0
- AutoGLM_GUI/agents/protocols.py +23 -0
- AutoGLM_GUI/api/__init__.py +48 -7
- AutoGLM_GUI/api/agents.py +61 -17
- AutoGLM_GUI/api/devices.py +12 -18
- AutoGLM_GUI/api/dual_model.py +15 -9
- AutoGLM_GUI/api/health.py +13 -0
- AutoGLM_GUI/api/layered_agent.py +239 -166
- AutoGLM_GUI/api/mcp.py +11 -10
- AutoGLM_GUI/api/version.py +23 -10
- AutoGLM_GUI/api/workflows.py +2 -1
- AutoGLM_GUI/config_manager.py +55 -1
- AutoGLM_GUI/device_adapter.py +263 -0
- AutoGLM_GUI/device_protocol.py +266 -0
- AutoGLM_GUI/devices/__init__.py +49 -0
- AutoGLM_GUI/devices/adb_device.py +205 -0
- AutoGLM_GUI/devices/mock_device.py +183 -0
- AutoGLM_GUI/devices/remote_device.py +172 -0
- AutoGLM_GUI/dual_model/decision_model.py +4 -4
- AutoGLM_GUI/exceptions.py +3 -3
- AutoGLM_GUI/mai_ui_adapter/agent_wrapper.py +2 -2
- AutoGLM_GUI/metrics.py +13 -20
- AutoGLM_GUI/phone_agent_manager.py +219 -134
- AutoGLM_GUI/phone_agent_patches.py +2 -1
- AutoGLM_GUI/platform_utils.py +5 -2
- AutoGLM_GUI/schemas.py +47 -0
- AutoGLM_GUI/scrcpy_stream.py +17 -13
- AutoGLM_GUI/server.py +3 -1
- AutoGLM_GUI/socketio_server.py +16 -4
- AutoGLM_GUI/state.py +10 -30
- AutoGLM_GUI/static/assets/{about-DeclntHg.js → about-_XNhzQZX.js} +1 -1
- AutoGLM_GUI/static/assets/chat-DwJpiAWf.js +126 -0
- AutoGLM_GUI/static/assets/{dialog-BfdcBs1x.js → dialog-B3uW4T8V.js} +3 -3
- AutoGLM_GUI/static/assets/index-Cpv2gSF1.css +1 -0
- AutoGLM_GUI/static/assets/{index-zQ4KKDHt.js → index-Cy8TmmHV.js} +1 -1
- AutoGLM_GUI/static/assets/{index-DHF1NZh0.js → index-UYYauTly.js} +6 -6
- AutoGLM_GUI/static/assets/{workflows-xiplap-r.js → workflows-Du_de-dt.js} +1 -1
- AutoGLM_GUI/static/index.html +2 -2
- AutoGLM_GUI/types.py +125 -0
- {autoglm_gui-1.4.0.dist-info → autoglm_gui-1.4.1.dist-info}/METADATA +83 -4
- {autoglm_gui-1.4.0.dist-info → autoglm_gui-1.4.1.dist-info}/RECORD +54 -37
- mai_agent/base.py +137 -0
- mai_agent/mai_grounding_agent.py +263 -0
- mai_agent/mai_naivigation_agent.py +526 -0
- mai_agent/prompt.py +148 -0
- mai_agent/unified_memory.py +67 -0
- mai_agent/utils.py +73 -0
- AutoGLM_GUI/config.py +0 -23
- AutoGLM_GUI/static/assets/chat-Iut2yhSw.js +0 -125
- AutoGLM_GUI/static/assets/index-5hCCwHA7.css +0 -1
- {autoglm_gui-1.4.0.dist-info → autoglm_gui-1.4.1.dist-info}/WHEEL +0 -0
- {autoglm_gui-1.4.0.dist-info → autoglm_gui-1.4.1.dist-info}/entry_points.txt +0 -0
- {autoglm_gui-1.4.0.dist-info → autoglm_gui-1.4.1.dist-info}/licenses/LICENSE +0 -0
AutoGLM_GUI/api/layered_agent.py
CHANGED

@@ -6,9 +6,13 @@ a decision model for planning and autoglm-phone for execution.
 
 import asyncio
 import json
-[1 deleted line not shown]
+import threading
+from typing import TYPE_CHECKING, Any
 
 from agents import Agent, Runner, SQLiteSession, function_tool
+
+if TYPE_CHECKING:
+    from agents.result import RunResultStreaming
 from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
 from fastapi import APIRouter
 from fastapi.responses import StreamingResponse

@@ -24,6 +28,11 @@ router = APIRouter()
 # Stores the SQLiteSession for each session_id (in-memory mode)
 _sessions: dict[str, SQLiteSession] = {}
 
+# ==================== Active run management ====================
+# Stores the active RunResultStreaming instance for each session_id, used for abort
+_active_runs: dict[str, "RunResultStreaming"] = {}
+_active_runs_lock = threading.Lock()
+
 
 def _get_or_create_session(session_id: str) -> SQLiteSession:
     """Get or create the in-memory session for the given session_id."""

@@ -146,7 +155,9 @@ def _sync_list_devices() -> str:
         _build_device_response_with_agent(d, agent_manager) for d in managed_devices
     ]
 
-[1 deleted line not shown]
+    # Convert DeviceResponse Pydantic models to dicts before JSON serialization
+    devices_dict = [device.model_dump() for device in devices_with_agents]
+    return json.dumps(devices_dict, ensure_ascii=False, indent=2)
 
 
 @function_tool

@@ -307,14 +318,35 @@ def _create_planner_agent(client: AsyncOpenAI) -> Agent[Any]:
 # Global agent instance (lazy initialized)
 _client: AsyncOpenAI | None = None
 _agent: Agent[Any] | None = None
+_cached_config_hash: str | None = None
+
+
+def _compute_config_hash() -> str:
+    import hashlib
+
+    config = config_manager.get_effective_config()
+    config_str = config.model_dump_json()
+    return hashlib.md5(config_str.encode()).hexdigest()
 
 
 def _ensure_agent() -> Agent[Any]:
-[3 deleted lines not shown]
+    global _client, _agent, _cached_config_hash
+
+    current_hash = _compute_config_hash()
+
+    if _agent is None or _cached_config_hash != current_hash:
+        if _agent is not None and _cached_config_hash != current_hash:
+            logger.info(
+                f"[LayeredAgent] Config changed (hash: {_cached_config_hash} -> {current_hash}), reloading agent..."
+            )
+
         _client = _setup_openai_client()
         _agent = _create_planner_agent(_client)
+        _cached_config_hash = current_hash
+        logger.info(
+            f"[LayeredAgent] Agent initialized/reloaded with config hash: {current_hash}"
+        )
+
     return _agent
 
 

@@ -366,175 +398,182 @@ async def layered_agent_chat(request: LayeredAgentRequest):
             session=session,
         )
 
+        # Save the active run instance so it can be aborted
+        with _active_runs_lock:
+            _active_runs[session_id] = result
+
         current_tool_call: dict[str, Any] | None = None
 
-[30 deleted lines not shown]
-                            try:
-                                tool_args = (
-                                    json.loads(args_str)
-                                    if isinstance(args_str, str)
-                                    else args_str
+        try:
+            async for event in result.stream_events():
+                if isinstance(event, RawResponsesStreamEvent):
+                    # Raw response chunk - could contain thinking
+                    pass
+
+                elif isinstance(event, RunItemStreamEvent):
+                    item = event.item
+
+                    # Handle different item types
+                    item_type = getattr(item, "type", None)
+
+                    if item_type == "tool_call_item":
+                        # Tool call started - extract name from raw_item
+                        tool_name = "unknown"
+                        tool_args: dict[str, Any] = {}
+
+                        # Try to get from raw_item
+                        if hasattr(item, "raw_item") and item.raw_item:
+                            raw = item.raw_item
+
+                            # Handle dict format (sometimes returned as dict)
+                            if isinstance(raw, dict):
+                                tool_name = raw.get(
+                                    "name",
+                                    raw.get("function", {}).get("name", "unknown"),
+                                )
+                                args_str = raw.get(
+                                    "arguments",
+                                    raw.get("function", {}).get("arguments", "{}"),
                                 )
-[19 deleted lines not shown]
-                            tool_name = raw.name
-                            if hasattr(raw, "arguments"):
-                                try:
-                                    tool_args = (
-                                        json.loads(raw.arguments)
-                                        if isinstance(raw.arguments, str)
-                                        else raw.arguments
-                                    )
-                                except Exception:
-                                    tool_args = {"raw": str(raw.arguments)}
-
-                        # Fallback to direct item attributes
-                        if tool_name == "unknown":
-                            if hasattr(item, "name") and item.name:
-                                tool_name = item.name
-                            elif hasattr(item, "call") and item.call:
-                                call = item.call
-                                if hasattr(call, "function") and call.function:
-                                    if hasattr(call.function, "name"):
-                                        tool_name = call.function.name
-                                    if hasattr(call.function, "arguments"):
-                                        try:
-                                            tool_args = (
-                                                json.loads(call.function.arguments)
-                                                if isinstance(
-                                                    call.function.arguments, str
+                                try:
+                                    tool_args = (
+                                        json.loads(args_str)
+                                        if isinstance(args_str, str)
+                                        else args_str
+                                    )
+                                except Exception:
+                                    tool_args = {"raw": str(args_str)}
+                            else:
+                                func = getattr(raw, "function", None)
+                                if func:
+                                    tool_name = getattr(func, "name", "unknown")
+                                    args_val = getattr(func, "arguments", None)
+                                    if args_val:
+                                        try:
+                                            tool_args = (
+                                                json.loads(args_val)
+                                                if isinstance(args_val, str)
+                                                else args_val
                                             )
-[60 deleted lines not shown]
-                            "tool_name": tool_name,
-                            "result": output,
-                        }
-                        yield f"data: {json.dumps(event_data, ensure_ascii=False)}\n\n"
-                        current_tool_call = None
-
-                    elif item_type == "message_output_item":
-                        # Final message
-                        content = ""
-                        if hasattr(item, "raw_item") and item.raw_item:
-                            if (
-                                hasattr(item.raw_item, "content")
-                                and item.raw_item.content
-                            ):
-                                for c in item.raw_item.content:
-                                    if hasattr(c, "text"):
-                                        content += c.text
-
-                        if content:
+                                        except Exception:
+                                            tool_args = {"raw": str(args_val)}
+                                else:
+                                    name_val = getattr(raw, "name", None)
+                                    if name_val:
+                                        tool_name = name_val
+                                    args_val = getattr(raw, "arguments", None)
+                                    if args_val:
+                                        try:
+                                            tool_args = (
+                                                json.loads(args_val)
+                                                if isinstance(args_val, str)
+                                                else args_val
+                                            )
+                                        except Exception:
+                                            tool_args = {"raw": str(args_val)}
+
+                        # Fallback to direct item attributes
+                        if tool_name == "unknown":
+                            if hasattr(item, "name") and item.name:
+                                tool_name = item.name
+                            elif hasattr(item, "call") and item.call:
+                                call = item.call
+                                if hasattr(call, "function") and call.function:
+                                    if hasattr(call.function, "name"):
+                                        tool_name = call.function.name
+                                    if hasattr(call.function, "arguments"):
+                                        try:
+                                            tool_args = (
+                                                json.loads(call.function.arguments)
+                                                if isinstance(
+                                                    call.function.arguments, str
+                                                )
+                                                else call.function.arguments
+                                            )
+                                        except Exception:
+                                            tool_args = {
+                                                "raw": str(call.function.arguments)
+                                            }
+                                elif hasattr(call, "name"):
+                                    tool_name = call.name
+                                    if hasattr(call, "arguments"):
+                                        try:
+                                            tool_args = (
+                                                json.loads(call.arguments)
+                                                if isinstance(call.arguments, str)
+                                                else call.arguments
+                                            )
+                                        except Exception:
+                                            tool_args = {"raw": str(call.arguments)}
+
+                        logger.info(
+                            f"[LayeredAgent] Tool call: {tool_name}, args keys: {list(tool_args.keys()) if isinstance(tool_args, dict) else 'not dict'}"
+                        )
+
+                        current_tool_call = {
+                            "name": tool_name,
+                            "args": tool_args,
+                        }
+
                         event_data = {
-[2 deleted lines not shown]
+                            "type": "tool_call",
+                            "tool_name": tool_name,
+                            "tool_args": tool_args,
                         }
                         yield f"data: {json.dumps(event_data, ensure_ascii=False)}\n\n"
 
+                    elif item_type == "tool_call_output_item":
+                        # Tool call result
+                        output = getattr(item, "output", "")
+
+                        # Get tool name from current_tool_call or try to extract from item
+                        tool_name = (
+                            current_tool_call["name"]
+                            if current_tool_call
+                            else "unknown"
+                        )
+
+                        raw_item = getattr(item, "raw_item", None)
+                        if tool_name == "unknown" and raw_item:
+                            name_val = getattr(raw_item, "name", None)
+                            if name_val:
+                                tool_name = name_val
+
+                        logger.info(
+                            f"[LayeredAgent] Tool result for {tool_name}: {str(output)[:100] if output else 'empty'}..."
+                        )
+
+                        event_data = {
+                            "type": "tool_result",
+                            "tool_name": tool_name,
+                            "result": output,
+                        }
+                        yield f"data: {json.dumps(event_data, ensure_ascii=False)}\n\n"
+                        current_tool_call = None
+
+                    elif item_type == "message_output_item":
+                        content = ""
+                        raw_item = getattr(item, "raw_item", None)
+                        if raw_item:
+                            raw_content = getattr(raw_item, "content", None)
+                            if raw_content:
+                                for c in raw_content:
+                                    text_val = getattr(c, "text", None)
+                                    if text_val:
+                                        content += text_val
+
+                        if content:
+                            event_data = {
+                                "type": "message",
+                                "content": content,
+                            }
+                            yield f"data: {json.dumps(event_data, ensure_ascii=False)}\n\n"
+
+        finally:
+            # Clean up the active run instance
+            with _active_runs_lock:
+                _active_runs.pop(session_id, None)
+
         # Final result
         final_output = (
             result.final_output if hasattr(result, "final_output") else ""

@@ -565,6 +604,40 @@ async def layered_agent_chat(request: LayeredAgentRequest):
     )
 
 
+class AbortSessionRequest(BaseModel):
+    """Request for aborting a running session."""
+
+    session_id: str
+
+
+@router.post("/api/layered-agent/abort")
+def abort_session(request: AbortSessionRequest):
+    """
+    Abort a running layered agent session.
+
+    Uses the OpenAI agents SDK's native cancel() method to stop execution.
+    """
+    session_id = request.session_id
+
+    with _active_runs_lock:
+        if session_id in _active_runs:
+            result = _active_runs[session_id]
+            result.cancel(mode="immediate")
+            logger.info(f"[LayeredAgent] Aborted session: {session_id}")
+            return {
+                "success": True,
+                "message": f"Session {session_id} abort signal sent",
+            }
+        else:
+            logger.warning(
+                f"[LayeredAgent] No active run found for session: {session_id}"
+            )
+            return {
+                "success": False,
+                "message": f"No active run found for session {session_id}",
+            }
+
+
 class ResetSessionRequest(BaseModel):
     """Request for resetting a session."""
 
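
Note on the abort flow above: the handler keeps one RunResultStreaming handle per session_id and calls its cancel(mode="immediate") when asked to stop. A minimal client-side sketch of how the new endpoint could be driven, assuming the server is reachable at http://localhost:8000 (the base URL, timeout, and use of the requests library are assumptions, not part of this diff):

    import requests  # any HTTP client works; requests is assumed here for brevity

    def abort_layered_agent(session_id: str, base_url: str = "http://localhost:8000") -> bool:
        """Ask the server to cancel the active layered-agent run for a session."""
        resp = requests.post(
            f"{base_url}/api/layered-agent/abort",
            json={"session_id": session_id},
            timeout=10,
        )
        resp.raise_for_status()
        payload = resp.json()  # {"success": bool, "message": str} per the handler above
        return bool(payload.get("success", False))

Because the streaming generator removes the handle in its finally block, an abort issued after the run has already finished simply returns success=False rather than raising.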
AutoGLM_GUI/api/mcp.py
CHANGED

@@ -1,11 +1,19 @@
 """MCP (Model Context Protocol) tools for AutoGLM-GUI."""
 
-from …
+from typing_extensions import TypedDict
 
 from fastmcp import FastMCP
 
 from AutoGLM_GUI.logger import logger
 from AutoGLM_GUI.prompts import MCP_SYSTEM_PROMPT_ZH
+from AutoGLM_GUI.schemas import DeviceResponse
+
+
+class ChatResult(TypedDict):
+    result: str
+    steps: int
+    success: bool
+
 
 # Create the MCP server instance
 mcp = FastMCP("AutoGLM-GUI MCP Server")

@@ -15,7 +23,7 @@ MCP_MAX_STEPS = 5
 
 
 @mcp.tool()
-def chat(device_id: str, message: str) -> Dict[str, Any]:
+def chat(device_id: str, message: str) -> ChatResult:
     """
     Send a task to the AutoGLM Phone Agent for execution.
 

@@ -26,13 +34,6 @@ def chat(device_id: str, message: str) -> Dict[str, Any]:
     Args:
         device_id: Device identifier (e.g., "192.168.1.100:5555" or serial)
         message: Natural language task (e.g., "打开微信", "发送消息")
-
-    Returns:
-        {
-            "result": str,   # Task execution result
-            "steps": int,    # Number of steps taken
-            "success": bool  # Success flag
-        }
     """
     from AutoGLM_GUI.exceptions import DeviceBusyError
     from AutoGLM_GUI.phone_agent_manager import PhoneAgentManager

@@ -84,7 +85,7 @@ def chat(device_id: str, message: str) -> Dict[str, Any]:
 
 
 @mcp.tool()
-def list_devices() -> …
+def list_devices() -> list[DeviceResponse]:
     """
     List all connected ADB devices and their agent status.
 
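
The mcp.py changes replace loose return annotations on the chat and list_devices tools with ChatResult and list[DeviceResponse], so the result shape is declared in the type rather than in a docstring table, and FastMCP can describe the structured output from the annotation. A standalone sketch of the same pattern (the tool name, fields, and values below are illustrative only, not taken from the package):

    from typing_extensions import TypedDict
    from fastmcp import FastMCP

    mcp = FastMCP("demo-server")

    class PingResult(TypedDict):
        ok: bool
        latency_ms: float

    @mcp.tool()
    def ping(host: str) -> PingResult:
        """Check whether a host answers and report a latency figure."""
        # Real measurement omitted; fixed values keep the sketch self-contained.
        return {"ok": True, "latency_ms": 1.0}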
AutoGLM_GUI/api/version.py
CHANGED

@@ -3,8 +3,9 @@
 import json
 import re
 import time
+import urllib.error
 import urllib.request
-from …
+from typing_extensions import TypedDict
 
 from fastapi import APIRouter
 

@@ -12,13 +13,30 @@ from AutoGLM_GUI.logger import logger
 from AutoGLM_GUI.schemas import VersionCheckResponse
 from AutoGLM_GUI.version import APP_VERSION
 
+
+class GitHubRelease(TypedDict, total=False):
+    """GitHub Release API response structure."""
+
+    tag_name: str
+    html_url: str
+    published_at: str
+
+
+class _VersionCache(TypedDict):
+    """Internal cache structure for version checking."""
+
+    data: VersionCheckResponse | None
+    timestamp: float
+    ttl: int
+
+
 router = APIRouter()
 
 # In-memory cache for version check results
-_version_cache: …
+_version_cache: _VersionCache = {
     "data": None,
     "timestamp": 0,
-    "ttl": 3600,
+    "ttl": 3600,
 }
 
 # GitHub repository information

@@ -74,13 +92,8 @@ def compare_versions(current: str, latest: str) -> bool:
     return latest_tuple > current_tuple
 
 
-def fetch_latest_release() -> …
-    """
-    Fetch latest release information from GitHub API.
-
-    Returns:
-        Release data dict with 'tag_name', 'html_url', 'published_at' or None on error
-    """
+def fetch_latest_release() -> GitHubRelease | None:
+    """Fetch latest release information from GitHub API."""
     try:
         # Create request with User-Agent header (required by GitHub API)
         req = urllib.request.Request(
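
The new _VersionCache TypedDict only types the module-level cache; the diff does not show the code that reads it. Presumably the check path compares the cached timestamp against the 3600-second TTL along these lines (a sketch under that assumption, with the helper name invented here):

    import time
    from typing import Any

    def read_version_cache(cache: dict[str, Any]) -> Any:
        """Return the cached response while it is younger than cache["ttl"] seconds, else None."""
        if cache["data"] is not None and (time.time() - cache["timestamp"]) < cache["ttl"]:
            return cache["data"]
        return None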
AutoGLM_GUI/api/workflows.py
CHANGED

@@ -17,7 +17,8 @@ def list_workflows() -> WorkflowListResponse:
     """Get all workflows."""
     from AutoGLM_GUI.workflow_manager import workflow_manager
 
-[1 deleted line not shown]
+    workflow_dicts = workflow_manager.list_workflows()
+    workflows = [WorkflowResponse(**wf) for wf in workflow_dicts]
     return WorkflowListResponse(workflows=workflows)
 
 