@researai/deepscientist 1.5.2 → 1.5.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114) hide show
  1. package/README.md +22 -0
  2. package/bin/ds.js +399 -175
  3. package/docs/en/00_QUICK_START.md +22 -0
  4. package/docs/en/01_SETTINGS_REFERENCE.md +13 -4
  5. package/docs/en/99_ACKNOWLEDGEMENTS.md +1 -0
  6. package/docs/images/connectors/discord-setup-overview.svg +52 -0
  7. package/docs/images/connectors/feishu-setup-overview.svg +53 -0
  8. package/docs/images/connectors/slack-setup-overview.svg +51 -0
  9. package/docs/images/connectors/telegram-setup-overview.svg +55 -0
  10. package/docs/images/connectors/whatsapp-setup-overview.svg +51 -0
  11. package/docs/images/lingzhu/lingzhu-openclaw-config.svg +17 -0
  12. package/docs/images/lingzhu/lingzhu-platform-values.svg +16 -0
  13. package/docs/images/lingzhu/lingzhu-settings-overview.svg +30 -0
  14. package/docs/images/qq/tencent-cloud-qq-chat.png +0 -0
  15. package/docs/images/qq/tencent-cloud-qq-register.png +0 -0
  16. package/docs/images/quickstart/00-home.png +0 -0
  17. package/docs/images/quickstart/01-start-research.png +0 -0
  18. package/docs/images/quickstart/02-list-quest.png +0 -0
  19. package/docs/zh/00_QUICK_START.md +22 -0
  20. package/docs/zh/01_SETTINGS_REFERENCE.md +14 -5
  21. package/docs/zh/99_ACKNOWLEDGEMENTS.md +1 -0
  22. package/install.sh +120 -4
  23. package/package.json +8 -4
  24. package/pyproject.toml +1 -1
  25. package/src/deepscientist/__init__.py +1 -1
  26. package/src/deepscientist/artifact/service.py +1 -1
  27. package/src/deepscientist/bash_exec/monitor.py +23 -4
  28. package/src/deepscientist/bash_exec/runtime.py +3 -0
  29. package/src/deepscientist/bash_exec/service.py +132 -4
  30. package/src/deepscientist/bridges/base.py +12 -20
  31. package/src/deepscientist/bridges/connectors.py +2 -1
  32. package/src/deepscientist/channels/discord_gateway.py +27 -4
  33. package/src/deepscientist/channels/feishu_long_connection.py +41 -3
  34. package/src/deepscientist/channels/qq.py +524 -64
  35. package/src/deepscientist/channels/qq_gateway.py +24 -5
  36. package/src/deepscientist/channels/relay.py +429 -90
  37. package/src/deepscientist/channels/slack_socket.py +31 -7
  38. package/src/deepscientist/channels/telegram_polling.py +27 -3
  39. package/src/deepscientist/channels/whatsapp_local_session.py +32 -4
  40. package/src/deepscientist/cli.py +31 -1
  41. package/src/deepscientist/config/models.py +13 -43
  42. package/src/deepscientist/config/service.py +216 -157
  43. package/src/deepscientist/connector_profiles.py +346 -0
  44. package/src/deepscientist/connector_runtime.py +88 -43
  45. package/src/deepscientist/daemon/api/handlers.py +53 -16
  46. package/src/deepscientist/daemon/api/router.py +2 -2
  47. package/src/deepscientist/daemon/app.py +747 -228
  48. package/src/deepscientist/mcp/server.py +60 -7
  49. package/src/deepscientist/migration.py +114 -0
  50. package/src/deepscientist/network.py +78 -0
  51. package/src/deepscientist/prompts/builder.py +50 -4
  52. package/src/deepscientist/qq_profiles.py +186 -0
  53. package/src/deepscientist/quest/service.py +1 -1
  54. package/src/deepscientist/skills/installer.py +77 -1
  55. package/src/prompts/connectors/qq.md +42 -2
  56. package/src/prompts/system.md +162 -6
  57. package/src/skills/analysis-campaign/SKILL.md +19 -5
  58. package/src/skills/baseline/SKILL.md +66 -31
  59. package/src/skills/decision/SKILL.md +1 -1
  60. package/src/skills/experiment/SKILL.md +11 -5
  61. package/src/skills/finalize/SKILL.md +1 -1
  62. package/src/skills/idea/SKILL.md +246 -4
  63. package/src/skills/intake-audit/SKILL.md +1 -1
  64. package/src/skills/rebuttal/SKILL.md +1 -1
  65. package/src/skills/review/SKILL.md +1 -1
  66. package/src/skills/scout/SKILL.md +1 -1
  67. package/src/skills/write/SKILL.md +152 -2
  68. package/src/tui/package.json +1 -1
  69. package/src/ui/dist/assets/{AiManusChatView-CZpg376x.js → AiManusChatView-BGLArZRn.js} +14 -37
  70. package/src/ui/dist/assets/{AnalysisPlugin-CtHA22g3.js → AnalysisPlugin-BgDGSigG.js} +1 -1
  71. package/src/ui/dist/assets/{AutoFigurePlugin-BSWmLMmF.js → AutoFigurePlugin-B65HD7L4.js} +5 -5
  72. package/src/ui/dist/assets/{CliPlugin-CJ7jdm_s.js → CliPlugin-CUqgsFHC.js} +17 -110
  73. package/src/ui/dist/assets/{CodeEditorPlugin-DhInVGFf.js → CodeEditorPlugin-CF5EdvaS.js} +8 -8
  74. package/src/ui/dist/assets/{CodeViewerPlugin-D1n8S9r5.js → CodeViewerPlugin-DEeU063D.js} +5 -5
  75. package/src/ui/dist/assets/{DocViewerPlugin-C4XM_kqk.js → DocViewerPlugin-Df-FuDlZ.js} +3 -3
  76. package/src/ui/dist/assets/{GitDiffViewerPlugin-W6kS9r6v.js → GitDiffViewerPlugin-RAnNaRxM.js} +1 -1
  77. package/src/ui/dist/assets/{ImageViewerPlugin-DPeUx_Oz.js → ImageViewerPlugin-DXJ0ZJGg.js} +5 -5
  78. package/src/ui/dist/assets/{LabCopilotPanel-eAelUaub.js → LabCopilotPanel-BlO-sKsj.js} +10 -10
  79. package/src/ui/dist/assets/{LabPlugin-BbOrBxKY.js → LabPlugin-BajPZW5v.js} +1 -1
  80. package/src/ui/dist/assets/{LatexPlugin-C-HhkVXY.js → LatexPlugin-F1OEol8D.js} +7 -7
  81. package/src/ui/dist/assets/{MarkdownViewerPlugin-BDIzIBfh.js → MarkdownViewerPlugin-MhUupqwT.js} +4 -4
  82. package/src/ui/dist/assets/{MarketplacePlugin-DAOJphwr.js → MarketplacePlugin-DxhIEsv0.js} +3 -3
  83. package/src/ui/dist/assets/{NotebookEditor-BsoMvDoU.js → NotebookEditor-q7TkhewC.js} +1 -1
  84. package/src/ui/dist/assets/{PdfLoader-fiC7RtHf.js → PdfLoader-B8ZOTKFc.js} +1 -1
  85. package/src/ui/dist/assets/{PdfMarkdownPlugin-C5OxZBFK.js → PdfMarkdownPlugin-xFPvzvWh.js} +3 -3
  86. package/src/ui/dist/assets/{PdfViewerPlugin-CAbxQebk.js → PdfViewerPlugin-EjEcsIB8.js} +10 -10
  87. package/src/ui/dist/assets/{SearchPlugin-SE33Lb9B.js → SearchPlugin-ixY-1lgW.js} +1 -1
  88. package/src/ui/dist/assets/{Stepper-0Av7GfV7.js → Stepper-gYFK2Pgz.js} +1 -1
  89. package/src/ui/dist/assets/{TextViewerPlugin-Daf2gJDI.js → TextViewerPlugin-Cym6pv_n.js} +4 -4
  90. package/src/ui/dist/assets/{VNCViewer-BKrMUIOX.js → VNCViewer-BPmIHcmK.js} +9 -9
  91. package/src/ui/dist/assets/{bibtex-JBdOEe45.js → bibtex-Btv6Wi7f.js} +1 -1
  92. package/src/ui/dist/assets/{code-B0TDFCZz.js → code-BlG7g85c.js} +1 -1
  93. package/src/ui/dist/assets/{file-content-3YtrSacz.js → file-content-DBT5OfTZ.js} +1 -1
  94. package/src/ui/dist/assets/{file-diff-panel-CJEg5OG1.js → file-diff-panel-BWXYzqHk.js} +1 -1
  95. package/src/ui/dist/assets/{file-socket-CYQYdmB1.js → file-socket-wDlx6byM.js} +1 -1
  96. package/src/ui/dist/assets/{file-utils-Cd1C9Ppl.js → file-utils-Ba3nJmH0.js} +1 -1
  97. package/src/ui/dist/assets/{image-B33ctrvC.js → image-BwtCyguk.js} +1 -1
  98. package/src/ui/dist/assets/{index-BNQWqmJ2.js → index-B-2scqCJ.js} +11 -11
  99. package/src/ui/dist/assets/{index-BVXsmS7V.js → index-Bz5AaWL7.js} +52383 -51440
  100. package/src/ui/dist/assets/{index-Buw_N1VQ.js → index-CfRpE209.js} +2 -2
  101. package/src/ui/dist/assets/{index-9CLPVeZh.js → index-DcqvKzeJ.js} +1 -1
  102. package/src/ui/dist/assets/{index-SwmFAld3.css → index-DpMZw8aM.css} +49 -2
  103. package/src/ui/dist/assets/{message-square-D0cUJ9yU.js → message-square-BnlyWVH0.js} +1 -1
  104. package/src/ui/dist/assets/{monaco-UZLYkp2n.js → monaco-CXe0pAVe.js} +1 -1
  105. package/src/ui/dist/assets/{popover-CTeiY-dK.js → popover-BCHmVhHj.js} +1 -1
  106. package/src/ui/dist/assets/{project-sync-Dbs01Xky.js → project-sync-Brk6kaOD.js} +1 -1
  107. package/src/ui/dist/assets/{sigma-CM08S-xT.js → sigma-D72eSUep.js} +1 -1
  108. package/src/ui/dist/assets/{tooltip-pDtzvU9p.js → tooltip-BMWd0dqX.js} +1 -1
  109. package/src/ui/dist/assets/{trash-YvPCP-da.js → trash-BIt_eWIS.js} +1 -1
  110. package/src/ui/dist/assets/{useCliAccess-Bavi74Ac.js → useCliAccess-N1hkTRrR.js} +1 -1
  111. package/src/ui/dist/assets/{useFileDiffOverlay-CVXY6oeg.js → useFileDiffOverlay-DPRPv6rv.js} +1 -1
  112. package/src/ui/dist/assets/{wrap-text-Cf4flRW7.js → wrap-text-E5-UheyP.js} +1 -1
  113. package/src/ui/dist/assets/{zoom-out-Hb0Z1YpT.js → zoom-out-D4TR-ZZ_.js} +1 -1
  114. package/src/ui/dist/index.html +2 -2
@@ -661,8 +661,8 @@ def build_bash_exec_server(context: McpContext) -> FastMCP:
661
661
  description=(
662
662
  "Execute a bash command inside the current quest. "
663
663
  "mode=detach returns immediately. mode=await/create waits for completion. "
664
- "mode=read returns the saved log. mode=kill requests termination. "
665
- "mode=list shows known quest-local bash sessions."
664
+ "mode=read returns the saved log or a tailed log window. mode=kill requests termination. "
665
+ "mode=list shows known quest-local bash sessions. mode=history shows a compact reverse-chronological bash id list."
666
666
  ),
667
667
  )
668
668
  def bash_exec(
@@ -676,39 +676,88 @@ def build_bash_exec_server(context: McpContext) -> FastMCP:
676
676
  export_log_to: str | None = None,
677
677
  timeout_seconds: int | None = None,
678
678
  status: str | None = None,
679
+ kind: str | None = None,
679
680
  agent_ids: list[str] | None = None,
680
681
  agent_instance_ids: list[str] | None = None,
681
682
  chat_session_id: str | None = None,
682
683
  limit: int = 20,
684
+ tail_limit: int | None = None,
685
+ before_seq: int | None = None,
686
+ after_seq: int | None = None,
687
+ order: str = "asc",
688
+ include_log: bool = False,
689
+ wait: bool = False,
690
+ force: bool = False,
683
691
  comment: str | dict[str, Any] | None = None,
684
692
  ) -> dict[str, Any]:
685
693
  quest_root = context.require_quest_root().resolve()
686
694
  normalized_mode = (mode or "detach").strip().lower()
687
695
  if normalized_mode == "create":
688
696
  normalized_mode = "await"
689
- if normalized_mode not in {"detach", "await", "read", "kill", "list"}:
690
- raise ValueError("Mode must be one of `detach`, `await`, `create`, `read`, `kill`, or `list`.")
691
- if normalized_mode == "list":
697
+ if normalized_mode not in {"detach", "await", "read", "kill", "list", "history"}:
698
+ raise ValueError("Mode must be one of `detach`, `await`, `create`, `read`, `kill`, `list`, or `history`.")
699
+ if normalized_mode in {"list", "history"}:
700
+ resolved_limit = 500 if normalized_mode == "history" and limit == 20 else max(1, min(limit, 500))
692
701
  items = service.list_sessions(
693
702
  quest_root,
694
703
  status=status,
704
+ kind=kind,
695
705
  agent_ids=agent_ids,
696
706
  agent_instance_ids=agent_instance_ids,
697
707
  chat_session_id=chat_session_id,
698
- limit=max(1, min(limit, 500)),
708
+ limit=resolved_limit,
699
709
  )
710
+ history_lines = [service.format_history_line(item) for item in items]
700
711
  counts: dict[str, int] = {}
701
712
  for item in items:
702
713
  item_status = str(item.get("status") or "unknown")
703
714
  counts[item_status] = counts.get(item_status, 0) + 1
704
- return {
715
+ payload = {
705
716
  "count": len(items),
706
717
  "items": items,
707
718
  "status_counts": counts,
719
+ "summary": service.summary(quest_root),
720
+ "history_lines": history_lines,
708
721
  }
722
+ if normalized_mode == "history":
723
+ return {
724
+ "count": len(items),
725
+ "lines": history_lines,
726
+ "items": items,
727
+ }
728
+ return payload
709
729
  if normalized_mode == "read":
710
730
  bash_id = service.resolve_session_id(quest_root, id)
711
731
  session = service.get_session(quest_root, bash_id)
732
+ normalized_order = (order or "asc").strip().lower()
733
+ if normalized_order not in {"asc", "desc"}:
734
+ normalized_order = "asc"
735
+ use_tail = tail_limit is not None or before_seq is not None or after_seq is not None or normalized_order != "asc"
736
+ if use_tail:
737
+ resolved_tail_limit = max(1, min(int(tail_limit or 200), 1000))
738
+ entries, tail_meta = service.read_log_entries(
739
+ quest_root,
740
+ bash_id,
741
+ limit=resolved_tail_limit,
742
+ before_seq=before_seq,
743
+ after_seq=after_seq,
744
+ order=normalized_order,
745
+ )
746
+ payload = service.build_tool_result(
747
+ context,
748
+ session=session,
749
+ include_log=include_log,
750
+ export_log=export_log,
751
+ export_log_to=export_log_to,
752
+ )
753
+ payload["tail"] = entries
754
+ payload["tail_limit"] = tail_meta.get("tail_limit")
755
+ payload["tail_start_seq"] = tail_meta.get("tail_start_seq")
756
+ payload["latest_seq"] = tail_meta.get("latest_seq")
757
+ payload["after_seq"] = tail_meta.get("after_seq")
758
+ payload["before_seq"] = tail_meta.get("before_seq")
759
+ payload["order"] = normalized_order
760
+ return payload
712
761
  return service.build_tool_result(
713
762
  context,
714
763
  session=session,
@@ -723,7 +772,10 @@ def build_bash_exec_server(context: McpContext) -> FastMCP:
723
772
  bash_id,
724
773
  reason=reason,
725
774
  user_id=f"agent:{context.agent_role or 'pi'}",
775
+ force=force,
726
776
  )
777
+ if wait:
778
+ session = service.wait_for_session(quest_root, bash_id, timeout_seconds=timeout_seconds)
727
779
  return service.build_tool_result(context, session=session, include_log=False)
728
780
  if normalized_mode == "await" and not command:
729
781
  bash_id = service.resolve_session_id(quest_root, id)
@@ -744,6 +796,7 @@ def build_bash_exec_server(context: McpContext) -> FastMCP:
744
796
  workdir=workdir,
745
797
  env=env,
746
798
  timeout_seconds=timeout_seconds,
799
+ comment=comment,
747
800
  )
748
801
  if normalized_mode == "detach":
749
802
  return service.build_tool_result(context, session=session, include_log=False)
@@ -0,0 +1,114 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import shutil
5
+ import uuid
6
+ from pathlib import Path
7
+ from typing import Any
8
+
9
+
10
+ HOME_SIGNATURES = (
11
+ "runtime",
12
+ "config",
13
+ "memory",
14
+ "quests",
15
+ "plugins",
16
+ "logs",
17
+ "cache",
18
+ "cli",
19
+ )
20
+
21
+
22
+ def looks_like_deepscientist_root(path: Path) -> bool:
23
+ if not path.exists() or not path.is_dir():
24
+ return False
25
+ if (path / "cli" / "bin" / "ds.js").exists():
26
+ return True
27
+ return any((path / name).exists() for name in HOME_SIGNATURES)
28
+
29
+
30
+ def _is_relative_to(candidate: Path, other: Path) -> bool:
31
+ try:
32
+ candidate.relative_to(other)
33
+ return True
34
+ except ValueError:
35
+ return False
36
+
37
+
38
+ def _collect_manifest(root: Path) -> dict[str, Any]:
39
+ manifest: dict[str, Any] = {}
40
+ file_count = 0
41
+ dir_count = 0
42
+ symlink_count = 0
43
+ total_bytes = 0
44
+ stack = [Path("")]
45
+ while stack:
46
+ rel_root = stack.pop()
47
+ current_root = root / rel_root
48
+ for child in sorted(current_root.iterdir(), key=lambda item: item.name):
49
+ rel_path = (rel_root / child.name).as_posix()
50
+ if child.is_symlink():
51
+ manifest[rel_path] = {"kind": "symlink", "target": os.readlink(child)}
52
+ symlink_count += 1
53
+ continue
54
+ if child.is_dir():
55
+ manifest[rel_path] = {"kind": "dir"}
56
+ dir_count += 1
57
+ stack.append(rel_root / child.name)
58
+ continue
59
+ size = child.stat().st_size
60
+ manifest[rel_path] = {"kind": "file", "size": size}
61
+ file_count += 1
62
+ total_bytes += size
63
+ return {
64
+ "entries": manifest,
65
+ "stats": {
66
+ "file_count": file_count,
67
+ "dir_count": dir_count,
68
+ "symlink_count": symlink_count,
69
+ "total_bytes": total_bytes,
70
+ "entry_count": len(manifest),
71
+ },
72
+ }
73
+
74
+
75
+ def migrate_deepscientist_root(source: Path, target: Path) -> dict[str, Any]:
76
+ source = source.expanduser().resolve()
77
+ target = target.expanduser().resolve()
78
+ if not source.exists():
79
+ raise ValueError(f"Source path does not exist: {source}")
80
+ if not source.is_dir():
81
+ raise ValueError(f"Source path is not a directory: {source}")
82
+ if not looks_like_deepscientist_root(source):
83
+ raise ValueError(f"Source path does not look like a DeepScientist home or install root: {source}")
84
+ if source == target:
85
+ raise ValueError("Source path and target path must be different.")
86
+ if _is_relative_to(target, source):
87
+ raise ValueError("Target path cannot be placed inside the current DeepScientist root.")
88
+ if _is_relative_to(source, target):
89
+ raise ValueError("Target path cannot be a parent of the current DeepScientist root.")
90
+ if target.exists():
91
+ raise ValueError(f"Target path already exists: {target}")
92
+ target.parent.mkdir(parents=True, exist_ok=True)
93
+
94
+ staging = target.parent / f".{target.name}.migrating-{uuid.uuid4().hex[:10]}"
95
+ if staging.exists():
96
+ shutil.rmtree(staging, ignore_errors=True)
97
+ try:
98
+ shutil.copytree(source, staging, symlinks=True, copy_function=shutil.copy2)
99
+ source_manifest = _collect_manifest(source)
100
+ staging_manifest = _collect_manifest(staging)
101
+ if source_manifest["entries"] != staging_manifest["entries"]:
102
+ raise ValueError("Copied tree validation failed: source and target contents do not match.")
103
+ staging.rename(target)
104
+ return {
105
+ "ok": True,
106
+ "source": str(source),
107
+ "target": str(target),
108
+ "staging": str(staging),
109
+ "stats": source_manifest["stats"],
110
+ "summary": "DeepScientist root copied and verified successfully.",
111
+ }
112
+ except Exception:
113
+ shutil.rmtree(staging, ignore_errors=True)
114
+ raise
@@ -0,0 +1,78 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ from urllib.parse import urlparse
5
+ from urllib.request import ProxyHandler, Request, build_opener, urlopen as stdlib_urlopen
6
+
7
+ from websockets.sync.client import connect as stdlib_websocket_connect
8
+
9
+ _RUNTIME_PROXY_URL: str | None = None
10
+ _NO_PROXY_OPENER = build_opener(ProxyHandler({}))
11
+ _PROXY_OPENERS: dict[str, object] = {}
12
+
13
+
14
+ def normalize_proxy_url(value: str | None) -> str | None:
15
+ text = str(value or "").strip()
16
+ return text or None
17
+
18
+
19
+ def configure_runtime_proxy(proxy_url: str | None) -> str | None:
20
+ normalized = normalize_proxy_url(proxy_url)
21
+ global _RUNTIME_PROXY_URL
22
+ previous = _RUNTIME_PROXY_URL
23
+ _RUNTIME_PROXY_URL = normalized
24
+ if normalized is None:
25
+ if previous is not None:
26
+ for key in ("HTTP_PROXY", "HTTPS_PROXY", "ALL_PROXY", "http_proxy", "https_proxy", "all_proxy"):
27
+ if os.environ.get(key) == previous:
28
+ os.environ.pop(key, None)
29
+ return None
30
+ for key in ("HTTP_PROXY", "HTTPS_PROXY", "ALL_PROXY", "http_proxy", "https_proxy", "all_proxy"):
31
+ os.environ[key] = normalized
32
+ # Keep local daemon traffic and loopback websocket attaches off the proxy path.
33
+ for key in ("NO_PROXY", "no_proxy"):
34
+ current = str(os.environ.get(key) or "").strip()
35
+ values = [item.strip() for item in current.split(",") if item.strip()]
36
+ for host in ("127.0.0.1", "localhost", "::1", "0.0.0.0"):
37
+ if host not in values:
38
+ values.append(host)
39
+ os.environ[key] = ",".join(values)
40
+ return normalized
41
+
42
+
43
+ def runtime_proxy_url() -> str | None:
44
+ return _RUNTIME_PROXY_URL
45
+
46
+
47
+ def should_bypass_proxy(url: str) -> bool:
48
+ parsed = urlparse(str(url or "").strip())
49
+ host = (parsed.hostname or "").strip().lower()
50
+ return host in {"", "127.0.0.1", "localhost", "::1", "0.0.0.0"}
51
+
52
+
53
+ def _proxy_opener(proxy_url: str):
54
+ opener = _PROXY_OPENERS.get(proxy_url)
55
+ if opener is None:
56
+ opener = build_opener(ProxyHandler({"http": proxy_url, "https": proxy_url}))
57
+ _PROXY_OPENERS[proxy_url] = opener
58
+ return opener
59
+
60
+
61
+ def urlopen_with_proxy(request: Request | str, timeout: float | None = None):
62
+ url = request.full_url if isinstance(request, Request) else str(request)
63
+ if should_bypass_proxy(url):
64
+ return _NO_PROXY_OPENER.open(request, timeout=timeout)
65
+ proxy_url = runtime_proxy_url()
66
+ if proxy_url:
67
+ return _proxy_opener(proxy_url).open(request, timeout=timeout)
68
+ return stdlib_urlopen(request, timeout=timeout)
69
+
70
+
71
+ def websocket_connect_with_proxy(uri: str, /, **kwargs):
72
+ if should_bypass_proxy(uri):
73
+ kwargs.setdefault("proxy", None)
74
+ else:
75
+ proxy_url = runtime_proxy_url()
76
+ if proxy_url:
77
+ kwargs.setdefault("proxy", proxy_url)
78
+ return stdlib_websocket_connect(uri, **kwargs)
@@ -87,10 +87,10 @@ class PromptBuilder:
87
87
  ) -> str:
88
88
  snapshot = self.quest_service.snapshot(quest_id)
89
89
  runtime_config = self.config_manager.load_named("config")
90
- connectors_config = self.config_manager.load_named("connectors")
90
+ connectors_config = self.config_manager.load_named_normalized("connectors")
91
91
  quest_root = Path(snapshot["quest_root"])
92
92
  active_anchor = str(snapshot.get("active_anchor") or skill_id)
93
- default_locale = str(runtime_config.get("default_locale") or "zh-CN")
93
+ default_locale = str(runtime_config.get("default_locale") or "en-US")
94
94
  system_block = self._prompt_fragment("src/prompts/system.md")
95
95
  connector_contract_block = self._connector_contract_block(quest_id=quest_id, snapshot=snapshot)
96
96
  sections = [
@@ -260,6 +260,11 @@ class PromptBuilder:
260
260
  "- qq_surface_rule: QQ is a milestone-report surface, not a full artifact browser.",
261
261
  "- qq_default_mode: keep outbound replies concise, respectful, text-first, and progress-aware.",
262
262
  "- qq_detail_rule: do not proactively dump file inventories, path lists, or low-level file details unless the user explicitly asked for them.",
263
+ "- qq_length_rule: for ordinary QQ progress replies, normally use only 2 to 4 short sentences, or 3 very short bullets at most.",
264
+ "- qq_summary_first_rule: start with the user-facing conclusion, then the immediate meaning, then the next action; do not make the user reverse-engineer the status from telemetry.",
265
+ "- qq_internal_signal_rule: omit worker names, heartbeat timestamps, retry counters, pending/running/completed counts, file names, and monitor-window narration unless that detail is necessary for a user decision or to explain a real risk.",
266
+ "- qq_translation_rule: translate internal actions into user value, for example say that you organized the baseline record for easier comparison later instead of listing the files you touched.",
267
+ "- qq_eta_rule: for baseline reproduction, main experiments, analysis experiments, and other important long-running research phases, include a rough ETA for the next meaningful result, next step, or next update; if the runtime is uncertain, say that directly and still give the next check-in window.",
263
268
  f"- qq_auto_send_main_experiment_png: {bool(qq_config.get('auto_send_main_experiment_png', True))}",
264
269
  f"- qq_auto_send_analysis_summary_png: {bool(qq_config.get('auto_send_analysis_summary_png', True))}",
265
270
  f"- qq_auto_send_slice_png: {bool(qq_config.get('auto_send_slice_png', False))}",
@@ -387,6 +392,14 @@ class PromptBuilder:
387
392
  "- must_continue_rule: unless there is a real blocking user decision, keep advancing the quest automatically from durable state",
388
393
  ]
389
394
  )
395
+ bash_running_count = int(((snapshot.get("counts") or {}).get("bash_running_count")) or 0)
396
+ if bash_running_count > 0:
397
+ lines.extend(
398
+ [
399
+ f"- active_bash_run_count: {bash_running_count}",
400
+ "- long_run_watchdog_rule: while an important long-running bash_exec session is active, never let more than 30 minutes pass without inspecting real logs/status and sending a concise artifact.interact progress update if the run is still ongoing",
401
+ ]
402
+ )
390
403
  if str(turn_reason or "").strip() == "auto_continue":
391
404
  lines.append(
392
405
  "- auto_continue_rule: this turn has no new user message; continue from the active requirements, durable artifacts, and current quest state instead of replaying the previous user message"
@@ -733,8 +746,16 @@ class PromptBuilder:
733
746
  "- interaction_protocol: first message may be plain conversation; after that, treat artifact.interact threads and mailbox polls as the main continuity spine across TUI, web, and connectors",
734
747
  "- mailbox_protocol: artifact.interact(include_recent_inbound_messages=True) is the queued human-message mailbox; when it returns user text, treat that input as higher priority than background subtasks until it has been acknowledged",
735
748
  "- acknowledgment_protocol: after artifact.interact returns any human message, immediately call artifact.interact(...) again to confirm receipt; if answerable, answer directly, otherwise state the short plan, nearest checkpoint, and that the current background subtask is paused",
736
- "- progress_protocol: emit artifact.interact(kind='progress', reply_mode='threaded', ...) only at real human-meaningful checkpoints, after the first meaningful signal from long-running work, and then only occasional keepalives during truly long work, usually about every 20 to 30 minutes",
737
- "- long_run_reporting_protocol: for long-running bash_exec monitoring loops, report after each completed sleep/await cycle with real evidence plus the next planned check time and estimated next reply time",
749
+ "- progress_protocol: emit artifact.interact(kind='progress', reply_mode='threaded', ...) at real human-meaningful checkpoints; if no natural checkpoint appears during active user-relevant work, send a concise keepalive before you drift beyond roughly 10 to 30 tool calls without a user-visible update",
750
+ "- smoke_then_detach_protocol: for baseline reproduction, main experiments, and analysis experiments, first validate the command path with a bounded smoke test; once the smoke test passes, launch the real long run with bash_exec(mode='detach', ...) and usually leave timeout_seconds unset rather than guessing a fake deadline",
751
+ "- progress_first_monitoring_protocol: when supervising a long-running bash_exec session, judge health by forward progress rather than by whether the final artifact has already appeared within a short window",
752
+ "- delta_monitoring_protocol: compare deltas such as new sample counters, new task counters, new saved files, new last_output_seq values, or changed last_progress payloads; if any of these move forward, treat the run as alive and keep observing",
753
+ "- long_run_reporting_protocol: for long-running bash_exec monitoring loops, inspect real logs or status after each completed sleep/await cycle and at least once every 30 minutes at worst, but only send a user-visible update when there is a human-meaningful delta or when the 30-minute visibility bound would otherwise be exceeded",
754
+ "- long_run_watchdog_protocol: for baseline reproduction, baseline-running stages, main experiments, and other important detached runs, do not let more than 30 minutes pass without a real progress inspection and, if the run is still active, a user-visible artifact.interact progress update",
755
+ "- intervention_threshold_protocol: do not kill or restart a run merely because a short watch window passed without final completion; intervene only on explicit failure, clear invalidity, process exit, or no meaningful delta across a sufficiently long observation window",
756
+ "- slow_model_patience_protocol: if the user says the model, endpoint, or workload is expected to be slow, widen the observation window before intervention and avoid repeated no-change updates",
757
+ "- tail_monitoring_protocol: when monitoring a detached run, prefer bash_exec(mode='read', id=..., tail_limit=..., order='desc') so you inspect the newest evidence first instead of re-reading full logs every time",
758
+ "- managed_recovery_protocol: if a detached baseline, main-experiment, or analysis run is clearly invalid, wedged, or superseded, stop it with bash_exec(mode='kill', id=...), document the reason, fix the issue, and relaunch cleanly instead of letting a bad run linger",
738
759
  "- timeout_protocol: before using bash_exec(mode='await', ...), estimate whether the command can finish within the selected wait window; if runtime is uncertain or likely longer, use bash_exec(mode='detach', ...) and monitor, or set timeout_seconds intentionally",
739
760
  "- blocking_protocol: use reply_mode='blocking' only for true unresolved user decisions; ordinary progress updates should stay threaded and non-blocking",
740
761
  "- credential_blocking_protocol: if continuation requires user-supplied external credentials or secrets such as an API key, GitHub key/token, or Hugging Face key/token, emit one structured blocking decision request that asks the user to provide the credential or choose an alternative route; do not invent placeholders or silently skip the blocked step",
@@ -743,6 +764,31 @@ class PromptBuilder:
743
764
  "- stop_notice_protocol: if work must pause or stop, send a user-visible notice that explains why, confirms preserved context, and states that any new message or `/resume` will continue from the same quest",
744
765
  "- respect_protocol: write user-facing updates as natural, respectful, easy-to-follow chat; do not sound like a formal status report or internal tool log",
745
766
  "- omission_protocol: for ordinary user-facing updates, omit file paths, artifact ids, branch/worktree ids, session ids, raw commands, raw logs, and internal tool names unless the user asked for them or needs them to act",
767
+ "- compaction_protocol: ordinary artifact.interact progress updates should usually fit in 2 to 4 short sentences and should not read like a monitoring transcript or execution diary",
768
+ "- tool_call_keepalive_protocol: for active multi-step work outside long detached experiment waits, if you have spent roughly 10 to 30 tool calls without a user-visible checkpoint, send one concise artifact.interact progress update before continuing",
769
+ "- human_progress_shape_protocol: ordinary progress updates should usually make three things explicit in human language: the current task, the main difficulty or latest real progress, and the concrete next measure you will take",
770
+ "- eta_visibility_protocol: for baseline reproduction, main experiments, analysis experiments, and other important long-running phases, progress updates should also make the expected time to the next meaningful result, next milestone, or next user-visible update explicit; use roughly 10 to 30 minutes as the normal update window, and if the ETA is unreliable, say that and give a realistic next check-in window instead",
771
+ "- idea_milestone_protocol: immediately after a successful accepted artifact.submit_idea(...), send a threaded milestone that explains the idea in plain language and explicitly states whether it currently looks valid, research-worthy, and insight-bearing, plus the main risk and exact next experiment",
772
+ "- idea_divergence_protocol: in the idea stage, separate divergence from convergence; unless strong durable evidence already narrows the route to one obvious serious option, do not collapse onto the first plausible route before generating a small but meaningfully diverse candidate slate",
773
+ "- idea_lens_protocol: when idea candidates cluster around one mechanism family, deliberately switch ideation lenses such as problem-first vs solution-first, tension hunting, analogy transfer, inversion, or adjacent-possible reasoning before final selection",
774
+ "- idea_frontier_protocol: a temporary raw ideation slate may be larger, but after convergence the serious frontier should usually shrink back to 2 to 3 candidates and at most 5",
775
+ "- idea_why_now_protocol: every serious idea candidate should answer why now or what changed, not just what the mechanism is",
776
+ "- idea_balance_protocol: when the search space is not tiny, carry at least one conservative route and one higher-upside route into the final comparison",
777
+ "- idea_pitch_protocol: before artifact.submit_idea(...), make the winner pass a two-sentence pitch, a strongest-objection check, and a concrete why-now statement",
778
+ "- experiment_milestone_protocol: immediately after artifact.record_main_experiment(...) writes the durable result, send a threaded milestone that explains what was run, the main result, whether primary performance improved / worsened / stayed mixed versus the active baseline or best prior anchor, whether the route still looks promising, and the exact next step",
779
+ "- asset_grounded_analysis_protocol: before artifact.create_analysis_campaign(...), reuse current quest and user-provided assets first and only plan slices that are executable with the current assets, runtime/tooling, and available credentials",
780
+ "- infeasible_slice_protocol: if an analysis slice cannot actually be executed after bounded recovery, do not fake completion; record the slice with a non-success status, report the blocker explicitly, and do not pretend the system can do it",
781
+ "- explicit_improvement_protocol: never make the user infer performance improvement only from raw metrics; say plainly whether performance improved, worsened, or stayed mixed",
782
+ "- verified_reference_breadth_protocol: for paper-like writing, run broad literature search and reading, aim for roughly 30 to 50 verified references unless scope clearly justifies fewer, use one consistent citation workflow SEARCH -> VERIFY -> RETRIEVE -> VALIDATE -> ADD, use Semantic Scholar by default or Google Scholar manual search/export for discovery, use DOI/Crossref or other real metadata backfills for BibTeX and verification, ensure every final citation corresponds to a real paper from an actual source, store actual bibliography entries in paper/references.bib as valid BibTeX, do one explicit reference audit before bundling, and never invent citations from memory or hand-write BibTeX from scratch",
783
+ "- narrative_focus_protocol: for paper-like writing, organize the paper around one cohesive contribution, make What / Why / So What clear early, assume many readers judge in the order title -> abstract -> introduction -> figures, front-load value in those surfaces, use a five-part abstract formula, keep the introduction concise with 2 to 4 specific contribution bullets, and if the first sentence could be pasted into many unrelated ML papers then rewrite it until it becomes specific",
784
+ "- writing_reasoning_externalization_protocol: for paper-like writing, externalize major reasoning into durable notes such as paper/outline_selection.md, paper/claim_evidence_map.json, paper/related_work_map.md, paper/figure_storyboard.md, and paper/reviewer_first_pass.md; those notes should summarize current judgment, alternatives considered, evidence used, risks, and next revision action rather than hidden chain-of-thought",
785
+ "- outline_intro_value_protocol: for outlines and introductions, make research value explicit early and use a standard introduction arc: problem and stakes -> concrete gap/bottleneck -> remedy/core idea -> evidence preview -> contributions",
786
+ "- teammate_voice_protocol: write like a calm capable teammate using natural first-person phrasing when helpful, for example 'I'm working on ...', 'The main issue right now is ...', 'Next I'll ...'; do not sound like a dashboard or incident log",
787
+ "- tqdm_progress_protocol: when you control the experiment code for baseline reproduction, main experiments, or analysis experiments, instrument long loops with a throttled tqdm-style progress reporter when feasible and also prefer periodic __DS_PROGRESS__ JSON markers so monitoring stays both human-readable and machine-usable",
788
+ "- translation_protocol: convert internal actions into user-facing meaning; describe what was finished and why it matters instead of naming every touched file, counter, timestamp, or subprocess",
789
+ "- detail_gate_protocol: include exact counters, worker labels, timestamps, retry counts, or file names only when the user explicitly asked for them, when they change the recommended action, or when they are the only honest way to explain a real blocker",
790
+ "- monitoring_summary_protocol: for long-running monitoring loops, summarize the frontier state in plain language such as still progressing, temporarily stalled, recovered, or needs intervention; do not narrate each watch window and do not send a no-change update merely because a sleep finished unless the user-visible timing bound requires it",
791
+ "- preflight_rewrite_protocol: before sending artifact.interact, quickly self-check whether the draft reads like a monitoring log, file inventory, or internal diary; if it mentions watch windows, heartbeats, retry counters, raw counts, timestamps, or multiple file names without being necessary for user action, rewrite it into conclusion -> meaning -> next step first",
746
792
  "- non_research_mode_protocol: if the user message looks like a non-research request, ask for a second confirmation before engaging stage skills or research workflow; after completion, leave one blocking standby interaction instead of repeatedly pinging",
747
793
  "- workspace_discipline: read and modify code inside current_workspace_root; treat quest_root as the canonical repo identity and durable runtime root",
748
794
  "- binary_safety: do not open or rewrite large binary assets unless truly necessary; prefer summaries, metadata, and targeted inspection first",
@@ -0,0 +1,186 @@
1
+ from __future__ import annotations
2
+
3
+ from copy import deepcopy
4
+ from typing import Any
5
+
6
+ from .shared import slugify
7
+
8
+
9
# Prefix used when synthesizing a profile id for entries that provide
# neither an explicit profile_id nor an app_id.
QQ_PROFILE_ID_PREFIX = "qq-profile"
# Default environment variable name consulted for the QQ app secret when a
# profile does not specify its own app_secret_env.
QQ_DEFAULT_SECRET_ENV = "QQ_APP_SECRET"
11
+
12
+
13
def default_qq_profile() -> dict[str, Any]:
    """Build a fresh template dict describing one QQ bot profile.

    Returns a new dict each call so callers can mutate it freely; the
    secret env var name defaults to ``QQ_DEFAULT_SECRET_ENV``.
    """
    template: dict[str, Any] = {
        "profile_id": None,
        "enabled": True,
        "app_id": None,
        "app_secret": None,
        "app_secret_env": QQ_DEFAULT_SECRET_ENV,
        "bot_name": "DeepScientist",
        "main_chat_id": None,
    }
    return template
23
+
24
+
25
+ def _as_text(value: Any) -> str | None:
26
+ text = str(value or "").strip()
27
+ return text or None
28
+
29
+
30
def _profile_id_seed(*, profile_id: Any, app_id: Any, bot_name: Any, index: int) -> str:
    """Choose the best available seed string for a profile id.

    Preference order: explicit profile_id, then app id, then a slug of the
    bot display name, and finally a zero-padded positional index.
    """
    given = _as_text(profile_id)
    if given:
        # Caller supplied an id directly; use it verbatim.
        return given
    app_part = _as_text(app_id)
    if app_part:
        return f"qq-{app_part}"
    name_part = slugify(str(bot_name or "").strip(), default="")
    if name_part:
        return f"{QQ_PROFILE_ID_PREFIX}-{name_part}"
    # Nothing identifying was provided; fall back to the list position.
    return f"{QQ_PROFILE_ID_PREFIX}-{index:03d}"
41
+
42
+
43
def _unique_profile_id(seed: str, *, used: set[str]) -> str:
    """Slugify *seed* and append a numeric suffix until the id is unused.

    The chosen id is recorded in *used* (mutated in place) so subsequent
    calls cannot collide with it.
    """
    stem = slugify(seed, default=QQ_PROFILE_ID_PREFIX)
    result = stem
    counter = 2
    while result in used:
        result = f"{stem}-{counter}"
        counter += 1
    used.add(result)
    return result
52
+
53
+
54
def list_qq_profiles(config: dict[str, Any] | None) -> list[dict[str, Any]]:
    """Return shallow copies of the normalized QQ profiles in *config*."""
    entries = normalize_qq_connector_config(config).get("profiles")
    if not isinstance(entries, list):
        return []
    # Copy each profile dict so callers cannot mutate the normalized state.
    return [dict(entry) for entry in entries]
58
+
59
+
60
def find_qq_profile(
    config: dict[str, Any] | None,
    *,
    profile_id: str | None = None,
    app_id: str | None = None,
) -> dict[str, Any] | None:
    """Locate a profile matching *profile_id* or *app_id*; None when absent.

    Matching is exact (after trimming) and profile_id takes precedence per
    profile; profiles are scanned in normalized order.
    """
    want_profile = _as_text(profile_id)
    want_app = _as_text(app_id)
    for candidate in list_qq_profiles(config):
        if want_profile and str(candidate.get("profile_id") or "").strip() == want_profile:
            return candidate
        if want_app and str(candidate.get("app_id") or "").strip() == want_app:
            return candidate
    return None
74
+
75
+
76
def merge_qq_profile_config(shared_config: dict[str, Any] | None, profile: dict[str, Any]) -> dict[str, Any]:
    """Overlay one *profile* onto the normalized shared connector config.

    Produces a flat single-bot config: the profiles list is dropped, the
    profile's own fields win, and the result is enabled only when both the
    shared config and the profile are enabled.
    """
    base = deepcopy(normalize_qq_connector_config(shared_config))
    base.pop("profiles", None)
    overlay = {
        "profile_id": str(profile.get("profile_id") or "").strip() or None,
        "app_id": _as_text(profile.get("app_id")),
        "app_secret": _as_text(profile.get("app_secret")),
        "app_secret_env": _as_text(profile.get("app_secret_env")) or QQ_DEFAULT_SECRET_ENV,
        # Fall back to the shared bot name, then the global default.
        "bot_name": _as_text(profile.get("bot_name")) or str(base.get("bot_name") or "DeepScientist"),
        "main_chat_id": _as_text(profile.get("main_chat_id")),
        # Both the shared switch and the per-profile switch must be on.
        "enabled": bool(base.get("enabled", False)) and bool(profile.get("enabled", True)),
        "transport": "gateway_direct",
    }
    base.update(overlay)
    return base
93
+
94
+
95
def qq_profile_label(profile: dict[str, Any] | None) -> str:
    """Render a human-readable display label for a QQ profile.

    Prefers "bot_name · app_id", then bot_name alone, then "QQ · app_id",
    and finally the bare fallback "QQ".
    """
    if not isinstance(profile, dict):
        return "QQ"
    name = _as_text(profile.get("bot_name"))
    ident = _as_text(profile.get("app_id"))
    if name:
        return f"{name} · {ident}" if ident else name
    if ident:
        return f"QQ · {ident}"
    return "QQ"
107
+
108
+
109
def normalize_qq_connector_config(config: dict[str, Any] | None) -> dict[str, Any]:
    """Normalize a raw QQ connector config dict into canonical shape.

    Fills defaults, forces the gateway_direct transport, normalizes the
    profiles list (synthesizing one from legacy top-level fields when no
    profiles exist), assigns unique profile ids, and mirrors a single
    profile's identity fields back onto the shared top level.
    The input dict is never mutated (a deep copy is taken).
    """
    payload = deepcopy(config or {})
    shared_defaults = {
        "enabled": False,
        "transport": "gateway_direct",
        "app_id": None,
        "app_secret": None,
        "app_secret_env": QQ_DEFAULT_SECRET_ENV,
        "bot_name": "DeepScientist",
        "command_prefix": "/",
        "main_chat_id": None,
        "require_at_in_groups": True,
        "auto_bind_dm_to_active_quest": True,
        "gateway_restart_on_config_change": True,
        "auto_send_main_experiment_png": True,
        "auto_send_analysis_summary_png": True,
        "auto_send_slice_png": True,
        "auto_send_paper_pdf": True,
        "enable_markdown_send": False,
        "enable_file_upload_experimental": False,
        "profiles": [],
    }
    # User-supplied keys win over defaults; unknown keys pass through.
    shared = {**shared_defaults, **payload}
    # gateway_direct is the only supported transport; override anything else.
    shared["transport"] = "gateway_direct"
    shared["command_prefix"] = _as_text(shared.get("command_prefix")) or "/"
    shared["bot_name"] = _as_text(shared.get("bot_name")) or "DeepScientist"
    shared["app_secret_env"] = _as_text(shared.get("app_secret_env")) or QQ_DEFAULT_SECRET_ENV

    raw_profiles = payload.get("profiles")
    items = list(raw_profiles) if isinstance(raw_profiles, list) else []
    # Legacy configs stored a single bot's fields at the top level; collect
    # them so they can be promoted into a profile when no list was given.
    legacy_profile_seed = {
        "app_id": payload.get("app_id"),
        "app_secret": payload.get("app_secret"),
        "app_secret_env": payload.get("app_secret_env"),
        "bot_name": payload.get("bot_name"),
        "main_chat_id": payload.get("main_chat_id"),
    }
    if not items:
        # Promote the legacy fields only if at least one carries real text.
        if any(_as_text(legacy_profile_seed.get(key)) for key in ("app_id", "app_secret", "main_chat_id", "bot_name")):
            items = [legacy_profile_seed]

    profiles: list[dict[str, Any]] = []
    used_ids: set[str] = set()
    for index, raw in enumerate(items, start=1):
        if not isinstance(raw, dict):
            # Skip malformed entries rather than failing the whole config.
            continue
        # Profile fields win over the per-profile template defaults.
        current = {**default_qq_profile(), **raw}
        current["enabled"] = bool(current.get("enabled", True))
        current["app_id"] = _as_text(current.get("app_id"))
        current["app_secret"] = _as_text(current.get("app_secret"))
        # Per-profile overrides fall back to the shared values.
        current["app_secret_env"] = _as_text(current.get("app_secret_env")) or shared["app_secret_env"]
        current["bot_name"] = _as_text(current.get("bot_name")) or shared["bot_name"]
        current["main_chat_id"] = _as_text(current.get("main_chat_id"))
        # Derive a stable, collision-free id (explicit id > app id > name > index).
        current["profile_id"] = _unique_profile_id(
            _profile_id_seed(
                profile_id=current.get("profile_id"),
                app_id=current.get("app_id"),
                bot_name=current.get("bot_name"),
                index=index,
            ),
            used=used_ids,
        )
        profiles.append(current)

    shared["profiles"] = profiles
    if len(profiles) == 1:
        # With exactly one profile, mirror its identity fields to the top
        # level so legacy single-bot readers keep working.
        mirror = profiles[0]
        shared["app_id"] = mirror.get("app_id")
        shared["app_secret"] = mirror.get("app_secret")
        shared["app_secret_env"] = mirror.get("app_secret_env")
        shared["bot_name"] = mirror.get("bot_name")
        shared["main_chat_id"] = mirror.get("main_chat_id")
    else:
        # Zero or multiple profiles: top-level identity fields are ambiguous,
        # so clear them (app_secret_env/bot_name keep their shared values).
        shared["app_id"] = None
        shared["app_secret"] = None
        shared["main_chat_id"] = None

    return shared
@@ -74,7 +74,7 @@ class QuestService:
74
74
  if value:
75
75
  return value.lower()
76
76
  config = ConfigManager(self.home).load_named("config")
77
- return str(config.get("default_locale") or "zh-CN").lower()
77
+ return str(config.get("default_locale") or "en-US").lower()
78
78
 
79
79
  def localized_copy(self, *, zh: str, en: str, quest_root: Path | None = None) -> str:
80
80
  return zh if self.preferred_locale(quest_root).startswith("zh") else en