@researai/deepscientist 1.5.6 → 1.5.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +28 -0
- package/bin/ds.js +54 -13
- package/package.json +1 -1
- package/pyproject.toml +1 -1
- package/src/deepscientist/__init__.py +1 -1
- package/src/deepscientist/cli.py +1 -1
- package/src/deepscientist/config/models.py +6 -6
- package/src/deepscientist/config/service.py +5 -0
- package/src/deepscientist/connector_profiles.py +34 -6
- package/src/deepscientist/daemon/api/handlers.py +6 -1
- package/src/deepscientist/daemon/api/router.py +1 -0
- package/src/deepscientist/daemon/app.py +230 -3
- package/src/deepscientist/prompts/builder.py +20 -4
- package/src/deepscientist/qq_profiles.py +19 -9
- package/src/deepscientist/quest/layout.py +1 -0
- package/src/deepscientist/runners/codex.py +31 -13
- package/src/deepscientist/runners/runtime_overrides.py +46 -0
- package/src/deepscientist/skills/installer.py +7 -0
- package/src/prompts/system.md +30 -0
- package/src/skills/analysis-campaign/SKILL.md +30 -0
- package/src/skills/analysis-campaign/references/campaign-checklist-template.md +41 -0
- package/src/skills/analysis-campaign/references/campaign-plan-template.md +68 -0
- package/src/skills/baseline/SKILL.md +105 -5
- package/src/skills/baseline/references/baseline-checklist-template.md +57 -0
- package/src/skills/baseline/references/baseline-plan-template.md +103 -0
- package/src/skills/experiment/SKILL.md +30 -0
- package/src/skills/experiment/references/main-experiment-checklist-template.md +52 -0
- package/src/skills/experiment/references/main-experiment-plan-template.md +77 -0
- package/src/tui/package.json +1 -1
- package/src/ui/dist/assets/{AiManusChatView-BGLArZRn.js → AiManusChatView-BS3V4ZOk.js} +11 -11
- package/src/ui/dist/assets/{AnalysisPlugin-BgDGSigG.js → AnalysisPlugin-DLPXQsmr.js} +1 -1
- package/src/ui/dist/assets/{AutoFigurePlugin-B65HD7L4.js → AutoFigurePlugin-C-Fr9knQ.js} +5 -5
- package/src/ui/dist/assets/{CliPlugin-CUqgsFHC.js → CliPlugin-Dd8AHzFg.js} +9 -9
- package/src/ui/dist/assets/{CodeEditorPlugin-CF5EdvaS.js → CodeEditorPlugin-Dg-RepTl.js} +8 -8
- package/src/ui/dist/assets/{CodeViewerPlugin-DEeU063D.js → CodeViewerPlugin-D2J_3nyt.js} +5 -5
- package/src/ui/dist/assets/{DocViewerPlugin-Df-FuDlZ.js → DocViewerPlugin-ChRLLKNb.js} +3 -3
- package/src/ui/dist/assets/{GitDiffViewerPlugin-RAnNaRxM.js → GitDiffViewerPlugin-DgHfcved.js} +1 -1
- package/src/ui/dist/assets/{ImageViewerPlugin-DXJ0ZJGg.js → ImageViewerPlugin-C89GZMBy.js} +5 -5
- package/src/ui/dist/assets/{LabCopilotPanel-BlO-sKsj.js → LabCopilotPanel-BUfIwUcb.js} +10 -10
- package/src/ui/dist/assets/{LabPlugin-BajPZW5v.js → LabPlugin-zvUmQUMq.js} +1 -1
- package/src/ui/dist/assets/{LatexPlugin-F1OEol8D.js → LatexPlugin-C1SSNuWp.js} +7 -7
- package/src/ui/dist/assets/{MarkdownViewerPlugin-MhUupqwT.js → MarkdownViewerPlugin-D2Mf5tU5.js} +4 -4
- package/src/ui/dist/assets/{MarketplacePlugin-DxhIEsv0.js → MarketplacePlugin-CF4LgiS2.js} +3 -3
- package/src/ui/dist/assets/{NotebookEditor-q7TkhewC.js → NotebookEditor-BM7Bgwlv.js} +1 -1
- package/src/ui/dist/assets/{PdfLoader-B8ZOTKFc.js → PdfLoader-Bc5qfD-Z.js} +1 -1
- package/src/ui/dist/assets/{PdfMarkdownPlugin-xFPvzvWh.js → PdfMarkdownPlugin-sh1-IRcp.js} +3 -3
- package/src/ui/dist/assets/{PdfViewerPlugin-EjEcsIB8.js → PdfViewerPlugin-C_a7CpWG.js} +10 -10
- package/src/ui/dist/assets/{SearchPlugin-ixY-1lgW.js → SearchPlugin-L4z3HcLf.js} +1 -1
- package/src/ui/dist/assets/{Stepper-gYFK2Pgz.js → Stepper-Dk4aQ3fN.js} +1 -1
- package/src/ui/dist/assets/{TextViewerPlugin-Cym6pv_n.js → TextViewerPlugin-BsNtlKVo.js} +4 -4
- package/src/ui/dist/assets/{VNCViewer-BPmIHcmK.js → VNCViewer-BpeDcZ5_.js} +9 -9
- package/src/ui/dist/assets/{bibtex-Btv6Wi7f.js → bibtex-C4QI-bbj.js} +1 -1
- package/src/ui/dist/assets/{code-BlG7g85c.js → code-DuMINRsg.js} +1 -1
- package/src/ui/dist/assets/{file-content-DBT5OfTZ.js → file-content-C3N-432K.js} +1 -1
- package/src/ui/dist/assets/{file-diff-panel-BWXYzqHk.js → file-diff-panel-CffQ4ZMg.js} +1 -1
- package/src/ui/dist/assets/{file-socket-wDlx6byM.js → file-socket-CRH59PCO.js} +1 -1
- package/src/ui/dist/assets/{file-utils-Ba3nJmH0.js → file-utils-vYGtW2mI.js} +1 -1
- package/src/ui/dist/assets/{image-BwtCyguk.js → image-DBVGaooo.js} +1 -1
- package/src/ui/dist/assets/{index-Bz5AaWL7.js → index-B1P6hQRJ.js} +166 -32
- package/src/ui/dist/assets/{index-B-2scqCJ.js → index-Be0NAmh8.js} +11 -11
- package/src/ui/dist/assets/{index-DcqvKzeJ.js → index-BpjYH9Vg.js} +1 -1
- package/src/ui/dist/assets/{index-CfRpE209.js → index-DjSFDmgB.js} +2 -2
- package/src/ui/dist/assets/{index-DpMZw8aM.css → index-Do9N28uB.css} +1 -1
- package/src/ui/dist/assets/{message-square-BnlyWVH0.js → message-square-BsPDBhiY.js} +1 -1
- package/src/ui/dist/assets/{monaco-CXe0pAVe.js → monaco-BTkdPojV.js} +1 -1
- package/src/ui/dist/assets/{popover-BCHmVhHj.js → popover-cWjCk-vc.js} +1 -1
- package/src/ui/dist/assets/{project-sync-Brk6kaOD.js → project-sync-CXn530xb.js} +1 -1
- package/src/ui/dist/assets/{sigma-D72eSUep.js → sigma-04Jr12jg.js} +1 -1
- package/src/ui/dist/assets/{tooltip-BMWd0dqX.js → tooltip-BdVDl0G5.js} +1 -1
- package/src/ui/dist/assets/{trash-BIt_eWIS.js → trash-CB_GlQyC.js} +1 -1
- package/src/ui/dist/assets/{useCliAccess-N1hkTRrR.js → useCliAccess-BL932NwS.js} +1 -1
- package/src/ui/dist/assets/{useFileDiffOverlay-DPRPv6rv.js → useFileDiffOverlay-B2WK7Tvq.js} +1 -1
- package/src/ui/dist/assets/{wrap-text-E5-UheyP.js → wrap-text-YC68g12z.js} +1 -1
- package/src/ui/dist/assets/{zoom-out-D4TR-ZZ_.js → zoom-out-C0RJvFiJ.js} +1 -1
- package/src/ui/dist/index.html +2 -2
|
@@ -29,7 +29,14 @@ from ..channels.slack_socket import SlackSocketModeService
|
|
|
29
29
|
from ..channels.telegram_polling import TelegramPollingService
|
|
30
30
|
from ..channels.whatsapp_local_session import WhatsAppLocalSessionService
|
|
31
31
|
from ..cloud import CloudLinkService
|
|
32
|
-
from ..connector_profiles import
|
|
32
|
+
from ..connector_profiles import (
|
|
33
|
+
CONNECTOR_PROFILE_SPECS,
|
|
34
|
+
PROFILEABLE_CONNECTOR_NAMES,
|
|
35
|
+
connector_profile_label,
|
|
36
|
+
list_connector_profiles,
|
|
37
|
+
merge_connector_profile_config,
|
|
38
|
+
normalize_connector_config,
|
|
39
|
+
)
|
|
33
40
|
from ..connector_runtime import conversation_identity_key, format_conversation_id, normalize_conversation_id, parse_conversation_id
|
|
34
41
|
from ..config import ConfigManager
|
|
35
42
|
from ..home import repo_root
|
|
@@ -38,7 +45,7 @@ from ..network import urlopen_with_proxy as urlopen
|
|
|
38
45
|
from ..latex_runtime import QuestLatexService
|
|
39
46
|
from ..prompts import PromptBuilder
|
|
40
47
|
from ..prompts.builder import STANDARD_SKILLS
|
|
41
|
-
from ..qq_profiles import list_qq_profiles, merge_qq_profile_config
|
|
48
|
+
from ..qq_profiles import list_qq_profiles, merge_qq_profile_config, normalize_qq_connector_config
|
|
42
49
|
from ..quest import QuestService
|
|
43
50
|
from ..runners import CodexRunner, RunRequest, get_runner_factory, register_builtin_runners
|
|
44
51
|
from ..runtime_logs import JsonlLogger
|
|
@@ -73,7 +80,7 @@ class DaemonApp:
|
|
|
73
80
|
self.daemon_managed_by = str(os.environ.get("DS_DAEMON_MANAGED_BY") or "manual").strip() or "manual"
|
|
74
81
|
self.repo_root = repo_root()
|
|
75
82
|
self.config_manager = ConfigManager(home)
|
|
76
|
-
self.runners_config = self.config_manager.
|
|
83
|
+
self.runners_config = self.config_manager.load_runners_config()
|
|
77
84
|
self.connectors_config = self.config_manager.load_named_normalized("connectors")
|
|
78
85
|
self.skill_installer = SkillInstaller(self.repo_root, home)
|
|
79
86
|
self.quest_service = QuestService(home, skill_installer=self.skill_installer)
|
|
@@ -647,6 +654,22 @@ class DaemonApp:
|
|
|
647
654
|
),
|
|
648
655
|
}
|
|
649
656
|
|
|
657
|
+
def reload_runners_config(self) -> dict[str, object]:
|
|
658
|
+
self.runners_config = self.config_manager.load_runners_config()
|
|
659
|
+
codex_config = self.runners_config.get("codex", {})
|
|
660
|
+
if isinstance(codex_config, dict):
|
|
661
|
+
self.codex_runner.binary = str(codex_config.get("binary") or "codex")
|
|
662
|
+
return {
|
|
663
|
+
"ok": True,
|
|
664
|
+
"runners": sorted(name for name, config in self.runners_config.items() if isinstance(config, dict)),
|
|
665
|
+
"codex": {
|
|
666
|
+
"binary": self.codex_runner.binary,
|
|
667
|
+
"approval_policy": codex_config.get("approval_policy") if isinstance(codex_config, dict) else None,
|
|
668
|
+
"sandbox_mode": codex_config.get("sandbox_mode") if isinstance(codex_config, dict) else None,
|
|
669
|
+
"mcp_tool_timeout_sec": codex_config.get("mcp_tool_timeout_sec") if isinstance(codex_config, dict) else None,
|
|
670
|
+
},
|
|
671
|
+
}
|
|
672
|
+
|
|
650
673
|
def _preferred_locale(self) -> str:
|
|
651
674
|
config = self.config_manager.load_named("config")
|
|
652
675
|
return str(config.get("default_locale") or "en-US").lower()
|
|
@@ -2090,6 +2113,210 @@ class DaemonApp:
|
|
|
2090
2113
|
channel = self._channel_with_bindings(connector_name)
|
|
2091
2114
|
return channel.list_bindings()
|
|
2092
2115
|
|
|
2116
|
+
def delete_connector_profile(self, connector_name: str, profile_id: str) -> dict | tuple[int, dict]:
|
|
2117
|
+
normalized_connector = str(connector_name or "").strip().lower()
|
|
2118
|
+
normalized_profile_id = str(profile_id or "").strip()
|
|
2119
|
+
if not normalized_connector:
|
|
2120
|
+
return 400, {"ok": False, "message": "Connector name is required."}
|
|
2121
|
+
if not normalized_profile_id:
|
|
2122
|
+
return 400, {"ok": False, "message": "Profile id is required."}
|
|
2123
|
+
if normalized_connector != "qq" and normalized_connector not in PROFILEABLE_CONNECTOR_NAMES:
|
|
2124
|
+
return 400, {"ok": False, "message": f"Connector `{normalized_connector}` does not support profile deletion."}
|
|
2125
|
+
|
|
2126
|
+
connectors = self.config_manager.load_named_normalized("connectors")
|
|
2127
|
+
connector_config = connectors.get(normalized_connector)
|
|
2128
|
+
if not isinstance(connector_config, dict):
|
|
2129
|
+
return 404, {"ok": False, "message": f"Unknown connector `{normalized_connector}`."}
|
|
2130
|
+
|
|
2131
|
+
if normalized_connector == "qq":
|
|
2132
|
+
profiles = list_qq_profiles(connector_config)
|
|
2133
|
+
else:
|
|
2134
|
+
profiles = list_connector_profiles(normalized_connector, connector_config)
|
|
2135
|
+
if not profiles:
|
|
2136
|
+
return 404, {"ok": False, "message": f"Connector `{normalized_connector}` has no configured profiles."}
|
|
2137
|
+
|
|
2138
|
+
remaining_profiles = [
|
|
2139
|
+
dict(item)
|
|
2140
|
+
for item in profiles
|
|
2141
|
+
if str(item.get("profile_id") or "").strip() != normalized_profile_id
|
|
2142
|
+
]
|
|
2143
|
+
if len(remaining_profiles) == len(profiles):
|
|
2144
|
+
return 404, {
|
|
2145
|
+
"ok": False,
|
|
2146
|
+
"message": f"Profile `{normalized_profile_id}` was not found under connector `{normalized_connector}`.",
|
|
2147
|
+
}
|
|
2148
|
+
|
|
2149
|
+
deleting_last_profile = len(remaining_profiles) == 0
|
|
2150
|
+
related_conversations = self._connector_profile_bound_conversations(
|
|
2151
|
+
normalized_connector,
|
|
2152
|
+
normalized_profile_id,
|
|
2153
|
+
include_all_if_single_profile=deleting_last_profile,
|
|
2154
|
+
)
|
|
2155
|
+
for conversation_id in related_conversations:
|
|
2156
|
+
self._unbind_connector_conversation_everywhere(normalized_connector, conversation_id)
|
|
2157
|
+
|
|
2158
|
+
next_connector_config = dict(connector_config)
|
|
2159
|
+
next_connector_config["profiles"] = remaining_profiles
|
|
2160
|
+
if deleting_last_profile:
|
|
2161
|
+
next_connector_config["enabled"] = False
|
|
2162
|
+
if normalized_connector == "qq":
|
|
2163
|
+
for key in ("app_id", "app_secret", "app_secret_env", "main_chat_id"):
|
|
2164
|
+
next_connector_config[key] = None
|
|
2165
|
+
else:
|
|
2166
|
+
profile_spec = CONNECTOR_PROFILE_SPECS.get(normalized_connector, {})
|
|
2167
|
+
for key in profile_spec.get("profile_fields", ()):
|
|
2168
|
+
if key in {"enabled", "transport", "mode"}:
|
|
2169
|
+
continue
|
|
2170
|
+
next_connector_config[key] = None
|
|
2171
|
+
if normalized_connector == "qq":
|
|
2172
|
+
connectors[normalized_connector] = normalize_qq_connector_config(next_connector_config)
|
|
2173
|
+
else:
|
|
2174
|
+
connectors[normalized_connector] = normalize_connector_config(normalized_connector, next_connector_config)
|
|
2175
|
+
save_result = self.config_manager.save_named_payload("connectors", connectors)
|
|
2176
|
+
if not bool(save_result.get("ok")):
|
|
2177
|
+
return 409, {
|
|
2178
|
+
"ok": False,
|
|
2179
|
+
"message": "Failed to persist connector profile deletion.",
|
|
2180
|
+
"errors": save_result.get("errors") or [],
|
|
2181
|
+
"warnings": save_result.get("warnings") or [],
|
|
2182
|
+
}
|
|
2183
|
+
|
|
2184
|
+
self._cleanup_connector_profile_runtime(
|
|
2185
|
+
normalized_connector,
|
|
2186
|
+
normalized_profile_id,
|
|
2187
|
+
clear_all=deleting_last_profile,
|
|
2188
|
+
)
|
|
2189
|
+
self.reload_connectors_config()
|
|
2190
|
+
snapshot = next(
|
|
2191
|
+
(item for item in self.list_connector_statuses() if str(item.get("name") or "").strip().lower() == normalized_connector),
|
|
2192
|
+
None,
|
|
2193
|
+
)
|
|
2194
|
+
return {
|
|
2195
|
+
"ok": True,
|
|
2196
|
+
"connector": normalized_connector,
|
|
2197
|
+
"profile_id": normalized_profile_id,
|
|
2198
|
+
"deleted": True,
|
|
2199
|
+
"deleted_bound_conversations": related_conversations,
|
|
2200
|
+
"remaining_profile_count": len(remaining_profiles),
|
|
2201
|
+
"snapshot": snapshot,
|
|
2202
|
+
}
|
|
2203
|
+
|
|
2204
|
+
def _connector_profile_bound_conversations(
|
|
2205
|
+
self,
|
|
2206
|
+
connector_name: str,
|
|
2207
|
+
profile_id: str,
|
|
2208
|
+
*,
|
|
2209
|
+
include_all_if_single_profile: bool = False,
|
|
2210
|
+
) -> list[str]:
|
|
2211
|
+
normalized_connector = str(connector_name or "").strip().lower()
|
|
2212
|
+
normalized_profile_id = str(profile_id or "").strip()
|
|
2213
|
+
if not normalized_connector or not normalized_profile_id:
|
|
2214
|
+
return []
|
|
2215
|
+
try:
|
|
2216
|
+
channel = self._channel_with_bindings(normalized_connector)
|
|
2217
|
+
except Exception:
|
|
2218
|
+
return []
|
|
2219
|
+
conversations: list[str] = []
|
|
2220
|
+
seen: set[str] = set()
|
|
2221
|
+
for item in channel.list_bindings():
|
|
2222
|
+
if not isinstance(item, dict):
|
|
2223
|
+
continue
|
|
2224
|
+
conversation_id = str(item.get("conversation_id") or "").strip()
|
|
2225
|
+
if not conversation_id:
|
|
2226
|
+
continue
|
|
2227
|
+
item_profile_id = str(item.get("profile_id") or "").strip()
|
|
2228
|
+
if not include_all_if_single_profile and item_profile_id != normalized_profile_id:
|
|
2229
|
+
continue
|
|
2230
|
+
identity = conversation_identity_key(conversation_id)
|
|
2231
|
+
if identity in seen:
|
|
2232
|
+
continue
|
|
2233
|
+
seen.add(identity)
|
|
2234
|
+
conversations.append(conversation_id)
|
|
2235
|
+
return conversations
|
|
2236
|
+
|
|
2237
|
+
def _unbind_connector_conversation_everywhere(self, connector_name: str, conversation_id: str) -> bool:
|
|
2238
|
+
normalized_connector = str(connector_name or "").strip().lower()
|
|
2239
|
+
normalized_conversation_id = normalize_conversation_id(conversation_id)
|
|
2240
|
+
if not normalized_connector or not normalized_conversation_id:
|
|
2241
|
+
return False
|
|
2242
|
+
try:
|
|
2243
|
+
channel = self._channel_with_bindings(normalized_connector)
|
|
2244
|
+
except Exception:
|
|
2245
|
+
return False
|
|
2246
|
+
bound_quest_id = str(channel.resolve_bound_quest(normalized_conversation_id) or "").strip() or None
|
|
2247
|
+
removed = channel.unbind_conversation(
|
|
2248
|
+
normalized_conversation_id,
|
|
2249
|
+
quest_id=bound_quest_id,
|
|
2250
|
+
)
|
|
2251
|
+
if bound_quest_id:
|
|
2252
|
+
self.sessions.unbind(bound_quest_id, normalized_conversation_id)
|
|
2253
|
+
self.quest_service.unbind_source(bound_quest_id, normalized_conversation_id)
|
|
2254
|
+
return removed
|
|
2255
|
+
|
|
2256
|
+
def _cleanup_connector_profile_runtime(
|
|
2257
|
+
self,
|
|
2258
|
+
connector_name: str,
|
|
2259
|
+
profile_id: str,
|
|
2260
|
+
*,
|
|
2261
|
+
clear_all: bool = False,
|
|
2262
|
+
) -> None:
|
|
2263
|
+
normalized_connector = str(connector_name or "").strip().lower()
|
|
2264
|
+
normalized_profile_id = str(profile_id or "").strip()
|
|
2265
|
+
if not normalized_connector or not normalized_profile_id:
|
|
2266
|
+
return
|
|
2267
|
+
connector_root = self.home / "logs" / "connectors" / normalized_connector
|
|
2268
|
+
profile_root = connector_root / "profiles" / normalized_profile_id
|
|
2269
|
+
shutil.rmtree(profile_root, ignore_errors=True)
|
|
2270
|
+
|
|
2271
|
+
def matches_profile(payload: object, *, conversation_id: str | None = None) -> bool:
|
|
2272
|
+
if clear_all:
|
|
2273
|
+
return True
|
|
2274
|
+
item = payload if isinstance(payload, dict) else {}
|
|
2275
|
+
item_profile_id = str(item.get("profile_id") or "").strip() if isinstance(item, dict) else ""
|
|
2276
|
+
if item_profile_id:
|
|
2277
|
+
return item_profile_id == normalized_profile_id
|
|
2278
|
+
parsed = parse_conversation_id(conversation_id or "")
|
|
2279
|
+
return str((parsed or {}).get("profile_id") or "").strip() == normalized_profile_id
|
|
2280
|
+
|
|
2281
|
+
bindings_path = connector_root / "bindings.json"
|
|
2282
|
+
bindings_payload = read_json(bindings_path, {"bindings": {}})
|
|
2283
|
+
binding_map = bindings_payload.get("bindings")
|
|
2284
|
+
if isinstance(binding_map, dict):
|
|
2285
|
+
filtered_bindings = {
|
|
2286
|
+
key: value
|
|
2287
|
+
for key, value in binding_map.items()
|
|
2288
|
+
if not matches_profile(value, conversation_id=str(key))
|
|
2289
|
+
}
|
|
2290
|
+
bindings_payload["bindings"] = filtered_bindings
|
|
2291
|
+
write_json(bindings_path, bindings_payload)
|
|
2292
|
+
|
|
2293
|
+
state_path = connector_root / "state.json"
|
|
2294
|
+
state_payload = read_json(state_path, {})
|
|
2295
|
+
if isinstance(state_payload, dict):
|
|
2296
|
+
for key in ("recent_conversations", "known_targets"):
|
|
2297
|
+
raw_items = state_payload.get(key)
|
|
2298
|
+
if isinstance(raw_items, list):
|
|
2299
|
+
state_payload[key] = [
|
|
2300
|
+
item
|
|
2301
|
+
for item in raw_items
|
|
2302
|
+
if not matches_profile(
|
|
2303
|
+
item,
|
|
2304
|
+
conversation_id=str((item or {}).get("conversation_id") or "") if isinstance(item, dict) else "",
|
|
2305
|
+
)
|
|
2306
|
+
]
|
|
2307
|
+
if clear_all:
|
|
2308
|
+
state_payload["last_conversation_id"] = None
|
|
2309
|
+
else:
|
|
2310
|
+
last_conversation_id = str(state_payload.get("last_conversation_id") or "").strip()
|
|
2311
|
+
if last_conversation_id and matches_profile({}, conversation_id=last_conversation_id):
|
|
2312
|
+
state_payload["last_conversation_id"] = None
|
|
2313
|
+
write_json(state_path, state_payload)
|
|
2314
|
+
|
|
2315
|
+
if clear_all:
|
|
2316
|
+
runtime_path = connector_root / "runtime.json"
|
|
2317
|
+
if runtime_path.exists():
|
|
2318
|
+
write_json(runtime_path, {})
|
|
2319
|
+
|
|
2093
2320
|
def preview_connector_binding_conflicts(
|
|
2094
2321
|
self,
|
|
2095
2322
|
requested_bindings: list[dict[str, object]] | None,
|
|
@@ -91,7 +91,7 @@ class PromptBuilder:
|
|
|
91
91
|
quest_root = Path(snapshot["quest_root"])
|
|
92
92
|
active_anchor = str(snapshot.get("active_anchor") or skill_id)
|
|
93
93
|
default_locale = str(runtime_config.get("default_locale") or "en-US")
|
|
94
|
-
system_block = self._prompt_fragment("
|
|
94
|
+
system_block = self._prompt_fragment("system.md", quest_root=quest_root)
|
|
95
95
|
connector_contract_block = self._connector_contract_block(quest_id=quest_id, snapshot=snapshot)
|
|
96
96
|
sections = [
|
|
97
97
|
system_block,
|
|
@@ -332,7 +332,8 @@ class PromptBuilder:
|
|
|
332
332
|
connector = self._active_external_connector_name(quest_id=quest_id, snapshot=snapshot)
|
|
333
333
|
if connector is None:
|
|
334
334
|
return ""
|
|
335
|
-
|
|
335
|
+
quest_root = Path(snapshot["quest_root"])
|
|
336
|
+
path = self._prompt_path(Path("connectors") / f"{connector}.md", quest_root=quest_root)
|
|
336
337
|
if not path.exists():
|
|
337
338
|
return ""
|
|
338
339
|
return self._markdown_body(path)
|
|
@@ -601,10 +602,18 @@ class PromptBuilder:
|
|
|
601
602
|
|
|
602
603
|
return "\n".join(lines)
|
|
603
604
|
|
|
604
|
-
def _prompt_fragment(self, relative_path: str) -> str:
|
|
605
|
-
path = self.
|
|
605
|
+
def _prompt_fragment(self, relative_path: str | Path, *, quest_root: Path | None = None) -> str:
|
|
606
|
+
path = self._prompt_path(relative_path, quest_root=quest_root)
|
|
606
607
|
return self._markdown_body(path)
|
|
607
608
|
|
|
609
|
+
def _prompt_path(self, relative_path: str | Path, *, quest_root: Path | None = None) -> Path:
|
|
610
|
+
normalized = Path(relative_path)
|
|
611
|
+
if quest_root is not None:
|
|
612
|
+
quest_path = quest_root / ".codex" / "prompts" / normalized
|
|
613
|
+
if quest_path.exists():
|
|
614
|
+
return quest_path
|
|
615
|
+
return self.repo_root / "src" / "prompts" / normalized
|
|
616
|
+
|
|
608
617
|
def _latest_user_message(self, quest_id: str) -> dict | None:
|
|
609
618
|
for item in reversed(self.quest_service.history(quest_id, limit=80)):
|
|
610
619
|
if str(item.get("role") or "") == "user":
|
|
@@ -768,6 +777,13 @@ class PromptBuilder:
|
|
|
768
777
|
"- tool_call_keepalive_protocol: for active multi-step work outside long detached experiment waits, if you have spent roughly 10 to 30 tool calls without a user-visible checkpoint, send one concise artifact.interact progress update before continuing",
|
|
769
778
|
"- human_progress_shape_protocol: ordinary progress updates should usually make three things explicit in human language: the current task, the main difficulty or latest real progress, and the concrete next measure you will take",
|
|
770
779
|
"- eta_visibility_protocol: for baseline reproduction, main experiments, analysis experiments, and other important long-running phases, progress updates should also make the expected time to the next meaningful result, next milestone, or next user-visible update explicit; use roughly 10 to 30 minutes as the normal update window, and if the ETA is unreliable, say that and give a realistic next check-in window instead",
|
|
780
|
+
"- stage_plan_protocol: for `baseline`, `experiment`, and `analysis-campaign`, do not jump straight into substantial setup, code changes, or real runs; first create or update quest-visible `PLAN.md` and `CHECKLIST.md`, then keep them aligned with the actual route",
|
|
781
|
+
"- baseline_plan_protocol: in `baseline`, read the source paper and source repo first when they exist, then make `PLAN.md` cover the route, source package, code touchpoints, smoke path, real-run path, fallback options, monitoring rules, and verification targets before substantial work continues",
|
|
782
|
+
"- experiment_plan_protocol: in `experiment`, make `PLAN.md` start with the selected idea summarized in 1 to 2 sentences and then map the idea into code touchpoints, comparability rules, smoke / pilot path, full-run path, fallback options, monitoring rules, and revision notes",
|
|
783
|
+
"- analysis_plan_protocol: in `analysis-campaign`, treat `PLAN.md` as the campaign charter and make it cover the slice list, comparability boundary, asset and comparator plan, smoke / full-run policy, reporting plan, and revision log before real slices launch",
|
|
784
|
+
"- checklist_maintenance_protocol: for those same stages, treat `CHECKLIST.md` as the living execution surface and update it during reading, setup, coding, smoke tests, real runs, validation, aggregation, and route changes instead of letting progress live only in chat",
|
|
785
|
+
"- plan_revision_protocol: if the route, comparability contract, source package, execution strategy, slice ordering, or campaign interpretation changes materially, revise `PLAN.md` before continuing",
|
|
786
|
+
"- stage_milestone_summary_protocol: for accepted baseline, selected idea, completed main experiment, and completed analysis-campaign milestones, usually open with 1 to 2 sentences that say what happened, what it means, and the exact next step before expanding into more detail",
|
|
771
787
|
"- idea_milestone_protocol: immediately after a successful accepted artifact.submit_idea(...), send a threaded milestone that explains the idea in plain language and explicitly states whether it currently looks valid, research-worthy, and insight-bearing, plus the main risk and exact next experiment",
|
|
772
788
|
"- idea_divergence_protocol: in the idea stage, separate divergence from convergence; unless strong durable evidence already narrows the route to one obvious serious option, do not collapse onto the first plausible route before generating a small but meaningfully diverse candidate slate",
|
|
773
789
|
"- idea_lens_protocol: when idea candidates cluster around one mechanism family, deliberately switch ideation lenses such as problem-first vs solution-first, tension hunting, analogy transfer, inversion, or adjacent-possible reasoning before final selection",
|
|
@@ -7,16 +7,13 @@ from .shared import slugify
|
|
|
7
7
|
|
|
8
8
|
|
|
9
9
|
QQ_PROFILE_ID_PREFIX = "qq-profile"
|
|
10
|
-
QQ_DEFAULT_SECRET_ENV = "QQ_APP_SECRET"
|
|
11
|
-
|
|
12
|
-
|
|
13
10
|
def default_qq_profile() -> dict[str, Any]:
|
|
14
11
|
return {
|
|
15
12
|
"profile_id": None,
|
|
16
13
|
"enabled": True,
|
|
17
14
|
"app_id": None,
|
|
18
15
|
"app_secret": None,
|
|
19
|
-
"app_secret_env":
|
|
16
|
+
"app_secret_env": None,
|
|
20
17
|
"bot_name": "DeepScientist",
|
|
21
18
|
"main_chat_id": None,
|
|
22
19
|
}
|
|
@@ -27,6 +24,13 @@ def _as_text(value: Any) -> str | None:
|
|
|
27
24
|
return text or None
|
|
28
25
|
|
|
29
26
|
|
|
27
|
+
def _normalize_secret_pair(payload: dict[str, Any], direct_key: str, env_key: str) -> None:
|
|
28
|
+
direct = _as_text(payload.get(direct_key))
|
|
29
|
+
env_name = _as_text(payload.get(env_key))
|
|
30
|
+
payload[direct_key] = direct
|
|
31
|
+
payload[env_key] = None if direct else env_name
|
|
32
|
+
|
|
33
|
+
|
|
30
34
|
def _profile_id_seed(*, profile_id: Any, app_id: Any, bot_name: Any, index: int) -> str:
|
|
31
35
|
explicit = _as_text(profile_id)
|
|
32
36
|
if explicit:
|
|
@@ -77,12 +81,14 @@ def merge_qq_profile_config(shared_config: dict[str, Any] | None, profile: dict[
|
|
|
77
81
|
normalized = normalize_qq_connector_config(shared_config)
|
|
78
82
|
merged = deepcopy(normalized)
|
|
79
83
|
merged.pop("profiles", None)
|
|
84
|
+
app_secret = _as_text(profile.get("app_secret"))
|
|
85
|
+
app_secret_env = _as_text(profile.get("app_secret_env"))
|
|
80
86
|
merged.update(
|
|
81
87
|
{
|
|
82
88
|
"profile_id": str(profile.get("profile_id") or "").strip() or None,
|
|
83
89
|
"app_id": _as_text(profile.get("app_id")),
|
|
84
|
-
"app_secret":
|
|
85
|
-
"app_secret_env":
|
|
90
|
+
"app_secret": app_secret,
|
|
91
|
+
"app_secret_env": None if app_secret else app_secret_env,
|
|
86
92
|
"bot_name": _as_text(profile.get("bot_name")) or str(normalized.get("bot_name") or "DeepScientist"),
|
|
87
93
|
"main_chat_id": _as_text(profile.get("main_chat_id")),
|
|
88
94
|
"enabled": bool(normalized.get("enabled", False)) and bool(profile.get("enabled", True)),
|
|
@@ -113,7 +119,7 @@ def normalize_qq_connector_config(config: dict[str, Any] | None) -> dict[str, An
|
|
|
113
119
|
"transport": "gateway_direct",
|
|
114
120
|
"app_id": None,
|
|
115
121
|
"app_secret": None,
|
|
116
|
-
"app_secret_env":
|
|
122
|
+
"app_secret_env": None,
|
|
117
123
|
"bot_name": "DeepScientist",
|
|
118
124
|
"command_prefix": "/",
|
|
119
125
|
"main_chat_id": None,
|
|
@@ -132,7 +138,7 @@ def normalize_qq_connector_config(config: dict[str, Any] | None) -> dict[str, An
|
|
|
132
138
|
shared["transport"] = "gateway_direct"
|
|
133
139
|
shared["command_prefix"] = _as_text(shared.get("command_prefix")) or "/"
|
|
134
140
|
shared["bot_name"] = _as_text(shared.get("bot_name")) or "DeepScientist"
|
|
135
|
-
shared
|
|
141
|
+
_normalize_secret_pair(shared, "app_secret", "app_secret_env")
|
|
136
142
|
|
|
137
143
|
raw_profiles = payload.get("profiles")
|
|
138
144
|
items = list(raw_profiles) if isinstance(raw_profiles, list) else []
|
|
@@ -144,7 +150,9 @@ def normalize_qq_connector_config(config: dict[str, Any] | None) -> dict[str, An
|
|
|
144
150
|
"main_chat_id": payload.get("main_chat_id"),
|
|
145
151
|
}
|
|
146
152
|
if not items:
|
|
147
|
-
|
|
153
|
+
has_direct_profile_seed = any(_as_text(legacy_profile_seed.get(key)) for key in ("app_id", "app_secret", "main_chat_id"))
|
|
154
|
+
has_env_profile_seed = bool(payload.get("enabled")) and bool(_as_text(legacy_profile_seed.get("app_secret_env")))
|
|
155
|
+
if has_direct_profile_seed or has_env_profile_seed:
|
|
148
156
|
items = [legacy_profile_seed]
|
|
149
157
|
|
|
150
158
|
profiles: list[dict[str, Any]] = []
|
|
@@ -157,6 +165,7 @@ def normalize_qq_connector_config(config: dict[str, Any] | None) -> dict[str, An
|
|
|
157
165
|
current["app_id"] = _as_text(current.get("app_id"))
|
|
158
166
|
current["app_secret"] = _as_text(current.get("app_secret"))
|
|
159
167
|
current["app_secret_env"] = _as_text(current.get("app_secret_env")) or shared["app_secret_env"]
|
|
168
|
+
_normalize_secret_pair(current, "app_secret", "app_secret_env")
|
|
160
169
|
current["bot_name"] = _as_text(current.get("bot_name")) or shared["bot_name"]
|
|
161
170
|
current["main_chat_id"] = _as_text(current.get("main_chat_id"))
|
|
162
171
|
current["profile_id"] = _unique_profile_id(
|
|
@@ -181,6 +190,7 @@ def normalize_qq_connector_config(config: dict[str, Any] | None) -> dict[str, An
|
|
|
181
190
|
else:
|
|
182
191
|
shared["app_id"] = None
|
|
183
192
|
shared["app_secret"] = None
|
|
193
|
+
shared["app_secret_env"] = None
|
|
184
194
|
shared["main_chat_id"] = None
|
|
185
195
|
|
|
186
196
|
return shared
|
|
@@ -499,6 +499,7 @@ class CodexRunner:
|
|
|
499
499
|
workspace_root = request.worktree_root or request.quest_root
|
|
500
500
|
run_root = ensure_dir(request.quest_root / ".ds" / "runs" / request.run_id)
|
|
501
501
|
history_root = ensure_dir(request.quest_root / ".ds" / "codex_history" / request.run_id)
|
|
502
|
+
runner_config = self._load_runner_config()
|
|
502
503
|
prompt = self.prompt_builder.build(
|
|
503
504
|
quest_id=request.quest_id,
|
|
504
505
|
skill_id=request.skill_id,
|
|
@@ -514,8 +515,9 @@ class CodexRunner:
|
|
|
514
515
|
quest_root=request.quest_root,
|
|
515
516
|
quest_id=request.quest_id,
|
|
516
517
|
run_id=request.run_id,
|
|
518
|
+
runner_config=runner_config,
|
|
517
519
|
)
|
|
518
|
-
command = self._build_command(request, prompt)
|
|
520
|
+
command = self._build_command(request, prompt, runner_config=runner_config)
|
|
519
521
|
write_json(
|
|
520
522
|
run_root / "command.json",
|
|
521
523
|
{
|
|
@@ -752,9 +754,10 @@ class CodexRunner:
|
|
|
752
754
|
process.wait(timeout=3)
|
|
753
755
|
return interrupted
|
|
754
756
|
|
|
755
|
-
def _build_command(self, request: RunRequest, prompt: str) -> list[str]:
|
|
757
|
+
def _build_command(self, request: RunRequest, prompt: str, *, runner_config: dict[str, Any] | None = None) -> list[str]:
|
|
756
758
|
workspace_root = request.worktree_root or request.quest_root
|
|
757
759
|
resolved_binary = resolve_runner_binary(self.binary, runner_name="codex")
|
|
760
|
+
resolved_runner_config = runner_config if isinstance(runner_config, dict) else self._load_runner_config()
|
|
758
761
|
command = [
|
|
759
762
|
resolved_binary or self.binary,
|
|
760
763
|
"--search",
|
|
@@ -771,6 +774,11 @@ class CodexRunner:
|
|
|
771
774
|
reasoning_effort = request.reasoning_effort if request.reasoning_effort is not None else "xhigh"
|
|
772
775
|
if reasoning_effort:
|
|
773
776
|
command.extend(["-c", f'model_reasoning_effort="{reasoning_effort}"'])
|
|
777
|
+
tool_timeout_sec = self._positive_timeout_seconds(resolved_runner_config.get("mcp_tool_timeout_sec"))
|
|
778
|
+
if tool_timeout_sec is not None:
|
|
779
|
+
timeout_value = int(tool_timeout_sec) if float(tool_timeout_sec).is_integer() else float(tool_timeout_sec)
|
|
780
|
+
for server_name in ("memory", "artifact", "bash_exec"):
|
|
781
|
+
command.extend(["-c", f"mcp_servers.{server_name}.tool_timeout_sec={timeout_value}"])
|
|
774
782
|
if request.sandbox_mode:
|
|
775
783
|
command.extend(["--sandbox", request.sandbox_mode])
|
|
776
784
|
command.append("-")
|
|
@@ -783,6 +791,7 @@ class CodexRunner:
|
|
|
783
791
|
quest_root: Path,
|
|
784
792
|
quest_id: str,
|
|
785
793
|
run_id: str,
|
|
794
|
+
runner_config: dict[str, Any] | None = None,
|
|
786
795
|
) -> Path:
|
|
787
796
|
target = ensure_dir(workspace_root / ".codex")
|
|
788
797
|
source = Path(os.environ.get("CODEX_HOME", str(Path.home() / ".codex"))).expanduser()
|
|
@@ -812,6 +821,7 @@ class CodexRunner:
|
|
|
812
821
|
workspace_root=workspace_root,
|
|
813
822
|
quest_id=quest_id,
|
|
814
823
|
run_id=run_id,
|
|
824
|
+
runner_config=runner_config,
|
|
815
825
|
)
|
|
816
826
|
return target
|
|
817
827
|
|
|
@@ -823,6 +833,7 @@ class CodexRunner:
|
|
|
823
833
|
workspace_root: Path,
|
|
824
834
|
quest_id: str,
|
|
825
835
|
run_id: str,
|
|
836
|
+
runner_config: dict[str, Any] | None = None,
|
|
826
837
|
) -> None:
|
|
827
838
|
config_path = codex_home / "config.toml"
|
|
828
839
|
existing = config_path.read_text(encoding="utf-8") if config_path.exists() else ""
|
|
@@ -834,17 +845,8 @@ class CodexRunner:
|
|
|
834
845
|
prefix = existing.rstrip()
|
|
835
846
|
|
|
836
847
|
pythonpath = os.environ.get("PYTHONPATH", "")
|
|
837
|
-
|
|
838
|
-
|
|
839
|
-
runners_cfg = ConfigManager(self.home).load_named("runners")
|
|
840
|
-
raw_codex_cfg = runners_cfg.get("codex") if isinstance(runners_cfg.get("codex"), dict) else {}
|
|
841
|
-
raw_timeout = raw_codex_cfg.get("mcp_tool_timeout_sec") if isinstance(raw_codex_cfg, dict) else None
|
|
842
|
-
if raw_timeout is not None:
|
|
843
|
-
tool_timeout_sec = float(raw_timeout)
|
|
844
|
-
except (OSError, ValueError, TypeError, KeyError):
|
|
845
|
-
tool_timeout_sec = None
|
|
846
|
-
if tool_timeout_sec is not None and tool_timeout_sec <= 0:
|
|
847
|
-
tool_timeout_sec = None
|
|
848
|
+
resolved_runner_config = runner_config if isinstance(runner_config, dict) else self._load_runner_config()
|
|
849
|
+
tool_timeout_sec = self._positive_timeout_seconds(resolved_runner_config.get("mcp_tool_timeout_sec"))
|
|
848
850
|
|
|
849
851
|
shared_env = {
|
|
850
852
|
"DEEPSCIENTIST_HOME": str(self.home),
|
|
@@ -896,3 +898,19 @@ class CodexRunner:
|
|
|
896
898
|
for key, value in env.items():
|
|
897
899
|
lines.append(f"{key} = {json.dumps(value)}")
|
|
898
900
|
return "\n".join(lines)
|
|
901
|
+
|
|
902
|
+
def _load_runner_config(self) -> dict[str, Any]:
    """Read the runners config and return its "codex" section.

    Falls back to an empty dict when the config file cannot be read
    (OSError) or when the "codex" entry is missing or not a mapping.
    """
    try:
        runners_cfg = ConfigManager(self.home).load_runners_config()
    except OSError:
        # Unreadable/missing config is treated as "no overrides configured".
        return {}
    section = runners_cfg.get("codex")
    if isinstance(section, dict):
        return section
    return {}
|
|
909
|
+
|
|
910
|
+
@staticmethod
|
|
911
|
+
def _positive_timeout_seconds(value: object) -> float | None:
|
|
912
|
+
try:
|
|
913
|
+
timeout = float(value)
|
|
914
|
+
except (TypeError, ValueError):
|
|
915
|
+
return None
|
|
916
|
+
return timeout if timeout > 0 else None
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
from copy import deepcopy
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def _as_text(value: Any) -> str | None:
|
|
9
|
+
text = str(value or "").strip()
|
|
10
|
+
return text or None
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def _as_bool_env(name: str) -> bool:
    """Return True when env var `name` holds a truthy token (1/true/yes/on/y)."""
    raw = str(os.environ.get(name) or "").strip()
    if not raw:
        # Unset or blank variable reads as False.
        return False
    return raw.lower() in {"1", "true", "yes", "on", "y"}
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def codex_runtime_overrides() -> dict[str, str]:
    """Build Codex config overrides from DEEPSCIENTIST_CODEX_* env vars.

    DEEPSCIENTIST_CODEX_YOLO is a truthy shorthand that fills in
    "never" / "danger-full-access" unless an explicit approval policy or
    sandbox mode was already provided via its dedicated variable.
    """
    def _clean(name: str) -> str | None:
        # Strip whitespace and drop empty values (same semantics as _as_text).
        raw = str(os.environ.get(name) or "").strip()
        return raw or None

    approval_policy = _clean("DEEPSCIENTIST_CODEX_APPROVAL_POLICY")
    sandbox_mode = _clean("DEEPSCIENTIST_CODEX_SANDBOX_MODE")

    yolo = _clean("DEEPSCIENTIST_CODEX_YOLO")
    if yolo is not None and yolo.lower() in {"1", "true", "yes", "on", "y"}:
        approval_policy = approval_policy or "never"
        sandbox_mode = sandbox_mode or "danger-full-access"

    overrides: dict[str, str] = {}
    if approval_policy:
        overrides["approval_policy"] = approval_policy
    if sandbox_mode:
        overrides["sandbox_mode"] = sandbox_mode
    return overrides
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def apply_codex_runtime_overrides(config: dict[str, Any] | None) -> dict[str, Any]:
    """Return a deep copy of `config` with env-derived Codex overrides on top.

    The input mapping is never mutated; None is treated as an empty config.
    """
    merged: dict[str, Any] = deepcopy(config) if config else {}
    for key, value in codex_runtime_overrides().items():
        merged[key] = value
    return merged
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def apply_runners_runtime_overrides(runners_config: dict[str, Any] | None) -> dict[str, Any]:
    """Deep-copy the runners config and refresh its "codex" section.

    A missing or non-dict "codex" entry is replaced with an empty mapping
    before the runtime overrides are applied; the input is never mutated.
    """
    merged: dict[str, Any] = deepcopy(runners_config) if runners_config else {}
    codex_section = merged.get("codex")
    if not isinstance(codex_section, dict):
        codex_section = {}
    merged["codex"] = apply_codex_runtime_overrides(codex_section)
    return merged
|
|
@@ -41,6 +41,8 @@ class SkillInstaller:
|
|
|
41
41
|
}
|
|
42
42
|
|
|
43
43
|
def sync_quest(self, quest_root: Path) -> dict:
|
|
44
|
+
prompts_root = ensure_dir(quest_root / ".codex" / "prompts")
|
|
45
|
+
self._sync_prompt_tree(prompts_root)
|
|
44
46
|
codex_root = ensure_dir(quest_root / ".codex" / "skills")
|
|
45
47
|
claude_root = ensure_dir(quest_root / ".claude" / "agents")
|
|
46
48
|
copied_codex: list[str] = []
|
|
@@ -58,6 +60,7 @@ class SkillInstaller:
|
|
|
58
60
|
self._prune_bundle_targets(codex_root, expected_codex)
|
|
59
61
|
self._prune_bundle_targets(claude_root, expected_claude)
|
|
60
62
|
return {
|
|
63
|
+
"prompts": [str(path) for path in sorted(prompts_root.rglob("*")) if path.is_file()],
|
|
61
64
|
"codex": copied_codex,
|
|
62
65
|
"claude": copied_claude,
|
|
63
66
|
"notes": [],
|
|
@@ -187,6 +190,10 @@ class SkillInstaller:
|
|
|
187
190
|
temp_path.write_bytes(payload)
|
|
188
191
|
temp_path.replace(path)
|
|
189
192
|
|
|
193
|
+
def _sync_prompt_tree(self, target_root: Path) -> None:
    """Mirror the bundled prompt tree (repo src/prompts) into `target_root`."""
    bundled_prompts = self.repo_root / "src" / "prompts"
    self._sync_bundle_tree(bundled_prompts, target_root)
|
|
196
|
+
|
|
190
197
|
@staticmethod
|
|
191
198
|
def _prune_bundle_targets(root: Path, expected_names: set[str]) -> None:
|
|
192
199
|
for target in sorted(root.glob("deepscientist-*"), reverse=True):
|