@researai/deepscientist 1.5.12 → 1.5.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/ds.js +20 -3
- package/docs/en/00_QUICK_START.md +24 -5
- package/docs/en/01_SETTINGS_REFERENCE.md +4 -0
- package/docs/en/09_DOCTOR.md +24 -5
- package/docs/en/15_CODEX_PROVIDER_SETUP.md +113 -15
- package/docs/zh/00_QUICK_START.md +24 -5
- package/docs/zh/01_SETTINGS_REFERENCE.md +4 -0
- package/docs/zh/09_DOCTOR.md +24 -5
- package/docs/zh/15_CODEX_PROVIDER_SETUP.md +113 -15
- package/package.json +2 -1
- package/pyproject.toml +1 -1
- package/src/deepscientist/__init__.py +1 -1
- package/src/deepscientist/cli.py +3 -0
- package/src/deepscientist/codex_cli_compat.py +117 -0
- package/src/deepscientist/config/service.py +53 -6
- package/src/deepscientist/runners/codex.py +11 -2
- package/src/deepscientist/runners/runtime_overrides.py +3 -0
- package/src/skills/baseline/references/artifact-payload-examples.md +39 -0
- package/src/tui/dist/lib/connectorConfig.js +90 -0
- package/src/tui/dist/lib/qr.js +21 -0
- package/src/tui/package.json +2 -1
package/bin/ds.js
CHANGED
|
@@ -36,11 +36,15 @@ const pythonCommands = new Set([
|
|
|
36
36
|
const UPDATE_PACKAGE_NAME = String(packageJson.name || '@researai/deepscientist').trim() || '@researai/deepscientist';
|
|
37
37
|
const UPDATE_CHECK_TTL_MS = 12 * 60 * 60 * 1000;
|
|
38
38
|
|
|
39
|
-
const optionsWithValues = new Set(['--home', '--host', '--port', '--quest-id', '--mode', '--proxy', '--codex-profile']);
|
|
39
|
+
const optionsWithValues = new Set(['--home', '--host', '--port', '--quest-id', '--mode', '--proxy', '--codex-profile', '--codex']);
|
|
40
40
|
|
|
41
|
-
function buildCodexOverrideEnv({ yolo = false, profile = null } = {}) {
|
|
41
|
+
function buildCodexOverrideEnv({ yolo = false, profile = null, binary = null } = {}) {
|
|
42
42
|
const normalizedProfile = typeof profile === 'string' ? profile.trim() : '';
|
|
43
|
+
const normalizedBinary = typeof binary === 'string' ? binary.trim() : '';
|
|
43
44
|
const overrides = {};
|
|
45
|
+
if (normalizedBinary) {
|
|
46
|
+
overrides.DEEPSCIENTIST_CODEX_BINARY = normalizedBinary;
|
|
47
|
+
}
|
|
44
48
|
if (!yolo) {
|
|
45
49
|
if (normalizedProfile) {
|
|
46
50
|
overrides.DEEPSCIENTIST_CODEX_PROFILE = normalizedProfile;
|
|
@@ -103,6 +107,7 @@ Launcher flags:
|
|
|
103
107
|
--proxy <url> Use an outbound HTTP/WS proxy for npm and Python runtime traffic
|
|
104
108
|
--yolo Run Codex in YOLO mode: approval_policy=never and sandbox_mode=danger-full-access
|
|
105
109
|
--codex-profile <id> Run DeepScientist with a specific Codex profile, for example \`m27\`
|
|
110
|
+
--codex <path> Run DeepScientist with a specific Codex executable path for this launch
|
|
106
111
|
--quest-id <id> Open the TUI on one quest directly
|
|
107
112
|
|
|
108
113
|
Update:
|
|
@@ -981,6 +986,7 @@ function parseLauncherArgs(argv) {
|
|
|
981
986
|
let skipUpdateCheck = false;
|
|
982
987
|
let yolo = false;
|
|
983
988
|
let codexProfile = null;
|
|
989
|
+
let codexBinary = null;
|
|
984
990
|
|
|
985
991
|
if (args[0] === 'ui') {
|
|
986
992
|
args.shift();
|
|
@@ -1001,6 +1007,7 @@ function parseLauncherArgs(argv) {
|
|
|
1001
1007
|
else if (arg === '--skip-update-check') skipUpdateCheck = true;
|
|
1002
1008
|
else if (arg === '--yolo') yolo = true;
|
|
1003
1009
|
else if (arg === '--codex-profile' && args[index + 1]) codexProfile = args[++index];
|
|
1010
|
+
else if (arg === '--codex' && args[index + 1]) codexBinary = args[++index];
|
|
1004
1011
|
else if (arg === '--host' && args[index + 1]) host = args[++index];
|
|
1005
1012
|
else if (arg === '--port' && args[index + 1]) port = Number(args[++index]);
|
|
1006
1013
|
else if (arg === '--home' && args[index + 1]) home = path.resolve(args[++index]);
|
|
@@ -1027,6 +1034,7 @@ function parseLauncherArgs(argv) {
|
|
|
1027
1034
|
skipUpdateCheck,
|
|
1028
1035
|
yolo,
|
|
1029
1036
|
codexProfile,
|
|
1037
|
+
codexBinary,
|
|
1030
1038
|
};
|
|
1031
1039
|
}
|
|
1032
1040
|
|
|
@@ -2321,6 +2329,10 @@ function normalizePythonCliArgs(args, home) {
|
|
|
2321
2329
|
index += 1;
|
|
2322
2330
|
continue;
|
|
2323
2331
|
}
|
|
2332
|
+
if (arg === '--codex') {
|
|
2333
|
+
index += 1;
|
|
2334
|
+
continue;
|
|
2335
|
+
}
|
|
2324
2336
|
normalized.push(arg);
|
|
2325
2337
|
}
|
|
2326
2338
|
return ['--home', home, ...normalized];
|
|
@@ -4002,7 +4014,11 @@ async function launcherMain(rawArgs) {
|
|
|
4002
4014
|
|
|
4003
4015
|
const pythonRuntime = ensurePythonRuntime(home);
|
|
4004
4016
|
const runtimePython = pythonRuntime.runtimePython;
|
|
4005
|
-
const codexOverrideEnv = buildCodexOverrideEnv({
|
|
4017
|
+
const codexOverrideEnv = buildCodexOverrideEnv({
|
|
4018
|
+
yolo: options.yolo,
|
|
4019
|
+
profile: options.codexProfile,
|
|
4020
|
+
binary: options.codexBinary,
|
|
4021
|
+
});
|
|
4006
4022
|
ensureInitialized(home, runtimePython);
|
|
4007
4023
|
if (await maybeHandleStartupUpdate(home, rawArgs, options)) {
|
|
4008
4024
|
return true;
|
|
@@ -4083,6 +4099,7 @@ async function main() {
|
|
|
4083
4099
|
const codexOverrideEnv = buildCodexOverrideEnv({
|
|
4084
4100
|
yolo: args.includes('--yolo'),
|
|
4085
4101
|
profile: readOptionValue(args, '--codex-profile'),
|
|
4102
|
+
binary: readOptionValue(args, '--codex'),
|
|
4086
4103
|
});
|
|
4087
4104
|
if (positional.value === 'run' || positional.value === 'daemon') {
|
|
4088
4105
|
maybePrintOptionalLatexNotice(home);
|
|
@@ -79,7 +79,7 @@ npm install -g @researai/deepscientist
|
|
|
79
79
|
|
|
80
80
|
This installs the `ds` command globally.
|
|
81
81
|
|
|
82
|
-
DeepScientist depends on a working Codex CLI.
|
|
82
|
+
DeepScientist depends on a working Codex CLI. It prefers the `codex` already available on your machine and only falls back to the bundled npm dependency when no local Codex path is available. If `codex` is still missing afterward, repair it explicitly:
|
|
83
83
|
|
|
84
84
|
```bash
|
|
85
85
|
npm install -g @openai/codex
|
|
@@ -124,25 +124,44 @@ ds doctor
|
|
|
124
124
|
If you already use a named Codex profile for MiniMax, GLM, Volcengine Ark, Alibaba Bailian, or another provider-backed path, verify that profile first in a terminal:
|
|
125
125
|
|
|
126
126
|
```bash
|
|
127
|
-
codex --profile
|
|
127
|
+
codex --profile m27
|
|
128
128
|
```
|
|
129
129
|
|
|
130
130
|
Then run DeepScientist through the same profile:
|
|
131
131
|
|
|
132
132
|
```bash
|
|
133
|
-
ds doctor --codex-profile
|
|
133
|
+
ds doctor --codex-profile m27
|
|
134
134
|
```
|
|
135
135
|
|
|
136
136
|
and later:
|
|
137
137
|
|
|
138
138
|
```bash
|
|
139
|
-
ds --codex-profile
|
|
139
|
+
ds --codex-profile m27
|
|
140
140
|
```
|
|
141
141
|
|
|
142
|
-
|
|
142
|
+
If you need one specific Codex binary for this run, add `--codex` too:
|
|
143
|
+
|
|
144
|
+
```bash
|
|
145
|
+
ds doctor --codex /absolute/path/to/codex --codex-profile m27
|
|
146
|
+
ds --codex /absolute/path/to/codex --codex-profile m27
|
|
147
|
+
```
|
|
148
|
+
|
|
149
|
+
`m27` is the MiniMax profile name used consistently in this repo. MiniMax's own page currently uses `m21`, but the profile name is only a local alias; if you created a different name, use that same name in all commands.
|
|
143
150
|
|
|
144
151
|
DeepScientist blocks startup until Codex can pass a real hello probe. By default, the runner model in `~/DeepScientist/config/runners.yaml` is `gpt-5.4`. If your profile expects the model to come from the profile itself, use `model: inherit` in `runners.yaml`, or simply launch with `--codex-profile <name>` and let that session inherit the profile-defined model.
|
|
145
152
|
|
|
153
|
+
MiniMax note:
|
|
154
|
+
|
|
155
|
+
- if the current `@openai/codex` latest does not work with MiniMax, install `npm install -g @openai/codex@0.57.0`
|
|
156
|
+
- create a MiniMax `Coding Plan Key` first
|
|
157
|
+
- clear `OPENAI_API_KEY` and `OPENAI_BASE_URL` in the current shell before exporting `MINIMAX_API_KEY`
|
|
158
|
+
- use `https://api.minimaxi.com/v1`
|
|
159
|
+
- the `codex-MiniMax-*` model names shown on MiniMax's current Codex CLI page did not pass reliably through Codex CLI in local testing with the provided key
|
|
160
|
+
- the locally verified working model name is `MiniMax-M2.7`
|
|
161
|
+
- DeepScientist can auto-adapt MiniMax's profile-only `model_provider` / `model` config shape during probe and runtime
|
|
162
|
+
- if you also want plain terminal `codex --profile <name>` to work directly, add `model_provider = "minimax"` and `model = "MiniMax-M2.7"` at the top level of `~/.codex/config.toml`
|
|
163
|
+
- DeepScientist automatically downgrades `xhigh` to `high` when it detects an older Codex CLI that does not support `xhigh`
|
|
164
|
+
|
|
146
165
|
## 3. Start the Local Runtime
|
|
147
166
|
|
|
148
167
|
Run:
|
|
@@ -430,6 +430,8 @@ claude:
|
|
|
430
430
|
- UI label: `Binary`
|
|
431
431
|
- Meaning: command name or absolute path used to launch the runner.
|
|
432
432
|
- `Test` behavior: checks whether the binary is on `PATH`.
|
|
433
|
+
- Resolution order for `codex`: env override, explicit path, local `PATH`, then bundled fallback.
|
|
434
|
+
- One-off note: you can temporarily override this with `ds --codex /absolute/path/to/codex`.
|
|
433
435
|
- First-run note: DeepScientist does not finish Codex authentication for you. Before the first `ds`, make sure `codex --login` (or `codex`) has completed successfully.
|
|
434
436
|
- Repair note: if the bundled dependency is missing after `npm install -g @researai/deepscientist`, install Codex explicitly with `npm install -g @openai/codex`.
|
|
435
437
|
|
|
@@ -448,6 +450,7 @@ claude:
|
|
|
448
450
|
- Meaning: optional Codex profile name passed through as `codex --profile <name>`.
|
|
449
451
|
- Use this when your Codex CLI is already configured for a provider-backed setup such as MiniMax, GLM, Volcengine Ark, or Alibaba Bailian.
|
|
450
452
|
- One-off note: you can also leave this field empty and launch with `ds --codex-profile <name>`.
|
|
453
|
+
- Combined note: one-off profile and binary overrides can be combined as `ds --codex /absolute/path/to/codex --codex-profile <name>`.
|
|
451
454
|
|
|
452
455
|
**`model`**
|
|
453
456
|
|
|
@@ -465,6 +468,7 @@ claude:
|
|
|
465
468
|
- UI label: `Reasoning effort`
|
|
466
469
|
- Allowed values: `""`, `minimal`, `low`, `medium`, `high`, `xhigh`
|
|
467
470
|
- Meaning: default reasoning intensity.
|
|
471
|
+
- Compatibility note: when DeepScientist detects a Codex CLI older than `0.63.0`, it automatically downgrades `xhigh` to `high` for the startup probe and runner command. This covers MiniMax's currently recommended `@openai/codex@0.57.0` path.
|
|
468
472
|
|
|
469
473
|
**`approval_policy`**
|
|
470
474
|
|
package/docs/en/09_DOCTOR.md
CHANGED
|
@@ -21,7 +21,7 @@ Use `ds doctor` when DeepScientist does not start cleanly after installation.
|
|
|
21
21
|
Provider-backed profile path:
|
|
22
22
|
|
|
23
23
|
```bash
|
|
24
|
-
codex --profile
|
|
24
|
+
codex --profile m27
|
|
25
25
|
```
|
|
26
26
|
|
|
27
27
|
If `codex` is missing, repair it explicitly with:
|
|
@@ -63,7 +63,7 @@ Use `ds doctor` when DeepScientist does not start cleanly after installation.
|
|
|
63
63
|
|
|
64
64
|
### Codex is missing
|
|
65
65
|
|
|
66
|
-
|
|
66
|
+
DeepScientist prefers the `codex` already available on your machine and only uses the bundled dependency as fallback. If neither is present, run the package install again so the bundled Codex dependency is present:
|
|
67
67
|
|
|
68
68
|
```bash
|
|
69
69
|
npm install -g @researai/deepscientist
|
|
@@ -92,11 +92,18 @@ Finish login once, then rerun `ds doctor`.
|
|
|
92
92
|
Run DeepScientist with the same profile explicitly:
|
|
93
93
|
|
|
94
94
|
```bash
|
|
95
|
-
ds doctor --codex-profile
|
|
96
|
-
ds --codex-profile
|
|
95
|
+
ds doctor --codex-profile m27
|
|
96
|
+
ds --codex-profile m27
|
|
97
97
|
```
|
|
98
98
|
|
|
99
|
-
|
|
99
|
+
If your working Codex CLI is not the one on `PATH`, point DeepScientist at it explicitly:
|
|
100
|
+
|
|
101
|
+
```bash
|
|
102
|
+
ds doctor --codex /absolute/path/to/codex --codex-profile m27
|
|
103
|
+
ds --codex /absolute/path/to/codex --codex-profile m27
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
`m27` is the MiniMax profile name used consistently in this repo. MiniMax's own page currently uses `m21`, but the profile name is only a local alias; if you created a different name, use that same name in both commands.
|
|
100
107
|
|
|
101
108
|
Also check:
|
|
102
109
|
|
|
@@ -104,6 +111,18 @@ Also check:
|
|
|
104
111
|
- the profile points at the provider's Coding Plan endpoint, not the generic API endpoint
|
|
105
112
|
- `~/DeepScientist/config/runners.yaml` uses `model: inherit` if the provider expects the model to come from the profile itself
|
|
106
113
|
|
|
114
|
+
MiniMax-specific note:
|
|
115
|
+
|
|
116
|
+
- if MiniMax fails on the current `@openai/codex` latest, install `npm install -g @openai/codex@0.57.0`
|
|
117
|
+
- create a MiniMax `Coding Plan Key` first
|
|
118
|
+
- clear `OPENAI_API_KEY` and `OPENAI_BASE_URL` in the current shell before exporting `MINIMAX_API_KEY`
|
|
119
|
+
- use `https://api.minimaxi.com/v1`
|
|
120
|
+
- the `codex-MiniMax-*` model names shown on MiniMax's current Codex CLI page did not pass reliably through Codex CLI in local testing with the provided key
|
|
121
|
+
- the locally verified working model name is `MiniMax-M2.7`
|
|
122
|
+
- DeepScientist can auto-adapt MiniMax's profile-only `model_provider` / `model` config shape during probe and runtime
|
|
123
|
+
- if you also want plain terminal `codex --profile <name>` to work directly, put `model_provider = "minimax"` and `model = "MiniMax-M2.7"` at the top level of `~/.codex/config.toml`
|
|
124
|
+
- DeepScientist automatically downgrades `xhigh` to `high` when it detects a Codex CLI older than `0.63.0`
|
|
125
|
+
|
|
107
126
|
### The configured Codex model is unavailable
|
|
108
127
|
|
|
109
128
|
DeepScientist blocks startup until Codex passes a real startup hello probe. In the current release, that probe first uses the runner model configured in:
|
|
@@ -25,12 +25,19 @@ ds
|
|
|
25
25
|
|
|
26
26
|
### 2. One-off provider profile
|
|
27
27
|
|
|
28
|
-
Use this when you already have a named Codex profile such as `
|
|
28
|
+
Use this when you already have a named Codex profile such as `m27`, `glm`, `ark`, or `bailian`.
|
|
29
29
|
|
|
30
30
|
```bash
|
|
31
|
-
codex --profile
|
|
32
|
-
ds doctor --codex-profile
|
|
33
|
-
ds --codex-profile
|
|
31
|
+
codex --profile m27
|
|
32
|
+
ds doctor --codex-profile m27
|
|
33
|
+
ds --codex-profile m27
|
|
34
|
+
```
|
|
35
|
+
|
|
36
|
+
If you need one specific Codex binary for this run, use:
|
|
37
|
+
|
|
38
|
+
```bash
|
|
39
|
+
ds doctor --codex /absolute/path/to/codex --codex-profile m27
|
|
40
|
+
ds --codex /absolute/path/to/codex --codex-profile m27
|
|
34
41
|
```
|
|
35
42
|
|
|
36
43
|
This is the simplest path. You do not need to edit `runners.yaml` just to try one provider-backed session.
|
|
@@ -61,7 +68,7 @@ Important:
|
|
|
61
68
|
| Provider | Official docs | Codex login needed | What DeepScientist should use |
|
|
62
69
|
|---|---|---|---|
|
|
63
70
|
| OpenAI | use the normal Codex setup | Yes | no profile; run `ds` |
|
|
64
|
-
| MiniMax | [MiniMax Codex CLI](https://platform.minimaxi.com/docs/coding-plan/codex-cli) | No | your Codex profile, for example `ds --codex-profile
|
|
71
|
+
| MiniMax | [MiniMax Codex CLI](https://platform.minimaxi.com/docs/coding-plan/codex-cli) | No | your Codex profile, for example `ds --codex-profile m27` |
|
|
65
72
|
| GLM | [GLM Coding Plan: Other Tools](https://docs.bigmodel.cn/cn/coding-plan/tool/others) | No | a Codex profile that targets the GLM coding endpoint |
|
|
66
73
|
| Volcengine Ark | [Ark Coding Plan Overview](https://www.volcengine.com/docs/82379/1925114?lang=zh) | No | a Codex profile that targets the Ark coding endpoint |
|
|
67
74
|
| Alibaba Bailian | [Bailian Coding Plan: Other Tools](https://help.aliyun.com/zh/model-studio/other-tools-coding-plan) | No | a Codex profile that targets the Bailian coding endpoint |
|
|
@@ -99,15 +106,65 @@ Official doc:
|
|
|
99
106
|
|
|
100
107
|
- <https://platform.minimaxi.com/docs/coding-plan/codex-cli>
|
|
101
108
|
|
|
109
|
+
### Verified compatibility note
|
|
110
|
+
|
|
111
|
+
Checked against MiniMax's current Codex CLI doc and local compatibility validation on 2026-03-25:
|
|
112
|
+
|
|
113
|
+
- MiniMax's Codex CLI page currently recommends `@openai/codex@0.57.0`
|
|
114
|
+
- the Coding Plan endpoint to use is `https://api.minimaxi.com/v1`
|
|
115
|
+
- MiniMax's official page uses `m21` as the profile name, but that profile name is only a local alias; this repo uses `m27` consistently in examples
|
|
116
|
+
- the `codex-MiniMax-*` model names shown on MiniMax's page did not pass reliably through Codex CLI in local testing with the provided key
|
|
117
|
+
- the locally verified working path was `MiniMax-M2.7` + `m27` + `model: inherit` + Codex CLI `0.57.0`
|
|
118
|
+
- the current `@openai/codex` latest release still does not line up cleanly with MiniMax's current guide
|
|
119
|
+
|
|
120
|
+
If you want the most reproducible DeepScientist + MiniMax path today, use Codex CLI `0.57.0`.
|
|
121
|
+
|
|
102
122
|
### What to prepare
|
|
103
123
|
|
|
104
|
-
- Codex CLI
|
|
124
|
+
- Codex CLI `0.57.0`
|
|
125
|
+
- a MiniMax `Coding Plan Key`
|
|
105
126
|
- `MINIMAX_API_KEY` available in the shell that starts Codex and DeepScientist
|
|
127
|
+
- the current shell cleared of `OPENAI_API_KEY` and `OPENAI_BASE_URL`
|
|
106
128
|
- a working Codex profile in `~/.codex/config.toml`
|
|
107
129
|
|
|
130
|
+
### Install Codex CLI `0.57.0`
|
|
131
|
+
|
|
132
|
+
The simplest path is to pin the global Codex install:
|
|
133
|
+
|
|
134
|
+
```bash
|
|
135
|
+
npm install -g @openai/codex@0.57.0
|
|
136
|
+
codex --version
|
|
137
|
+
```
|
|
138
|
+
|
|
139
|
+
Expected output:
|
|
140
|
+
|
|
141
|
+
```text
|
|
142
|
+
codex-cli 0.57.0
|
|
143
|
+
```
|
|
144
|
+
|
|
145
|
+
If you want to keep another Codex version elsewhere, create a small wrapper script and point `runners.codex.binary` at that absolute path.
|
|
146
|
+
|
|
108
147
|
### Codex-side setup
|
|
109
148
|
|
|
110
|
-
|
|
149
|
+
Use `https://api.minimaxi.com/v1`, not `https://api.minimax.io/v1`.
|
|
150
|
+
|
|
151
|
+
MiniMax's doc requires clearing the OpenAI environment variables first:
|
|
152
|
+
|
|
153
|
+
```bash
|
|
154
|
+
unset OPENAI_API_KEY
|
|
155
|
+
unset OPENAI_BASE_URL
|
|
156
|
+
export MINIMAX_API_KEY="..."
|
|
157
|
+
```
|
|
158
|
+
|
|
159
|
+
MiniMax's official page uses `m21` as the example profile name. Since the profile name is only a local alias, this repo rewrites that example to `m27`.
|
|
160
|
+
|
|
161
|
+
The important difference is the model name:
|
|
162
|
+
|
|
163
|
+
- MiniMax's page currently shows `codex-MiniMax-M2.5`
|
|
164
|
+
- in local testing, direct MiniMax API calls worked with `MiniMax-M2.7`
|
|
165
|
+
- with the same key, `codex-MiniMax-M2.5` and `codex-MiniMax-M2.7` both failed through Codex CLI
|
|
166
|
+
|
|
167
|
+
So the config below is the currently recommended DeepScientist working configuration:
|
|
111
168
|
|
|
112
169
|
```toml
|
|
113
170
|
[model_providers.minimax]
|
|
@@ -120,23 +177,50 @@ request_max_retries = 4
|
|
|
120
177
|
stream_max_retries = 10
|
|
121
178
|
stream_idle_timeout_ms = 300000
|
|
122
179
|
|
|
123
|
-
[profiles.
|
|
124
|
-
model = "
|
|
180
|
+
[profiles.m27]
|
|
181
|
+
model = "MiniMax-M2.7"
|
|
182
|
+
model_provider = "minimax"
|
|
183
|
+
```
|
|
184
|
+
|
|
185
|
+
What DeepScientist supports now:
|
|
186
|
+
|
|
187
|
+
- if you use this profile-only MiniMax config with Codex CLI `0.57.0`, DeepScientist automatically promotes the selected profile's `model_provider` and `model` to the top level inside its probe/runtime copy of `.codex/config.toml`
|
|
188
|
+
- this means DeepScientist can start even when plain terminal `codex --profile m27` still fails on that exact profile-only shape
|
|
189
|
+
|
|
190
|
+
If you want plain terminal `codex --profile <name>` to work too, use the explicit top-level compatibility form instead:
|
|
191
|
+
|
|
192
|
+
```toml
|
|
193
|
+
model = "MiniMax-M2.7"
|
|
194
|
+
model_provider = "minimax"
|
|
195
|
+
approval_policy = "never"
|
|
196
|
+
sandbox_mode = "workspace-write"
|
|
197
|
+
|
|
198
|
+
[model_providers.minimax]
|
|
199
|
+
name = "MiniMax Chat Completions API"
|
|
200
|
+
base_url = "https://api.minimaxi.com/v1"
|
|
201
|
+
env_key = "MINIMAX_API_KEY"
|
|
202
|
+
wire_api = "chat"
|
|
203
|
+
requires_openai_auth = false
|
|
204
|
+
request_max_retries = 4
|
|
205
|
+
stream_max_retries = 10
|
|
206
|
+
stream_idle_timeout_ms = 300000
|
|
207
|
+
|
|
208
|
+
[profiles.m27]
|
|
209
|
+
model = "MiniMax-M2.7"
|
|
125
210
|
model_provider = "minimax"
|
|
126
211
|
```
|
|
127
212
|
|
|
128
213
|
Then:
|
|
129
214
|
|
|
130
215
|
```bash
|
|
131
|
-
|
|
132
|
-
codex --profile minimax
|
|
216
|
+
codex --profile m27
|
|
133
217
|
```
|
|
134
218
|
|
|
135
219
|
### DeepScientist commands
|
|
136
220
|
|
|
137
221
|
```bash
|
|
138
|
-
ds doctor --codex-profile
|
|
139
|
-
ds --codex-profile
|
|
222
|
+
ds doctor --codex-profile m27
|
|
223
|
+
ds --codex-profile m27
|
|
140
224
|
```
|
|
141
225
|
|
|
142
226
|
### Persistent runner config
|
|
@@ -144,12 +228,26 @@ ds --codex-profile minimax
|
|
|
144
228
|
```yaml
|
|
145
229
|
codex:
|
|
146
230
|
enabled: true
|
|
147
|
-
binary:
|
|
231
|
+
binary: /tmp/codex057-wrapper
|
|
148
232
|
config_dir: ~/.codex
|
|
149
|
-
profile:
|
|
233
|
+
profile: m27
|
|
150
234
|
model: inherit
|
|
235
|
+
model_reasoning_effort: high
|
|
151
236
|
```
|
|
152
237
|
|
|
238
|
+
If you already pinned your global `codex` binary to `0.57.0`, you can set `binary: codex` instead. The absolute wrapper path here is only to make the version choice explicit.
|
|
239
|
+
|
|
240
|
+
If you do not want to persist that path in `runners.yaml`, you can keep `binary: codex` there and launch ad hoc with:
|
|
241
|
+
|
|
242
|
+
```bash
|
|
243
|
+
ds --codex /absolute/path/to/codex --codex-profile m27
|
|
244
|
+
```
|
|
245
|
+
|
|
246
|
+
DeepScientist now does two MiniMax-specific compatibility steps for the `0.57.0` path:
|
|
247
|
+
|
|
248
|
+
- it downgrades `xhigh` to `high` automatically when the Codex CLI does not support `xhigh`
|
|
249
|
+
- it auto-adapts MiniMax's profile-only `model_provider` / `model` shape inside the temporary DeepScientist Codex home when needed
|
|
250
|
+
|
|
153
251
|
## GLM
|
|
154
252
|
|
|
155
253
|
GLM documents the Coding Plan as an OpenAI-compatible coding endpoint rather than a dedicated Codex login flow.
|
|
@@ -79,7 +79,7 @@ npm install -g @researai/deepscientist
|
|
|
79
79
|
|
|
80
80
|
这一步会把 `ds` 命令安装到你的机器上。
|
|
81
81
|
|
|
82
|
-
DeepScientist 依赖一个可用的 Codex CLI
|
|
82
|
+
DeepScientist 依赖一个可用的 Codex CLI。它会优先使用你机器上已经可用的 `codex`,只有在本机找不到时才回退到 npm 包内置的依赖。如果安装完成后 `codex` 仍然不可用,请显式修复:
|
|
83
83
|
|
|
84
84
|
```bash
|
|
85
85
|
npm install -g @openai/codex
|
|
@@ -124,25 +124,44 @@ ds doctor
|
|
|
124
124
|
如果你已经在 MiniMax、GLM、火山方舟、阿里百炼或其他 provider 上配置了一个命名的 Codex profile,请先在终端里确认这个 profile 本身可用:
|
|
125
125
|
|
|
126
126
|
```bash
|
|
127
|
-
codex --profile
|
|
127
|
+
codex --profile m27
|
|
128
128
|
```
|
|
129
129
|
|
|
130
130
|
然后用同一个 profile 去跑 DeepScientist:
|
|
131
131
|
|
|
132
132
|
```bash
|
|
133
|
-
ds doctor --codex-profile
|
|
133
|
+
ds doctor --codex-profile m27
|
|
134
134
|
```
|
|
135
135
|
|
|
136
136
|
之后启动:
|
|
137
137
|
|
|
138
138
|
```bash
|
|
139
|
-
ds --codex-profile
|
|
139
|
+
ds --codex-profile m27
|
|
140
140
|
```
|
|
141
141
|
|
|
142
|
-
|
|
142
|
+
如果你这一轮还想强制指定某一个 Codex 可执行文件,也可以一起加上 `--codex`:
|
|
143
|
+
|
|
144
|
+
```bash
|
|
145
|
+
ds doctor --codex /absolute/path/to/codex --codex-profile m27
|
|
146
|
+
ds --codex /absolute/path/to/codex --codex-profile m27
|
|
147
|
+
```
|
|
148
|
+
|
|
149
|
+
这里的 `m27` 是本仓库统一使用的 MiniMax profile 示例名。MiniMax 官方页面当前示例名是 `m21`,但 profile 名只是本地别名;如果你自己用了别的名字,就把命令里的名字一起改掉。
|
|
143
150
|
|
|
144
151
|
DeepScientist 会在启动前强制做一次真实的 Codex hello 探测。默认情况下,`~/DeepScientist/config/runners.yaml` 里的 runner 模型还是 `gpt-5.4`。如果你的 profile 希望模型由 profile 自己决定,请把 `runners.yaml` 里的 `model` 改成 `inherit`;或者直接使用 `--codex-profile <name>`,让这一轮启动自动继承 profile 对应的模型。
|
|
145
152
|
|
|
153
|
+
MiniMax 额外说明:
|
|
154
|
+
|
|
155
|
+
- 如果当前最新版 `@openai/codex` 和 MiniMax 走不通,直接安装 `npm install -g @openai/codex@0.57.0`
|
|
156
|
+
- 先创建 MiniMax `Coding Plan Key`
|
|
157
|
+
- 在当前 shell 里先执行 `unset OPENAI_API_KEY` 和 `unset OPENAI_BASE_URL`
|
|
158
|
+
- 使用 `https://api.minimaxi.com/v1`
|
|
159
|
+
- MiniMax 官方 Codex CLI 页面当前给出的 `codex-MiniMax-*` 模型名,在本地用提供的 key 实测并不能稳定通过 Codex CLI
|
|
160
|
+
- 当前本地实测可用的模型名是 `MiniMax-M2.7`
|
|
161
|
+
- DeepScientist 现在可以在 probe 和运行时自动适配 MiniMax profile-only 的 `model_provider` / `model` 配置形态
|
|
162
|
+
- 如果你还希望终端里的 `codex --profile <name>` 也直接可用,再在 `~/.codex/config.toml` 顶层补上 `model_provider = "minimax"` 和 `model = "MiniMax-M2.7"`
|
|
163
|
+
- 当 DeepScientist 检测到旧版 Codex CLI 不支持 `xhigh` 时,会自动把它降级成 `high`
|
|
164
|
+
|
|
146
165
|
## 3. 启动本地运行时
|
|
147
166
|
|
|
148
167
|
运行:
|
|
@@ -444,6 +444,8 @@ claude:
|
|
|
444
444
|
- 页面标签:`Binary`
|
|
445
445
|
- 作用:启动 runner 时使用的命令名或绝对路径。
|
|
446
446
|
- `Test` 行为:检查该二进制是否在 `PATH` 上。
|
|
447
|
+
- `codex` 的解析顺序:环境变量覆盖、显式路径、本机 `PATH`、最后才是 bundled fallback。
|
|
448
|
+
- 临时使用说明:你也可以直接用 `ds --codex /absolute/path/to/codex` 临时覆盖这里的设置。
|
|
447
449
|
- 首次使用说明:DeepScientist 不会替你完成 Codex 认证。第一次运行 `ds` 前,必须先确保 `codex --login`(或 `codex`)已经成功完成。
|
|
448
450
|
- 修复说明:如果执行 `npm install -g @researai/deepscientist` 之后 bundled Codex 依赖仍然缺失,请显式安装 `npm install -g @openai/codex`。
|
|
449
451
|
|
|
@@ -462,6 +464,7 @@ claude:
|
|
|
462
464
|
- 作用:可选的 Codex profile 名称,会直接透传为 `codex --profile <name>`。
|
|
463
465
|
- 当你的 Codex CLI 已经配置成 MiniMax、GLM、火山方舟、阿里百炼或其他 provider-backed 路径时,就在这里填写。
|
|
464
466
|
- 临时使用说明:如果你不想持久化写配置,也可以保持这里为空,直接使用 `ds --codex-profile <name>` 启动。
|
|
467
|
+
- 组合使用说明:如果你还想临时指定 Codex 可执行文件,也可以组合成 `ds --codex /absolute/path/to/codex --codex-profile <name>`。
|
|
465
468
|
|
|
466
469
|
**`model`**
|
|
467
470
|
|
|
@@ -480,6 +483,7 @@ claude:
|
|
|
480
483
|
- 允许值:`""`、`minimal`、`low`、`medium`、`high`、`xhigh`
|
|
481
484
|
- 作用:默认推理强度。
|
|
482
485
|
- 推荐:当前仓库的 Codex 默认就是 `xhigh`。
|
|
486
|
+
- 兼容性说明:当 DeepScientist 检测到 Codex CLI 低于 `0.63.0` 时,会在启动探测和实际 runner 命令里自动把 `xhigh` 降级成 `high`。这也覆盖了 MiniMax 当前推荐的 `@openai/codex@0.57.0` 路径。
|
|
483
487
|
|
|
484
488
|
**`approval_policy`**
|
|
485
489
|
|
package/docs/zh/09_DOCTOR.md
CHANGED
|
@@ -21,7 +21,7 @@
|
|
|
21
21
|
provider-backed profile 路径:
|
|
22
22
|
|
|
23
23
|
```bash
|
|
24
|
-
codex --profile
|
|
24
|
+
codex --profile m27
|
|
25
25
|
```
|
|
26
26
|
|
|
27
27
|
如果 `codex` 缺失,请显式修复:
|
|
@@ -65,7 +65,7 @@
|
|
|
65
65
|
|
|
66
66
|
### 没有安装 Codex
|
|
67
67
|
|
|
68
|
-
|
|
68
|
+
DeepScientist 会优先使用你机器上已有的 `codex`,只有本机不可用时才回退到随包依赖。如果两者都不可用,就重新安装 DeepScientist,让随包的 Codex 依赖一起装好:
|
|
69
69
|
|
|
70
70
|
```bash
|
|
71
71
|
npm install -g @researai/deepscientist
|
|
@@ -94,11 +94,18 @@ codex --login
|
|
|
94
94
|
请显式让 DeepScientist 使用同一个 profile:
|
|
95
95
|
|
|
96
96
|
```bash
|
|
97
|
-
ds doctor --codex-profile
|
|
98
|
-
ds --codex-profile
|
|
97
|
+
ds doctor --codex-profile m27
|
|
98
|
+
ds --codex-profile m27
|
|
99
99
|
```
|
|
100
100
|
|
|
101
|
-
|
|
101
|
+
如果你当前能用的是另一个不在 `PATH` 上的 Codex,可执行文件路径也可以一起显式传给 DeepScientist:
|
|
102
|
+
|
|
103
|
+
```bash
|
|
104
|
+
ds doctor --codex /absolute/path/to/codex --codex-profile m27
|
|
105
|
+
ds --codex /absolute/path/to/codex --codex-profile m27
|
|
106
|
+
```
|
|
107
|
+
|
|
108
|
+
这里的 `m27` 是本仓库统一使用的 MiniMax profile 示例名。MiniMax 官方页面当前示例名是 `m21`,但 profile 名只是本地别名;如果你自己用了别的名字,就把命令里的名字一起改掉。
|
|
102
109
|
|
|
103
110
|
同时检查:
|
|
104
111
|
|
|
@@ -106,6 +113,18 @@ ds --codex-profile minimax
|
|
|
106
113
|
- 该 profile 指向的是 provider 的 Coding Plan endpoint,而不是普通通用 API endpoint
|
|
107
114
|
- 如果模型应该由 profile 自己决定,请在 `~/DeepScientist/config/runners.yaml` 中使用 `model: inherit`
|
|
108
115
|
|
|
116
|
+
MiniMax 补充说明:
|
|
117
|
+
|
|
118
|
+
- 如果 MiniMax 在当前最新版 `@openai/codex` 上失败,直接安装 `npm install -g @openai/codex@0.57.0`
|
|
119
|
+
- 先创建 MiniMax `Coding Plan Key`
|
|
120
|
+
- 在当前 shell 里先执行 `unset OPENAI_API_KEY` 和 `unset OPENAI_BASE_URL`
|
|
121
|
+
- 使用 `https://api.minimaxi.com/v1`
|
|
122
|
+
- MiniMax 官方 Codex CLI 页面当前给出的 `codex-MiniMax-*` 模型名,在本地用提供的 key 实测并不能稳定通过 Codex CLI
|
|
123
|
+
- 当前本地实测可用的模型名是 `MiniMax-M2.7`
|
|
124
|
+
- DeepScientist 现在可以在 probe 和运行时自动适配 MiniMax profile-only 的 `model_provider` / `model` 配置形态
|
|
125
|
+
- 如果你还希望终端里的 `codex --profile <name>` 也直接可用,再在 `~/.codex/config.toml` 顶层补上 `model_provider = "minimax"` 和 `model = "MiniMax-M2.7"`
|
|
126
|
+
- 当 DeepScientist 检测到 Codex CLI 版本低于 `0.63.0` 时,会自动把 `xhigh` 降级成 `high`
|
|
127
|
+
|
|
109
128
|
### 当前配置的 Codex 模型不可用
|
|
110
129
|
|
|
111
130
|
DeepScientist 会在启动前强制做一次真实的 Codex hello 探测。当前版本里,这个探测会先使用:
|
|
@@ -25,12 +25,19 @@ ds
|
|
|
25
25
|
|
|
26
26
|
### 2. 临时使用 provider profile
|
|
27
27
|
|
|
28
|
-
如果你已经有一个可用的 Codex profile,例如 `
|
|
28
|
+
如果你已经有一个可用的 Codex profile,例如 `m27`、`glm`、`ark`、`bailian`,最简单的方式就是直接在启动 `ds` 时透传它。
|
|
29
29
|
|
|
30
30
|
```bash
|
|
31
|
-
codex --profile
|
|
32
|
-
ds doctor --codex-profile
|
|
33
|
-
ds --codex-profile
|
|
31
|
+
codex --profile m27
|
|
32
|
+
ds doctor --codex-profile m27
|
|
33
|
+
ds --codex-profile m27
|
|
34
|
+
```
|
|
35
|
+
|
|
36
|
+
如果你这一轮要强制指定某一个 Codex 可执行文件,也可以这样:
|
|
37
|
+
|
|
38
|
+
```bash
|
|
39
|
+
ds doctor --codex /absolute/path/to/codex --codex-profile m27
|
|
40
|
+
ds --codex /absolute/path/to/codex --codex-profile m27
|
|
34
41
|
```
|
|
35
42
|
|
|
36
43
|
这是最简单的路径。只是临时试用某个 provider 时,不需要先改 `runners.yaml`。
|
|
@@ -62,7 +69,7 @@ codex:
|
|
|
62
69
|
| Provider | 官方文档 | 是否需要 Codex 登录 | DeepScientist 应该怎么用 |
|
|
63
70
|
|---|---|---|---|
|
|
64
71
|
| OpenAI | 正常 Codex 配置即可 | 是 | 不需要 profile,直接 `ds` |
|
|
65
|
-
| MiniMax | [MiniMax Codex CLI](https://platform.minimaxi.com/docs/coding-plan/codex-cli) | 否 | 使用你自己的 Codex profile,例如 `ds --codex-profile
|
|
72
|
+
| MiniMax | [MiniMax Codex CLI](https://platform.minimaxi.com/docs/coding-plan/codex-cli) | 否 | 使用你自己的 Codex profile,例如 `ds --codex-profile m27` |
|
|
66
73
|
| GLM | [GLM Coding Plan:其他工具](https://docs.bigmodel.cn/cn/coding-plan/tool/others) | 否 | 使用一个指向 GLM coding endpoint 的 Codex profile |
|
|
67
74
|
| 火山方舟 | [Ark Coding Plan 总览](https://www.volcengine.com/docs/82379/1925114?lang=zh) | 否 | 使用一个指向 Ark coding endpoint 的 Codex profile |
|
|
68
75
|
| 阿里百炼 | [百炼 Coding Plan:其他工具](https://help.aliyun.com/zh/model-studio/other-tools-coding-plan) | 否 | 使用一个指向 Bailian coding endpoint 的 Codex profile |
|
|
@@ -100,15 +107,65 @@ MiniMax 是最典型的 profile 模式。它的官方 Codex CLI 文档直接给
|
|
|
100
107
|
|
|
101
108
|
- <https://platform.minimaxi.com/docs/coding-plan/codex-cli>
|
|
102
109
|
|
|
110
|
+
### 已验证的兼容性说明
|
|
111
|
+
|
|
112
|
+
按 2026-03-25 对 MiniMax 官方 Codex CLI 页面和本地兼容性测试的核对结果:
|
|
113
|
+
|
|
114
|
+
- MiniMax 官方 Codex CLI 页面当前建议使用 `@openai/codex@0.57.0`
|
|
115
|
+
- MiniMax 当前应使用的 Coding Plan endpoint 是 `https://api.minimaxi.com/v1`
|
|
116
|
+
- MiniMax 官方页面示例 profile 名是 `m21`,但 profile 名本身只是本地别名;本仓库统一用 `m27` 作为示例名
|
|
117
|
+
- MiniMax 官方页面当前给出的 `codex-MiniMax-*` 模型名,在本地使用你提供的 key 实测并不能稳定通过 Codex CLI
|
|
118
|
+
- 本地实测能稳定跑通的组合是 `MiniMax-M2.7` + `m27` + `model: inherit` + Codex CLI `0.57.0`
|
|
119
|
+
- 当前最新版 `@openai/codex` 和 MiniMax 官方文档并不能稳定直接对齐
|
|
120
|
+
|
|
121
|
+
如果你现在要走最稳的 DeepScientist + MiniMax 路径,建议直接使用 Codex CLI `0.57.0`。
|
|
122
|
+
|
|
103
123
|
### 需要准备什么
|
|
104
124
|
|
|
105
|
-
- 已安装 Codex CLI
|
|
125
|
+
- 已安装 Codex CLI `0.57.0`
|
|
126
|
+
- 已创建 MiniMax `Coding Plan Key`
|
|
106
127
|
- 在启动 Codex 和 DeepScientist 的 shell 中可见的 `MINIMAX_API_KEY`
|
|
128
|
+
- 当前 shell 已清理 `OPENAI_API_KEY` 和 `OPENAI_BASE_URL`
|
|
107
129
|
- `~/.codex/config.toml` 中已经配置好的 Codex profile
|
|
108
130
|
|
|
131
|
+
### 安装 Codex CLI `0.57.0`
|
|
132
|
+
|
|
133
|
+
最直接的方式是把全局 Codex 安装固定到 `0.57.0`:
|
|
134
|
+
|
|
135
|
+
```bash
|
|
136
|
+
npm install -g @openai/codex@0.57.0
|
|
137
|
+
codex --version
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
预期输出:
|
|
141
|
+
|
|
142
|
+
```text
|
|
143
|
+
codex-cli 0.57.0
|
|
144
|
+
```
|
|
145
|
+
|
|
146
|
+
如果你还想保留另一个 Codex 版本,也可以单独写一个 wrapper 脚本,再把 `runners.codex.binary` 指向那个绝对路径。
|
|
147
|
+
|
|
109
148
|
### Codex 侧配置
|
|
110
149
|
|
|
111
|
-
|
|
150
|
+
请使用 `https://api.minimaxi.com/v1`,不要用 `https://api.minimax.io/v1`。
|
|
151
|
+
|
|
152
|
+
MiniMax 官方文档要求在配置前先清理 OpenAI 环境变量:
|
|
153
|
+
|
|
154
|
+
```bash
|
|
155
|
+
unset OPENAI_API_KEY
|
|
156
|
+
unset OPENAI_BASE_URL
|
|
157
|
+
export MINIMAX_API_KEY="..."
|
|
158
|
+
```
|
|
159
|
+
|
|
160
|
+
MiniMax 官方页面示例 profile 名是 `m21`。由于 profile 名只是本地别名,本仓库统一改写成 `m27`。
|
|
161
|
+
|
|
162
|
+
先说明差异:
|
|
163
|
+
|
|
164
|
+
- 官方页面当前展示的是 `codex-MiniMax-M2.5`
|
|
165
|
+
- 但本地实测里,直接请求 MiniMax API 能稳定跑通的是 `MiniMax-M2.7`
|
|
166
|
+
- 同一把 key 下,`codex-MiniMax-M2.5` / `codex-MiniMax-M2.7` 通过 Codex CLI 都会失败
|
|
167
|
+
|
|
168
|
+
因此,下面给的是当前 DeepScientist 推荐的可运行配置:
|
|
112
169
|
|
|
113
170
|
```toml
|
|
114
171
|
[model_providers.minimax]
|
|
@@ -121,23 +178,50 @@ request_max_retries = 4
|
|
|
121
178
|
stream_max_retries = 10
|
|
122
179
|
stream_idle_timeout_ms = 300000
|
|
123
180
|
|
|
124
|
-
[profiles.
|
|
125
|
-
model = "
|
|
181
|
+
[profiles.m27]
|
|
182
|
+
model = "MiniMax-M2.7"
|
|
183
|
+
model_provider = "minimax"
|
|
184
|
+
```
|
|
185
|
+
|
|
186
|
+
DeepScientist 现在对它的支持方式是:
|
|
187
|
+
|
|
188
|
+
- 如果你使用的是这类 profile-only MiniMax 配置,再配合 Codex CLI `0.57.0`,DeepScientist 会在自己的 probe / 运行时临时 `.codex/config.toml` 里,把所选 profile 的 `model_provider` 和 `model` 自动提升到顶层
|
|
189
|
+
- 这意味着即使终端里原样执行 `codex --profile m27` 还会失败,DeepScientist 也可以先兼容跑起来
|
|
190
|
+
|
|
191
|
+
如果你还希望终端里的 `codex --profile <name>` 也直接可用,请使用显式顶层兼容写法:
|
|
192
|
+
|
|
193
|
+
```toml
|
|
194
|
+
model = "MiniMax-M2.7"
|
|
195
|
+
model_provider = "minimax"
|
|
196
|
+
approval_policy = "never"
|
|
197
|
+
sandbox_mode = "workspace-write"
|
|
198
|
+
|
|
199
|
+
[model_providers.minimax]
|
|
200
|
+
name = "MiniMax Chat Completions API"
|
|
201
|
+
base_url = "https://api.minimaxi.com/v1"
|
|
202
|
+
env_key = "MINIMAX_API_KEY"
|
|
203
|
+
wire_api = "chat"
|
|
204
|
+
requires_openai_auth = false
|
|
205
|
+
request_max_retries = 4
|
|
206
|
+
stream_max_retries = 10
|
|
207
|
+
stream_idle_timeout_ms = 300000
|
|
208
|
+
|
|
209
|
+
[profiles.m27]
|
|
210
|
+
model = "MiniMax-M2.7"
|
|
126
211
|
model_provider = "minimax"
|
|
127
212
|
```
|
|
128
213
|
|
|
129
214
|
然后执行:
|
|
130
215
|
|
|
131
216
|
```bash
|
|
132
|
-
|
|
133
|
-
codex --profile minimax
|
|
217
|
+
codex --profile m27
|
|
134
218
|
```
|
|
135
219
|
|
|
136
220
|
### DeepScientist 命令
|
|
137
221
|
|
|
138
222
|
```bash
|
|
139
|
-
ds doctor --codex-profile
|
|
140
|
-
ds --codex-profile
|
|
223
|
+
ds doctor --codex-profile m27
|
|
224
|
+
ds --codex-profile m27
|
|
141
225
|
```
|
|
142
226
|
|
|
143
227
|
### 持久化 runner 配置
|
|
@@ -145,12 +229,26 @@ ds --codex-profile minimax
|
|
|
145
229
|
```yaml
|
|
146
230
|
codex:
|
|
147
231
|
enabled: true
|
|
148
|
-
binary:
|
|
232
|
+
binary: /tmp/codex057-wrapper
|
|
149
233
|
config_dir: ~/.codex
|
|
150
|
-
profile:
|
|
234
|
+
profile: m27
|
|
151
235
|
model: inherit
|
|
236
|
+
model_reasoning_effort: high
|
|
152
237
|
```
|
|
153
238
|
|
|
239
|
+
如果你已经把全局 `codex` 固定到 `0.57.0`,也可以把 `binary` 写回 `codex`。这里写绝对路径只是为了明确避免误用系统里其他版本的 Codex。
|
|
240
|
+
|
|
241
|
+
如果你不想把这个路径持久化写进 `runners.yaml`,也可以保留 `binary: codex`,然后在启动时临时加:
|
|
242
|
+
|
|
243
|
+
```bash
|
|
244
|
+
ds --codex /absolute/path/to/codex --codex-profile m27
|
|
245
|
+
```
|
|
246
|
+
|
|
247
|
+
DeepScientist 现在会为 MiniMax 的 `0.57.0` 路径额外做两层兼容:
|
|
248
|
+
|
|
249
|
+
- 当检测到旧版 Codex CLI 不支持 `xhigh` 时,自动把 `xhigh` 降级成 `high`
|
|
250
|
+
- 当检测到 MiniMax 使用 profile-only 的 `model_provider` / `model` 配置形态时,在临时 DeepScientist Codex home 里自动补齐顶层字段
|
|
251
|
+
|
|
154
252
|
## GLM
|
|
155
253
|
|
|
156
254
|
GLM 的官方文档把 Coding Plan 描述成 OpenAI-compatible 的 coding endpoint,而不是单独的 Codex 登录流程。
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@researai/deepscientist",
|
|
3
|
-
"version": "1.5.
|
|
3
|
+
"version": "1.5.13",
|
|
4
4
|
"description": "DeepScientist is not just a fully open-source autonomous scientific discovery system. It is also a research map that keeps growing from every round.",
|
|
5
5
|
"license": "Apache-2.0",
|
|
6
6
|
"files": [
|
|
@@ -38,6 +38,7 @@
|
|
|
38
38
|
"@openai/codex": "^0.114.0",
|
|
39
39
|
"ink": "npm:@jrichman/ink@6.4.6",
|
|
40
40
|
"ink-gradient": "^3.0.0",
|
|
41
|
+
"qrcode": "^1.5.4",
|
|
41
42
|
"react": "^19.2.0",
|
|
42
43
|
"react-dom": "^19.2.0",
|
|
43
44
|
"string-width": "^8.1.0"
|
package/pyproject.toml
CHANGED
package/src/deepscientist/cli.py
CHANGED
|
@@ -39,6 +39,7 @@ def build_parser() -> argparse.ArgumentParser:
|
|
|
39
39
|
parser = argparse.ArgumentParser(prog="ds", description="DeepScientist Core skeleton")
|
|
40
40
|
parser.add_argument("--home", default=None, help="Override DeepScientist home")
|
|
41
41
|
parser.add_argument("--proxy", default=None, help="Explicit outbound HTTP/WS proxy, for example `http://127.0.0.1:7890`.")
|
|
42
|
+
parser.add_argument("--codex", default=None, help="Override the Codex executable path for this invocation.")
|
|
42
43
|
|
|
43
44
|
subparsers = parser.add_subparsers(dest="command", required=True)
|
|
44
45
|
|
|
@@ -475,6 +476,8 @@ def migrate_command(home: Path, target: str) -> int:
|
|
|
475
476
|
def main(argv: list[str] | None = None) -> int:
|
|
476
477
|
parser = build_parser()
|
|
477
478
|
args = parser.parse_args(argv)
|
|
479
|
+
if args.codex:
|
|
480
|
+
os.environ["DEEPSCIENTIST_CODEX_BINARY"] = str(args.codex)
|
|
478
481
|
configure_runtime_proxy(args.proxy)
|
|
479
482
|
home = resolve_home(args)
|
|
480
483
|
|
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import re
|
|
5
|
+
import subprocess
|
|
6
|
+
import tomllib
|
|
7
|
+
from functools import lru_cache
|
|
8
|
+
|
|
9
|
+
_MIN_XHIGH_SUPPORTED_VERSION = (0, 63, 0)
|
|
10
|
+
_CODEX_VERSION_PATTERN = re.compile(r"codex-cli\s+(\d+)\.(\d+)\.(\d+)", re.IGNORECASE)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def parse_codex_cli_version(text: str) -> tuple[int, int, int] | None:
|
|
14
|
+
match = _CODEX_VERSION_PATTERN.search(str(text or ""))
|
|
15
|
+
if not match:
|
|
16
|
+
return None
|
|
17
|
+
return tuple(int(part) for part in match.groups())
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@lru_cache(maxsize=32)
def codex_cli_version(binary: str) -> tuple[int, int, int] | None:
    """Probe ``<binary> --version`` and return the parsed Codex CLI version.

    Returns ``None`` when the binary path is blank, the process cannot be
    launched, it does not finish within 10 seconds, or its output contains no
    ``codex-cli X.Y.Z`` banner. Results are memoized per binary string for
    the life of the process via ``lru_cache``, so a binary replaced on disk
    is not re-probed by the same process.
    """
    normalized = str(binary or "").strip()
    if not normalized:
        return None
    try:
        result = subprocess.run(
            [normalized, "--version"],
            # Non-zero exit is tolerated; both streams are still scanned below.
            check=False,
            capture_output=True,
            text=True,
            timeout=10,
        )
    except (OSError, subprocess.TimeoutExpired):
        # Missing/unexecutable binary or a hung probe: treat version as unknown.
        return None
    # Some CLIs print the version banner on stderr, so scan both streams.
    return parse_codex_cli_version(f"{result.stdout}\n{result.stderr}")
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def format_codex_cli_version(version: tuple[int, int, int] | None) -> str:
|
|
39
|
+
if version is None:
|
|
40
|
+
return ""
|
|
41
|
+
return ".".join(str(part) for part in version)
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def normalize_codex_reasoning_effort(
|
|
45
|
+
reasoning_effort: str | None,
|
|
46
|
+
*,
|
|
47
|
+
resolved_binary: str | None,
|
|
48
|
+
) -> tuple[str | None, str | None]:
|
|
49
|
+
normalized = str(reasoning_effort or "").strip()
|
|
50
|
+
if not normalized:
|
|
51
|
+
return None, None
|
|
52
|
+
if normalized.lower() != "xhigh":
|
|
53
|
+
return normalized, None
|
|
54
|
+
|
|
55
|
+
version = codex_cli_version(str(resolved_binary or ""))
|
|
56
|
+
if version is None or version >= _MIN_XHIGH_SUPPORTED_VERSION:
|
|
57
|
+
return normalized, None
|
|
58
|
+
|
|
59
|
+
version_text = format_codex_cli_version(version)
|
|
60
|
+
return (
|
|
61
|
+
"high",
|
|
62
|
+
(
|
|
63
|
+
f"Codex CLI {version_text} does not support `xhigh`; "
|
|
64
|
+
"DeepScientist downgraded reasoning effort to `high` automatically."
|
|
65
|
+
),
|
|
66
|
+
)
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def adapt_profile_only_provider_config(
|
|
70
|
+
config_text: str,
|
|
71
|
+
*,
|
|
72
|
+
profile: str,
|
|
73
|
+
) -> tuple[str, str | None]:
|
|
74
|
+
normalized_profile = str(profile or "").strip()
|
|
75
|
+
if not normalized_profile or not str(config_text or "").strip():
|
|
76
|
+
return config_text, None
|
|
77
|
+
try:
|
|
78
|
+
parsed = tomllib.loads(config_text)
|
|
79
|
+
except tomllib.TOMLDecodeError:
|
|
80
|
+
return config_text, None
|
|
81
|
+
|
|
82
|
+
profiles = parsed.get("profiles")
|
|
83
|
+
if not isinstance(profiles, dict):
|
|
84
|
+
return config_text, None
|
|
85
|
+
profile_payload = profiles.get(normalized_profile)
|
|
86
|
+
if not isinstance(profile_payload, dict):
|
|
87
|
+
return config_text, None
|
|
88
|
+
|
|
89
|
+
prefix_lines: list[str] = []
|
|
90
|
+
injected_fields: list[str] = []
|
|
91
|
+
if "model_provider" not in parsed:
|
|
92
|
+
model_provider = str(profile_payload.get("model_provider") or "").strip()
|
|
93
|
+
if model_provider:
|
|
94
|
+
prefix_lines.append(f"model_provider = {json.dumps(model_provider, ensure_ascii=False)}")
|
|
95
|
+
injected_fields.append("model_provider")
|
|
96
|
+
if "model" not in parsed:
|
|
97
|
+
model = str(profile_payload.get("model") or "").strip()
|
|
98
|
+
if model:
|
|
99
|
+
prefix_lines.append(f"model = {json.dumps(model, ensure_ascii=False)}")
|
|
100
|
+
injected_fields.append("model")
|
|
101
|
+
|
|
102
|
+
if not prefix_lines:
|
|
103
|
+
return config_text, None
|
|
104
|
+
|
|
105
|
+
adapted = (
|
|
106
|
+
"# BEGIN DEEPSCIENTIST PROFILE COMPAT\n"
|
|
107
|
+
+ "\n".join(prefix_lines)
|
|
108
|
+
+ "\n# END DEEPSCIENTIST PROFILE COMPAT\n\n"
|
|
109
|
+
+ config_text.lstrip()
|
|
110
|
+
)
|
|
111
|
+
return (
|
|
112
|
+
adapted,
|
|
113
|
+
(
|
|
114
|
+
f"DeepScientist promoted `{normalized_profile}` profile "
|
|
115
|
+
f"{', '.join(injected_fields)} to the top level for Codex compatibility."
|
|
116
|
+
),
|
|
117
|
+
)
|
|
@@ -3,11 +3,14 @@ from __future__ import annotations
|
|
|
3
3
|
import json
|
|
4
4
|
import os
|
|
5
5
|
import subprocess
|
|
6
|
+
import tempfile
|
|
7
|
+
from shutil import copy2
|
|
6
8
|
from copy import deepcopy
|
|
7
9
|
from pathlib import Path
|
|
8
10
|
from urllib.error import URLError
|
|
9
11
|
from urllib.request import Request
|
|
10
12
|
|
|
13
|
+
from ..codex_cli_compat import adapt_profile_only_provider_config, normalize_codex_reasoning_effort
|
|
11
14
|
from ..connector.connector_profiles import PROFILEABLE_CONNECTOR_NAMES, list_connector_profiles, normalize_connector_config
|
|
12
15
|
from ..connector_runtime import build_discovered_target, infer_connector_transport
|
|
13
16
|
from ..home import repo_root
|
|
@@ -486,6 +489,7 @@ This page edits `{home_text}/config/runners.yaml`.
|
|
|
486
489
|
- `claude` remains TODO / reserved in the current open-source release and is not runnable yet
|
|
487
490
|
- set `codex.profile` only when your Codex CLI uses a named provider profile such as `m27`
|
|
488
491
|
- when you launch DeepScientist ad hoc with a provider profile, you can also use `ds --codex-profile <name>`
|
|
492
|
+
- when you want a one-off Codex binary override, you can also use `ds --codex /absolute/path/to/codex`
|
|
489
493
|
- keep `codex.model_reasoning_effort: xhigh` unless you explicitly want a lighter default
|
|
490
494
|
- keep `codex.retry_on_failure: true` so transient Codex failures can resume automatically
|
|
491
495
|
- keep retry timing near `10s / 6x / 1800s max` so Codex backs off exponentially and the last retry waits about 30 minutes
|
|
@@ -1206,6 +1210,31 @@ Use **Test** when the file exposes runtime dependencies.
|
|
|
1206
1210
|
resolved[env_key] = str(value)
|
|
1207
1211
|
return resolved
|
|
1208
1212
|
|
|
1213
|
+
def _prepare_codex_probe_home(
|
|
1214
|
+
self,
|
|
1215
|
+
*,
|
|
1216
|
+
config_dir: str,
|
|
1217
|
+
profile: str,
|
|
1218
|
+
) -> tuple[str, str | None, tempfile.TemporaryDirectory[str] | None]:
|
|
1219
|
+
expanded = Path(config_dir).expanduser()
|
|
1220
|
+
config_path = expanded / "config.toml"
|
|
1221
|
+
if not config_path.exists():
|
|
1222
|
+
return str(expanded), None, None
|
|
1223
|
+
|
|
1224
|
+
original_text = read_text(config_path)
|
|
1225
|
+
adapted_text, warning = adapt_profile_only_provider_config(original_text, profile=profile)
|
|
1226
|
+
if warning is None:
|
|
1227
|
+
return str(expanded), None, None
|
|
1228
|
+
|
|
1229
|
+
temp_home = tempfile.TemporaryDirectory(prefix="ds-codex-probe-")
|
|
1230
|
+
temp_root = Path(temp_home.name)
|
|
1231
|
+
for filename in ("auth.json",):
|
|
1232
|
+
source_path = expanded / filename
|
|
1233
|
+
if source_path.exists():
|
|
1234
|
+
copy2(source_path, temp_root / filename)
|
|
1235
|
+
write_text(temp_root / "config.toml", adapted_text)
|
|
1236
|
+
return str(temp_root), warning, temp_home
|
|
1237
|
+
|
|
1209
1238
|
def _codex_missing_binary_guidance(self, config: dict) -> list[str]:
|
|
1210
1239
|
profile = self._codex_profile_name(config)
|
|
1211
1240
|
guidance = [
|
|
@@ -1221,7 +1250,9 @@ Use **Test** when the file exposes runtime dependencies.
|
|
|
1221
1250
|
)
|
|
1222
1251
|
else:
|
|
1223
1252
|
guidance.append("Run `codex --login` (or `codex`) once and finish authentication before starting DeepScientist.")
|
|
1224
|
-
guidance.append(
|
|
1253
|
+
guidance.append(
|
|
1254
|
+
"If you use a custom Codex path, either set `runners.codex.binary` or launch with `ds --codex /absolute/path/to/codex`."
|
|
1255
|
+
)
|
|
1225
1256
|
return guidance
|
|
1226
1257
|
|
|
1227
1258
|
def _codex_probe_failure_guidance(self, config: dict) -> tuple[list[str], list[str]]:
|
|
@@ -1326,11 +1357,15 @@ Use **Test** when the file exposes runtime dependencies.
|
|
|
1326
1357
|
profile = self._codex_profile_name(config)
|
|
1327
1358
|
requested_model = self._codex_requested_model(config)
|
|
1328
1359
|
raw_reasoning_effort = config.get("model_reasoning_effort")
|
|
1329
|
-
|
|
1360
|
+
requested_reasoning_effort = (
|
|
1330
1361
|
str(raw_reasoning_effort).strip()
|
|
1331
1362
|
if raw_reasoning_effort is not None and str(raw_reasoning_effort).strip()
|
|
1332
1363
|
else ("xhigh" if raw_reasoning_effort is None else None)
|
|
1333
1364
|
)
|
|
1365
|
+
reasoning_effort, reasoning_effort_warning = normalize_codex_reasoning_effort(
|
|
1366
|
+
requested_reasoning_effort,
|
|
1367
|
+
resolved_binary=resolved_binary,
|
|
1368
|
+
)
|
|
1334
1369
|
details: dict[str, object] = {
|
|
1335
1370
|
"binary": binary,
|
|
1336
1371
|
"resolved_binary": resolved_binary,
|
|
@@ -1342,6 +1377,7 @@ Use **Test** when the file exposes runtime dependencies.
|
|
|
1342
1377
|
"approval_policy": str(config.get("approval_policy") or "on-request"),
|
|
1343
1378
|
"sandbox_mode": str(config.get("sandbox_mode") or "workspace-write"),
|
|
1344
1379
|
"reasoning_effort": reasoning_effort,
|
|
1380
|
+
"requested_reasoning_effort": requested_reasoning_effort,
|
|
1345
1381
|
"model_fallback_attempted": False,
|
|
1346
1382
|
"model_fallback_used": False,
|
|
1347
1383
|
"checked_at": checked_at,
|
|
@@ -1365,9 +1401,20 @@ Use **Test** when the file exposes runtime dependencies.
|
|
|
1365
1401
|
env = os.environ.copy()
|
|
1366
1402
|
env.update(self._codex_runner_env(config))
|
|
1367
1403
|
config_dir = str(config.get("config_dir") or "~/.codex").strip()
|
|
1404
|
+
probe_home_handle: tempfile.TemporaryDirectory[str] | None = None
|
|
1405
|
+
compatibility_warnings: list[str] = []
|
|
1368
1406
|
if config_dir:
|
|
1369
|
-
|
|
1407
|
+
prepared_home, profile_config_warning, probe_home_handle = self._prepare_codex_probe_home(
|
|
1408
|
+
config_dir=config_dir,
|
|
1409
|
+
profile=profile,
|
|
1410
|
+
)
|
|
1411
|
+
env["CODEX_HOME"] = prepared_home
|
|
1412
|
+
if profile_config_warning:
|
|
1413
|
+
compatibility_warnings.append(profile_config_warning)
|
|
1370
1414
|
prompt = "Reply with exactly HELLO."
|
|
1415
|
+
if reasoning_effort_warning:
|
|
1416
|
+
compatibility_warnings.append(reasoning_effort_warning)
|
|
1417
|
+
base_warnings: list[str] = list(compatibility_warnings)
|
|
1371
1418
|
|
|
1372
1419
|
def run_probe_once(model_for_command: str) -> tuple[list[str], subprocess.CompletedProcess[str] | None, subprocess.TimeoutExpired | None]:
|
|
1373
1420
|
command = self._build_codex_probe_command(
|
|
@@ -1406,7 +1453,7 @@ Use **Test** when the file exposes runtime dependencies.
|
|
|
1406
1453
|
return {
|
|
1407
1454
|
"ok": False,
|
|
1408
1455
|
"summary": "Codex startup probe timed out.",
|
|
1409
|
-
"warnings":
|
|
1456
|
+
"warnings": base_warnings,
|
|
1410
1457
|
"errors": [
|
|
1411
1458
|
"Codex did not answer the startup hello probe within 90 seconds.",
|
|
1412
1459
|
*self._codex_probe_failure_guidance(config)[0],
|
|
@@ -1463,7 +1510,7 @@ Use **Test** when the file exposes runtime dependencies.
|
|
|
1463
1510
|
return {
|
|
1464
1511
|
"ok": True,
|
|
1465
1512
|
"summary": "Codex startup probe completed with Codex default model fallback.",
|
|
1466
|
-
"warnings": [fallback_warning],
|
|
1513
|
+
"warnings": [*base_warnings, fallback_warning],
|
|
1467
1514
|
"errors": [],
|
|
1468
1515
|
"details": details,
|
|
1469
1516
|
"guidance": [
|
|
@@ -1483,7 +1530,7 @@ Use **Test** when the file exposes runtime dependencies.
|
|
|
1483
1530
|
"probe_command": command,
|
|
1484
1531
|
}
|
|
1485
1532
|
)
|
|
1486
|
-
warnings: list[str] =
|
|
1533
|
+
warnings: list[str] = list(base_warnings)
|
|
1487
1534
|
errors: list[str] = []
|
|
1488
1535
|
if not ok:
|
|
1489
1536
|
errors.append("Codex did not complete the startup hello probe successfully.")
|
|
@@ -11,11 +11,12 @@ from pathlib import Path
|
|
|
11
11
|
from typing import Any
|
|
12
12
|
|
|
13
13
|
from ..artifact import ArtifactService
|
|
14
|
+
from ..codex_cli_compat import adapt_profile_only_provider_config, normalize_codex_reasoning_effort
|
|
14
15
|
from ..config import ConfigManager
|
|
15
16
|
from ..gitops import export_git_graph
|
|
16
17
|
from ..prompts import PromptBuilder
|
|
17
18
|
from ..runtime_logs import JsonlLogger
|
|
18
|
-
from ..shared import append_jsonl, ensure_dir, generate_id, read_yaml, resolve_runner_binary, utc_now, write_json, write_text
|
|
19
|
+
from ..shared import append_jsonl, ensure_dir, generate_id, read_text, read_yaml, resolve_runner_binary, utc_now, write_json, write_text
|
|
19
20
|
from ..web_search import extract_web_search_payload
|
|
20
21
|
from .base import RunRequest, RunResult
|
|
21
22
|
|
|
@@ -920,7 +921,10 @@ class CodexRunner:
|
|
|
920
921
|
command.extend(["--model", normalized_model])
|
|
921
922
|
if request.approval_policy:
|
|
922
923
|
command.extend(["-c", f'approval_policy="{request.approval_policy}"'])
|
|
923
|
-
reasoning_effort =
|
|
924
|
+
reasoning_effort, _ = normalize_codex_reasoning_effort(
|
|
925
|
+
request.reasoning_effort,
|
|
926
|
+
resolved_binary=resolved_binary or self.binary,
|
|
927
|
+
)
|
|
924
928
|
if reasoning_effort:
|
|
925
929
|
command.extend(["-c", f'model_reasoning_effort="{reasoning_effort}"'])
|
|
926
930
|
tool_timeout_sec = self._positive_timeout_seconds(resolved_runner_config.get("mcp_tool_timeout_sec"))
|
|
@@ -945,6 +949,7 @@ class CodexRunner:
|
|
|
945
949
|
target = ensure_dir(workspace_root / ".codex")
|
|
946
950
|
resolved_runner_config = runner_config if isinstance(runner_config, dict) else self._load_runner_config()
|
|
947
951
|
configured_home = str(resolved_runner_config.get("config_dir") or os.environ.get("CODEX_HOME") or str(Path.home() / ".codex"))
|
|
952
|
+
profile = str(resolved_runner_config.get("profile") or "").strip()
|
|
948
953
|
source = Path(configured_home).expanduser()
|
|
949
954
|
for filename in ("config.toml", "auth.json"):
|
|
950
955
|
source_path = source / filename
|
|
@@ -953,6 +958,10 @@ class CodexRunner:
|
|
|
953
958
|
if source_path.resolve() == target_path.resolve():
|
|
954
959
|
continue
|
|
955
960
|
shutil.copy2(source_path, target_path)
|
|
961
|
+
config_path = target / "config.toml"
|
|
962
|
+
if profile and config_path.exists():
|
|
963
|
+
adapted_text, _ = adapt_profile_only_provider_config(read_text(config_path), profile=profile)
|
|
964
|
+
write_text(config_path, adapted_text)
|
|
956
965
|
ensure_dir(target / "skills")
|
|
957
966
|
quest_skills_root = quest_root / ".codex" / "skills"
|
|
958
967
|
if quest_skills_root.exists():
|
|
@@ -18,6 +18,7 @@ def _as_bool_env(name: str) -> bool:
|
|
|
18
18
|
|
|
19
19
|
|
|
20
20
|
def codex_runtime_overrides() -> dict[str, str]:
|
|
21
|
+
binary = _as_text(os.environ.get("DEEPSCIENTIST_CODEX_BINARY") or os.environ.get("DS_CODEX_BINARY"))
|
|
21
22
|
approval_policy = _as_text(os.environ.get("DEEPSCIENTIST_CODEX_APPROVAL_POLICY"))
|
|
22
23
|
sandbox_mode = _as_text(os.environ.get("DEEPSCIENTIST_CODEX_SANDBOX_MODE"))
|
|
23
24
|
profile = _as_text(os.environ.get("DEEPSCIENTIST_CODEX_PROFILE"))
|
|
@@ -28,6 +29,8 @@ def codex_runtime_overrides() -> dict[str, str]:
|
|
|
28
29
|
sandbox_mode = sandbox_mode or "danger-full-access"
|
|
29
30
|
|
|
30
31
|
overrides: dict[str, str] = {}
|
|
32
|
+
if binary:
|
|
33
|
+
overrides["binary"] = binary
|
|
31
34
|
if approval_policy:
|
|
32
35
|
overrides["approval_policy"] = approval_policy
|
|
33
36
|
if sandbox_mode:
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
# Artifact Payload Examples
|
|
2
|
+
|
|
3
|
+
Use this reference when the `baseline` stage needs a stable payload shape without re-expanding the main skill body.
|
|
4
|
+
|
|
5
|
+
## Route or blocked decision
|
|
6
|
+
|
|
7
|
+
Keep these fields when route choice or blocking status matters:
|
|
8
|
+
|
|
9
|
+
- `kind`
|
|
10
|
+
- `action`
|
|
11
|
+
- `reason`
|
|
12
|
+
- `baseline_id`
|
|
13
|
+
- `baseline_variant_id` when relevant
|
|
14
|
+
- `evidence_paths`
|
|
15
|
+
- `next_direction`
|
|
16
|
+
|
|
17
|
+
## Accepted baseline
|
|
18
|
+
|
|
19
|
+
Keep these fields when writing the accepted baseline artifact:
|
|
20
|
+
|
|
21
|
+
- `kind`
|
|
22
|
+
- `baseline_id`
|
|
23
|
+
- `baseline_kind`
|
|
24
|
+
- `path`
|
|
25
|
+
- `task`
|
|
26
|
+
- `dataset`
|
|
27
|
+
- `primary_metric`
|
|
28
|
+
- `metrics_summary`
|
|
29
|
+
- `default_variant_id` when relevant
|
|
30
|
+
- `baseline_variants` when relevant
|
|
31
|
+
- `environment`
|
|
32
|
+
- `source`
|
|
33
|
+
- `summary`
|
|
34
|
+
|
|
35
|
+
## Rules
|
|
36
|
+
|
|
37
|
+
- keep payloads compact but audit-friendly
|
|
38
|
+
- do not omit the trusted comparison surface just because one headline metric exists
|
|
39
|
+
- do not publish a blocked or verification-incomplete baseline payload as if it were accepted
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
export const CONNECTOR_ORDER = [
|
|
2
|
+
'qq',
|
|
3
|
+
'weixin',
|
|
4
|
+
'lingzhu',
|
|
5
|
+
'telegram',
|
|
6
|
+
'discord',
|
|
7
|
+
'slack',
|
|
8
|
+
'feishu',
|
|
9
|
+
'whatsapp',
|
|
10
|
+
];
|
|
11
|
+
const CONNECTOR_LABELS = {
|
|
12
|
+
qq: 'QQ',
|
|
13
|
+
weixin: 'Weixin',
|
|
14
|
+
lingzhu: 'Lingzhu',
|
|
15
|
+
telegram: 'Telegram',
|
|
16
|
+
discord: 'Discord',
|
|
17
|
+
slack: 'Slack',
|
|
18
|
+
feishu: 'Feishu',
|
|
19
|
+
whatsapp: 'WhatsApp',
|
|
20
|
+
};
|
|
21
|
+
const CONNECTOR_SUBTITLES = {
|
|
22
|
+
qq: 'Save App ID and App Secret, then wait for the first private QQ message to discover the target.',
|
|
23
|
+
weixin: 'Start QR login, scan with WeChat, and let DeepScientist save the connector automatically.',
|
|
24
|
+
lingzhu: 'Generate the Rokid binding values here, then copy them into the Lingzhu platform and save once.',
|
|
25
|
+
telegram: 'Guided setup not added in TUI yet. Use raw connectors config if needed.',
|
|
26
|
+
discord: 'Guided setup not added in TUI yet. Use raw connectors config if needed.',
|
|
27
|
+
slack: 'Guided setup not added in TUI yet. Use raw connectors config if needed.',
|
|
28
|
+
feishu: 'Guided setup not added in TUI yet. Use raw connectors config if needed.',
|
|
29
|
+
whatsapp: 'Guided setup not added in TUI yet. Use raw connectors config if needed.',
|
|
30
|
+
};
|
|
31
|
+
const GUIDED_CONNECTORS = new Set(['qq', 'weixin', 'lingzhu']);
|
|
32
|
+
const LINGZHU_EXAMPLE_AUTH_AKS = new Set(['abcd1234-abcd-abcd-abcd-abcdefghijkl']);
|
|
33
|
+
export function connectorLabel(name) {
  const key = String(name || '').trim().toLowerCase();
  const known = CONNECTOR_LABELS[key];
  if (known) {
    return known;
  }
  // Fall back to capitalizing the raw name, or a generic label when blank.
  return key ? key[0].toUpperCase() + key.slice(1) : 'Connector';
}
|
|
37
|
+
export function connectorSubtitle(name) {
  const key = String(name || '').trim().toLowerCase();
  // Unknown connectors get a generic one-line description.
  return CONNECTOR_SUBTITLES[key] || 'Connector settings.';
}
|
|
41
|
+
export function supportsGuidedConnector(name) {
  // Only connectors listed in GUIDED_CONNECTORS have a guided TUI setup flow.
  const key = String(name || '').trim().toLowerCase();
  return GUIDED_CONNECTORS.has(key);
}
|
|
45
|
+
export function maskSecret(value) {
  const secret = String(value || '').trim();
  if (secret === '') {
    return '';
  }
  // Environment-variable references (e.g. `$MY_TOKEN`) are not secrets; show verbatim.
  if (secret.startsWith('$')) {
    return secret;
  }
  // Short secrets are fully masked so nothing at all leaks.
  if (secret.length <= 8) {
    return '*'.repeat(secret.length);
  }
  // Longer secrets keep 4 characters on each end for recognizability.
  const hidden = '*'.repeat(Math.max(0, secret.length - 8));
  return `${secret.slice(0, 4)}${hidden}${secret.slice(-4)}`;
}
|
|
58
|
+
export function createLingzhuAk() {
  // Lowercase alphanumeric UUID-shaped token: 8-4-4-4-12 segments.
  const alphabet = 'abcdefghijklmnopqrstuvwxyz0123456789';
  const segmentSizes = [8, 4, 4, 4, 12];
  const total = segmentSizes.reduce((sum, size) => sum + size, 0);
  // Prefer cryptographic randomness; Math.random is only a last-resort fallback.
  const randomBytes =
    typeof crypto !== 'undefined' && typeof crypto.getRandomValues === 'function'
      ? crypto.getRandomValues(new Uint8Array(total))
      : Uint8Array.from({ length: total }, () => Math.floor(Math.random() * 256));
  const parts = [];
  let cursor = 0;
  for (const size of segmentSizes) {
    let piece = '';
    while (piece.length < size) {
      piece += alphabet[randomBytes[cursor] % alphabet.length];
      cursor += 1;
    }
    parts.push(piece);
  }
  return parts.join('-');
}
|
|
77
|
+
export function resolveLingzhuAuthAk(value) {
  const candidate = String(value || '').trim();
  // The documented placeholder AK must never be treated as a real credential.
  if (LINGZHU_EXAMPLE_AUTH_AKS.has(candidate)) {
    return '';
  }
  return candidate;
}
|
|
81
|
+
export function looksLikeWeixinQrImageUrl(value) {
  const candidate = String(value || '').trim();
  if (candidate === '') {
    return false;
  }
  // Inline data URIs and blob object URLs are always renderable as images.
  if (candidate.startsWith('data:image/') || candidate.startsWith('blob:')) {
    return true;
  }
  // Otherwise require an http(s) URL whose path ends in a known image extension
  // (optionally followed by a query string or fragment).
  return /^https?:\/\/.+\.(png|jpg|jpeg|gif|webp|svg)(?:$|[?#])/i.test(candidate);
}
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
export async function renderQrAscii(content) {
  const payload = String(content || '').trim();
  if (payload === '') {
    return '';
  }
  const qrModule = await import('qrcode');
  try {
    // Tight rendering first: no quiet zone keeps the QR compact in the terminal.
    return await qrModule.toString(payload, {
      type: 'utf8',
      errorCorrectionLevel: 'M',
      margin: 0,
    });
  } catch {
    // Retry with the library's default quiet zone if the tight render failed.
    return qrModule.toString(payload, {
      type: 'utf8',
      errorCorrectionLevel: 'M',
      margin: 2,
    });
  }
}
|
package/src/tui/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "deepscientist-tui",
|
|
3
|
-
"version": "1.5.
|
|
3
|
+
"version": "1.5.13",
|
|
4
4
|
"private": true,
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "dist/index.js",
|
|
@@ -12,6 +12,7 @@
|
|
|
12
12
|
"dependencies": {
|
|
13
13
|
"ink": "npm:@jrichman/ink@6.4.6",
|
|
14
14
|
"ink-gradient": "^3.0.0",
|
|
15
|
+
"qrcode": "^1.5.4",
|
|
15
16
|
"react": "^19.2.0",
|
|
16
17
|
"react-dom": "^19.2.0",
|
|
17
18
|
"string-width": "^8.1.0"
|