nex-code 0.4.25 → 0.4.27
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/nex-code.js +105 -105
- package/dist/skills/autoresearch.js +32 -6
- package/package.json +2 -2
- package/dist/skills/skills/autoresearch.js +0 -975
- package/dist/skills/skills/devops.md +0 -43
- package/dist/skills/skills/session-search.js +0 -180
- package/dist/skills/skills/skill-learning.js +0 -304
|
@@ -1,975 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* cli/skills/autoresearch.js — Autoresearch Skill
|
|
3
|
-
* Autonomous optimization loops: edit -> test -> log -> keep/revert
|
|
4
|
-
* Inspired by Karpathy's autoresearch pattern.
|
|
5
|
-
*
|
|
6
|
-
* Key design choices (aligned with Karpathy's autoresearch):
|
|
7
|
-
* - Dedicated branch per run (autoresearch/<tag>) for isolation
|
|
8
|
-
* - Git reset (not checkout) for discards — only successes in history
|
|
9
|
-
* - Fixed time budget per experiment for comparable results
|
|
10
|
-
* - Output redirection + metric grep to protect context window
|
|
11
|
-
* - Simplicity criterion: complexity cost weighed against metric gain
|
|
12
|
-
* - Crash triage: trivial bugs retried, broken ideas skipped
|
|
13
|
-
* - Resource tracking (memory/CPU alongside primary metric)
|
|
14
|
-
* - No iteration cap by default — runs until stopped
|
|
15
|
-
*/
|
|
16
|
-
|
|
17
|
-
const { execSync } = require("child_process");
|
|
18
|
-
const fs = require("fs");
|
|
19
|
-
const path = require("path");
|
|
20
|
-
|
|
21
|
-
// Lazy-load benchmark to avoid circular deps and keep startup fast
let _benchmark = null;

/**
 * Lazily require and cache the benchmark module.
 *
 * A successful load is cached in `_benchmark`; a failed load leaves the
 * cache at null, so the require is retried on the next call.
 *
 * @returns {object|null} The benchmark module, or null if it cannot be loaded.
 */
function getBenchmark() {
  if (_benchmark) return _benchmark;
  try {
    _benchmark = require("../benchmark");
  } catch {
    _benchmark = null;
  }
  return _benchmark;
}
|
|
33
|
-
|
|
34
|
-
// Track experiment history within the session
let experiments = [];
let loopActive = false;

/**
 * Resolve the path of the experiment log file
 * (<cwd>/.nex/autoresearch/experiments.json), creating the directory
 * on first use.
 *
 * @returns {string} Path to experiments.json.
 */
function getLogPath() {
  const dir = path.join(process.cwd(), ".nex", "autoresearch");
  if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true });
  return path.join(dir, "experiments.json");
}

/**
 * Load experiment history from disk into the module-level `experiments`
 * array. If the file is missing, the current in-memory list is kept.
 *
 * Fix: the parsed JSON is now validated with Array.isArray — previously a
 * corrupted or hand-edited experiments.json containing a non-array value
 * was assigned directly to `experiments`, crashing later .push()/.filter()
 * calls in the command handlers and tools.
 *
 * @returns {Array<object>} The in-memory experiment list.
 */
function loadExperiments() {
  const logPath = getLogPath();
  if (fs.existsSync(logPath)) {
    try {
      const parsed = JSON.parse(fs.readFileSync(logPath, "utf-8"));
      // Guard against non-array JSON poisoning the experiment list.
      experiments = Array.isArray(parsed) ? parsed : [];
    } catch {
      // Unreadable/invalid JSON: start over with an empty history.
      experiments = [];
    }
  }
  return experiments;
}

/**
 * Persist the in-memory experiment list to experiments.json
 * (pretty-printed, 2-space indent).
 */
function saveExperiments() {
  const logPath = getLogPath();
  fs.writeFileSync(logPath, JSON.stringify(experiments, null, 2));
}
|
|
60
|
-
|
|
61
|
-
/**
 * Get the short git hash for the current HEAD.
 *
 * @returns {string|null} Abbreviated commit hash, or null when git is
 *   unavailable or the cwd is not a repository.
 */
function gitHash() {
  const execOptions = {
    cwd: process.cwd(),
    encoding: "utf-8",
    stdio: ["pipe", "pipe", "pipe"],
  };
  try {
    const out = execSync("git rev-parse --short HEAD", execOptions);
    return out.trim();
  } catch {
    return null;
  }
}
|
|
73
|
-
|
|
74
|
-
/**
 * Get the name of the currently checked-out git branch.
 *
 * @returns {string|null} Branch name (e.g. "main"), or null when git is
 *   unavailable or the cwd is not a repository.
 */
function gitBranch() {
  const execOptions = {
    cwd: process.cwd(),
    encoding: "utf-8",
    stdio: ["pipe", "pipe", "pipe"],
  };
  try {
    const out = execSync("git rev-parse --abbrev-ref HEAD", execOptions);
    return out.trim();
  } catch {
    return null;
  }
}
|
|
86
|
-
|
|
87
|
-
/**
 * Extract named numeric metrics from command output using regex patterns.
 *
 * @param {string} output - Raw text to scan (e.g. a test/benchmark log).
 * @param {Object<string,string>} patterns - Map of metric name to a regex
 *   source string containing exactly one capture group for the value.
 * @returns {Object<string,number>} Metric name -> parsed numeric value.
 *   Metrics whose pattern is invalid, does not match, or captures a
 *   non-numeric value are omitted.
 *
 * Fixes:
 * - Patterns arrive as model-supplied tool arguments; an invalid regex
 *   source used to throw from `new RegExp(...)` and abort the whole
 *   extraction. Invalid patterns are now skipped.
 * - A capture that parseFloat could not interpret used to store NaN in
 *   the results; non-finite values are now skipped.
 */
function extractMetrics(output, patterns) {
  const results = {};
  for (const [name, pattern] of Object.entries(patterns)) {
    let re;
    try {
      re = new RegExp(pattern);
    } catch {
      continue; // invalid regex source — skip this metric
    }
    const match = output.match(re);
    if (match && match[1]) {
      const value = parseFloat(match[1]);
      if (Number.isFinite(value)) results[name] = value;
    }
  }
  return results;
}
|
|
99
|
-
|
|
100
|
-
/**
 * Parse peak memory usage from process output (platform-aware).
 *
 * Recognized formats, checked in order with a later match overriding an
 * earlier one: "peak_vram_mb: 1234", "MaxRSS: 1234" (reported in KB),
 * "memory: 1234MB" (case-insensitive).
 *
 * @param {string} output - Raw process output to scan.
 * @returns {{peak_memory_mb?: number}} Resource summary; empty object when
 *   no pattern matches.
 */
function parseResourceUsage(output) {
  const probes = [
    { re: /peak_vram_mb:\s*([\d.]+)/, toMb: (v) => v },
    { re: /MaxRSS:\s*([\d.]+)/, toMb: (v) => v / 1024 }, // KB to MB
    { re: /memory:\s*([\d.]+)\s*MB/i, toMb: (v) => v },
  ];
  const resources = {};
  for (const { re, toMb } of probes) {
    const hit = output.match(re);
    if (hit) resources.peak_memory_mb = toMb(parseFloat(hit[1]));
  }
  return resources;
}
|
|
112
|
-
|
|
113
|
-
module.exports = {
|
|
114
|
-
name: "autoresearch",
|
|
115
|
-
description:
|
|
116
|
-
"Autonomous optimization loops: edit -> test -> log -> keep/revert. " +
|
|
117
|
-
"Run experiments on a dedicated branch, track results, and automatically keep improvements or revert failures.",
|
|
118
|
-
|
|
119
|
-
instructions: `You have access to autoresearch tools for running autonomous optimization loops.
|
|
120
|
-
|
|
121
|
-
## Workflow
|
|
122
|
-
|
|
123
|
-
When the user starts an autoresearch loop with /autoresearch <goal>, follow this cycle:
|
|
124
|
-
|
|
125
|
-
1. **Setup branch** using skill_ar_setup_branch to create a dedicated autoresearch/<tag> branch
|
|
126
|
-
2. **Analyze** the current state (read code, run baseline test)
|
|
127
|
-
3. **Hypothesize** a specific change that could improve the target metric
|
|
128
|
-
4. **Commit checkpoint** using skill_ar_checkpoint before making changes
|
|
129
|
-
5. **Edit** the code to implement your hypothesis
|
|
130
|
-
6. **Run experiment** using skill_ar_run_experiment with the test command
|
|
131
|
-
7. **Log result** using skill_ar_log_experiment with the outcome
|
|
132
|
-
8. **Decide**: If improved, keep changes. If worse, revert using skill_ar_revert
|
|
133
|
-
9. **Repeat** from step 3 — do NOT stop unless the user interrupts
|
|
134
|
-
|
|
135
|
-
## Simplicity Criterion
|
|
136
|
-
|
|
137
|
-
Not every metric improvement is worth keeping. Weigh complexity cost against improvement:
|
|
138
|
-
- A tiny improvement that adds 20 lines of hacky code? Probably not worth it.
|
|
139
|
-
- Deleting code and getting equal or better results? Definitely keep — that's a simplification win.
|
|
140
|
-
- An improvement of ~0 but much simpler code? Keep.
|
|
141
|
-
When logging experiments, note the complexity impact in the notes field.
|
|
142
|
-
|
|
143
|
-
## Crash Triage
|
|
144
|
-
|
|
145
|
-
When an experiment crashes:
|
|
146
|
-
- **Trivial bug** (typo, missing import, off-by-one): fix it and re-run the same experiment
|
|
147
|
-
- **Fundamentally broken idea** (OOM, architectural incompatibility): log as crash, revert, move on
|
|
148
|
-
- Use your judgment — if you can't fix a crash in 2 attempts, skip the idea
|
|
149
|
-
|
|
150
|
-
## Output Efficiency
|
|
151
|
-
|
|
152
|
-
When running experiments, redirect output to a log file and only grep for the target metric.
|
|
153
|
-
This protects the context window from being flooded with training output.
|
|
154
|
-
Use ar_run_experiment with output_file to redirect, then ar_extract_metric to read just the result.
|
|
155
|
-
|
|
156
|
-
## Rules
|
|
157
|
-
- Always create a checkpoint before making changes
|
|
158
|
-
- Always run the experiment after editing
|
|
159
|
-
- Always log the result (even failures and crashes)
|
|
160
|
-
- Revert immediately if the metric worsens
|
|
161
|
-
- NEVER STOP: keep running experiments until the user interrupts — they may be away
|
|
162
|
-
- If you run out of ideas, re-read the code for new angles, try combining previous near-misses, or try more radical changes
|
|
163
|
-
- Show a summary table after every 5 iterations`,
|
|
164
|
-
|
|
165
|
-
commands: [
|
|
166
|
-
{
|
|
167
|
-
cmd: "/autoresearch",
|
|
168
|
-
desc: "Start an autonomous optimization loop: /autoresearch <goal>",
|
|
169
|
-
handler: (args) => {
|
|
170
|
-
const goal = args.trim();
|
|
171
|
-
if (!goal) {
|
|
172
|
-
console.log("Usage: /autoresearch <optimization goal>");
|
|
173
|
-
console.log(
|
|
174
|
-
'Example: /autoresearch "reduce test runtime while maintaining correctness"',
|
|
175
|
-
);
|
|
176
|
-
console.log(
|
|
177
|
-
'Example: /autoresearch "optimize bundle size under 500kb"',
|
|
178
|
-
);
|
|
179
|
-
return;
|
|
180
|
-
}
|
|
181
|
-
loopActive = true;
|
|
182
|
-
loadExperiments();
|
|
183
|
-
console.log(`Autoresearch started: ${goal}`);
|
|
184
|
-
console.log(
|
|
185
|
-
"The agent will run autonomous optimization loops until you interrupt (Ctrl+C).",
|
|
186
|
-
);
|
|
187
|
-
console.log("Experiments run on a dedicated branch for isolation.\n");
|
|
188
|
-
return `AUTORESEARCH_GOAL: ${goal}\n\nStart the autoresearch loop. First, set up a dedicated branch using ar_setup_branch. Then analyze the current state and establish a baseline metric. Then begin the edit->test->log->keep/revert cycle. Do NOT stop — keep running experiments indefinitely until I interrupt.`;
|
|
189
|
-
},
|
|
190
|
-
},
|
|
191
|
-
{
|
|
192
|
-
cmd: "/ar-self-improve",
|
|
193
|
-
desc: "Self-improvement loop: optimize nex-code's own benchmark score",
|
|
194
|
-
handler: (args) => {
|
|
195
|
-
const focus = args.trim() || "overall benchmark score";
|
|
196
|
-
loopActive = true;
|
|
197
|
-
loadExperiments();
|
|
198
|
-
console.log(`Self-improvement loop started.`);
|
|
199
|
-
console.log(`Focus: ${focus}`);
|
|
200
|
-
console.log(
|
|
201
|
-
"The agent will optimize nex-code's benchmark score autonomously.",
|
|
202
|
-
);
|
|
203
|
-
console.log("Ctrl+C to stop.\n");
|
|
204
|
-
return [
|
|
205
|
-
`AUTORESEARCH_GOAL: Improve nex-code's ${focus}`,
|
|
206
|
-
"",
|
|
207
|
-
"## Self-Improvement Protocol",
|
|
208
|
-
"",
|
|
209
|
-
"You are optimizing nex-code itself. The benchmark suite is your eval harness — DO NOT modify it.",
|
|
210
|
-
"",
|
|
211
|
-
"### Setup",
|
|
212
|
-
"1. Call ar_setup_branch with a tag like 'self-improve-<date>'",
|
|
213
|
-
"2. Call ar_run_benchmark with quick=true to establish baseline score",
|
|
214
|
-
"3. Read the category breakdown — identify the weakest category",
|
|
215
|
-
"",
|
|
216
|
-
"### Loop",
|
|
217
|
-
"1. Pick ONE targeted improvement to address the weakest benchmark area",
|
|
218
|
-
"2. ar_checkpoint before making changes",
|
|
219
|
-
"3. Edit nex-code source files (agent.js, orchestrator.js, context-engine.js, etc.)",
|
|
220
|
-
"4. Run npm test to verify nothing breaks — if tests fail, fix or revert immediately",
|
|
221
|
-
"5. npm run build to update dist/",
|
|
222
|
-
"6. ar_run_benchmark with quick=true to measure the new score",
|
|
223
|
-
"7. ar_log_experiment with the benchmark score as metric",
|
|
224
|
-
"8. If score improved: keep. If score same or worse: ar_revert",
|
|
225
|
-
"9. Repeat — do NOT stop",
|
|
226
|
-
"",
|
|
227
|
-
"### What you CAN modify",
|
|
228
|
-
"- cli/agent.js — guard thresholds, system prompts, tool handling",
|
|
229
|
-
"- cli/orchestrator.js — sub-agent behavior, decomposition logic",
|
|
230
|
-
"- cli/context-engine.js — compression, token estimation",
|
|
231
|
-
"- cli/sub-agent.js — retry logic, error classification",
|
|
232
|
-
"- cli/task-router.js — routing logic",
|
|
233
|
-
"- Any other cli/ source file that affects agent quality",
|
|
234
|
-
"",
|
|
235
|
-
"### What you CANNOT modify",
|
|
236
|
-
"- cli/benchmark.js — this is the eval harness, modifying it is cheating",
|
|
237
|
-
"- tests/ — test files are not the optimization target",
|
|
238
|
-
"- Do not modify the scoring weights or task definitions",
|
|
239
|
-
"",
|
|
240
|
-
"### Quality rules",
|
|
241
|
-
"- Simplicity criterion: prefer removing code over adding it",
|
|
242
|
-
"- Each change must pass npm test before benchmarking",
|
|
243
|
-
"- Track which category you targeted and whether it improved",
|
|
244
|
-
"- If 3 consecutive experiments fail to improve, shift focus to a different category",
|
|
245
|
-
].join("\n");
|
|
246
|
-
},
|
|
247
|
-
},
|
|
248
|
-
{
|
|
249
|
-
cmd: "/ar-status",
|
|
250
|
-
desc: "Show autoresearch experiment history",
|
|
251
|
-
handler: () => {
|
|
252
|
-
const exps = loadExperiments();
|
|
253
|
-
if (exps.length === 0) {
|
|
254
|
-
console.log("No experiments recorded yet.");
|
|
255
|
-
return;
|
|
256
|
-
}
|
|
257
|
-
console.log(`\nExperiment History (${exps.length} total):\n`);
|
|
258
|
-
console.log(
|
|
259
|
-
" # | Status | Metric | Memory MB | Commit | Description",
|
|
260
|
-
);
|
|
261
|
-
console.log(
|
|
262
|
-
" ---|----------|---------------|-----------|---------|----------------------------------",
|
|
263
|
-
);
|
|
264
|
-
for (let i = 0; i < exps.length; i++) {
|
|
265
|
-
const e = exps[i];
|
|
266
|
-
const status =
|
|
267
|
-
e.status === "crash"
|
|
268
|
-
? "CRASH "
|
|
269
|
-
: e.kept
|
|
270
|
-
? "KEPT "
|
|
271
|
-
: "REVERTED";
|
|
272
|
-
const metric =
|
|
273
|
-
e.metric != null ? String(e.metric).padEnd(13) : "N/A ";
|
|
274
|
-
const memory =
|
|
275
|
-
e.peak_memory_mb != null
|
|
276
|
-
? String(e.peak_memory_mb.toFixed(1)).padEnd(9)
|
|
277
|
-
: "N/A ";
|
|
278
|
-
const commit = (e.commit || "N/A").padEnd(7);
|
|
279
|
-
const desc = (e.description || "").substring(0, 34);
|
|
280
|
-
console.log(
|
|
281
|
-
` ${String(i + 1).padStart(2)} | ${status} | ${metric} | ${memory} | ${commit} | ${desc}`,
|
|
282
|
-
);
|
|
283
|
-
}
|
|
284
|
-
// Show trend
|
|
285
|
-
const kept = exps.filter((e) => e.kept);
|
|
286
|
-
if (kept.length >= 2) {
|
|
287
|
-
const first = kept[0].metric;
|
|
288
|
-
const last = kept[kept.length - 1].metric;
|
|
289
|
-
if (first != null && last != null) {
|
|
290
|
-
const diff = last - first;
|
|
291
|
-
const arrow = diff > 0 ? "+" : "";
|
|
292
|
-
console.log(
|
|
293
|
-
`\n Trend: ${first} -> ${last} (${arrow}${diff.toFixed(2)})`,
|
|
294
|
-
);
|
|
295
|
-
}
|
|
296
|
-
}
|
|
297
|
-
const crashes = exps.filter((e) => e.status === "crash");
|
|
298
|
-
if (crashes.length > 0) {
|
|
299
|
-
console.log(` Crashes: ${crashes.length}`);
|
|
300
|
-
}
|
|
301
|
-
console.log();
|
|
302
|
-
},
|
|
303
|
-
},
|
|
304
|
-
{
|
|
305
|
-
cmd: "/ar-clear",
|
|
306
|
-
desc: "Clear autoresearch experiment history",
|
|
307
|
-
handler: () => {
|
|
308
|
-
experiments = [];
|
|
309
|
-
saveExperiments();
|
|
310
|
-
loopActive = false;
|
|
311
|
-
console.log("Autoresearch history cleared.");
|
|
312
|
-
},
|
|
313
|
-
},
|
|
314
|
-
],
|
|
315
|
-
|
|
316
|
-
tools: [
|
|
317
|
-
{
|
|
318
|
-
type: "function",
|
|
319
|
-
function: {
|
|
320
|
-
name: "ar_setup_branch",
|
|
321
|
-
description:
|
|
322
|
-
"Create a dedicated autoresearch branch for this experiment run. " +
|
|
323
|
-
"Creates 'autoresearch/<tag>' from the current branch. " +
|
|
324
|
-
"Call this ONCE at the start of each autoresearch session.",
|
|
325
|
-
parameters: {
|
|
326
|
-
type: "object",
|
|
327
|
-
properties: {
|
|
328
|
-
tag: {
|
|
329
|
-
type: "string",
|
|
330
|
-
description:
|
|
331
|
-
"Short tag for this run (e.g. 'mar31', 'perf-opt'). " +
|
|
332
|
-
"Used as branch name: autoresearch/<tag>",
|
|
333
|
-
},
|
|
334
|
-
},
|
|
335
|
-
required: ["tag"],
|
|
336
|
-
},
|
|
337
|
-
},
|
|
338
|
-
execute: async (args) => {
|
|
339
|
-
const tag = (args.tag || "").replace(/[^a-zA-Z0-9_-]/g, "-");
|
|
340
|
-
const branchName = `autoresearch/${tag}`;
|
|
341
|
-
|
|
342
|
-
try {
|
|
343
|
-
// Check if branch already exists
|
|
344
|
-
try {
|
|
345
|
-
execSync(`git rev-parse --verify ${branchName}`, {
|
|
346
|
-
cwd: process.cwd(),
|
|
347
|
-
stdio: ["pipe", "pipe", "pipe"],
|
|
348
|
-
});
|
|
349
|
-
// Branch exists — check it out
|
|
350
|
-
execSync(`git checkout ${branchName}`, {
|
|
351
|
-
cwd: process.cwd(),
|
|
352
|
-
stdio: ["pipe", "pipe", "pipe"],
|
|
353
|
-
});
|
|
354
|
-
return JSON.stringify({
|
|
355
|
-
status: "resumed",
|
|
356
|
-
branch: branchName,
|
|
357
|
-
note: "Branch already existed — resuming experiments on it.",
|
|
358
|
-
});
|
|
359
|
-
} catch {
|
|
360
|
-
// Branch doesn't exist — create it
|
|
361
|
-
}
|
|
362
|
-
|
|
363
|
-
const sourceBranch = gitBranch() || "unknown";
|
|
364
|
-
execSync(`git checkout -b ${branchName}`, {
|
|
365
|
-
cwd: process.cwd(),
|
|
366
|
-
stdio: ["pipe", "pipe", "pipe"],
|
|
367
|
-
});
|
|
368
|
-
|
|
369
|
-
return JSON.stringify({
|
|
370
|
-
status: "created",
|
|
371
|
-
branch: branchName,
|
|
372
|
-
source_branch: sourceBranch,
|
|
373
|
-
note: `Experiment branch created. All experiments will be isolated here. Merge back to '${sourceBranch}' when done.`,
|
|
374
|
-
});
|
|
375
|
-
} catch (err) {
|
|
376
|
-
return JSON.stringify({
|
|
377
|
-
status: "branch_failed",
|
|
378
|
-
error: err.message,
|
|
379
|
-
note: "Could not create branch. Continuing on current branch.",
|
|
380
|
-
});
|
|
381
|
-
}
|
|
382
|
-
},
|
|
383
|
-
},
|
|
384
|
-
{
|
|
385
|
-
type: "function",
|
|
386
|
-
function: {
|
|
387
|
-
name: "ar_checkpoint",
|
|
388
|
-
description:
|
|
389
|
-
"Create a git checkpoint before making experimental changes. " +
|
|
390
|
-
"This allows reverting via git reset if the experiment fails. " +
|
|
391
|
-
"Call this BEFORE editing any files in an autoresearch loop.",
|
|
392
|
-
parameters: {
|
|
393
|
-
type: "object",
|
|
394
|
-
properties: {
|
|
395
|
-
message: {
|
|
396
|
-
type: "string",
|
|
397
|
-
description:
|
|
398
|
-
"Short description of what you are about to try (e.g. 'replace forEach with for-of loop')",
|
|
399
|
-
},
|
|
400
|
-
},
|
|
401
|
-
required: ["message"],
|
|
402
|
-
},
|
|
403
|
-
},
|
|
404
|
-
execute: async (args) => {
|
|
405
|
-
try {
|
|
406
|
-
// Stage all current changes and create a checkpoint commit
|
|
407
|
-
execSync("git add -A", { cwd: process.cwd(), stdio: "pipe" });
|
|
408
|
-
const hasChanges = execSync("git diff --cached --stat", {
|
|
409
|
-
cwd: process.cwd(),
|
|
410
|
-
encoding: "utf-8",
|
|
411
|
-
}).trim();
|
|
412
|
-
|
|
413
|
-
if (hasChanges) {
|
|
414
|
-
execSync(
|
|
415
|
-
`git commit -m "autoresearch: checkpoint before: ${(args.message || "experiment").replace(/"/g, '\\"')}"`,
|
|
416
|
-
{ cwd: process.cwd(), stdio: "pipe" },
|
|
417
|
-
);
|
|
418
|
-
}
|
|
419
|
-
|
|
420
|
-
const hash = gitHash();
|
|
421
|
-
|
|
422
|
-
return JSON.stringify({
|
|
423
|
-
status: "checkpoint_created",
|
|
424
|
-
commit: hash,
|
|
425
|
-
message: args.message,
|
|
426
|
-
});
|
|
427
|
-
} catch (err) {
|
|
428
|
-
return JSON.stringify({
|
|
429
|
-
status: "checkpoint_skipped",
|
|
430
|
-
reason: err.message,
|
|
431
|
-
note: "Working tree may be clean or git unavailable. Proceeding anyway.",
|
|
432
|
-
});
|
|
433
|
-
}
|
|
434
|
-
},
|
|
435
|
-
},
|
|
436
|
-
{
|
|
437
|
-
type: "function",
|
|
438
|
-
function: {
|
|
439
|
-
name: "ar_run_experiment",
|
|
440
|
-
description:
|
|
441
|
-
"Run a test/benchmark command to measure the effect of changes. " +
|
|
442
|
-
"Returns stdout, stderr, exit code, execution time, and resource usage. " +
|
|
443
|
-
"Supports output redirection to a log file to protect context window. " +
|
|
444
|
-
"Call this AFTER making changes to measure their impact.",
|
|
445
|
-
parameters: {
|
|
446
|
-
type: "object",
|
|
447
|
-
properties: {
|
|
448
|
-
command: {
|
|
449
|
-
type: "string",
|
|
450
|
-
description:
|
|
451
|
-
'The shell command to run (e.g. "npm test", "time npm run build", "node bench.js")',
|
|
452
|
-
},
|
|
453
|
-
timeout_seconds: {
|
|
454
|
-
type: "number",
|
|
455
|
-
description:
|
|
456
|
-
"Max seconds to wait (default: 300). Kill the process if exceeded.",
|
|
457
|
-
},
|
|
458
|
-
output_file: {
|
|
459
|
-
type: "string",
|
|
460
|
-
description:
|
|
461
|
-
"Optional: redirect all output to this file instead of capturing in context. " +
|
|
462
|
-
'Use with ar_extract_metric to read only the metric. (e.g. "run.log")',
|
|
463
|
-
},
|
|
464
|
-
metric_pattern: {
|
|
465
|
-
type: "string",
|
|
466
|
-
description:
|
|
467
|
-
"Optional: regex pattern to extract the primary metric from output. " +
|
|
468
|
-
"Must have one capture group for the numeric value. " +
|
|
469
|
-
'(e.g. "val_bpb:\\\\s*([\\\\d.]+)")',
|
|
470
|
-
},
|
|
471
|
-
},
|
|
472
|
-
required: ["command"],
|
|
473
|
-
},
|
|
474
|
-
},
|
|
475
|
-
execute: async (args) => {
|
|
476
|
-
const timeout = (args.timeout_seconds || 300) * 1000;
|
|
477
|
-
const start = Date.now();
|
|
478
|
-
const outputFile = args.output_file;
|
|
479
|
-
|
|
480
|
-
// Build the actual command — redirect if output_file specified
|
|
481
|
-
const cmd = outputFile
|
|
482
|
-
? `${args.command} > ${outputFile} 2>&1`
|
|
483
|
-
: args.command;
|
|
484
|
-
|
|
485
|
-
try {
|
|
486
|
-
const output = execSync(cmd, {
|
|
487
|
-
cwd: process.cwd(),
|
|
488
|
-
encoding: "utf-8",
|
|
489
|
-
timeout,
|
|
490
|
-
maxBuffer: 2 * 1024 * 1024, // 2MB
|
|
491
|
-
stdio: ["pipe", "pipe", "pipe"],
|
|
492
|
-
});
|
|
493
|
-
|
|
494
|
-
const elapsed = ((Date.now() - start) / 1000).toFixed(2);
|
|
495
|
-
const rawOutput = outputFile
|
|
496
|
-
? fs.existsSync(path.resolve(process.cwd(), outputFile))
|
|
497
|
-
? fs.readFileSync(
|
|
498
|
-
path.resolve(process.cwd(), outputFile),
|
|
499
|
-
"utf-8",
|
|
500
|
-
)
|
|
501
|
-
: ""
|
|
502
|
-
: output;
|
|
503
|
-
|
|
504
|
-
// Extract resource usage from output
|
|
505
|
-
const resources = parseResourceUsage(rawOutput);
|
|
506
|
-
|
|
507
|
-
// Extract metric if pattern provided
|
|
508
|
-
let extractedMetric = null;
|
|
509
|
-
if (args.metric_pattern) {
|
|
510
|
-
const metrics = extractMetrics(rawOutput, {
|
|
511
|
-
primary: args.metric_pattern,
|
|
512
|
-
});
|
|
513
|
-
extractedMetric = metrics.primary ?? null;
|
|
514
|
-
}
|
|
515
|
-
|
|
516
|
-
// For redirected output, only return summary + metric
|
|
517
|
-
const stdout = outputFile
|
|
518
|
-
? `[Output redirected to ${outputFile}]`
|
|
519
|
-
: output.substring(0, 4000);
|
|
520
|
-
|
|
521
|
-
return JSON.stringify({
|
|
522
|
-
status: "success",
|
|
523
|
-
exit_code: 0,
|
|
524
|
-
elapsed_seconds: parseFloat(elapsed),
|
|
525
|
-
stdout,
|
|
526
|
-
stderr: "",
|
|
527
|
-
extracted_metric: extractedMetric,
|
|
528
|
-
resources,
|
|
529
|
-
});
|
|
530
|
-
} catch (err) {
|
|
531
|
-
const elapsed = ((Date.now() - start) / 1000).toFixed(2);
|
|
532
|
-
|
|
533
|
-
// Try to read output file even on failure
|
|
534
|
-
let resources = {};
|
|
535
|
-
let extractedMetric = null;
|
|
536
|
-
if (outputFile) {
|
|
537
|
-
const outPath = path.resolve(process.cwd(), outputFile);
|
|
538
|
-
if (fs.existsSync(outPath)) {
|
|
539
|
-
const rawOutput = fs.readFileSync(outPath, "utf-8");
|
|
540
|
-
resources = parseResourceUsage(rawOutput);
|
|
541
|
-
if (args.metric_pattern) {
|
|
542
|
-
const metrics = extractMetrics(rawOutput, {
|
|
543
|
-
primary: args.metric_pattern,
|
|
544
|
-
});
|
|
545
|
-
extractedMetric = metrics.primary ?? null;
|
|
546
|
-
}
|
|
547
|
-
}
|
|
548
|
-
}
|
|
549
|
-
|
|
550
|
-
return JSON.stringify({
|
|
551
|
-
status: err.killed ? "timeout" : "failure",
|
|
552
|
-
exit_code: err.status || 1,
|
|
553
|
-
elapsed_seconds: parseFloat(elapsed),
|
|
554
|
-
stdout: outputFile
|
|
555
|
-
? `[Output redirected to ${outputFile}]`
|
|
556
|
-
: (err.stdout || "").substring(0, 4000),
|
|
557
|
-
stderr: (err.stderr || "").substring(0, 2000),
|
|
558
|
-
extracted_metric: extractedMetric,
|
|
559
|
-
resources,
|
|
560
|
-
});
|
|
561
|
-
}
|
|
562
|
-
},
|
|
563
|
-
},
|
|
564
|
-
{
|
|
565
|
-
type: "function",
|
|
566
|
-
function: {
|
|
567
|
-
name: "ar_extract_metric",
|
|
568
|
-
description:
|
|
569
|
-
"Extract specific metrics from an experiment log file using grep patterns. " +
|
|
570
|
-
"Use this after ar_run_experiment with output_file to read only the metrics " +
|
|
571
|
-
"without loading the entire output into context.",
|
|
572
|
-
parameters: {
|
|
573
|
-
type: "object",
|
|
574
|
-
properties: {
|
|
575
|
-
file: {
|
|
576
|
-
type: "string",
|
|
577
|
-
description:
|
|
578
|
-
'Path to the log file (e.g. "run.log")',
|
|
579
|
-
},
|
|
580
|
-
patterns: {
|
|
581
|
-
type: "object",
|
|
582
|
-
description:
|
|
583
|
-
'Map of metric name to regex pattern with one capture group. ' +
|
|
584
|
-
'Example: {"val_bpb": "val_bpb:\\\\s*([\\\\d.]+)", "memory": "peak_vram_mb:\\\\s*([\\\\d.]+)"}',
|
|
585
|
-
additionalProperties: { type: "string" },
|
|
586
|
-
},
|
|
587
|
-
tail_lines: {
|
|
588
|
-
type: "number",
|
|
589
|
-
description:
|
|
590
|
-
"If the file is large, only read the last N lines (default: 100). " +
|
|
591
|
-
"Set to 0 to read the entire file.",
|
|
592
|
-
},
|
|
593
|
-
},
|
|
594
|
-
required: ["file", "patterns"],
|
|
595
|
-
},
|
|
596
|
-
},
|
|
597
|
-
execute: async (args) => {
|
|
598
|
-
try {
|
|
599
|
-
const filePath = path.resolve(process.cwd(), args.file);
|
|
600
|
-
if (!fs.existsSync(filePath)) {
|
|
601
|
-
return JSON.stringify({
|
|
602
|
-
status: "file_not_found",
|
|
603
|
-
file: args.file,
|
|
604
|
-
});
|
|
605
|
-
}
|
|
606
|
-
|
|
607
|
-
let content = fs.readFileSync(filePath, "utf-8");
|
|
608
|
-
const tailLines =
|
|
609
|
-
args.tail_lines !== undefined ? args.tail_lines : 100;
|
|
610
|
-
if (tailLines > 0) {
|
|
611
|
-
const lines = content.split("\n");
|
|
612
|
-
content = lines.slice(-tailLines).join("\n");
|
|
613
|
-
}
|
|
614
|
-
|
|
615
|
-
const metrics = extractMetrics(content, args.patterns);
|
|
616
|
-
const resources = parseResourceUsage(content);
|
|
617
|
-
|
|
618
|
-
return JSON.stringify({
|
|
619
|
-
status: "extracted",
|
|
620
|
-
metrics,
|
|
621
|
-
resources,
|
|
622
|
-
lines_read: tailLines > 0 ? tailLines : content.split("\n").length,
|
|
623
|
-
});
|
|
624
|
-
} catch (err) {
|
|
625
|
-
return JSON.stringify({
|
|
626
|
-
status: "extract_failed",
|
|
627
|
-
error: err.message,
|
|
628
|
-
});
|
|
629
|
-
}
|
|
630
|
-
},
|
|
631
|
-
},
|
|
632
|
-
{
|
|
633
|
-
type: "function",
|
|
634
|
-
function: {
|
|
635
|
-
name: "ar_run_benchmark",
|
|
636
|
-
description:
|
|
637
|
-
"Run nex-code's built-in benchmark suite and return scores. " +
|
|
638
|
-
"This is the primary metric for self-improvement loops. " +
|
|
639
|
-
"Returns overall score (0-100), per-category scores, and model details. " +
|
|
640
|
-
"Use quick=true for fast iteration (~1-2 min), full for comprehensive evaluation.",
|
|
641
|
-
parameters: {
|
|
642
|
-
type: "object",
|
|
643
|
-
properties: {
|
|
644
|
-
quick: {
|
|
645
|
-
type: "boolean",
|
|
646
|
-
description:
|
|
647
|
-
"If true, run 7 tasks on 3 models (fast). If false, run all 59 tasks (thorough). Default: true.",
|
|
648
|
-
},
|
|
649
|
-
models: {
|
|
650
|
-
type: "array",
|
|
651
|
-
items: { type: "string" },
|
|
652
|
-
description:
|
|
653
|
-
"Optional: specific models to benchmark. Default: top models from previous results.",
|
|
654
|
-
},
|
|
655
|
-
},
|
|
656
|
-
},
|
|
657
|
-
},
|
|
658
|
-
execute: async (args) => {
|
|
659
|
-
const benchmark = getBenchmark();
|
|
660
|
-
if (!benchmark) {
|
|
661
|
-
return JSON.stringify({
|
|
662
|
-
status: "unavailable",
|
|
663
|
-
error: "Benchmark module not found. Make sure cli/benchmark.js exists.",
|
|
664
|
-
});
|
|
665
|
-
}
|
|
666
|
-
|
|
667
|
-
const quick = args.quick !== false; // default true
|
|
668
|
-
const start = Date.now();
|
|
669
|
-
|
|
670
|
-
try {
|
|
671
|
-
const summary = await benchmark.runBenchmark({
|
|
672
|
-
quick,
|
|
673
|
-
models: args.models || undefined,
|
|
674
|
-
onProgress: () => {}, // silent
|
|
675
|
-
});
|
|
676
|
-
|
|
677
|
-
const elapsed = ((Date.now() - start) / 1000).toFixed(1);
|
|
678
|
-
|
|
679
|
-
// Extract the key metrics for autoresearch
|
|
680
|
-
const results = summary.map((s) => ({
|
|
681
|
-
model: s.model,
|
|
682
|
-
score: s.score,
|
|
683
|
-
categoryScores: s.categoryScores || {},
|
|
684
|
-
toolCallRate: s.toolCallRate,
|
|
685
|
-
correctRate: s.correctRate,
|
|
686
|
-
validArgsRate: s.validArgsRate,
|
|
687
|
-
avgLatency: s.avgLatency,
|
|
688
|
-
}));
|
|
689
|
-
|
|
690
|
-
// Compute aggregate score across all models
|
|
691
|
-
const avgScore =
|
|
692
|
-
results.length > 0
|
|
693
|
-
? Math.round(
|
|
694
|
-
(results.reduce((a, r) => a + r.score, 0) / results.length) *
|
|
695
|
-
10,
|
|
696
|
-
) / 10
|
|
697
|
-
: 0;
|
|
698
|
-
|
|
699
|
-
// Find weakest category across all models
|
|
700
|
-
const categoryTotals = {};
|
|
701
|
-
const categoryCounts = {};
|
|
702
|
-
for (const r of results) {
|
|
703
|
-
for (const [cat, score] of Object.entries(r.categoryScores)) {
|
|
704
|
-
categoryTotals[cat] = (categoryTotals[cat] || 0) + score;
|
|
705
|
-
categoryCounts[cat] = (categoryCounts[cat] || 0) + 1;
|
|
706
|
-
}
|
|
707
|
-
}
|
|
708
|
-
const categoryAvgs = {};
|
|
709
|
-
for (const cat of Object.keys(categoryTotals)) {
|
|
710
|
-
categoryAvgs[cat] =
|
|
711
|
-
Math.round((categoryTotals[cat] / categoryCounts[cat]) * 10) / 10;
|
|
712
|
-
}
|
|
713
|
-
|
|
714
|
-
// Sort categories by score to find weakest
|
|
715
|
-
const sortedCategories = Object.entries(categoryAvgs)
|
|
716
|
-
.sort((a, b) => a[1] - b[1]);
|
|
717
|
-
|
|
718
|
-
const weakestCategory =
|
|
719
|
-
sortedCategories.length > 0 ? sortedCategories[0] : null;
|
|
720
|
-
|
|
721
|
-
return JSON.stringify({
|
|
722
|
-
status: "success",
|
|
723
|
-
quick,
|
|
724
|
-
elapsed_seconds: parseFloat(elapsed),
|
|
725
|
-
models_tested: results.length,
|
|
726
|
-
average_score: avgScore,
|
|
727
|
-
category_averages: categoryAvgs,
|
|
728
|
-
weakest_category: weakestCategory
|
|
729
|
-
? { name: weakestCategory[0], score: weakestCategory[1] }
|
|
730
|
-
: null,
|
|
731
|
-
per_model: results,
|
|
732
|
-
});
|
|
733
|
-
} catch (err) {
|
|
734
|
-
return JSON.stringify({
|
|
735
|
-
status: "benchmark_failed",
|
|
736
|
-
error: err.message,
|
|
737
|
-
elapsed_seconds:
|
|
738
|
-
parseFloat(((Date.now() - start) / 1000).toFixed(1)),
|
|
739
|
-
});
|
|
740
|
-
}
|
|
741
|
-
},
|
|
742
|
-
},
|
|
743
|
-
{
|
|
744
|
-
type: "function",
|
|
745
|
-
function: {
|
|
746
|
-
name: "ar_log_experiment",
|
|
747
|
-
description:
|
|
748
|
-
"Log the result of an experiment. Call this after running the experiment " +
|
|
749
|
-
"to record whether the change was an improvement. This builds the experiment history.",
|
|
750
|
-
parameters: {
|
|
751
|
-
type: "object",
|
|
752
|
-
properties: {
|
|
753
|
-
description: {
|
|
754
|
-
type: "string",
|
|
755
|
-
description:
|
|
756
|
-
"What was changed (e.g. 'replaced Array.map with for loop in parser')",
|
|
757
|
-
},
|
|
758
|
-
metric: {
|
|
759
|
-
type: "number",
|
|
760
|
-
description:
|
|
761
|
-
"The measured metric value (e.g. test runtime in seconds, bundle size in KB, score). Use 0 for crashes.",
|
|
762
|
-
},
|
|
763
|
-
metric_name: {
|
|
764
|
-
type: "string",
|
|
765
|
-
description:
|
|
766
|
-
'Name of the metric (e.g. "runtime_seconds", "bundle_size_kb", "val_bpb")',
|
|
767
|
-
},
|
|
768
|
-
kept: {
|
|
769
|
-
type: "boolean",
|
|
770
|
-
description:
|
|
771
|
-
"Whether you decided to keep (true) or revert (false) this change",
|
|
772
|
-
},
|
|
773
|
-
status: {
|
|
774
|
-
type: "string",
|
|
775
|
-
enum: ["keep", "discard", "crash"],
|
|
776
|
-
description:
|
|
777
|
-
"Experiment outcome: 'keep' if metric improved, 'discard' if worse, 'crash' if it failed to run",
|
|
778
|
-
},
|
|
779
|
-
peak_memory_mb: {
|
|
780
|
-
type: "number",
|
|
781
|
-
description:
|
|
782
|
-
"Peak memory usage in MB during the experiment (if available)",
|
|
783
|
-
},
|
|
784
|
-
complexity_impact: {
|
|
785
|
-
type: "string",
|
|
786
|
-
enum: ["simpler", "neutral", "complex"],
|
|
787
|
-
description:
|
|
788
|
-
"How this change affects code complexity: 'simpler' (removed code), 'neutral', or 'complex' (added code)",
|
|
789
|
-
},
|
|
790
|
-
notes: {
|
|
791
|
-
type: "string",
|
|
792
|
-
description:
|
|
793
|
-
"Additional observations — include complexity assessment and crash triage info",
|
|
794
|
-
},
|
|
795
|
-
},
|
|
796
|
-
required: ["description", "metric", "kept"],
|
|
797
|
-
},
|
|
798
|
-
},
|
|
799
|
-
execute: async (args) => {
|
|
800
|
-
loadExperiments();
|
|
801
|
-
const commit = gitHash();
|
|
802
|
-
const entry = {
|
|
803
|
-
id: experiments.length + 1,
|
|
804
|
-
timestamp: new Date().toISOString(),
|
|
805
|
-
commit,
|
|
806
|
-
description: args.description,
|
|
807
|
-
metric: args.metric,
|
|
808
|
-
metric_name: args.metric_name || "metric",
|
|
809
|
-
kept: args.kept,
|
|
810
|
-
status: args.status || (args.kept ? "keep" : "discard"),
|
|
811
|
-
peak_memory_mb: args.peak_memory_mb ?? null,
|
|
812
|
-
complexity_impact: args.complexity_impact || "neutral",
|
|
813
|
-
notes: args.notes || "",
|
|
814
|
-
};
|
|
815
|
-
experiments.push(entry);
|
|
816
|
-
saveExperiments();
|
|
817
|
-
|
|
818
|
-
const trend =
|
|
819
|
-
experiments.length >= 2
|
|
820
|
-
? `Previous: ${experiments[experiments.length - 2].metric}, Current: ${args.metric}`
|
|
821
|
-
: "First experiment — baseline established";
|
|
822
|
-
|
|
823
|
-
return JSON.stringify({
|
|
824
|
-
status: "logged",
|
|
825
|
-
experiment_number: entry.id,
|
|
826
|
-
total_experiments: experiments.length,
|
|
827
|
-
kept_count: experiments.filter((e) => e.kept).length,
|
|
828
|
-
reverted_count: experiments.filter((e) => !e.kept).length,
|
|
829
|
-
crash_count: experiments.filter((e) => e.status === "crash").length,
|
|
830
|
-
trend,
|
|
831
|
-
});
|
|
832
|
-
},
|
|
833
|
-
},
|
|
834
|
-
{
|
|
835
|
-
type: "function",
|
|
836
|
-
function: {
|
|
837
|
-
name: "ar_revert",
|
|
838
|
-
description:
|
|
839
|
-
"Revert to the last checkpoint using git reset. " +
|
|
840
|
-
"Unlike git checkout, this moves the branch pointer back so only " +
|
|
841
|
-
"successful experiments remain in git history. " +
|
|
842
|
-
"Use this when an experiment made things worse or crashed.",
|
|
843
|
-
parameters: {
|
|
844
|
-
type: "object",
|
|
845
|
-
properties: {
|
|
846
|
-
reason: {
|
|
847
|
-
type: "string",
|
|
848
|
-
description:
|
|
849
|
-
"Why reverting (e.g. 'metric worsened from 2.3s to 4.1s')",
|
|
850
|
-
},
|
|
851
|
-
},
|
|
852
|
-
required: ["reason"],
|
|
853
|
-
},
|
|
854
|
-
},
|
|
855
|
-
execute: async (args) => {
|
|
856
|
-
try {
|
|
857
|
-
// Use git reset --hard HEAD~1 to remove the failed experiment commit
|
|
858
|
-
// and move the branch pointer back (clean history, only successes)
|
|
859
|
-
const currentHash = gitHash();
|
|
860
|
-
|
|
861
|
-
// Check if there's a commit to reset to
|
|
862
|
-
try {
|
|
863
|
-
execSync("git log --oneline -2", {
|
|
864
|
-
cwd: process.cwd(),
|
|
865
|
-
encoding: "utf-8",
|
|
866
|
-
stdio: ["pipe", "pipe", "pipe"],
|
|
867
|
-
});
|
|
868
|
-
} catch {
|
|
869
|
-
// Fallback: just clean working tree
|
|
870
|
-
execSync("git checkout -- .", {
|
|
871
|
-
cwd: process.cwd(),
|
|
872
|
-
stdio: "pipe",
|
|
873
|
-
});
|
|
874
|
-
execSync("git clean -fd", {
|
|
875
|
-
cwd: process.cwd(),
|
|
876
|
-
stdio: "pipe",
|
|
877
|
-
});
|
|
878
|
-
return JSON.stringify({
|
|
879
|
-
status: "reverted",
|
|
880
|
-
method: "checkout",
|
|
881
|
-
reason: args.reason,
|
|
882
|
-
});
|
|
883
|
-
}
|
|
884
|
-
|
|
885
|
-
// Reset to before the experiment commit
|
|
886
|
-
execSync("git reset --hard HEAD~1", {
|
|
887
|
-
cwd: process.cwd(),
|
|
888
|
-
stdio: "pipe",
|
|
889
|
-
});
|
|
890
|
-
// Also clean any untracked files
|
|
891
|
-
execSync("git clean -fd", {
|
|
892
|
-
cwd: process.cwd(),
|
|
893
|
-
stdio: "pipe",
|
|
894
|
-
});
|
|
895
|
-
|
|
896
|
-
const newHash = gitHash();
|
|
897
|
-
|
|
898
|
-
return JSON.stringify({
|
|
899
|
-
status: "reverted",
|
|
900
|
-
method: "reset",
|
|
901
|
-
reverted_from: currentHash,
|
|
902
|
-
reverted_to: newHash,
|
|
903
|
-
reason: args.reason,
|
|
904
|
-
note: "Branch pointer moved back — failed experiment removed from history.",
|
|
905
|
-
});
|
|
906
|
-
} catch (err) {
|
|
907
|
-
// Fallback to checkout if reset fails
|
|
908
|
-
try {
|
|
909
|
-
execSync("git checkout -- .", {
|
|
910
|
-
cwd: process.cwd(),
|
|
911
|
-
stdio: "pipe",
|
|
912
|
-
});
|
|
913
|
-
execSync("git clean -fd", {
|
|
914
|
-
cwd: process.cwd(),
|
|
915
|
-
stdio: "pipe",
|
|
916
|
-
});
|
|
917
|
-
return JSON.stringify({
|
|
918
|
-
status: "reverted",
|
|
919
|
-
method: "checkout_fallback",
|
|
920
|
-
reason: args.reason,
|
|
921
|
-
note: "git reset failed, fell back to checkout. Commit may remain in history.",
|
|
922
|
-
});
|
|
923
|
-
} catch (fallbackErr) {
|
|
924
|
-
return JSON.stringify({
|
|
925
|
-
status: "revert_failed",
|
|
926
|
-
error: fallbackErr.message,
|
|
927
|
-
note: "Manual cleanup may be needed. Check git status.",
|
|
928
|
-
});
|
|
929
|
-
}
|
|
930
|
-
}
|
|
931
|
-
},
|
|
932
|
-
},
|
|
933
|
-
{
|
|
934
|
-
type: "function",
|
|
935
|
-
function: {
|
|
936
|
-
name: "ar_history",
|
|
937
|
-
description:
|
|
938
|
-
"Get the full experiment history as JSON for analysis. " +
|
|
939
|
-
"Use this to review past experiments and identify patterns.",
|
|
940
|
-
parameters: {
|
|
941
|
-
type: "object",
|
|
942
|
-
properties: {},
|
|
943
|
-
},
|
|
944
|
-
},
|
|
945
|
-
execute: async () => {
|
|
946
|
-
loadExperiments();
|
|
947
|
-
const kept = experiments.filter((e) => e.kept);
|
|
948
|
-
const reverted = experiments.filter((e) => !e.kept);
|
|
949
|
-
const crashes = experiments.filter((e) => e.status === "crash");
|
|
950
|
-
|
|
951
|
-
let bestMetric = null;
|
|
952
|
-
let worstMetric = null;
|
|
953
|
-
for (const e of experiments) {
|
|
954
|
-
if (e.metric != null && e.status !== "crash") {
|
|
955
|
-
if (bestMetric === null || e.metric < bestMetric)
|
|
956
|
-
bestMetric = e.metric;
|
|
957
|
-
if (worstMetric === null || e.metric > worstMetric)
|
|
958
|
-
worstMetric = e.metric;
|
|
959
|
-
}
|
|
960
|
-
}
|
|
961
|
-
|
|
962
|
-
return JSON.stringify({
|
|
963
|
-
total: experiments.length,
|
|
964
|
-
kept: kept.length,
|
|
965
|
-
reverted: reverted.length,
|
|
966
|
-
crashes: crashes.length,
|
|
967
|
-
best_metric: bestMetric,
|
|
968
|
-
worst_metric: worstMetric,
|
|
969
|
-
branch: gitBranch(),
|
|
970
|
-
experiments: experiments.slice(-20), // Last 20
|
|
971
|
-
});
|
|
972
|
-
},
|
|
973
|
-
},
|
|
974
|
-
],
|
|
975
|
-
};
|