luckerr 0.41.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +267 -0
- package/README.zh-CN.md +237 -0
- package/dashboard/app.css +3022 -0
- package/dashboard/dist/app.js +30137 -0
- package/dashboard/dist/app.js.map +1 -0
- package/dashboard/dist/vendor-hljs.css +10 -0
- package/dashboard/dist/vendor-uplot.css +1 -0
- package/dashboard/index.html +19 -0
- package/data/deepseek-tokenizer.json.gz +0 -0
- package/dist/cli/acp-EOOAI4F5.js +712 -0
- package/dist/cli/acp-EOOAI4F5.js.map +1 -0
- package/dist/cli/chat-7J6GJXL2.js +51 -0
- package/dist/cli/chat-7J6GJXL2.js.map +1 -0
- package/dist/cli/chunk-2425HK6U.js +54 -0
- package/dist/cli/chunk-2425HK6U.js.map +1 -0
- package/dist/cli/chunk-25T6CVUP.js +172 -0
- package/dist/cli/chunk-25T6CVUP.js.map +1 -0
- package/dist/cli/chunk-2UQP6H6T.js +31 -0
- package/dist/cli/chunk-2UQP6H6T.js.map +1 -0
- package/dist/cli/chunk-56OAJILV.js +47 -0
- package/dist/cli/chunk-56OAJILV.js.map +1 -0
- package/dist/cli/chunk-5FTI4KXH.js +150 -0
- package/dist/cli/chunk-5FTI4KXH.js.map +1 -0
- package/dist/cli/chunk-5TWQD73O.js +2846 -0
- package/dist/cli/chunk-5TWQD73O.js.map +1 -0
- package/dist/cli/chunk-653BOCMK.js +40 -0
- package/dist/cli/chunk-653BOCMK.js.map +1 -0
- package/dist/cli/chunk-6ALJTWWQ.js +2663 -0
- package/dist/cli/chunk-6ALJTWWQ.js.map +1 -0
- package/dist/cli/chunk-6DRKA2IL.js +341 -0
- package/dist/cli/chunk-6DRKA2IL.js.map +1 -0
- package/dist/cli/chunk-6LV63NJV.js +634 -0
- package/dist/cli/chunk-6LV63NJV.js.map +1 -0
- package/dist/cli/chunk-74EX7SUH.js +25293 -0
- package/dist/cli/chunk-74EX7SUH.js.map +1 -0
- package/dist/cli/chunk-74U5RKTX.js +60611 -0
- package/dist/cli/chunk-74U5RKTX.js.map +1 -0
- package/dist/cli/chunk-ANJSUESV.js +143 -0
- package/dist/cli/chunk-ANJSUESV.js.map +1 -0
- package/dist/cli/chunk-DB2Z3DKZ.js +54 -0
- package/dist/cli/chunk-DB2Z3DKZ.js.map +1 -0
- package/dist/cli/chunk-DDIH3ZAA.js +400 -0
- package/dist/cli/chunk-DDIH3ZAA.js.map +1 -0
- package/dist/cli/chunk-ELN3Z3B2.js +621 -0
- package/dist/cli/chunk-ELN3Z3B2.js.map +1 -0
- package/dist/cli/chunk-F6BSQJGV.js +200 -0
- package/dist/cli/chunk-F6BSQJGV.js.map +1 -0
- package/dist/cli/chunk-FET2UAG5.js +246 -0
- package/dist/cli/chunk-FET2UAG5.js.map +1 -0
- package/dist/cli/chunk-FFJ342IJ.js +190 -0
- package/dist/cli/chunk-FFJ342IJ.js.map +1 -0
- package/dist/cli/chunk-GB3247B6.js +130 -0
- package/dist/cli/chunk-GB3247B6.js.map +1 -0
- package/dist/cli/chunk-HC2J4U3G.js +373 -0
- package/dist/cli/chunk-HC2J4U3G.js.map +1 -0
- package/dist/cli/chunk-HRUZAIHQ.js +42 -0
- package/dist/cli/chunk-HRUZAIHQ.js.map +1 -0
- package/dist/cli/chunk-J3ZJFUDL.js +308 -0
- package/dist/cli/chunk-J3ZJFUDL.js.map +1 -0
- package/dist/cli/chunk-J5XJHLWM.js +55 -0
- package/dist/cli/chunk-J5XJHLWM.js.map +1 -0
- package/dist/cli/chunk-JFGLMRZ6.js +160 -0
- package/dist/cli/chunk-JFGLMRZ6.js.map +1 -0
- package/dist/cli/chunk-JMBMLOBP.js +26 -0
- package/dist/cli/chunk-JMBMLOBP.js.map +1 -0
- package/dist/cli/chunk-JMWHXZEL.js +551 -0
- package/dist/cli/chunk-JMWHXZEL.js.map +1 -0
- package/dist/cli/chunk-KEQGPJBO.js +209 -0
- package/dist/cli/chunk-KEQGPJBO.js.map +1 -0
- package/dist/cli/chunk-M4K6U37F.js +232 -0
- package/dist/cli/chunk-M4K6U37F.js.map +1 -0
- package/dist/cli/chunk-MIJI2WMN.js +95 -0
- package/dist/cli/chunk-MIJI2WMN.js.map +1 -0
- package/dist/cli/chunk-MPAO3JNR.js +128 -0
- package/dist/cli/chunk-MPAO3JNR.js.map +1 -0
- package/dist/cli/chunk-PZOFBEDC.js +873 -0
- package/dist/cli/chunk-PZOFBEDC.js.map +1 -0
- package/dist/cli/chunk-RAILYQLN.js +46 -0
- package/dist/cli/chunk-RAILYQLN.js.map +1 -0
- package/dist/cli/chunk-RR35VQVT.js +90 -0
- package/dist/cli/chunk-RR35VQVT.js.map +1 -0
- package/dist/cli/chunk-RRA7VPW4.js +417 -0
- package/dist/cli/chunk-RRA7VPW4.js.map +1 -0
- package/dist/cli/chunk-RU36QVN3.js +452 -0
- package/dist/cli/chunk-RU36QVN3.js.map +1 -0
- package/dist/cli/chunk-RUBIINXR.js +1819 -0
- package/dist/cli/chunk-RUBIINXR.js.map +1 -0
- package/dist/cli/chunk-S4XVGLRW.js +499 -0
- package/dist/cli/chunk-S4XVGLRW.js.map +1 -0
- package/dist/cli/chunk-TUK7OWJA.js +51 -0
- package/dist/cli/chunk-TUK7OWJA.js.map +1 -0
- package/dist/cli/chunk-VALDDV76.js +580 -0
- package/dist/cli/chunk-VALDDV76.js.map +1 -0
- package/dist/cli/chunk-WQOGPYGN.js +11390 -0
- package/dist/cli/chunk-WQOGPYGN.js.map +1 -0
- package/dist/cli/chunk-WREKDFXT.js +34320 -0
- package/dist/cli/chunk-WREKDFXT.js.map +1 -0
- package/dist/cli/chunk-Y7XQU2EL.js +270 -0
- package/dist/cli/chunk-Y7XQU2EL.js.map +1 -0
- package/dist/cli/chunk-YBVCZJU4.js +54 -0
- package/dist/cli/chunk-YBVCZJU4.js.map +1 -0
- package/dist/cli/chunk-YLIHDXUQ.js +749 -0
- package/dist/cli/chunk-YLIHDXUQ.js.map +1 -0
- package/dist/cli/chunk-YV5XXFD7.js +767 -0
- package/dist/cli/chunk-YV5XXFD7.js.map +1 -0
- package/dist/cli/chunk-ZRCNIYRQ.js +101 -0
- package/dist/cli/chunk-ZRCNIYRQ.js.map +1 -0
- package/dist/cli/code-CRKVCMFZ.js +155 -0
- package/dist/cli/code-CRKVCMFZ.js.map +1 -0
- package/dist/cli/commands-QLMD3T7B.js +356 -0
- package/dist/cli/commands-QLMD3T7B.js.map +1 -0
- package/dist/cli/commit-53PP32NC.js +293 -0
- package/dist/cli/commit-53PP32NC.js.map +1 -0
- package/dist/cli/desktop-R6W5CLJ5.js +1046 -0
- package/dist/cli/desktop-R6W5CLJ5.js.map +1 -0
- package/dist/cli/devtools-YECO25QO.js +3719 -0
- package/dist/cli/devtools-YECO25QO.js.map +1 -0
- package/dist/cli/diff-LYNRCJZE.js +166 -0
- package/dist/cli/diff-LYNRCJZE.js.map +1 -0
- package/dist/cli/doctor-5IBP4R5J.js +28 -0
- package/dist/cli/doctor-5IBP4R5J.js.map +1 -0
- package/dist/cli/events-QN6KLN2V.js +340 -0
- package/dist/cli/events-QN6KLN2V.js.map +1 -0
- package/dist/cli/index.js +3500 -0
- package/dist/cli/index.js.map +1 -0
- package/dist/cli/mcp-FGKEH7RG.js +277 -0
- package/dist/cli/mcp-FGKEH7RG.js.map +1 -0
- package/dist/cli/mcp-browse-YCND4NWT.js +178 -0
- package/dist/cli/mcp-browse-YCND4NWT.js.map +1 -0
- package/dist/cli/mcp-inspect-V34J3VX5.js +143 -0
- package/dist/cli/mcp-inspect-V34J3VX5.js.map +1 -0
- package/dist/cli/package.json +3 -0
- package/dist/cli/prompt-I775PNKT.js +16 -0
- package/dist/cli/prompt-I775PNKT.js.map +1 -0
- package/dist/cli/prune-sessions-KGIIYD3P.js +44 -0
- package/dist/cli/prune-sessions-KGIIYD3P.js.map +1 -0
- package/dist/cli/replay-RDXLUAOE.js +292 -0
- package/dist/cli/replay-RDXLUAOE.js.map +1 -0
- package/dist/cli/run-RCAC2RYW.js +223 -0
- package/dist/cli/run-RCAC2RYW.js.map +1 -0
- package/dist/cli/server-FFU6TLYJ.js +3658 -0
- package/dist/cli/server-FFU6TLYJ.js.map +1 -0
- package/dist/cli/sessions-QT26MQAE.js +107 -0
- package/dist/cli/sessions-QT26MQAE.js.map +1 -0
- package/dist/cli/setup-VV4WKXHV.js +767 -0
- package/dist/cli/setup-VV4WKXHV.js.map +1 -0
- package/dist/cli/stats-JVZPQWAN.js +15 -0
- package/dist/cli/stats-JVZPQWAN.js.map +1 -0
- package/dist/cli/update-KYI3OVJP.js +15 -0
- package/dist/cli/update-KYI3OVJP.js.map +1 -0
- package/dist/cli/version-ANYORXTI.js +34 -0
- package/dist/cli/version-ANYORXTI.js.map +1 -0
- package/dist/index.d.ts +2557 -0
- package/dist/index.js +15000 -0
- package/dist/index.js.map +1 -0
- package/package.json +106 -0
|
@@ -0,0 +1,580 @@
|
|
|
1
|
+
#!/usr/bin/env node
// CJS-interop shim: the bundled ESM output may still reference `require`,
// so synthesize one from this module's URL when it is absent.
import { createRequire as __cr } from 'node:module'; if (typeof globalThis.require === 'undefined') { globalThis.require = __cr(import.meta.url); }

// src/memory/project.ts
import { existsSync, readFileSync, statSync } from "fs";
import { basename, join } from "path";
// Default file name used when scaffolding a brand-new project-memory file.
var PROJECT_MEMORY_FILE = "LUCKERR.md";
// Recognized memory file names, checked in this priority order.
var PROJECT_MEMORY_FILES = ["LUCKERR.md", "AGENTS.md", "AGENT.md"];
// Hard cap (8000 chars) on how much memory content is injected into the prompt.
var PROJECT_MEMORY_MAX_CHARS = 8e3;
// Marker files whose presence suggests the repo targets a different agent platform.
var FOREIGN_PLATFORM_FILE_MARKERS = ["SOUL.md", "PERSONA.md"];
|
|
11
|
+
/**
 * Scan `rootDir` for artifacts that mark a different agent platform:
 * known marker files, or a `skills/` + `memories/` directory pair.
 * Returns the list of marker descriptions found, or `null` when none match.
 */
function detectForeignAgentPlatform(rootDir) {
  const found = FOREIGN_PLATFORM_FILE_MARKERS.filter(
    (marker) => existsSync(join(rootDir, marker))
  );
  const hasSkillsDir = isDir(join(rootDir, "skills"));
  const hasMemoriesDir = isDir(join(rootDir, "memories"));
  if (hasSkillsDir && hasMemoriesDir) {
    found.push("skills/ + memories/");
  }
  return found.length === 0 ? null : found;
}
|
|
21
|
+
/** True iff `path` exists and is a directory; never throws. */
function isDir(path) {
  let stats;
  try {
    stats = statSync(path);
  } catch {
    return false;
  }
  return stats.isDirectory();
}
|
|
28
|
+
/**
 * Locate the project's memory file under `rootDir`.
 * Returns the first existing candidate in priority order, else `null`.
 */
function findProjectMemoryPath(rootDir) {
  const candidates = PROJECT_MEMORY_FILES.map((name) => join(rootDir, name));
  const hit = candidates.find((candidate) => existsSync(candidate));
  return hit ?? null;
}
|
|
35
|
+
/**
 * Path where project memory should be written: the already-existing memory
 * file if one is present, otherwise the default file name under `rootDir`.
 */
function resolveProjectMemoryWritePath(rootDir) {
  const existing = findProjectMemoryPath(rootDir);
  if (existing !== null) return existing;
  return join(rootDir, PROJECT_MEMORY_FILE);
}
|
|
38
|
+
/**
 * Load the project memory file, trimmed and capped at PROJECT_MEMORY_MAX_CHARS.
 * Returns `{ path, content, originalChars, truncated }`, or `null` when no
 * file exists, it is unreadable, or it is blank after trimming.
 */
function readProjectMemory(rootDir) {
  const path = findProjectMemoryPath(rootDir);
  if (!path) return null;
  let raw;
  try {
    raw = readFileSync(path, "utf8");
  } catch {
    // Unreadable file (permissions, race with deletion): treat as no memory.
    return null;
  }
  const trimmed = raw.trim();
  if (!trimmed) return null;
  const originalChars = trimmed.length;
  const overflow = originalChars - PROJECT_MEMORY_MAX_CHARS;
  const truncated = overflow > 0;
  let content = trimmed;
  if (truncated) {
    content = `${trimmed.slice(0, PROJECT_MEMORY_MAX_CHARS)}
\u2026 (truncated ${overflow} chars)`;
  }
  return { path, content, originalChars, truncated };
}
|
|
55
|
+
/**
 * Project memory is on by default; only the explicit opt-out values
 * "off", "false", or "0" in LUCKERR_MEMORY disable it.
 */
function memoryEnabled() {
  const OFF_VALUES = ["off", "false", "0"];
  return !OFF_VALUES.includes(process.env.LUCKERR_MEMORY ?? "");
}
|
|
60
|
+
/**
 * Append the project's pinned memory notes to `basePrompt`.
 *
 * No-op when memory is disabled via LUCKERR_MEMORY or when no readable,
 * non-empty memory file exists under `rootDir`.
 *
 * Fix: the section heading previously emitted the literal text "$(unknown)"
 * — `$(...)` is shell syntax, not template-literal interpolation — even
 * though `filename` was computed (and then left unused) for exactly this
 * purpose. Interpolate it with `${filename}` so the prompt names the actual
 * memory file (e.g. "LUCKERR.md" or "AGENTS.md").
 */
function applyProjectMemory(basePrompt, rootDir) {
  if (!memoryEnabled()) return basePrompt;
  const mem = readProjectMemory(rootDir);
  if (!mem) return basePrompt;
  const filename = basename(mem.path);
  return `${basePrompt}

# Project memory (${filename})

The user pinned these notes about this project \u2014 treat them as authoritative context for every turn:

\`\`\`
${mem.content}
\`\`\`
`;
}
|
|
76
|
+
|
|
77
|
+
// src/prompt-fragments.ts
|
|
78
|
+
var TUI_FORMATTING_RULES = `Formatting (rendered in a TUI with a real markdown renderer):
|
|
79
|
+
- Tabular data \u2192 GitHub-Flavored Markdown tables with ASCII pipes (\`| col | col |\` header + \`| --- | --- |\` separator). Never use Unicode box-drawing characters (\u2502 \u2500 \u253C \u250C \u2510 \u2514 \u2518 \u251C \u2524) \u2014 they look intentional but break terminal word-wrap and render as garbled columns at narrow widths.
|
|
80
|
+
- Keep table cells short (one phrase each). If a cell needs a paragraph, use bullets below the table instead.
|
|
81
|
+
- Code, file paths with line ranges, and shell commands \u2192 fenced code blocks (\`\`\`).
|
|
82
|
+
- Do NOT draw decorative frames around content with \`\u250C\u2500\u2500\u2510 \u2502 \u2514\u2500\u2500\u2518\` characters. The renderer adds its own borders; extra ASCII art adds noise and shatters at narrow widths.
|
|
83
|
+
- For flow charts and diagrams: a plain bullet list with \`\u2192\` or \`\u2193\` between steps. Don't try to draw boxes-and-arrows in ASCII; it never survives word-wrap.`;
|
|
84
|
+
/**
 * Build the prompt fragment describing when/how the model may escalate a
 * turn to the pro tier via the <<<NEEDS_PRO>>> marker.
 * The pro model itself gets a short no-op variant, since there is no higher
 * tier to escalate to.
 */
function escalationContract(modelId) {
  if (modelId === "deepseek-v4-pro") {
    return `Cost-aware escalation note: you are running on \`${modelId}\` \u2014 the escalation tier. There is no higher tier to escalate to, so the \`<<<NEEDS_PRO>>>\` marker is a no-op for you; deliver the strongest answer you can directly. If asked which model you are, answer \`${modelId}\`.`;
  }
  return `Cost-aware escalation (you are running on \`${modelId}\`):

If a task CLEARLY exceeds what this tier can do well \u2014 complex cross-file architecture refactors, subtle concurrency / security / correctness invariants you can't resolve with confidence, or a design trade-off you'd be guessing at \u2014 output the marker as the FIRST line of your response (nothing before it, not even whitespace on a separate line). This aborts the current call and retries this turn on deepseek-v4-pro, one shot.

Two accepted forms:
- \`<<<NEEDS_PRO>>>\` \u2014 bare marker, no rationale.
- \`<<<NEEDS_PRO: <one-sentence reason>>>>\` \u2014 preferred. The reason text appears in the user-visible warning ("\u21E7 flash requested escalation \u2014 <your reason>"), so they understand WHY a more expensive call is happening. Keep it under ~150 chars, no newlines, no nested \`>\` characters. Examples: \`<<<NEEDS_PRO: cross-file refactor across 6 modules with circular imports>>>\` or \`<<<NEEDS_PRO: subtle session-token race; flash would likely miss the locking invariant>>>\`.

Do NOT emit any other content in the same response when you request escalation. Use this sparingly: normal tasks \u2014 reading files, small edits, clear bug fixes, straightforward feature additions \u2014 stay on this tier. Request escalation ONLY when you would otherwise produce a guess or a visibly-mediocre answer. If in doubt, attempt the task here first; the system also escalates automatically if you hit 3+ repair / SEARCH-mismatch errors in a single turn (the user sees a typed breakdown). If asked which model you are, answer \`${modelId}\`.`;
}
// Default contract baked in for the flash tier.
var ESCALATION_CONTRACT = escalationContract("deepseek-v4-flash");
|
|
99
|
+
var NEGATIVE_CLAIM_RULE = `Negative claims ("X is missing", "Y isn't implemented", "there's no Z") are the #1 hallucination shape. They feel safe to write because no citation seems possible \u2014 but that's exactly why you must NOT write them on instinct.
|
|
100
|
+
|
|
101
|
+
If you have a search tool (\`search_content\`, \`grep\`, web search), call it FIRST before asserting absence:
|
|
102
|
+
- Returns matches \u2192 you were wrong; correct yourself and cite the matches.
|
|
103
|
+
- Returns nothing \u2192 state the absence WITH the search query as evidence: \`No callers of \\\`foo()\\\` found (search_content "foo").\`
|
|
104
|
+
|
|
105
|
+
If you have no search tool, qualify hard: "I haven't verified \u2014 this is a guess." Never assert absence with fake authority.`;
|
|
106
|
+
|
|
107
|
+
// src/skills.ts
import { existsSync as existsSync2, mkdirSync, readFileSync as readFileSync2, readdirSync, statSync as statSync2, writeFileSync } from "fs";
import { homedir } from "os";
import { dirname, join as join2, resolve } from "path";

// src/frontmatter.ts
// Matches a "key: value" frontmatter line; key must start with a letter or "_".
var KEY_RE = /^([a-zA-Z_][a-zA-Z0-9_-]*):\s*(.*)$/;
// Keys rejected to block prototype pollution via attacker-authored frontmatter.
var FORBIDDEN_KEYS = /* @__PURE__ */ new Set(["__proto__", "constructor", "prototype"]);
|
|
115
|
+
/** Remove one pair of matching surrounding quotes (single or double), if present. */
function stripQuotes(s) {
  if (s.length < 2) return s;
  const wrappedIn = (q) => s.startsWith(q) && s.endsWith(q);
  return wrappedIn('"') || wrappedIn("'") ? s.slice(1, -1) : s;
}
|
|
124
|
+
/**
 * Minimal frontmatter parser: extracts `key: value` pairs from a leading
 * `---` ... `---` block and returns `{ data, body }`.
 *
 * - A value may continue onto following non-key lines; continuations are
 *   joined with single spaces. A blank line ends a continuation run.
 * - `__proto__` / `constructor` / `prototype` keys are dropped (see
 *   FORBIDDEN_KEYS) to prevent prototype pollution from untrusted files.
 * - When no complete frontmatter block exists, the whole input becomes
 *   `body` and `data` is empty.
 */
function parseFrontmatter(raw) {
  // Strip a UTF-8 BOM (U+FEFF) so "---" on the first line is still detected.
  const stripped = raw.charCodeAt(0) === 65279 ? raw.slice(1) : raw;
  const lines = stripped.split(/\r?\n/);
  if (lines[0] !== "---") return { data: {}, body: stripped };
  const end = lines.indexOf("---", 1);
  if (end < 0) return { data: {}, body: stripped };
  const entries = /* @__PURE__ */ new Map();
  let currentKey = null;
  for (let i = 1; i < end; i++) {
    const line = lines[i] ?? "";
    if (line.trim() === "") {
      // Blank line terminates any in-progress multi-line value.
      currentKey = null;
      continue;
    }
    const m = line.match(KEY_RE);
    if (m?.[1] && !FORBIDDEN_KEYS.has(m[1])) {
      currentKey = m[1];
      entries.set(currentKey, (m[2] ?? "").trim());
    } else if (currentKey) {
      // Non-key line while a key is open: treat it as a value continuation.
      const cont = line.trim();
      const prev = entries.get(currentKey) ?? "";
      entries.set(currentKey, prev ? `${prev} ${cont}` : cont);
    }
  }
  // Null-prototype object: lookups can't fall through to Object.prototype.
  const data = /* @__PURE__ */ Object.create(null);
  for (const [k, v] of entries) {
    if (FORBIDDEN_KEYS.has(k)) continue;
    data[k] = stripQuotes(v);
  }
  return {
    data,
    // Body is everything after the closing "---", minus leading blank lines.
    body: lines.slice(end + 1).join("\n").replace(/^\n+/, "")
  };
}
|
|
158
|
+
|
|
159
|
+
// src/skills.ts
|
|
160
|
+
// Directory name (under ~/.luckerr or <project>/.luckerr) that holds skills.
var SKILLS_DIRNAME = "skills";
// File name used by folder-style skills (skills/<name>/SKILL.md).
var SKILL_FILE = "SKILL.md";
// Cap (4000 chars) on the rendered skills index injected into the prompt.
var SKILLS_INDEX_MAX_CHARS = 4e3;
// Skill names: alphanumeric start, then up to 63 chars of [a-zA-Z0-9._-].
var VALID_SKILL_NAME = /^[a-zA-Z0-9][a-zA-Z0-9._-]{0,63}$/;
|
|
164
|
+
/**
 * Validate that skill frontmatter carries a non-empty "description:".
 * Returns `{ ok: true }` on success, or `{ error }` explaining the problem.
 */
function validateSkillFrontmatter(raw) {
  const { data } = parseFrontmatter(raw);
  const description = (data.description ?? "").trim();
  if (description.length > 0) return { ok: true };
  return {
    error: `skill frontmatter is missing a non-empty "description:" line \u2014 without it the skill will not appear in the model's skills index`
  };
}
|
|
174
|
+
// True iff `name` matches the VALID_SKILL_NAME pattern above (safe to use
// as a directory/file stem — no path separators or traversal possible).
function isValidSkillName(name) {
  return VALID_SKILL_NAME.test(name);
}
|
|
177
|
+
/**
 * Parse a comma-separated "allowed-tools" frontmatter value into a frozen
 * array of tool names. Returns `undefined` when the value is absent or no
 * non-empty names remain after trimming.
 */
function parseAllowedTools(raw) {
  if (raw === void 0) return void 0;
  const names = [];
  for (const piece of raw.split(",")) {
    const trimmed = piece.trim();
    if (trimmed) names.push(trimmed);
  }
  if (names.length === 0) return void 0;
  return Object.freeze(names);
}
|
|
182
|
+
// Clamp bounds for a skill's per-invocation tool-call budget.
var SKILL_MAX_ITERS_MIN = 1;
var SKILL_MAX_ITERS_MAX = 32;
/**
 * Parse the "max-iters" frontmatter value. Returns `undefined` when absent
 * or non-numeric, otherwise the parsed integer clamped to [1, 32].
 */
function parseMaxToolIters(raw) {
  if (raw === void 0) return void 0;
  const parsed = Number.parseInt(raw.trim(), 10);
  // parseInt yields NaN or a finite integer, never Infinity.
  if (Number.isNaN(parsed)) return void 0;
  if (parsed < SKILL_MAX_ITERS_MIN) return SKILL_MAX_ITERS_MIN;
  if (parsed > SKILL_MAX_ITERS_MAX) return SKILL_MAX_ITERS_MAX;
  return parsed;
}
|
|
190
|
+
// Discovers, reads, and scaffolds skills across three layers:
// project (<root>/.luckerr/skills) > global (~/.luckerr/skills) > builtins.
var SkillStore = class {
  homeDir;          // base dir for the global scope (defaults to os.homedir())
  projectRoot;      // resolved workspace root, or undefined when no project scope
  disableBuiltins;  // when true, BUILTIN_SKILLS are excluded from list()/read()
  constructor(opts = {}) {
    this.homeDir = opts.homeDir ?? homedir();
    this.projectRoot = opts.projectRoot ? resolve(opts.projectRoot) : void 0;
    this.disableBuiltins = opts.disableBuiltins === true;
  }
  /** True iff this store was configured with a project root. */
  hasProjectScope() {
    return this.projectRoot !== void 0;
  }
  /** Project scope first so per-repo skill overrides a global with the same name. */
  roots() {
    const out = [];
    if (this.projectRoot) {
      out.push({
        dir: join2(this.projectRoot, ".luckerr", SKILLS_DIRNAME),
        scope: "project"
      });
    }
    out.push({ dir: join2(this.homeDir, ".luckerr", SKILLS_DIRNAME), scope: "global" });
    return out;
  }
  /** Higher-priority root wins on collision (project > global > builtin); sorted for stable prefix hash. */
  list() {
    const byName = /* @__PURE__ */ new Map();
    for (const { dir, scope } of this.roots()) {
      if (!existsSync2(dir)) continue;
      let entries;
      try {
        entries = readdirSync(dir, { withFileTypes: true });
      } catch {
        // Unreadable root (permissions, race): skip it rather than fail.
        continue;
      }
      for (const entry of entries) {
        const skill = this.readEntry(dir, scope, entry);
        if (!skill) continue;
        // First writer wins — earlier (higher-priority) roots keep their entry.
        if (!byName.has(skill.name)) byName.set(skill.name, skill);
      }
    }
    if (!this.disableBuiltins) {
      for (const skill of BUILTIN_SKILLS) {
        if (!byName.has(skill.name)) byName.set(skill.name, skill);
      }
    }
    return [...byName.values()].sort((a, b) => a.name.localeCompare(b.name));
  }
  /** Scaffold a new skill stub at the chosen scope. Refuses to overwrite. */
  create(name, scope) {
    return this.createWithContent(name, scope, skillStubBody(name));
  }
  /** Like `create` but writes caller-supplied file contents instead of the stub — used by the scaffold tool. */
  createWithContent(name, scope, content) {
    if (!isValidSkillName(name)) {
      return { error: `invalid skill name: "${name}" \u2014 use letters, digits, _, -, .` };
    }
    if (scope === "project" && !this.projectRoot) {
      return { error: "project scope requires a workspace \u2014 run from `luckerr code`" };
    }
    const root = scope === "project" ? join2(this.projectRoot ?? "", ".luckerr", SKILLS_DIRNAME) : join2(this.homeDir, ".luckerr", SKILLS_DIRNAME);
    const flat = join2(root, `${name}.md`);
    const folder = join2(root, name, SKILL_FILE);
    // A folder-style skill with this name also counts as "already exists".
    if (existsSync2(folder)) {
      return { error: `skill "${name}" already exists at ${folder}` };
    }
    mkdirSync(dirname(flat), { recursive: true });
    try {
      // "wx" = exclusive create: fails with EEXIST instead of clobbering.
      writeFileSync(flat, content, { encoding: "utf8", flag: "wx" });
    } catch (err) {
      if (err.code === "EEXIST") {
        return { error: `skill "${name}" already exists at ${flat}` };
      }
      throw err;
    }
    return { path: flat };
  }
  /** Resolve one skill by name. Returns `null` if not found or malformed. */
  read(name) {
    // Validating first also guards against path traversal in `name`.
    if (!isValidSkillName(name)) return null;
    for (const { dir, scope } of this.roots()) {
      if (!existsSync2(dir)) continue;
      // Folder-style (skills/<name>/SKILL.md) takes precedence over flat.
      const dirCandidate = join2(dir, name, SKILL_FILE);
      if (existsSync2(dirCandidate) && statSync2(dirCandidate).isFile()) {
        return this.parse(dirCandidate, name, scope);
      }
      const flatCandidate = join2(dir, `${name}.md`);
      if (existsSync2(flatCandidate) && statSync2(flatCandidate).isFile()) {
        return this.parse(flatCandidate, name, scope);
      }
    }
    if (!this.disableBuiltins) {
      for (const skill of BUILTIN_SKILLS) {
        if (skill.name === name) return skill;
      }
    }
    return null;
  }
  // Turn one directory entry into a skill (folder-style or flat .md), or null.
  readEntry(dir, scope, entry) {
    if (entry.isDirectory()) {
      if (!isValidSkillName(entry.name)) return null;
      const file = join2(dir, entry.name, SKILL_FILE);
      if (!existsSync2(file)) return null;
      return this.parse(file, entry.name, scope);
    }
    if (entry.isFile() && entry.name.endsWith(".md")) {
      const stem = entry.name.slice(0, -3);
      if (!isValidSkillName(stem)) return null;
      return this.parse(join2(dir, entry.name), stem, scope);
    }
    return null;
  }
  // Read + parse one skill file into the skill record; null when unreadable.
  parse(path, stem, scope) {
    let raw;
    try {
      raw = readFileSync2(path, "utf8");
    } catch {
      return null;
    }
    const { data, body } = parseFrontmatter(raw);
    // Frontmatter `name:` overrides the file/dir stem when it's valid.
    const name = data.name && isValidSkillName(data.name) ? data.name : stem;
    return {
      name,
      description: (data.description ?? "").trim(),
      body: body.trim(),
      scope,
      path,
      allowedTools: parseAllowedTools(data["allowed-tools"]),
      runAs: parseRunAs(data.runAs),
      // Only deepseek-* model overrides are honored; anything else is ignored.
      model: data.model?.startsWith("deepseek-") ? data.model : void 0,
      maxToolIters: parseMaxToolIters(data["max-iters"])
    };
  }
};
|
|
325
|
+
/**
 * Normalize the "runAs" frontmatter value: only a trimmed "subagent" opts
 * into subagent execution; everything else (including absence) is "inline".
 */
function parseRunAs(raw) {
  if ((raw ?? "").trim() === "subagent") return "subagent";
  return "inline";
}
|
|
328
|
+
// Template contents written when scaffolding a fresh skill file for `name`.
function skillStubBody(name) {
  return `---
name: ${name}
description: One-liner \u2014 what does this skill do?
---

# ${name}

Replace this body with the playbook the model should follow when this skill is invoked.

Tips:
- Reference tools by name (run_command, edit_file, search_content, ...)
- Add \`runAs: subagent\` to frontmatter to spawn an isolated subagent loop
- Add \`allowed-tools: read_file, search_content\` to scope a subagent's tools
- Add \`max-iters: 32\` to raise the subagent's tool-call budget (default 16, max 32)
`;
}
|
|
345
|
+
/**
 * Render one skills-index bullet: "- <name>[ tag] \u2014 <description>".
 * The description is flattened to a single line and clipped so the whole
 * entry stays within ~130 characters including name and subagent tag.
 */
function skillIndexLine(s) {
  const tag = s.runAs === "subagent" ? " [\u{1F9EC} subagent]" : "";
  const flattened = s.description.replace(/\n/g, " ").trim();
  const budget = 130 - s.name.length - tag.length;
  let desc = flattened;
  if (flattened.length > budget) {
    desc = `${flattened.slice(0, Math.max(1, budget - 1))}\u2026`;
  }
  if (!desc) return `- ${s.name}${tag}`;
  return `- ${s.name}${tag} \u2014 ${desc}`;
}
|
|
352
|
+
// Shown in the index for skills whose frontmatter lacks a description.
var MISSING_DESCRIPTION_PLACEHOLDER = '(no description \u2014 frontmatter is missing a "description:" line; tell the user to add one)';
/**
 * Append the rendered skills index to `basePrompt`.
 * No-op when no skills are available; the rendered index is capped at
 * SKILLS_INDEX_MAX_CHARS with an explicit truncation note.
 */
function applySkillsIndex(basePrompt, opts = {}) {
  const store = new SkillStore(opts);
  const skills = store.list();
  if (skills.length === 0) return basePrompt;
  const lines = skills.map(
    // Substitute the placeholder so missing descriptions are visible, not silent.
    (s) => skillIndexLine(s.description ? s : { ...s, description: MISSING_DESCRIPTION_PLACEHOLDER })
  );
  const joined = lines.join("\n");
  const truncated = joined.length > SKILLS_INDEX_MAX_CHARS ? `${joined.slice(0, SKILLS_INDEX_MAX_CHARS)}
\u2026 (truncated ${joined.length - SKILLS_INDEX_MAX_CHARS} chars)` : joined;
  return [
    basePrompt,
    "",
    "# Skills \u2014 playbooks you can invoke",
    "",
    'One-liner index. Each entry is either a built-in or a user-authored playbook. Call `run_skill({ name: "<skill-name>", arguments: "<task>" })` \u2014 the `name` is JUST the skill identifier (e.g. `"explore"`), NOT the `[\u{1F9EC} subagent]` tag that appears after it. Entries tagged `[\u{1F9EC} subagent]` spawn an **isolated subagent** \u2014 its tool calls and reasoning never enter your context, only its final answer does. Use subagent skills for tasks that would otherwise flood your context (deep exploration, multi-step research, anything where you only need the conclusion). Plain skills are inlined: their body becomes a tool result you read and act on directly. The user can also invoke a skill via `/skill <name>`.',
    "",
    "```",
    truncated,
    "```"
  ].join("\n");
}
|
|
375
|
+
var BUILTIN_EXPLORE_BODY = `You are running as an exploration subagent. Your job is to investigate the codebase the parent agent pointed you at, then return one focused, distilled answer.
|
|
376
|
+
|
|
377
|
+
How to operate:
|
|
378
|
+
- Use read_file, search_files, search_content, directory_tree, list_directory, get_file_info as your primary tools. Stay read-only.
|
|
379
|
+
- For "find all places that call / reference / use X" questions, use \`search_content\` (content grep) \u2014 NOT \`search_files\` (which only matches file names). This is the most common subagent mistake; using the wrong tool gives empty results and you waste your iter budget chasing a phantom.
|
|
380
|
+
- Cast a wide net first (search_content for symbol references, directory_tree for structure) to map the territory; then read the 3-10 most relevant files in full.
|
|
381
|
+
- Don't read every file \u2014 be selective. Aim for breadth on the first pass, depth only where the question demands it.
|
|
382
|
+
- Stop exploring as soon as you can answer the question. The parent doesn't see your tool calls, so over-exploration is pure waste.
|
|
383
|
+
|
|
384
|
+
Your final answer:
|
|
385
|
+
- One paragraph (or a few short bullets). Lead with the conclusion.
|
|
386
|
+
- Cite specific file paths + line ranges when they support the answer.
|
|
387
|
+
- If the question can't be answered from what you found, say so plainly and suggest where to look next.
|
|
388
|
+
- No follow-up offers, no "let me know if you need more." The parent will ask again if they need more.
|
|
389
|
+
|
|
390
|
+
${NEGATIVE_CLAIM_RULE}
|
|
391
|
+
|
|
392
|
+
${TUI_FORMATTING_RULES}
|
|
393
|
+
|
|
394
|
+
The 'task' the parent gave you is the question you must answer. Treat any other reading of it as scope creep.`;
|
|
395
|
+
var BUILTIN_RESEARCH_BODY = `You are running as a research subagent. Your job is to gather information from code AND the web, synthesize it, and return one focused conclusion.
|
|
396
|
+
|
|
397
|
+
How to operate:
|
|
398
|
+
- Combine code reading (read_file, search_files) with web tools (web_search, web_fetch) as appropriate to the question.
|
|
399
|
+
- For "how does X work" / "is Y supported" questions: web first to find the canonical reference, then verify against the local code.
|
|
400
|
+
- For "what's our policy on Z" / "where do we use Q": local code first, web only if you need to compare against external standards.
|
|
401
|
+
- Cap yourself at ~10 tool calls. If you can't converge in 10, return what you have plus a note about what's missing.
|
|
402
|
+
|
|
403
|
+
Your final answer:
|
|
404
|
+
- One paragraph (or short bullets). Lead with the conclusion.
|
|
405
|
+
- Cite both code (file:line) AND web sources (URL) when they back the answer.
|
|
406
|
+
- Distinguish "I verified this in code" from "I read this on a docs page" \u2014 the parent will trust the former more.
|
|
407
|
+
- If the answer is uncertain, say so. Don't invent confidence.
|
|
408
|
+
|
|
409
|
+
${NEGATIVE_CLAIM_RULE}
|
|
410
|
+
|
|
411
|
+
${TUI_FORMATTING_RULES}
|
|
412
|
+
|
|
413
|
+
The 'task' the parent gave you is the research question. Stay on it.`;
|
|
414
|
+
var BUILTIN_REVIEW_BODY = `You are running as a code-review subagent. Your job is to inspect the changes the user is about to ship \u2014 usually the current git branch vs its upstream \u2014 and produce a focused review the parent can hand back to the user.
|
|
415
|
+
|
|
416
|
+
How to operate:
|
|
417
|
+
- Default scope: the current branch's diff vs the default branch. If the user's task names a specific commit range or files, honor that instead.
|
|
418
|
+
- Discover scope first: \`run_command git status\`, \`git diff --stat\`, \`git log --oneline\` to see what changed. Then \`git diff\` (or \`git diff <base>...HEAD\`) for the actual hunks.
|
|
419
|
+
- Read the touched files (\`read_file\`) when the diff alone doesn't carry enough context \u2014 function signatures, surrounding invariants, callers.
|
|
420
|
+
- For "any callers depending on this?" questions: \`search_content\` against the symbol BEFORE asserting impact.
|
|
421
|
+
- Stay read-only. Never \`run_command git commit\`, never write files, never propose SEARCH/REPLACE blocks. The parent decides whether to act on your findings.
|
|
422
|
+
- Cap yourself at ~12 tool calls. If the diff is too big to review in one pass, pick the riskiest 2-3 files and say so explicitly.
|
|
423
|
+
|
|
424
|
+
What to look for, in priority order:
|
|
425
|
+
1. **Correctness bugs** \u2014 off-by-one, null/undefined handling, race conditions, wrong sign / wrong operator, edge cases the code doesn't handle.
|
|
426
|
+
2. **Security** \u2014 injection (SQL, shell, path traversal), secrets in code, missing authz checks, unsafe deserialization.
|
|
427
|
+
3. **Behavior changes the diff hides** \u2014 renames that miss callers, removed branches that were load-bearing, error-handling that now swallows what used to surface.
|
|
428
|
+
4. **Tests** \u2014 does the change have tests for the new behavior? Are existing tests still meaningful, or did the change make them tautological?
|
|
429
|
+
5. **Style + consistency** \u2014 only flag deviations that matter (unsafe \`any\`, missing types in TypeScript, inconsistent error shape). Don't pile on cosmetic nits if the substance is clean.
|
|
430
|
+
|
|
431
|
+
Your final answer:
|
|
432
|
+
- Lead with a one-sentence verdict: "ship as-is" / "minor nits, OK to ship after" / "blocking issues, do not ship".
|
|
433
|
+
- Then a short bulleted list of issues, each with: file:line citation + the problem in one sentence + what to change.
|
|
434
|
+
- Group by severity if you have more than 4 items: **Blocking**, **Should-fix**, **Nits**.
|
|
435
|
+
- If everything looks clean, say so plainly. Don't manufacture concerns.
|
|
436
|
+
|
|
437
|
+
${NEGATIVE_CLAIM_RULE}
|
|
438
|
+
|
|
439
|
+
${TUI_FORMATTING_RULES}
|
|
440
|
+
|
|
441
|
+
The 'task' the parent gave you describes WHAT to review (a branch, a file set, or "the pending changes"). Stay on it; don't redesign the feature.`;
|
|
442
|
+
var BUILTIN_SECURITY_REVIEW_BODY = `You are running as a security-review subagent. Your job is to inspect the changes the user is about to ship \u2014 usually the current git branch vs its upstream \u2014 through a security lens specifically, and report exploitable issues.
|
|
443
|
+
|
|
444
|
+
How to operate:
|
|
445
|
+
- Default scope: the current branch's diff vs the default branch. If the user names a different range or a directory, honor that.
|
|
446
|
+
- Discover scope first: \`git status\`, \`git diff --stat\`, \`git diff <base>...HEAD\`. Read touched files (\`read_file\`) when the diff alone doesn't carry security context \u2014 auth checks, input validation, the actual handler that calls into the changed function.
|
|
447
|
+
- Use \`search_content\` to verify "is this user-controlled input ever sanitized later?" / "are there other call sites that depend on this validation?" before asserting impact.
|
|
448
|
+
- Stay read-only. Never write, never run destructive commands, never propose SEARCH/REPLACE blocks. The parent decides what to act on.
|
|
449
|
+
- Cap yourself at ~12 tool calls. If the diff is too big, focus on the riskiest 2-3 files and say so explicitly.
|
|
450
|
+
|
|
451
|
+
Threat model \u2014 flag with severity:
|
|
452
|
+
|
|
453
|
+
**CRITICAL** (do-not-ship):
|
|
454
|
+
- SQL / NoSQL / shell / template injection \u2014 user input concatenated into a query, command, or template without parameterization.
|
|
455
|
+
- Path traversal \u2014 user-controlled filenames touching the filesystem without canonicalization + sandbox check.
|
|
456
|
+
- Authentication / authorization missing \u2014 endpoints / actions that should require a session check but don't.
|
|
457
|
+
- Hardcoded secrets \u2014 API keys, passwords, signing tokens visible in the diff.
|
|
458
|
+
- Deserialization of untrusted input \u2014 \`pickle.loads\`, \`yaml.load\` (non-safe), \`eval\`, \`Function()\`, \`unserialize()\`.
|
|
459
|
+
- Cryptographic mistakes \u2014 homemade crypto, weak hashes (MD5/SHA-1) for passwords, missing IVs, ECB mode, predictable nonces.
|
|
460
|
+
|
|
461
|
+
**HIGH**:
|
|
462
|
+
- XSS \u2014 user input rendered into HTML without escaping (or wrong escaping context).
|
|
463
|
+
- SSRF \u2014 fetching URLs from user input without an allowlist.
|
|
464
|
+
- Race conditions in security-relevant code \u2014 TOCTOU on auth/file checks.
|
|
465
|
+
- Open redirects \u2014 user-controlled URL passed to a redirect helper.
|
|
466
|
+
- Insufficient logging on security events (login failure, permission denial) \u2014 only flag if the codebase clearly DOES log elsewhere.
|
|
467
|
+
|
|
468
|
+
**MEDIUM**:
|
|
469
|
+
- Verbose error messages leaking internal paths / stack traces / SQL.
|
|
470
|
+
- Missing rate limiting on a credential / token endpoint.
|
|
471
|
+
- Cross-origin / cookie-flag issues (missing \`Secure\` / \`HttpOnly\` / \`SameSite\`).
|
|
472
|
+
|
|
473
|
+
Things to NOT pile on (out of scope here \u2014 the regular /review covers them):
|
|
474
|
+
- Style, formatting, naming.
|
|
475
|
+
- Performance, refactor opportunities, test coverage gaps that aren't security-relevant.
|
|
476
|
+
- "Should be a constant" / "extract this helper" \u2014 irrelevant to ship-blocking.
|
|
477
|
+
|
|
478
|
+
Your final answer:
|
|
479
|
+
- Lead with a one-sentence verdict: "no security issues found", "minor concerns", or "blocking issues".
|
|
480
|
+
- Then a list grouped by severity. Each item: file:line + 1-sentence threat + 1-sentence fix direction (no full SEARCH/REPLACE \u2014 the user / parent agent will write that).
|
|
481
|
+
- If clean, say so plainly. Don't manufacture findings.
|
|
482
|
+
|
|
483
|
+
${NEGATIVE_CLAIM_RULE}
|
|
484
|
+
|
|
485
|
+
${TUI_FORMATTING_RULES}
|
|
486
|
+
|
|
487
|
+
The 'task' the parent gave you names what to review. Stay on it; don't redesign the feature.`;
|
|
488
|
+
var BUILTIN_TEST_BODY = `You are running as the parent agent \u2014 this skill is INLINED, not a subagent. The user invoked /test (or asked you to "run the tests and fix failures"). Your job: run the project's test suite, diagnose any failure, propose fixes as SEARCH/REPLACE edit blocks, then re-run. Repeat until green or you hit a wall you should escalate.
|
|
489
|
+
|
|
490
|
+
How to operate:
|
|
491
|
+
|
|
492
|
+
1. **Detect the test command**.
|
|
493
|
+
- Look for \`package.json\` \u2192 \`scripts.test\` first (most common: \`npm test\`, \`pnpm test\`, \`yarn test\`).
|
|
494
|
+
- If no package.json or no test script: try \`pytest\`, \`go test ./...\`, \`cargo test\` based on what files exist (pyproject.toml/requirements.txt \u2192 pytest; go.mod \u2192 go test; Cargo.toml \u2192 cargo test).
|
|
495
|
+
- If you can't tell, ASK the user for the command \u2014 don't guess. One question, one tool call to confirm.
|
|
496
|
+
|
|
497
|
+
2. **Run it via run_command** (typical timeout 120s, bigger if the suite is large). Capture stdout + stderr.
|
|
498
|
+
|
|
499
|
+
3. **Read the failures**. Pull out: which test names failed, the actual error/traceback, the file + line that threw. Don't just paraphrase \u2014 locate the exact assertion or stack frame.
|
|
500
|
+
|
|
501
|
+
4. **Propose fixes**. For each distinct failure:
|
|
502
|
+
- If the failure is in PRODUCTION code (test catches a real bug) \u2192 propose a SEARCH/REPLACE that fixes the production code.
|
|
503
|
+
- If the failure is in TEST code (test is wrong, codebase is right) \u2192 propose a SEARCH/REPLACE that updates the test, AND say so explicitly: "This is a test bug, not a production bug \u2014 updating the assertion."
|
|
504
|
+
- If the failure is environmental (missing dep, wrong node version, missing fixture file) \u2192 say so and stop. Don't try to install packages or change config without checking with the user.
|
|
505
|
+
|
|
506
|
+
5. **Apply + re-run**. After the user accepts the edit blocks, run the test command again. Iterate.
|
|
507
|
+
|
|
508
|
+
6. **Stop conditions**:
|
|
509
|
+
- All tests pass \u2192 report green, summarize what changed.
|
|
510
|
+
- Same test still failing after 2 fix attempts on the same line \u2192 STOP. Tell the user "I've tried twice, it's still failing \u2014 here's what I think is happening, want me to try a different angle?". Don't loop indefinitely.
|
|
511
|
+
- 3+ unrelated failures \u2192 fix one at a time, smallest first, so each pass narrows the surface.
|
|
512
|
+
|
|
513
|
+
Don't:
|
|
514
|
+
- Run \`npm install\` / \`pip install\` / \`cargo update\` without asking \u2014 those mutate lockfiles and have global effects.
|
|
515
|
+
- Disable, skip, or delete failing tests to "make it green". If a test seems wrong, update its assertion with a one-sentence explanation, but never add \`.skip\` / \`it.skip\` / \`@pytest.mark.skip\`.
|
|
516
|
+
- Modify the test runner config (vitest.config, jest.config, etc.) to silence failures.
|
|
517
|
+
|
|
518
|
+
Lead each turn with a one-line status: "\u25B8 running \`npm test\` ..." \u2192 "\u25B8 2 failures in tests/foo.test.ts \u2014 first is \u2026" \u2192 so the user always knows where you are without scrolling tool output.`;
|
|
519
|
+
var BUILTIN_SKILLS = Object.freeze([
|
|
520
|
+
Object.freeze({
|
|
521
|
+
name: "explore",
|
|
522
|
+
description: "Explore the codebase in an isolated subagent \u2014 wide-net read-only investigation that returns one distilled answer. Best for: 'find all places that...', 'how does X work across the project', 'survey the code for Y'.",
|
|
523
|
+
body: BUILTIN_EXPLORE_BODY,
|
|
524
|
+
scope: "builtin",
|
|
525
|
+
path: "(builtin)",
|
|
526
|
+
runAs: "subagent"
|
|
527
|
+
}),
|
|
528
|
+
Object.freeze({
|
|
529
|
+
name: "research",
|
|
530
|
+
description: "Research a question by combining web search + code reading in an isolated subagent. Best for: 'is X feature supported by lib Y', 'what's the canonical way to do Z', 'compare our impl against the spec'.",
|
|
531
|
+
body: BUILTIN_RESEARCH_BODY,
|
|
532
|
+
scope: "builtin",
|
|
533
|
+
path: "(builtin)",
|
|
534
|
+
runAs: "subagent"
|
|
535
|
+
}),
|
|
536
|
+
Object.freeze({
|
|
537
|
+
name: "review",
|
|
538
|
+
description: "Review the pending changes (current branch diff by default) in an isolated subagent \u2014 flags correctness, security, missing tests, hidden behavior changes; reports verdict + per-issue file:line. Read-only; the parent decides what to act on.",
|
|
539
|
+
body: BUILTIN_REVIEW_BODY,
|
|
540
|
+
scope: "builtin",
|
|
541
|
+
path: "(builtin)",
|
|
542
|
+
runAs: "subagent"
|
|
543
|
+
}),
|
|
544
|
+
Object.freeze({
|
|
545
|
+
name: "security-review",
|
|
546
|
+
description: "Security-focused review of the current branch diff in an isolated subagent \u2014 flags injection/authz/secrets/deserialization/path-traversal/crypto issues, severity-tagged. Read-only. Use when shipping changes that touch auth, input parsing, file IO, or external requests.",
|
|
547
|
+
body: BUILTIN_SECURITY_REVIEW_BODY,
|
|
548
|
+
scope: "builtin",
|
|
549
|
+
path: "(builtin)",
|
|
550
|
+
runAs: "subagent"
|
|
551
|
+
}),
|
|
552
|
+
Object.freeze({
|
|
553
|
+
name: "test",
|
|
554
|
+
description: "Run the project's test suite, diagnose failures, propose SEARCH/REPLACE fixes, re-run until green (or stop after 2 fix attempts on the same failure). Inlined \u2014 runs in the parent loop so you see the edit blocks and can /apply them. Detects npm/pnpm/yarn/pytest/go/cargo.",
|
|
555
|
+
body: BUILTIN_TEST_BODY,
|
|
556
|
+
scope: "builtin",
|
|
557
|
+
path: "(builtin)",
|
|
558
|
+
runAs: "inline"
|
|
559
|
+
})
|
|
560
|
+
]);
|
|
561
|
+
|
|
562
|
+
export {
|
|
563
|
+
PROJECT_MEMORY_FILE,
|
|
564
|
+
detectForeignAgentPlatform,
|
|
565
|
+
findProjectMemoryPath,
|
|
566
|
+
resolveProjectMemoryWritePath,
|
|
567
|
+
readProjectMemory,
|
|
568
|
+
memoryEnabled,
|
|
569
|
+
applyProjectMemory,
|
|
570
|
+
parseFrontmatter,
|
|
571
|
+
TUI_FORMATTING_RULES,
|
|
572
|
+
escalationContract,
|
|
573
|
+
NEGATIVE_CLAIM_RULE,
|
|
574
|
+
SKILLS_DIRNAME,
|
|
575
|
+
SKILL_FILE,
|
|
576
|
+
validateSkillFrontmatter,
|
|
577
|
+
SkillStore,
|
|
578
|
+
applySkillsIndex
|
|
579
|
+
};
|
|
580
|
+
//# sourceMappingURL=chunk-VALDDV76.js.map
|