@moreih29/nexus-core 0.4.0 → 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/conformance/README.md +15 -18
- package/conformance/examples/plan.extension.schema.example.json +25 -0
- package/conformance/lifecycle/README.md +1 -3
- package/conformance/lifecycle/agent-complete.json +2 -1
- package/conformance/lifecycle/agent-resume.json +2 -1
- package/conformance/lifecycle/agent-spawn.json +5 -8
- package/conformance/scenarios/full-plan-cycle.json +3 -3
- package/conformance/schema/fixture.schema.json +6 -6
- package/conformance/state-schemas/agent-tracker.schema.json +10 -5
- package/conformance/state-schemas/history.schema.json +11 -1
- package/conformance/state-schemas/plan.schema.json +5 -0
- package/conformance/state-schemas/tasks.schema.json +5 -0
- package/conformance/tools/plan-decide.json +7 -7
- package/conformance/tools/plan-start.json +1 -1
- package/conformance/tools/task-add.json +1 -1
- package/conformance/tools/task-close.json +2 -0
- package/docs/consumer-implementation-guide.md +7 -11
- package/docs/nexus-layout.md +0 -15
- package/docs/nexus-outputs-contract.md +15 -25
- package/docs/nexus-state-overview.md +0 -19
- package/docs/nexus-tools-contract.md +12 -2
- package/manifest.json +26 -26
- package/package.json +5 -1
- package/scripts/.gitkeep +0 -0
- package/scripts/conformance-coverage.ts +466 -0
- package/scripts/import-from-claude-nexus.ts +403 -0
- package/scripts/lib/frontmatter.ts +71 -0
- package/scripts/lib/lint.ts +216 -0
- package/scripts/lib/structure.ts +159 -0
- package/scripts/lib/validate.ts +668 -0
- package/scripts/validate.ts +90 -0
- package/conformance/lifecycle/session-end.json +0 -31
- package/conformance/lifecycle/session-start.json +0 -36
- package/conformance/state-schemas/runtime.schema.json +0 -25
|
@@ -0,0 +1,403 @@
|
|
|
1
|
+
#!/usr/bin/env bun
|
|
2
|
+
|
|
3
|
+
import path from 'node:path';
|
|
4
|
+
import { readFile, writeFile, mkdir, readdir, rm, rename, stat } from 'node:fs/promises';
|
|
5
|
+
import { parse as parseYaml, stringify as stringifyYaml } from 'yaml';
|
|
6
|
+
import { execSync } from 'node:child_process';
|
|
7
|
+
import { glob } from 'tinyglobby';
|
|
8
|
+
import { parseFrontmatter } from './lib/frontmatter.ts';
|
|
9
|
+
|
|
10
|
+
// Neutral layer allowed fields (bridge §2.1)
|
|
11
|
+
const AGENT_ALLOW_FIELDS = new Set([
|
|
12
|
+
'id', 'name', 'alias_ko', 'description', 'task',
|
|
13
|
+
'category', 'capabilities', 'resume_tier', 'model_tier',
|
|
14
|
+
]);
|
|
15
|
+
const SKILL_ALLOW_FIELDS = new Set([
|
|
16
|
+
'id', 'name', 'description', 'triggers', 'alias_ko', 'manual_only',
|
|
17
|
+
]);
|
|
18
|
+
|
|
19
|
+
// Explicitly known drop list (warn + drop)
|
|
20
|
+
const AGENT_DROP_FIELDS = new Set(['maxTurns', 'tags', 'disallowedTools', 'model']);
|
|
21
|
+
// 'disable-model-invocation' is NOT in this set — it is mapped to manual_only below.
|
|
22
|
+
const SKILL_DROP_FIELDS = new Set(['trigger_display', 'purpose']);
|
|
23
|
+
|
|
24
|
+
// model name → model_tier abstraction
|
|
25
|
+
const MODEL_TIER_MAP: Record<string, 'high' | 'standard'> = {
|
|
26
|
+
opus: 'high',
|
|
27
|
+
sonnet: 'standard',
|
|
28
|
+
haiku: 'standard',
|
|
29
|
+
};
|
|
30
|
+
|
|
31
|
+
interface ImportOptions {
|
|
32
|
+
source: string;
|
|
33
|
+
apply: boolean;
|
|
34
|
+
agentsOnly: boolean;
|
|
35
|
+
skillsOnly: boolean;
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
function parseCli(): ImportOptions {
|
|
39
|
+
const args = process.argv.slice(2);
|
|
40
|
+
let source = process.env['NEXUS_CLAUDE_NEXUS_PATH'] ?? path.resolve('..', 'claude-nexus');
|
|
41
|
+
let apply = false;
|
|
42
|
+
let agentsOnly = false;
|
|
43
|
+
let skillsOnly = false;
|
|
44
|
+
for (let i = 0; i < args.length; i++) {
|
|
45
|
+
if (args[i] === '--source' && args[i + 1]) { source = path.resolve(args[++i]!); }
|
|
46
|
+
else if (args[i] === '--apply') { apply = true; }
|
|
47
|
+
else if (args[i] === '--agents-only') { agentsOnly = true; }
|
|
48
|
+
else if (args[i] === '--skills-only') { skillsOnly = true; }
|
|
49
|
+
}
|
|
50
|
+
if (agentsOnly && skillsOnly) {
|
|
51
|
+
throw new Error('--agents-only and --skills-only are mutually exclusive');
|
|
52
|
+
}
|
|
53
|
+
return { source, apply, agentsOnly, skillsOnly };
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
async function verifySourceIsClaudeNexus(source: string): Promise<void> {
|
|
57
|
+
const pkgPath = path.join(source, 'package.json');
|
|
58
|
+
try {
|
|
59
|
+
const pkg = JSON.parse(await readFile(pkgPath, 'utf8')) as { name?: string };
|
|
60
|
+
if (pkg.name !== 'claude-nexus') {
|
|
61
|
+
throw new Error(`source package.json.name='${pkg.name}', expected 'claude-nexus'`);
|
|
62
|
+
}
|
|
63
|
+
} catch (err) {
|
|
64
|
+
throw new Error(`Failed to verify source at ${source}: ${(err as Error).message}`);
|
|
65
|
+
}
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
function gitWorkingTreeClean(root: string, scopes: string[]): boolean {
|
|
69
|
+
try {
|
|
70
|
+
const out = execSync(`git status --porcelain ${scopes.join(' ')}`, { cwd: root, encoding: 'utf8' });
|
|
71
|
+
return out.trim().length === 0;
|
|
72
|
+
} catch {
|
|
73
|
+
return true; // directory not yet created, treat as clean
|
|
74
|
+
}
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
// ---- Body transformation ----
|
|
78
|
+
// <role>...</role> → ## Role ... (top-level section marker only)
|
|
79
|
+
// <constraints>...</constraints> → ## Constraints ...
|
|
80
|
+
// <guidelines>...</guidelines> → ## Guidelines ...
|
|
81
|
+
// inline <example>, <thinking> etc. sub-XML blocks are preserved
|
|
82
|
+
function transformBody(rawBody: string): string {
|
|
83
|
+
const sections: Array<[RegExp, string]> = [
|
|
84
|
+
[/<role>\s*\n?/g, '## Role\n\n'],
|
|
85
|
+
[/<\/role>\s*\n?/g, '\n\n'],
|
|
86
|
+
[/<constraints>\s*\n?/g, '## Constraints\n\n'],
|
|
87
|
+
[/<\/constraints>\s*\n?/g, '\n\n'],
|
|
88
|
+
[/<guidelines>\s*\n?/g, '## Guidelines\n\n'],
|
|
89
|
+
[/<\/guidelines>\s*\n?/g, '\n\n'],
|
|
90
|
+
];
|
|
91
|
+
let result = rawBody;
|
|
92
|
+
for (const [re, replacement] of sections) {
|
|
93
|
+
result = result.replace(re, replacement);
|
|
94
|
+
}
|
|
95
|
+
// Trim excessive blank lines
|
|
96
|
+
return result.replace(/\n{3,}/g, '\n\n').trim() + '\n';
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
// ---- Capability reverse mapping ----
|
|
100
|
+
// disallowedTools → capabilities reverse mapping
|
|
101
|
+
interface CapabilityEntry {
|
|
102
|
+
id: string;
|
|
103
|
+
harness_mapping: {
|
|
104
|
+
claude_code: string[];
|
|
105
|
+
opencode: string[];
|
|
106
|
+
};
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
interface CapabilitiesFile {
|
|
110
|
+
capabilities: CapabilityEntry[];
|
|
111
|
+
}
|
|
112
|
+
|
|
113
|
+
async function loadCapabilityMap(root: string): Promise<Map<string, string>> {
|
|
114
|
+
const raw = await readFile(path.join(root, 'vocabulary/capabilities.yml'), 'utf8');
|
|
115
|
+
const caps = parseYaml(raw) as CapabilitiesFile;
|
|
116
|
+
const map = new Map<string, string>();
|
|
117
|
+
for (const cap of caps.capabilities) {
|
|
118
|
+
for (const tool of cap.harness_mapping.claude_code) {
|
|
119
|
+
map.set(tool, cap.id);
|
|
120
|
+
}
|
|
121
|
+
for (const tool of cap.harness_mapping.opencode) {
|
|
122
|
+
map.set(tool, cap.id);
|
|
123
|
+
}
|
|
124
|
+
}
|
|
125
|
+
return map;
|
|
126
|
+
}
|
|
127
|
+
|
|
128
|
+
function reverseMapTools(disallowedTools: string[], capMap: Map<string, string>): string[] {
|
|
129
|
+
const capabilitySet = new Set<string>();
|
|
130
|
+
for (const tool of disallowedTools) {
|
|
131
|
+
const cap = capMap.get(tool);
|
|
132
|
+
if (cap) capabilitySet.add(cap);
|
|
133
|
+
}
|
|
134
|
+
return Array.from(capabilitySet).sort();
|
|
135
|
+
}
|
|
136
|
+
|
|
137
|
+
// ---- Agent transformation ----
|
|
138
|
+
interface TransformedAgent {
|
|
139
|
+
id: string;
|
|
140
|
+
meta: Record<string, unknown>;
|
|
141
|
+
body: string;
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
/**
 * Reads one claude-nexus agent markdown file and converts it to the neutral
 * layer representation (meta object + transformed markdown body).
 *
 * Frontmatter policy:
 *  - keys in AGENT_ALLOW_FIELDS are copied through ('capabilities' excluded —
 *    it is recomputed from disallowedTools below);
 *  - keys in AGENT_DROP_FIELDS are dropped with a warning;
 *  - any other key is a hard error, forcing new upstream fields to be triaged
 *    into one of the two lists explicitly.
 *
 * @param sourcePath absolute path to the agent .md file
 * @param capMap     harness tool name → capability id (see loadCapabilityMap)
 * @param warnings   mutated in place with human-readable drop notes
 * @returns transformed agent; the nullable return type is never exercised by
 *          this implementation (no code path returns null) — kept for
 *          symmetry with transformSkill
 * @throws on unknown frontmatter fields or an unmapped model name
 */
async function transformAgent(
  sourcePath: string,
  capMap: Map<string, string>,
  warnings: string[]
): Promise<TransformedAgent | null> {
  const source = await readFile(sourcePath, 'utf8');
  const { data, content } = parseFrontmatter(source);
  const fileName = path.basename(sourcePath, '.md');

  const meta: Record<string, unknown> = {};
  for (const [key, value] of Object.entries(data)) {
    if (AGENT_ALLOW_FIELDS.has(key)) {
      if (key === 'capabilities') continue; // will be computed from disallowedTools
      meta[key] = value;
    } else if (AGENT_DROP_FIELDS.has(key)) {
      warnings.push(` ${fileName}: dropping known field '${key}'`);
    } else {
      throw new Error(`${fileName}: unknown field '${key}' — add to allow list or drop list`);
    }
  }

  // model → model_tier
  // ('model' is in AGENT_DROP_FIELDS, so it was warned+dropped above; here it
  // is read again from the raw frontmatter to derive the abstract tier.)
  if (typeof data['model'] === 'string') {
    const tier = MODEL_TIER_MAP[data['model']];
    if (!tier) throw new Error(`${fileName}: unknown model '${data['model']}'`);
    meta['model_tier'] = tier;
  }

  // disallowedTools → capabilities
  // Always set, even when disallowedTools is absent (yields an empty list).
  const disallowed = Array.isArray(data['disallowedTools']) ? data['disallowedTools'] : [];
  meta['capabilities'] = reverseMapTools(disallowed as string[], capMap);

  // Ensure id present (use filename if not in frontmatter)
  if (!meta['id']) meta['id'] = fileName;

  // name default to id
  if (!meta['name']) meta['name'] = meta['id'];

  const body = transformBody(content);

  return { id: meta['id'] as string, meta, body };
}
|
|
186
|
+
|
|
187
|
+
// ---- Skill transformation ----
|
|
188
|
+
interface TransformedSkill {
|
|
189
|
+
id: string;
|
|
190
|
+
meta: Record<string, unknown>;
|
|
191
|
+
body: string;
|
|
192
|
+
}
|
|
193
|
+
|
|
194
|
+
/**
 * Reads one claude-nexus skill directory (its SKILL.md) and converts it to
 * the neutral layer representation.
 *
 * Frontmatter policy mirrors transformAgent: SKILL_ALLOW_FIELDS copied,
 * SKILL_DROP_FIELDS warned+dropped, 'disable-model-invocation' mapped to
 * manual_only, anything else is a hard error.
 *
 * @param sourceDir absolute path to the skill directory (basename = skill id)
 * @param warnings  mutated in place with drop/mapping notes
 * @returns transformed skill; no code path currently returns null
 * @throws on unknown frontmatter fields
 */
async function transformSkill(
  sourceDir: string,
  warnings: string[]
): Promise<TransformedSkill | null> {
  const skillFile = path.join(sourceDir, 'SKILL.md');
  const source = await readFile(skillFile, 'utf8');
  const { data, content } = parseFrontmatter(source);
  const skillId = path.basename(sourceDir);

  const meta: Record<string, unknown> = {};
  for (const [key, value] of Object.entries(data)) {
    if (SKILL_ALLOW_FIELDS.has(key)) {
      meta[key] = value;
    } else if (SKILL_DROP_FIELDS.has(key)) {
      warnings.push(` ${skillId}: dropping known field '${key}'`);
    } else if (key === 'disable-model-invocation') {
      // Map to manual_only
      meta['manual_only'] = Boolean(value);
      warnings.push(` ${skillId}: mapping disable-model-invocation → manual_only`);
    } else {
      throw new Error(`${skillId}: unknown field '${key}' — add to allow list or drop list`);
    }
  }

  // Normalize triggers: bracket strings → tag ids; slash commands → drop + manual_only
  // NOTE(review): triggers that are neither [bracketed] nor /slash-prefixed
  // are silently discarded with no warning — confirm that is intended.
  if (Array.isArray(meta['triggers'])) {
    const rawTriggers = meta['triggers'] as string[];
    // Bracket triggers (e.g., [plan], [plan:auto]): strip brackets, split on ':' for parent id
    const bracketTriggers = rawTriggers
      .filter((t) => t.startsWith('[') && t.endsWith(']'))
      .map((t) => t.slice(1, -1).split(':')[0]!)
      .filter((t, i, arr) => arr.indexOf(t) === i); // dedupe
    // Slash command triggers (e.g., /claude-nexus:nx-init): harness-specific, drop them
    const slashTriggers = rawTriggers.filter((t) => t.startsWith('/'));
    if (slashTriggers.length > 0) {
      warnings.push(` ${skillId}: slash command triggers detected (${slashTriggers.join(', ')}) → dropping + setting manual_only: true`);
      meta['manual_only'] = true;
    }
    if (bracketTriggers.length > 0) {
      meta['triggers'] = bracketTriggers;
    } else {
      delete meta['triggers'];
    }
  }

  if (!meta['id']) meta['id'] = skillId;
  if (!meta['name']) meta['name'] = meta['id'];

  const body = transformBody(content);

  return { id: meta['id'] as string, meta, body };
}
|
|
246
|
+
|
|
247
|
+
// ---- Staging + atomic rename ----
|
|
248
|
+
async function writeToStaging(
|
|
249
|
+
root: string,
|
|
250
|
+
kind: 'agents' | 'skills',
|
|
251
|
+
items: Array<{ id: string; meta: Record<string, unknown>; body: string }>
|
|
252
|
+
): Promise<void> {
|
|
253
|
+
const stagingDir = path.join(root, `${kind}.staging`);
|
|
254
|
+
await rm(stagingDir, { recursive: true, force: true });
|
|
255
|
+
await mkdir(stagingDir, { recursive: true });
|
|
256
|
+
for (const item of items) {
|
|
257
|
+
const dir = path.join(stagingDir, item.id);
|
|
258
|
+
await mkdir(dir, { recursive: true });
|
|
259
|
+
await writeFile(path.join(dir, 'meta.yml'), stringifyYaml(item.meta));
|
|
260
|
+
await writeFile(path.join(dir, 'body.md'), item.body);
|
|
261
|
+
}
|
|
262
|
+
}
|
|
263
|
+
|
|
264
|
+
async function atomicSwap(root: string, kind: 'agents' | 'skills'): Promise<void> {
|
|
265
|
+
const live = path.join(root, kind);
|
|
266
|
+
const staging = path.join(root, `${kind}.staging`);
|
|
267
|
+
const backup = path.join(root, `${kind}.backup`);
|
|
268
|
+
// If live exists, move it to backup
|
|
269
|
+
try {
|
|
270
|
+
await stat(live);
|
|
271
|
+
await rm(backup, { recursive: true, force: true });
|
|
272
|
+
await rename(live, backup);
|
|
273
|
+
} catch {
|
|
274
|
+
// live doesn't exist — first bootstrap
|
|
275
|
+
}
|
|
276
|
+
await rename(staging, live);
|
|
277
|
+
}
|
|
278
|
+
|
|
279
|
+
async function cleanupBackup(root: string, kind: 'agents' | 'skills'): Promise<void> {
|
|
280
|
+
const backup = path.join(root, `${kind}.backup`);
|
|
281
|
+
await rm(backup, { recursive: true, force: true });
|
|
282
|
+
}
|
|
283
|
+
|
|
284
|
+
async function restoreBackup(root: string, kind: 'agents' | 'skills'): Promise<void> {
|
|
285
|
+
const live = path.join(root, kind);
|
|
286
|
+
const backup = path.join(root, `${kind}.backup`);
|
|
287
|
+
try {
|
|
288
|
+
await rm(live, { recursive: true, force: true });
|
|
289
|
+
await rename(backup, live);
|
|
290
|
+
} catch {
|
|
291
|
+
/* nothing to restore */
|
|
292
|
+
}
|
|
293
|
+
}
|
|
294
|
+
|
|
295
|
+
// ---- Main ----
|
|
296
|
+
/**
 * Entry point: imports agents and skills from a claude-nexus checkout into
 * the current nexus-core working directory (process.cwd()).
 *
 * Flow: verify source → (apply mode only) refuse a dirty git tree under
 * agents/, skills/, manifest.json → load the capability reverse map →
 * transform all agents/skills → either print a dry-run report, or write to
 * staging and atomically swap with rollback on failure → drop backups →
 * run `bun run validate` and fail if it does.
 *
 * @throws on source verification failure, dirty working tree, transform
 *         errors, write-phase failure (after attempting rollback), or a
 *         non-zero post-import validation run.
 */
async function main(): Promise<void> {
  const opts = parseCli();
  const root = process.cwd();

  console.log('# nexus-core import-from-claude-nexus');
  console.log(`Source: ${opts.source}`);
  console.log(`Target: ${root}`);
  console.log(`Mode: ${opts.apply ? 'APPLY' : 'DRY-RUN'}`);
  console.log('');

  // Verify source
  await verifySourceIsClaudeNexus(opts.source);

  // Git working tree dirty check
  if (opts.apply) {
    const scopes = ['agents/', 'skills/', 'manifest.json'];
    if (!gitWorkingTreeClean(root, scopes)) {
      throw new Error('Working tree has uncommitted changes in agents/, skills/, or manifest.json. Commit or stash first.');
    }
  }

  // Load capability map
  const capMap = await loadCapabilityMap(root);

  // Collect source files (the --agents-only / --skills-only flags simply
  // empty the other side's file list)
  const agentFiles = opts.skillsOnly
    ? []
    : await glob(['agents/*.md'], { cwd: opts.source, absolute: true });
  const skillDirs = opts.agentsOnly
    ? []
    : await glob(['skills/*'], { cwd: opts.source, absolute: true, onlyDirectories: true });

  const warnings: string[] = [];
  const transformedAgents: TransformedAgent[] = [];
  const transformedSkills: TransformedSkill[] = [];

  // Transform agents
  for (const f of agentFiles) {
    const agent = await transformAgent(f, capMap, warnings);
    if (agent) transformedAgents.push(agent);
  }

  // Transform skills
  for (const d of skillDirs) {
    const skill = await transformSkill(d, warnings);
    if (skill) transformedSkills.push(skill);
  }

  console.log('# Transformation summary');
  console.log(`Agents: ${transformedAgents.length}`);
  console.log(`Skills: ${transformedSkills.length}`);
  if (warnings.length > 0) {
    console.log('# Warnings');
    for (const w of warnings) console.log(w);
  }
  console.log('');

  if (!opts.apply) {
    console.log('# Files that would be written (dry-run):');
    for (const a of transformedAgents) {
      console.log(`  agents/${a.id}/meta.yml`);
      console.log(`  agents/${a.id}/body.md`);
    }
    for (const s of transformedSkills) {
      console.log(`  skills/${s.id}/meta.yml`);
      console.log(`  skills/${s.id}/body.md`);
    }
    console.log('');
    console.log('Run with --apply to write.');
    return;
  }

  // Apply — all-or-nothing transaction via staging
  try {
    if (!opts.skillsOnly) {
      await writeToStaging(root, 'agents', transformedAgents);
      await atomicSwap(root, 'agents');
    }
    if (!opts.agentsOnly) {
      await writeToStaging(root, 'skills', transformedSkills);
      await atomicSwap(root, 'skills');
    }
  } catch (err) {
    console.error('Import failed during write phase. Attempting restore...');
    if (!opts.skillsOnly) await restoreBackup(root, 'agents');
    if (!opts.agentsOnly) await restoreBackup(root, 'skills');
    throw err;
  }

  // Cleanup backup on success
  if (!opts.skillsOnly) await cleanupBackup(root, 'agents');
  if (!opts.agentsOnly) await cleanupBackup(root, 'skills');

  console.log('');
  console.log('Import complete. Running validate...');
  // Auto-validate after apply — run validate via spawnSync
  // NOTE(review): node:child_process is already imported at the top of the
  // file (execSync); the dynamic import here is presumably stylistic — confirm.
  const { spawnSync } = await import('node:child_process');
  const res = spawnSync('bun', ['run', 'validate'], { cwd: root, stdio: 'inherit' });
  if (res.status !== 0) {
    throw new Error('Validation failed after import. Check output above.');
  }
  console.log('Import + validation complete.');
}
|
|
399
|
+
|
|
400
|
+
// Script entry point: surface the error message on stderr and exit non-zero
// so CI and callers observe the failure.
main().catch((err) => {
  console.error('Error:', (err as Error).message);
  process.exit(1);
});
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
import { parse as parseYaml } from 'yaml';
|
|
2
|
+
|
|
3
|
+
const FRONTMATTER_RE = /^---\n([\s\S]*?)\n---\n([\s\S]*)$/;
|
|
4
|
+
|
|
5
|
+
export interface ParsedFrontmatter {
|
|
6
|
+
/** parsed YAML object */
|
|
7
|
+
data: Record<string, unknown>;
|
|
8
|
+
/** body content (after closing ---) */
|
|
9
|
+
content: string;
|
|
10
|
+
/** 1-based line number where frontmatter YAML starts (after opening ---) */
|
|
11
|
+
frontmatterStartLine: number;
|
|
12
|
+
/** 1-based line number where body content starts (after closing ---) */
|
|
13
|
+
contentStartLine: number;
|
|
14
|
+
}
|
|
15
|
+
|
|
16
|
+
/**
|
|
17
|
+
* Parses a markdown file with optional YAML frontmatter.
|
|
18
|
+
*
|
|
19
|
+
* Format:
|
|
20
|
+
* ---
|
|
21
|
+
* {frontmatter YAML}
|
|
22
|
+
* ---
|
|
23
|
+
* {body content}
|
|
24
|
+
*
|
|
25
|
+
* If no frontmatter is present, returns {data: {}, content: source, frontmatterStartLine: 0, contentStartLine: 1}.
|
|
26
|
+
*
|
|
27
|
+
* Line numbers are 1-based to match editor conventions.
|
|
28
|
+
*/
|
|
29
|
+
export function parseFrontmatter(source: string): ParsedFrontmatter {
|
|
30
|
+
const match = source.match(FRONTMATTER_RE);
|
|
31
|
+
if (!match) {
|
|
32
|
+
return {
|
|
33
|
+
data: {},
|
|
34
|
+
content: source,
|
|
35
|
+
frontmatterStartLine: 0,
|
|
36
|
+
contentStartLine: 1,
|
|
37
|
+
};
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
const frontmatterText = match[1];
|
|
41
|
+
const body = match[2];
|
|
42
|
+
const parsed = parseYaml(frontmatterText);
|
|
43
|
+
|
|
44
|
+
// Opening '---' is line 1, frontmatter YAML starts at line 2
|
|
45
|
+
const frontmatterStartLine = 2;
|
|
46
|
+
// Count lines in frontmatter text + 2 (opening --- and closing ---)
|
|
47
|
+
const frontmatterLineCount = frontmatterText.split('\n').length;
|
|
48
|
+
const contentStartLine = frontmatterStartLine + frontmatterLineCount + 1; // +1 for closing ---
|
|
49
|
+
|
|
50
|
+
return {
|
|
51
|
+
data: (parsed ?? {}) as Record<string, unknown>,
|
|
52
|
+
content: body,
|
|
53
|
+
frontmatterStartLine,
|
|
54
|
+
contentStartLine,
|
|
55
|
+
};
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
/**
|
|
59
|
+
* Reverse: translates a line number within the frontmatter YAML (1-based, as reported
|
|
60
|
+
* by a YAML parser operating on the frontmatter text alone) back to the line in the
|
|
61
|
+
* original source file.
|
|
62
|
+
*/
|
|
63
|
+
export function frontmatterLineToSourceLine(
|
|
64
|
+
parsed: ParsedFrontmatter,
|
|
65
|
+
frontmatterLine: number
|
|
66
|
+
): number {
|
|
67
|
+
if (parsed.frontmatterStartLine === 0) {
|
|
68
|
+
throw new Error('Source has no frontmatter');
|
|
69
|
+
}
|
|
70
|
+
return parsed.frontmatterStartLine + frontmatterLine - 1;
|
|
71
|
+
}
|
|
@@ -0,0 +1,216 @@
|
|
|
1
|
+
import { glob } from 'tinyglobby';
|
|
2
|
+
import { readFile } from 'node:fs/promises';
|
|
3
|
+
import { parse as parseYaml } from 'yaml';
|
|
4
|
+
import path from 'node:path';
|
|
5
|
+
|
|
6
|
+
/**
|
|
7
|
+
* Common ValidationResult type. Imported from ./validate.ts for consistency,
|
|
8
|
+
* but declared here as well for isolation.
|
|
9
|
+
*/
|
|
10
|
+
export interface ValidationResult {
|
|
11
|
+
file: string;
|
|
12
|
+
gate: string;
|
|
13
|
+
severity: 'error' | 'warning';
|
|
14
|
+
line?: number;
|
|
15
|
+
message: string;
|
|
16
|
+
}
|
|
17
|
+
|
|
18
|
+
/** Paths excluded from all lint checks. */
|
|
19
|
+
const LINT_EXCLUDE: string[] = [
|
|
20
|
+
'scripts/**',
|
|
21
|
+
'node_modules/**',
|
|
22
|
+
'.git/**',
|
|
23
|
+
'dist/**',
|
|
24
|
+
'.nexus/**',
|
|
25
|
+
'schema/**',
|
|
26
|
+
// capabilities.yml prose_guidance naturally uses English words (Read, edit, write)
|
|
27
|
+
// that match tool-name regexes. After v0.2.0 harness-agnostic redesign, this file
|
|
28
|
+
// contains zero harness tool names — only semantic descriptions. Excluding is safe.
|
|
29
|
+
'vocabulary/capabilities.yml',
|
|
30
|
+
];
|
|
31
|
+
|
|
32
|
+
/**
|
|
33
|
+
* Patterns to scan — only prompt-injection sources and canonical vocabulary.
|
|
34
|
+
*
|
|
35
|
+
* Intentionally excluded: README.md, CONSUMING.md, CHANGELOG.md, MIGRATIONS/*,
|
|
36
|
+
* schema/README.md — these are human-facing documentation where harness tool
|
|
37
|
+
* names and model names may legitimately appear in prose explanations.
|
|
38
|
+
*/
|
|
39
|
+
const LINT_INCLUDE: string[] = [
|
|
40
|
+
'agents/**/meta.yml',
|
|
41
|
+
'agents/**/body.md',
|
|
42
|
+
'skills/**/meta.yml',
|
|
43
|
+
'skills/**/body.md',
|
|
44
|
+
'vocabulary/*.yml',
|
|
45
|
+
];
|
|
46
|
+
|
|
47
|
+
// G6: harness-specific tool names
|
|
48
|
+
// Distinctive tools — unambiguous, safe to scan in ALL files including body.md prose
|
|
49
|
+
const CLAUDE_CODE_TOOLS_DISTINCTIVE = /\b(NotebookEdit|BashOutput|KillShell|Glob|Grep|WebFetch|WebSearch|TodoWrite|SendMessage|TeamCreate|AskUserQuestion|mcp__plugin_[a-z0-9_]+)\b/g;
|
|
50
|
+
// Ambiguous tools — also common English words (Read, Write, Edit, Bash, Task, Monitor)
|
|
51
|
+
// Only scanned in meta.yml and vocabulary where they are clearly tool references, not prose.
|
|
52
|
+
const CLAUDE_CODE_TOOLS_AMBIGUOUS = /\b(Read|Write|Edit|Bash|Task|Monitor)\b/g;
|
|
53
|
+
const OPENCODE_TOOLS = /\b(edit|write|patch|multiedit|bash)\b/g;
|
|
54
|
+
|
|
55
|
+
// G7: concrete model names
|
|
56
|
+
const CONCRETE_MODELS = /\b(opus|sonnet|haiku|gpt-[0-9][a-z0-9.-]*|claude-[0-9][a-z0-9.-]*)\b/gi;
|
|
57
|
+
|
|
58
|
+
// G8: non-TS/JS file allowed extensions
|
|
59
|
+
const PROMPT_ONLY_BAD_EXT = /\.(ts|tsx|js|jsx|cjs|mjs)$/;
|
|
60
|
+
|
|
61
|
+
async function* iterFiles(root: string): AsyncGenerator<string> {
|
|
62
|
+
const files = await glob(LINT_INCLUDE, {
|
|
63
|
+
cwd: root,
|
|
64
|
+
ignore: LINT_EXCLUDE,
|
|
65
|
+
absolute: true,
|
|
66
|
+
onlyFiles: true,
|
|
67
|
+
});
|
|
68
|
+
for (const f of files) yield f;
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
function lineOfMatch(source: string, index: number): number {
|
|
72
|
+
return source.slice(0, index).split('\n').length;
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
function scanRegex(
|
|
76
|
+
source: string,
|
|
77
|
+
regex: RegExp,
|
|
78
|
+
file: string,
|
|
79
|
+
gate: string,
|
|
80
|
+
makeMessage: (match: string) => string
|
|
81
|
+
): ValidationResult[] {
|
|
82
|
+
const results: ValidationResult[] = [];
|
|
83
|
+
regex.lastIndex = 0;
|
|
84
|
+
let m: RegExpExecArray | null;
|
|
85
|
+
// eslint-disable-next-line no-cond-assign
|
|
86
|
+
while ((m = regex.exec(source)) !== null) {
|
|
87
|
+
results.push({
|
|
88
|
+
file,
|
|
89
|
+
gate,
|
|
90
|
+
severity: 'error',
|
|
91
|
+
line: lineOfMatch(source, m.index),
|
|
92
|
+
message: makeMessage(m[0]),
|
|
93
|
+
});
|
|
94
|
+
if (m.index === regex.lastIndex) regex.lastIndex++;
|
|
95
|
+
}
|
|
96
|
+
return results;
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
/** G6: harness-specific tool names forbidden in body/meta/vocabulary.
|
|
100
|
+
*
|
|
101
|
+
* CLAUDE_CODE_TOOLS (capitalized, distinctive) — scanned in ALL lint-included files.
|
|
102
|
+
* OPENCODE_TOOLS (lowercase, indistinguishable from English words in prose) — scanned
|
|
103
|
+
* ONLY in meta.yml and vocabulary/*.yml, NOT in body.md or prose_guidance fields.
|
|
104
|
+
* Rationale: "edit", "write", "bash" are common English words that legitimately appear
|
|
105
|
+
* in descriptive body prose. Scanning body.md for these produces mass false positives.
|
|
106
|
+
*/
|
|
107
|
+
export async function checkHarnessSpecific(root: string): Promise<ValidationResult[]> {
|
|
108
|
+
const results: ValidationResult[] = [];
|
|
109
|
+
for await (const file of iterFiles(root)) {
|
|
110
|
+
const source = await readFile(file, 'utf8');
|
|
111
|
+
const rel = path.relative(root, file);
|
|
112
|
+
// Distinctive Claude Code tools (unambiguous) — all files
|
|
113
|
+
results.push(
|
|
114
|
+
...scanRegex(source, CLAUDE_CODE_TOOLS_DISTINCTIVE, rel, 'G6-harness-lint',
|
|
115
|
+
(m) => `Harness-specific tool name forbidden: '${m}'. Use abstract capability or remove.`)
|
|
116
|
+
);
|
|
117
|
+
// Ambiguous tools (Read/Write/Edit/Bash/Task/Monitor + OpenCode lowercase) — meta.yml and vocabulary only
|
|
118
|
+
if (rel.endsWith('meta.yml') || rel.startsWith('vocabulary/')) {
|
|
119
|
+
results.push(
|
|
120
|
+
...scanRegex(source, CLAUDE_CODE_TOOLS_AMBIGUOUS, rel, 'G6-harness-lint',
|
|
121
|
+
(m) => `Harness-specific tool name forbidden: '${m}'. Use abstract capability or remove.`)
|
|
122
|
+
);
|
|
123
|
+
results.push(
|
|
124
|
+
...scanRegex(source, OPENCODE_TOOLS, rel, 'G6-harness-lint',
|
|
125
|
+
(m) => `OpenCode tool name forbidden: '${m}'. Use abstract capability or remove.`)
|
|
126
|
+
);
|
|
127
|
+
}
|
|
128
|
+
}
|
|
129
|
+
return results;
|
|
130
|
+
}
|
|
131
|
+
|
|
132
|
+
/** G7: concrete model names forbidden; use model_tier abstraction. */
|
|
133
|
+
export async function checkConcreteModel(root: string): Promise<ValidationResult[]> {
|
|
134
|
+
const results: ValidationResult[] = [];
|
|
135
|
+
for await (const file of iterFiles(root)) {
|
|
136
|
+
const source = await readFile(file, 'utf8');
|
|
137
|
+
const rel = path.relative(root, file);
|
|
138
|
+
results.push(
|
|
139
|
+
...scanRegex(source, CONCRETE_MODELS, rel, 'G7-model-lint',
|
|
140
|
+
(m) => `Concrete model name forbidden: '${m}'. Use 'model_tier: high | standard'.`)
|
|
141
|
+
);
|
|
142
|
+
}
|
|
143
|
+
return results;
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
/**
|
|
147
|
+
* G11: tag trigger consistency — each tag's trigger must equal "[" + id.replace(/-/g, ":") + "]".
|
|
148
|
+
*/
|
|
149
|
+
export async function checkTagTriggerConsistency(root: string): Promise<ValidationResult[]> {
|
|
150
|
+
const tagsPath = path.join(root, 'vocabulary', 'tags.yml');
|
|
151
|
+
const rel = path.join('vocabulary', 'tags.yml');
|
|
152
|
+
let source: string;
|
|
153
|
+
try {
|
|
154
|
+
source = await readFile(tagsPath, 'utf8');
|
|
155
|
+
} catch (err) {
|
|
156
|
+
return [{
|
|
157
|
+
file: rel,
|
|
158
|
+
gate: 'G11-tag-trigger',
|
|
159
|
+
severity: 'error',
|
|
160
|
+
message: `Cannot read tags.yml: ${(err as Error).message}`,
|
|
161
|
+
}];
|
|
162
|
+
}
|
|
163
|
+
|
|
164
|
+
let data: unknown;
|
|
165
|
+
try {
|
|
166
|
+
data = parseYaml(source);
|
|
167
|
+
} catch (err) {
|
|
168
|
+
return [{
|
|
169
|
+
file: rel,
|
|
170
|
+
gate: 'G11-tag-trigger',
|
|
171
|
+
severity: 'error',
|
|
172
|
+
message: `YAML parse error in tags.yml: ${(err as Error).message}`,
|
|
173
|
+
}];
|
|
174
|
+
}
|
|
175
|
+
|
|
176
|
+
const tags = (data as { tags?: Array<{ id: string; trigger: string }> })?.tags ?? [];
|
|
177
|
+
const results: ValidationResult[] = [];
|
|
178
|
+
for (const tag of tags) {
|
|
179
|
+
const expected = '[' + tag.id.replace(/-/g, ':') + ']';
|
|
180
|
+
if (tag.trigger !== expected) {
|
|
181
|
+
results.push({
|
|
182
|
+
file: rel,
|
|
183
|
+
gate: 'G11-tag-trigger',
|
|
184
|
+
severity: 'error',
|
|
185
|
+
message: `Tag '${tag.id}': trigger mismatch — expected '${expected}', got '${tag.trigger}'`,
|
|
186
|
+
});
|
|
187
|
+
}
|
|
188
|
+
}
|
|
189
|
+
return results;
|
|
190
|
+
}
|
|
191
|
+
|
|
192
|
+
/**
|
|
193
|
+
* G8: prompt-only enforcement — no .ts/.js/.cjs/.mjs outside scripts/.
|
|
194
|
+
* Published artifact must not contain runtime code.
|
|
195
|
+
*/
|
|
196
|
+
export async function checkPromptOnly(root: string): Promise<ValidationResult[]> {
|
|
197
|
+
const results: ValidationResult[] = [];
|
|
198
|
+
const allFiles = await glob(['**/*'], {
|
|
199
|
+
cwd: root,
|
|
200
|
+
ignore: ['node_modules/**', '.git/**', 'dist/**', '.nexus/**', 'scripts/**'],
|
|
201
|
+
absolute: true,
|
|
202
|
+
onlyFiles: true,
|
|
203
|
+
});
|
|
204
|
+
for (const file of allFiles) {
|
|
205
|
+
if (PROMPT_ONLY_BAD_EXT.test(file)) {
|
|
206
|
+
const rel = path.relative(root, file);
|
|
207
|
+
results.push({
|
|
208
|
+
file: rel,
|
|
209
|
+
gate: 'G8-prompt-only',
|
|
210
|
+
severity: 'error',
|
|
211
|
+
message: `Runtime code file outside scripts/: ${rel}. nexus-core is a prompt-only library.`,
|
|
212
|
+
});
|
|
213
|
+
}
|
|
214
|
+
}
|
|
215
|
+
return results;
|
|
216
|
+
}
|