agent-method 1.5.1 → 1.5.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +141 -151
- package/bin/{agent-method.js → wwa.js} +12 -4
- package/lib/cli/check.js +2 -2
- package/lib/cli/init.js +107 -17
- package/lib/cli/pipeline.js +1 -1
- package/lib/cli/refine.js +3 -3
- package/lib/cli/route.js +1 -1
- package/lib/cli/scan.js +3 -3
- package/lib/cli/serve.js +23 -0
- package/lib/cli/status.js +2 -2
- package/lib/cli/upgrade.js +8 -7
- package/lib/cli/watch.js +32 -0
- package/lib/init.js +62 -6
- package/lib/mcp-server.js +524 -0
- package/lib/pipeline.js +1 -1
- package/lib/registry.js +1 -1
- package/lib/watcher.js +165 -0
- package/package.json +7 -5
- package/templates/README.md +24 -20
- package/templates/entry-points/.cursorrules +11 -11
- package/templates/entry-points/AGENT.md +11 -11
- package/templates/entry-points/CLAUDE.md +11 -11
- package/templates/full/.cursorrules +11 -11
- package/templates/full/AGENT.md +11 -11
- package/templates/full/CLAUDE.md +11 -11
- package/templates/full/SESSION-LOG.md +37 -5
- package/templates/starter/.cursorrules +11 -11
- package/templates/starter/AGENT.md +11 -11
- package/templates/starter/CLAUDE.md +11 -11
- package/templates/starter/SESSION-LOG.md +37 -5
|
@@ -0,0 +1,524 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* MCP server — exposes methodology pipeline and registry as agent-callable tools.
|
|
3
|
+
*
|
|
4
|
+
* 8 tools:
|
|
5
|
+
* route_query S1-S5 End-to-end query routing
|
|
6
|
+
* validate_entry_point S7 Entry point validation
|
|
7
|
+
* detect_project S8 Project type detection
|
|
8
|
+
* resolve_cascade S5 Cascade chain resolution
|
|
9
|
+
* check_capability Registry Model tier capability check
|
|
10
|
+
* lookup_feature Registry Feature lookup by ID or keyword
|
|
11
|
+
* list_workflows Registry List available workflows
|
|
12
|
+
* refactor_markdown CTX-06 Markdown scale analysis
|
|
13
|
+
*
|
|
14
|
+
* Uses @modelcontextprotocol/sdk McpServer with stdio transport.
|
|
15
|
+
*/
|
|
16
|
+
|
|
17
|
+
import { readFileSync, statSync } from "node:fs";
|
|
18
|
+
import { resolve } from "node:path";
|
|
19
|
+
import { z } from "zod";
|
|
20
|
+
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
|
21
|
+
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
|
22
|
+
|
|
23
|
+
import { loadRegistry, getFeatures, getWorkflows, getVersion } from "./registry.js";
|
|
24
|
+
import {
|
|
25
|
+
classify,
|
|
26
|
+
selectWorkflow,
|
|
27
|
+
resolveFeatures,
|
|
28
|
+
computeFileSets,
|
|
29
|
+
resolveCascade,
|
|
30
|
+
validateEntryPoint,
|
|
31
|
+
detectProjectType,
|
|
32
|
+
} from "./pipeline.js";
|
|
33
|
+
|
|
34
|
+
// ---------------------------------------------------------------------------
// Registry singleton — loaded lazily on first access, then cached for the
// lifetime of the process. NOTE(review): the path argument is only honored on
// the very first call; later calls return the cached registry regardless of
// the path they pass — confirm this is the intended singleton semantics.
// ---------------------------------------------------------------------------

let cachedRegistry = null;

function registry(registryPath) {
  if (!cachedRegistry) {
    cachedRegistry = loadRegistry(registryPath || undefined);
  }
  return cachedRegistry;
}
|
|
46
|
+
|
|
47
|
+
// ---------------------------------------------------------------------------
// Capability tiers — maps model tiers to supported feature domains
// ---------------------------------------------------------------------------

/**
 * Capability matrix keyed by model tier (lite / standard / full).
 * Each entry lists the feature-domain prefixes the tier supports, the maximum
 * feature count, and a human-readable note. Declared lowest-capability first —
 * check_capability relies on this order to find the minimum required tier.
 *
 * Deep-frozen: this is shared module state read by every check_capability
 * call, so accidental mutation would silently corrupt all later responses.
 */
const TIER_CAPABILITIES = Object.freeze({
  lite: Object.freeze({
    supported_domains: Object.freeze(["CTX", "QRY", "STT"]),
    max_features: 12,
    notes: "Haiku-class models. Core context and query routing only.",
  }),
  standard: Object.freeze({
    supported_domains: Object.freeze(["CTX", "QRY", "STT", "TSK", "HAI", "TPL"]),
    max_features: 24,
    notes: "Sonnet-class models. Full methodology minus exploration/discovery.",
  }),
  full: Object.freeze({
    supported_domains: Object.freeze(["CTX", "QRY", "STT", "TSK", "HAI", "TPL", "EXP", "SCAN"]),
    max_features: 32,
    notes: "Opus-class models. Complete methodology surface area.",
  }),
});
|
|
68
|
+
|
|
69
|
+
// ---------------------------------------------------------------------------
// Markdown analysis helpers (CTX-06 scale management)
// ---------------------------------------------------------------------------

// Maximum recommended line count for a methodology markdown file; files above
// this trigger a split plan in analyzeMarkdown (CTX-06 scale management).
const SCALE_THRESHOLD = 300;
|
|
74
|
+
|
|
75
|
+
/**
 * Analyze a markdown file for CTX-06 scale management.
 *
 * Reads the file, counts lines, parses ATX headings (levels 1-4) into a
 * section list, and — when the file exceeds the threshold — attaches a split
 * plan. Advisory only; the file is never modified.
 *
 * Fix: headings that appear inside fenced code blocks (``` or ~~~) are now
 * ignored — under CommonMark they are literal text, not document structure.
 * Fence tracking is a simple open/close toggle (a best-effort heuristic; it
 * does not match fence lengths or distinguish ``` from ~~~ pairs).
 *
 * @param {string} filePath - Path to the markdown file to analyze.
 * @param {number} [threshold=SCALE_THRESHOLD] - Line-count limit above which a
 *   split plan is generated. New optional parameter; the default preserves the
 *   original behavior.
 * @returns {object} Analysis result, or `{ error }` when the file is unreadable.
 */
function analyzeMarkdown(filePath, threshold = SCALE_THRESHOLD) {
  const absPath = resolve(filePath);
  let content;
  try {
    content = readFileSync(absPath, "utf-8");
  } catch {
    // Unreadable file is an expected condition — report, don't throw.
    return { error: `File not found: ${filePath}` };
  }

  const lines = content.split("\n");
  const lineCount = lines.length;
  const overThreshold = lineCount > threshold;

  // Parse sections by ATX headings, skipping fenced code blocks.
  const sections = [];
  let currentSection = null;
  let inFence = false;

  for (let i = 0; i < lines.length; i++) {
    if (/^\s*(```|~~~)/.test(lines[i])) {
      inFence = !inFence;
      continue;
    }
    if (inFence) continue;

    const match = lines[i].match(/^(#{1,4})\s+(.+)/);
    if (match) {
      if (currentSection) {
        // Close out the previous section at the line before this heading.
        currentSection.end_line = i;
        currentSection.line_count = currentSection.end_line - currentSection.start_line;
        sections.push(currentSection);
      }
      currentSection = {
        level: match[1].length,
        title: match[2].trim(),
        start_line: i + 1, // 1-indexed heading line
        end_line: null,
        line_count: 0,
      };
    }
  }
  if (currentSection) {
    // The final section runs to the end of the file.
    currentSection.end_line = lines.length;
    currentSection.line_count = currentSection.end_line - currentSection.start_line;
    sections.push(currentSection);
  }

  const result = {
    file: filePath,
    line_count: lineCount,
    threshold,
    over_threshold: overThreshold,
    sections,
  };

  // Generate a split plan only if over threshold.
  if (overThreshold) {
    // Only top-level (h1/h2) sections with meaningful bulk are candidates.
    const topSections = sections.filter((s) => s.level <= 2);
    const splitCandidates = topSections
      .filter((s) => s.line_count > 30)
      .sort((a, b) => b.line_count - a.line_count);

    result.split_plan = {
      strategy: "index_plus_components",
      description:
        "Split into index file (navigation + active content) and components subdirectory (archived sections grouped by semantic boundary).",
      candidates: splitCandidates.map((s) => ({
        section: s.title,
        lines: s.line_count,
        recommendation: s.line_count > 100 ? "extract to component" : "consider extracting",
      })),
      // Rough size of the remaining index if every candidate were extracted.
      estimated_index_lines: lineCount - splitCandidates.reduce((sum, s) => sum + s.line_count, 0),
    };
  }

  return result;
}
|
|
145
|
+
|
|
146
|
+
// ---------------------------------------------------------------------------
// Server creation
// ---------------------------------------------------------------------------

/**
 * Create the MCP server and register the 8 methodology tools.
 *
 * The server is returned unconnected; callers attach a transport themselves
 * (see startServer for the stdio variant).
 *
 * @param {string} [registryPath] - Optional explicit path to the feature registry.
 * @returns {McpServer} Configured server with all tools registered.
 */
export function createServer(registryPath) {
  const reg = registry(registryPath);
  const version = getVersion(reg);

  // Every tool responds with a single pretty-printed JSON text part; this
  // helper removes the repeated content-envelope boilerplate.
  const jsonResult = (payload) => ({
    content: [{ type: "text", text: JSON.stringify(payload, null, 2) }],
  });

  const server = new McpServer({
    name: "wwa",
    version,
  });

  // -------------------------------------------------------------------------
  // Tool 1: route_query — end-to-end pipeline (S1-S5)
  // -------------------------------------------------------------------------

  server.tool(
    "route_query",
    "Route a natural language query through the methodology pipeline (S1-S5). Returns query classification, workflow selection, resolved features, and computed read/write file sets.",
    {
      query: z.string().describe("The natural language query to route"),
      project_type: z
        .enum(["code", "data", "analytical", "mixed", "general"])
        .default("general")
        .describe("Project type for context-aware routing"),
      stage: z
        .string()
        .default("scope")
        .describe("Pipeline stage: scope, act, or verify"),
    },
    async ({ query, project_type, stage }) => {
      // S1-S2: classify the query, then pick the workflow for its type.
      const classification = classify(query, project_type, reg);
      const selection = selectWorkflow(classification.query_type, project_type);
      // S3-S4: resolve the workflow's features and the files they touch.
      const resolution = resolveFeatures(selection.workflow_id, stage, reg);
      const fileSets = computeFileSets(resolution.features, classification.query_type, reg);
      // S5: "general" has no cascade table of its own — fall back to universal.
      const cascade = resolveCascade([], null, project_type === "general" ? "universal" : project_type);

      return jsonResult({
        query,
        project_type,
        stage,
        query_type: classification.query_type,
        confidence: classification.confidence,
        matched: classification.matched,
        workflow: selection,
        features: resolution.features,
        feature_source: resolution.source,
        read_set: fileSets.read_set,
        write_set: fileSets.write_set,
        cascade,
      });
    }
  );

  // -------------------------------------------------------------------------
  // Tool 2: validate_entry_point — S7 validation
  // -------------------------------------------------------------------------

  server.tool(
    "validate_entry_point",
    "Validate an entry point file (CLAUDE.md, .cursorrules, AGENT.md) against the methodology registry. Checks scoping coverage, cascade completeness, convention coverage, workflow references, and scale management.",
    {
      file_path: z.string().describe("Path to the entry point file"),
      project_type: z
        .enum(["code", "data", "analytical", "mixed", "general"])
        .default("general")
        .describe("Project type for validation context"),
    },
    async ({ file_path, project_type }) =>
      jsonResult(validateEntryPoint(file_path, project_type, reg))
  );

  // -------------------------------------------------------------------------
  // Tool 3: detect_project — S8 detection
  // -------------------------------------------------------------------------

  server.tool(
    "detect_project",
    "Detect project type from directory contents. Analyzes file patterns, extensions, and directory structure to classify a project as code, data, analytical, mixed, or general.",
    {
      directory: z
        .string()
        .default(".")
        .describe("Directory to analyze"),
    },
    async ({ directory }) => jsonResult(detectProjectType(directory))
  );

  // -------------------------------------------------------------------------
  // Tool 4: resolve_cascade — S5 cascade resolution
  // -------------------------------------------------------------------------

  server.tool(
    "resolve_cascade",
    "Resolve the dependency cascade chain for a given trigger. Returns which files need updating when a specific change occurs (e.g., phase_completion, requirements_change, file_split).",
    {
      trigger: z
        .string()
        .describe(
          "Cascade trigger: phase_completion, requirements_change, new_decision, open_question_resolved, project_structure, new_domain, file_exceeds_300_lines, file_split, database_schema, api_route, new_module, schema_change, pipeline_stage, etc."
        ),
      project_type: z
        .enum(["code", "data", "analytical", "mixed", "universal"])
        .default("universal")
        .describe("Project type determines which cascade tables are included"),
    },
    async ({ trigger, project_type }) => {
      // Defensive normalization — the enum above does not admit "general",
      // but keep the mapping in case the schema is ever widened.
      const cascadeContext = project_type === "general" ? "universal" : project_type;
      const result = resolveCascade([], trigger, cascadeContext);
      return jsonResult({
        trigger,
        project_type,
        cascade_chain: result.cascade_files,
        depth: result.depth,
      });
    }
  );

  // -------------------------------------------------------------------------
  // Tool 5: check_capability — model tier query
  // -------------------------------------------------------------------------

  server.tool(
    "check_capability",
    "Check whether a model tier supports a specific feature. Returns support status and recommendations for the given model tier (lite/standard/full).",
    {
      model_tier: z
        .enum(["lite", "standard", "full"])
        .describe("Model tier: lite (Haiku), standard (Sonnet), full (Opus)"),
      feature_id: z
        .string()
        .describe("Feature ID to check (e.g., CTX-01, EXP-03, SCAN-02)"),
    },
    async ({ model_tier, feature_id }) => {
      const tier = TIER_CAPABILITIES[model_tier];
      const features = getFeatures(reg);
      const feature = features[feature_id];

      if (!feature) {
        return jsonResult({
          feature_id,
          error: `Feature not found: ${feature_id}`,
          available_features: Object.keys(features),
        });
      }

      // Support is decided by the feature's domain prefix (e.g. "CTX-01" -> "CTX").
      const domain = feature_id.split("-")[0];
      const supported = tier.supported_domains.includes(domain);

      const recommendations = [];
      if (!supported) {
        recommendations.push(
          `Feature ${feature_id} (domain: ${domain}) is not supported at ${model_tier} tier.`
        );
        // Tiers are declared lowest-capability first, so the first tier that
        // carries the domain is the minimum required tier.
        const requiredTier = Object.entries(TIER_CAPABILITIES).find(([, t]) =>
          t.supported_domains.includes(domain)
        );
        if (requiredTier) {
          recommendations.push(`Minimum tier required: ${requiredTier[0]}`);
        }
      }

      return jsonResult({
        feature_id,
        feature_name: feature.name,
        domain,
        model_tier,
        supported,
        tier_info: tier,
        recommendations,
      });
    }
  );

  // -------------------------------------------------------------------------
  // Tool 6: lookup_feature — registry feature lookup
  // -------------------------------------------------------------------------

  server.tool(
    "lookup_feature",
    "Look up a feature by ID or search by keyword. Returns the full feature specification including domain, reads, writes, and dependencies.",
    {
      query: z
        .string()
        .describe("Feature ID (e.g., CTX-01) or keyword to search for"),
    },
    async ({ query }) => {
      const features = getFeatures(reg);
      const queryUpper = query.toUpperCase();

      // Direct ID match wins over keyword search.
      if (features[queryUpper]) {
        return jsonResult({ match: "exact", feature: features[queryUpper] });
      }

      // Case-insensitive substring search over id, name, and domain.
      const queryLower = query.toLowerCase();
      const matches = [];
      for (const [id, feat] of Object.entries(features)) {
        const searchable = `${id} ${feat.name || ""} ${feat.domain || ""}`.toLowerCase();
        if (searchable.includes(queryLower)) {
          matches.push({ id, ...feat });
        }
      }

      return jsonResult({
        match: matches.length > 0 ? "keyword" : "none",
        query,
        results: matches,
        total_features: Object.keys(features).length,
      });
    }
  );

  // -------------------------------------------------------------------------
  // Tool 7: list_workflows — registry workflow listing
  // -------------------------------------------------------------------------

  server.tool(
    "list_workflows",
    "List all available methodology workflows, optionally filtered by project type. Returns workflow IDs, names, steps, and applicability.",
    {
      project_type: z
        .enum(["code", "data", "analytical", "mixed", "general", "all"])
        .default("all")
        .describe("Filter workflows by project type, or 'all' for complete list"),
    },
    async ({ project_type }) => {
      const workflows = getWorkflows(reg);
      let filtered = workflows;

      if (project_type !== "all") {
        // Universal workflows always apply; "mixed" projects see everything.
        filtered = workflows.filter((wf) => {
          const types = wf.project_types || ["universal"];
          return (
            types.includes("universal") ||
            types.includes(project_type) ||
            project_type === "mixed"
          );
        });
      }

      const result = filtered.map((wf) => ({
        id: wf.id,
        name: wf.name,
        project_types: wf.project_types || ["universal"],
        steps: (wf.steps || []).map((s) => ({
          stage: s.stage,
          action: s.action,
          features: s.features || [],
        })),
      }));

      return jsonResult({
        project_type,
        workflow_count: result.length,
        workflows: result,
      });
    }
  );

  // -------------------------------------------------------------------------
  // Tool 8: refactor_markdown — CTX-06 scale analysis
  // -------------------------------------------------------------------------

  server.tool(
    "refactor_markdown",
    "Analyze a markdown file for scale management (CTX-06). Reports line count, whether it exceeds the 300-line threshold, section breakdown, and a split plan if refactoring is needed. Advisory only — does not modify the file.",
    {
      file_path: z.string().describe("Path to the markdown file to analyze"),
    },
    async ({ file_path }) => jsonResult(analyzeMarkdown(file_path))
  );

  return server;
}
|
|
514
|
+
|
|
515
|
+
// ---------------------------------------------------------------------------
// Standalone entry point — starts stdio transport
// ---------------------------------------------------------------------------

/**
 * Build the MCP server and bind it to a stdio transport.
 *
 * @param {string} [registryPath] - Optional explicit path to the feature registry.
 * @returns {Promise<McpServer>} The connected server instance.
 */
export async function startServer(registryPath) {
  const mcpServer = createServer(registryPath);
  await mcpServer.connect(new StdioServerTransport());
  return mcpServer;
}
|
package/lib/pipeline.js
CHANGED
package/lib/registry.js
CHANGED
package/lib/watcher.js
ADDED
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Registry watcher — chokidar-based file monitoring with proactive validation.
|
|
3
|
+
*
|
|
4
|
+
* Watches:
|
|
5
|
+
* - Entry points (CLAUDE.md, .cursorrules, AGENT.md)
|
|
6
|
+
* - Feature registry (feature-registry.yaml)
|
|
7
|
+
* - Markdown files for 300-line threshold violations
|
|
8
|
+
*
|
|
9
|
+
* On change: runs S7 validation and reports results.
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
import { watch } from "chokidar";
|
|
13
|
+
import { readFileSync, statSync } from "node:fs";
|
|
14
|
+
import { resolve, basename, extname } from "node:path";
|
|
15
|
+
|
|
16
|
+
import { loadRegistry } from "./registry.js";
|
|
17
|
+
import { validateEntryPoint, detectProjectType } from "./pipeline.js";
|
|
18
|
+
|
|
19
|
+
// Maximum recommended markdown file length (CTX-06); files changed above this
// line count produce a scale warning.
const SCALE_THRESHOLD = 300;

// Entry point filenames that trigger full S7 validation when they change.
const ENTRY_POINT_NAMES = new Set(["CLAUDE.md", ".cursorrules", "AGENT.md"]);
// Intelligence-layer files: scale-check results for these are always
// reported, even when the file is under the threshold.
const INTELLIGENCE_LAYER = new Set([
  "STATE.md", "ROADMAP.md", "PLAN.md", "SUMMARY.md", "PROJECT.md",
]);
|
|
25
|
+
|
|
26
|
+
// ---------------------------------------------------------------------------
|
|
27
|
+
// Validation handlers
|
|
28
|
+
// ---------------------------------------------------------------------------
|
|
29
|
+
|
|
30
|
+
/**
 * Run S7 validation on a changed entry point file.
 *
 * The registry is reloaded per invocation, and the project type is
 * auto-detected from the file's parent directory (falling back to "general"
 * when detection is inconclusive).
 */
function validateEntry(filePath, registryPath) {
  const featureRegistry = loadRegistry(registryPath || undefined);
  const parentDir = resolve(filePath, "..");
  const projectType = detectProjectType(parentDir).project_type || "general";
  return {
    type: "entry_point",
    file: filePath,
    projectType,
    result: validateEntryPoint(filePath, projectType, featureRegistry),
  };
}
|
|
38
|
+
|
|
39
|
+
/**
 * Count the lines of a file and compare against the CTX-06 scale threshold.
 * Returns an error report instead of throwing when the file cannot be read.
 */
function checkScale(filePath) {
  let text;
  try {
    text = readFileSync(resolve(filePath), "utf-8");
  } catch {
    return { type: "scale_check", file: filePath, error: "Cannot read file" };
  }

  const totalLines = text.split("\n").length;
  return {
    type: "scale_check",
    file: filePath,
    line_count: totalLines,
    threshold: SCALE_THRESHOLD,
    over_threshold: totalLines > SCALE_THRESHOLD,
  };
}
|
|
55
|
+
|
|
56
|
+
// ---------------------------------------------------------------------------
|
|
57
|
+
// Watcher creation
|
|
58
|
+
// ---------------------------------------------------------------------------
|
|
59
|
+
|
|
60
|
+
/**
 * Create a chokidar watcher over a project directory.
 *
 * Watches markdown files, feature-registry.yaml, and .cursorrules. On change:
 * entry points get full S7 validation, a registry change emits a reload
 * notification, and other markdown files get a CTX-06 scale check (reported
 * only when over threshold or when the file is intelligence-layer).
 *
 * @param {string} directory - Root directory to watch.
 * @param {object} [options] - { registryPath, onResult } — onResult defaults
 *   to the console reporter.
 * @returns {import("chokidar").FSWatcher} The live watcher instance.
 */
export function createWatcher(directory, options = {}) {
  const root = resolve(directory);
  const registryPath = options.registryPath || null;
  const report = options.onResult || defaultReporter;

  const handleChange = (filePath) => {
    const name = basename(filePath);

    if (ENTRY_POINT_NAMES.has(name)) {
      // Entry point changed — run full S7 validation.
      try {
        report(validateEntry(filePath, registryPath));
      } catch (err) {
        report({ type: "error", file: filePath, message: err.message });
      }
    } else if (name === "feature-registry.yaml") {
      // Registry changed — notify so downstream can reload.
      report({ type: "registry_reload", file: filePath, message: "Registry changed — reload triggered" });
    } else if (extname(filePath) === ".md") {
      // Plain markdown changed — run the scale check; only surface it when
      // over threshold or when the file is an intelligence-layer file.
      const scale = checkScale(filePath);
      if (scale.over_threshold || INTELLIGENCE_LAYER.has(name)) {
        report(scale);
      }
    }
  };

  // Markdown anywhere under the root, the registry yaml, and the top-level
  // .cursorrules file (which has no .md extension, so it needs its own glob).
  const watcher = watch(
    [`${root}/**/*.md`, `${root}/**/feature-registry.yaml`, `${root}/.cursorrules`],
    {
      ignored: ["**/node_modules/**", "**/.git/**", "**/dist/**", "**/build/**"],
      persistent: true,
      ignoreInitial: true,
      // Debounce: wait for writes to settle before reacting.
      awaitWriteFinish: {
        stabilityThreshold: 300,
        pollInterval: 100,
      },
    }
  );

  watcher.on("change", handleChange);
  watcher.on("add", (filePath) => {
    if (ENTRY_POINT_NAMES.has(basename(filePath))) {
      report({ type: "entry_point_added", file: filePath, message: "New entry point detected" });
    }
  });

  return watcher;
}
|
|
129
|
+
|
|
130
|
+
// ---------------------------------------------------------------------------
|
|
131
|
+
// Default reporter — console output
|
|
132
|
+
// ---------------------------------------------------------------------------
|
|
133
|
+
|
|
134
|
+
/**
 * Default watcher reporter — writes human-readable report lines to stdout
 * (console.log) and errors to stderr (console.error).
 *
 * Fix: a scale_check report carrying an `error` field (emitted by checkScale
 * when the file cannot be read, and forwarded for intelligence-layer files)
 * was previously dropped silently because only `over_threshold` was checked.
 *
 * @param {object} report - Report object produced by the watcher handlers.
 */
function defaultReporter(report) {
  // HH:MM:SS slice of the ISO timestamp, e.g. "14:03:27".
  const timestamp = new Date().toISOString().substring(11, 19);

  switch (report.type) {
    case "entry_point": {
      const status = report.result.valid ? "PASS" : "FAIL";
      console.log(`[${timestamp}] Entry point ${status}: ${report.file}`);
      // On failure, list each validation issue on its own indented line.
      if (!report.result.valid && report.result.issues) {
        for (const issue of report.result.issues) {
          console.log(`  [${issue.severity}] ${issue.check}: ${issue.description}`);
        }
      }
      break;
    }
    case "scale_check":
      if (report.error) {
        // Surface read failures instead of swallowing them.
        console.error(`[${timestamp}] Scale check failed: ${report.file} — ${report.error}`);
      } else if (report.over_threshold) {
        console.log(
          `[${timestamp}] Scale warning: ${report.file} (${report.line_count} lines, threshold: ${report.threshold})`
        );
      }
      break;
    case "registry_reload":
      console.log(`[${timestamp}] ${report.message}`);
      break;
    case "entry_point_added":
      console.log(`[${timestamp}] ${report.message}: ${report.file}`);
      break;
    case "error":
      console.error(`[${timestamp}] Error: ${report.file} — ${report.message}`);
      break;
  }
}
|