agent-method 1.5.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +343 -0
- package/bin/wwa.js +115 -0
- package/docs/internal/cli-commands.yaml +259 -0
- package/docs/internal/doc-tokens.yaml +1103 -0
- package/docs/internal/feature-registry.yaml +1643 -0
- package/lib/boundaries.js +247 -0
- package/lib/cli/add.js +170 -0
- package/lib/cli/casestudy.js +1000 -0
- package/lib/cli/check.js +323 -0
- package/lib/cli/close.js +838 -0
- package/lib/cli/completion.js +735 -0
- package/lib/cli/deps.js +234 -0
- package/lib/cli/digest.js +73 -0
- package/lib/cli/doc-review.js +486 -0
- package/lib/cli/docs.js +315 -0
- package/lib/cli/helpers.js +198 -0
- package/lib/cli/implement.js +169 -0
- package/lib/cli/init.js +280 -0
- package/lib/cli/pipeline.js +206 -0
- package/lib/cli/plan.js +140 -0
- package/lib/cli/record.js +98 -0
- package/lib/cli/refine.js +202 -0
- package/lib/cli/report-helpers.js +113 -0
- package/lib/cli/review.js +76 -0
- package/lib/cli/routable.js +109 -0
- package/lib/cli/route.js +101 -0
- package/lib/cli/scan.js +133 -0
- package/lib/cli/serve.js +23 -0
- package/lib/cli/status.js +65 -0
- package/lib/cli/update-docs.js +574 -0
- package/lib/cli/upgrade.js +222 -0
- package/lib/cli/watch.js +32 -0
- package/lib/dependencies.js +196 -0
- package/lib/init.js +692 -0
- package/lib/mcp-server.js +612 -0
- package/lib/pipeline.js +907 -0
- package/lib/registry.js +132 -0
- package/lib/watcher.js +165 -0
- package/package.json +54 -0
- package/templates/README.md +363 -0
- package/templates/entry-points/.cursorrules +90 -0
- package/templates/entry-points/AGENT.md +90 -0
- package/templates/entry-points/CLAUDE.md +88 -0
- package/templates/extensions/MANIFEST.md +110 -0
- package/templates/extensions/analytical-system.md +96 -0
- package/templates/extensions/code-project.md +77 -0
- package/templates/extensions/data-exploration.md +117 -0
- package/templates/full/.context/BASE.md +101 -0
- package/templates/full/.context/COMPOSITION.md +47 -0
- package/templates/full/.context/INDEX.yaml +56 -0
- package/templates/full/.context/METHODOLOGY.md +246 -0
- package/templates/full/.context/PROTOCOL.yaml +169 -0
- package/templates/full/.context/REGISTRY.md +75 -0
- package/templates/full/.cursorrules +90 -0
- package/templates/full/AGENT.md +90 -0
- package/templates/full/CLAUDE.md +90 -0
- package/templates/full/Management/DIGEST.md +23 -0
- package/templates/full/Management/STATUS.md +46 -0
- package/templates/full/PLAN.md +67 -0
- package/templates/full/PROJECT-PROFILE.md +61 -0
- package/templates/full/PROJECT.md +80 -0
- package/templates/full/REQUIREMENTS.md +30 -0
- package/templates/full/ROADMAP.md +39 -0
- package/templates/full/Reviews/INDEX.md +41 -0
- package/templates/full/Reviews/backlog.md +52 -0
- package/templates/full/Reviews/plan.md +43 -0
- package/templates/full/Reviews/project.md +41 -0
- package/templates/full/Reviews/requirements.md +42 -0
- package/templates/full/Reviews/roadmap.md +41 -0
- package/templates/full/Reviews/state.md +56 -0
- package/templates/full/SESSION-LOG.md +102 -0
- package/templates/full/STATE.md +42 -0
- package/templates/full/SUMMARY.md +27 -0
- package/templates/full/agentWorkflows/INDEX.md +42 -0
- package/templates/full/agentWorkflows/observations.md +65 -0
- package/templates/full/agentWorkflows/patterns.md +68 -0
- package/templates/full/agentWorkflows/sessions.md +92 -0
- package/templates/full/intro/README.md +39 -0
- package/templates/full/registry/feature-registry.yaml +25 -0
- package/templates/full/registry/features/catalog.yaml +743 -0
- package/templates/full/registry/features/protocol.yaml +121 -0
- package/templates/full/registry/features/routing.yaml +358 -0
- package/templates/full/registry/features/workflows.yaml +404 -0
- package/templates/full/todos/backlog.md +19 -0
- package/templates/starter/.context/BASE.md +66 -0
- package/templates/starter/.context/INDEX.yaml +51 -0
- package/templates/starter/.context/METHODOLOGY.md +228 -0
- package/templates/starter/.context/PROTOCOL.yaml +165 -0
- package/templates/starter/.cursorrules +90 -0
- package/templates/starter/AGENT.md +90 -0
- package/templates/starter/CLAUDE.md +90 -0
- package/templates/starter/Management/DIGEST.md +23 -0
- package/templates/starter/Management/STATUS.md +46 -0
- package/templates/starter/PLAN.md +67 -0
- package/templates/starter/PROJECT-PROFILE.md +44 -0
- package/templates/starter/PROJECT.md +80 -0
- package/templates/starter/ROADMAP.md +39 -0
- package/templates/starter/Reviews/INDEX.md +75 -0
- package/templates/starter/SESSION-LOG.md +102 -0
- package/templates/starter/STATE.md +42 -0
- package/templates/starter/SUMMARY.md +27 -0
- package/templates/starter/agentWorkflows/INDEX.md +61 -0
- package/templates/starter/intro/README.md +37 -0
- package/templates/starter/registry/feature-registry.yaml +25 -0
- package/templates/starter/registry/features/catalog.yaml +743 -0
- package/templates/starter/registry/features/protocol.yaml +121 -0
- package/templates/starter/registry/features/routing.yaml +358 -0
- package/templates/starter/registry/features/workflows.yaml +404 -0
|
@@ -0,0 +1,612 @@
|
|
|
1
|
+
/**
 * MCP server — exposes methodology pipeline and registry as agent-callable tools.
 *
 * 9 tools:
 *   route_query            S1-S5     End-to-end query routing
 *   list_cli_commands      CLI       Safe CLI command discovery for agents
 *   validate_entry_point   S7        Entry point validation
 *   detect_project         S8        Project type detection
 *   resolve_cascade        S5        Cascade chain resolution
 *   check_capability       Registry  Model tier capability check
 *   lookup_feature         Registry  Feature lookup by ID or keyword
 *   list_workflows         Registry  List available workflows
 *   refactor_markdown      CTX-06    Markdown scale analysis
 *
 * Uses @modelcontextprotocol/sdk McpServer with stdio transport.
 */
|
|
16
|
+
|
|
17
|
+
import { readFileSync, statSync } from "node:fs";
import { createRequire } from "node:module";
import { resolve, join } from "node:path";

import { z } from "zod";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";

import { loadRegistry, getFeatures, getWorkflows, getVersion } from "./registry.js";
import {
  classify,
  selectWorkflow,
  resolveFeatures,
  computeFileSets,
  resolveCascade,
  validateEntryPoint,
  detectProjectType,
} from "./pipeline.js";
|
|
33
|
+
|
|
34
|
+
// ---------------------------------------------------------------------------
|
|
35
|
+
// Registry singleton (loaded once at startup)
|
|
36
|
+
// ---------------------------------------------------------------------------
|
|
37
|
+
|
|
38
|
+
// Lazily-created registry shared by all tools for the process lifetime.
// The path passed on the first call wins; later calls reuse the cached
// instance and ignore their argument.
let _registry = null;

/**
 * Return the process-wide registry, loading it on first use.
 *
 * @param {string} [registryPath] - Registry location, honored only on the
 *   first call; falsy values fall back to the loader's default.
 * @returns {object} The loaded registry.
 */
function registry(registryPath) {
  if (_registry) {
    return _registry;
  }
  _registry = loadRegistry(registryPath || undefined);
  return _registry;
}
|
|
46
|
+
|
|
47
|
+
// ---------------------------------------------------------------------------
|
|
48
|
+
// Capability tiers — maps model tiers to supported feature domains
|
|
49
|
+
// ---------------------------------------------------------------------------
|
|
50
|
+
|
|
51
|
+
// Capability matrix keyed by model tier. Each tier lists the feature-ID
// domain prefixes (e.g. "CTX" from "CTX-01") it supports, plus an advisory
// max_features cap. Consumed by the check_capability tool.
const TIER_CAPABILITIES = {
  // Smallest tier: context, query-routing, and state domains only.
  lite: {
    supported_domains: ["CTX", "QRY", "STT"],
    max_features: 12,
    notes: "Haiku-class models. Core context and query routing only.",
  },
  // Mid tier: adds the TSK, HAI, and TPL domains.
  standard: {
    supported_domains: ["CTX", "QRY", "STT", "TSK", "HAI", "TPL"],
    max_features: 24,
    notes: "Sonnet-class models. Full methodology minus exploration/discovery.",
  },
  // Top tier: every domain, including exploration (EXP) and scanning (SCAN).
  full: {
    supported_domains: ["CTX", "QRY", "STT", "TSK", "HAI", "TPL", "EXP", "SCAN"],
    max_features: 32,
    notes: "Opus-class models. Complete methodology surface area.",
  },
};
|
|
68
|
+
|
|
69
|
+
// ---------------------------------------------------------------------------
|
|
70
|
+
// Markdown analysis helpers (CTX-06 scale management)
|
|
71
|
+
// ---------------------------------------------------------------------------
|
|
72
|
+
|
|
73
|
+
// Files longer than this many lines are flagged for refactoring (CTX-06).
const SCALE_THRESHOLD = 300;

/**
 * Analyze a markdown file for CTX-06 scale management.
 *
 * Reports total line count, whether the file exceeds SCALE_THRESHOLD, a
 * section breakdown parsed from ATX headings (levels 1-4), and — when over
 * the threshold — an advisory split plan. Never modifies the file.
 *
 * Fix over the original: lines inside fenced code blocks (``` or ~~~) are
 * skipped, so `# comment` lines in embedded code samples are no longer
 * mistaken for headings. (Fence tracking is a simple toggle; it does not
 * match fence lengths, which is sufficient for well-formed documents.)
 *
 * @param {string} filePath - Path to the markdown file.
 * @returns {object} Analysis result, or `{ error }` when the file is unreadable.
 */
function analyzeMarkdown(filePath) {
  const absPath = resolve(filePath);
  let content;
  try {
    content = readFileSync(absPath, "utf-8");
  } catch {
    return { error: `File not found: ${filePath}` };
  }

  const lines = content.split("\n");
  const lineCount = lines.length;
  const overThreshold = lineCount > SCALE_THRESHOLD;

  // Parse sections by ATX headings (levels 1-4), ignoring fenced code blocks.
  const sections = [];
  let currentSection = null;
  let inFence = false;

  for (let i = 0; i < lines.length; i++) {
    if (/^\s*(```|~~~)/.test(lines[i])) {
      inFence = !inFence;
      continue;
    }
    if (inFence) continue;

    const match = lines[i].match(/^(#{1,4})\s+(.+)/);
    if (match) {
      if (currentSection) {
        // end_line is the 1-indexed line just before this heading.
        currentSection.end_line = i;
        currentSection.line_count = currentSection.end_line - currentSection.start_line;
        sections.push(currentSection);
      }
      currentSection = {
        level: match[1].length,
        title: match[2].trim(),
        start_line: i + 1, // 1-indexed heading line
        end_line: null,
        line_count: 0,
      };
    }
  }
  // Close the trailing section, if any.
  if (currentSection) {
    currentSection.end_line = lines.length;
    currentSection.line_count = currentSection.end_line - currentSection.start_line;
    sections.push(currentSection);
  }

  const result = {
    file: filePath,
    line_count: lineCount,
    threshold: SCALE_THRESHOLD,
    over_threshold: overThreshold,
    sections,
  };

  // Generate a split plan only when the file is over the threshold.
  if (overThreshold) {
    const topSections = sections.filter((s) => s.level <= 2);
    const splitCandidates = topSections
      .filter((s) => s.line_count > 30)
      .sort((a, b) => b.line_count - a.line_count);

    result.split_plan = {
      strategy: "index_plus_components",
      description:
        "Split into index file (navigation + active content) and components subdirectory (archived sections grouped by semantic boundary).",
      candidates: splitCandidates.map((s) => ({
        section: s.title,
        lines: s.line_count,
        recommendation: s.line_count > 100 ? "extract to component" : "consider extracting",
      })),
      estimated_index_lines: lineCount - splitCandidates.reduce((sum, s) => sum + s.line_count, 0),
    };
  }

  return result;
}
|
|
145
|
+
|
|
146
|
+
// ---------------------------------------------------------------------------
|
|
147
|
+
// CLI command metadata helper (docs/internal/cli-commands.yaml)
|
|
148
|
+
// ---------------------------------------------------------------------------
|
|
149
|
+
|
|
150
|
+
/**
 * Load CLI command metadata from docs/internal/cli-commands.yaml (resolved
 * relative to the current working directory).
 *
 * Fix over the original: this file is an ES module, so the bare `require`
 * identifier is never in scope — the original call always threw and the
 * function could only ever return its catch-branch error. `createRequire`
 * (from node:module, imported at the top of the file) provides a working
 * CommonJS require for loading js-yaml.
 *
 * @returns {{version: ?string, commands: Array} | {error: string}}
 *   Parsed metadata, or `{ error }` when the file is missing, unparseable,
 *   or lacks the `cli_commands` array.
 */
function loadCliCommandsMetadata() {
  try {
    const require = createRequire(import.meta.url);
    const yaml = require("js-yaml");
    const cliPath = join(process.cwd(), "docs", "internal", "cli-commands.yaml");
    const raw = readFileSync(cliPath, "utf-8");
    const parsed = yaml.load(raw);
    if (!parsed || !Array.isArray(parsed.cli_commands)) {
      return { error: "cli-commands.yaml missing cli_commands array" };
    }
    return { version: parsed.version || null, commands: parsed.cli_commands };
  } catch (e) {
    return { error: `Unable to load CLI command metadata: ${e.message}` };
  }
}
|
|
166
|
+
|
|
167
|
+
// ---------------------------------------------------------------------------
|
|
168
|
+
// Server creation
|
|
169
|
+
// ---------------------------------------------------------------------------
|
|
170
|
+
|
|
171
|
+
/**
 * Create the wwa MCP server and register all tools.
 *
 * Improvements over the original: the nine copies of the
 * `{ content: [{ type: "text", text: JSON.stringify(..., null, 2) }] }`
 * wrapper are factored into one `jsonResult` helper, and the dead
 * `project_type === "general"` check in the resolve_cascade handler is
 * removed (that tool's enum only admits "universal", never "general").
 * All tool names, descriptions, schemas, and payload shapes are unchanged.
 *
 * @param {string} [registryPath] - Optional registry path, honored on first load.
 * @returns {McpServer} Configured server (not yet connected to a transport).
 */
export function createServer(registryPath) {
  const reg = registry(registryPath);
  const version = getVersion(reg);

  const server = new McpServer({
    name: "wwa",
    version: version,
  });

  // Shared response shape: every tool returns a single pretty-printed JSON
  // text payload.
  const jsonResult = (payload) => ({
    content: [{ type: "text", text: JSON.stringify(payload, null, 2) }],
  });

  // -------------------------------------------------------------------------
  // Tool 1: route_query — end-to-end pipeline (S1-S5)
  // -------------------------------------------------------------------------

  server.tool(
    "route_query",
    "Route a natural language query through the methodology pipeline (S1-S5). Returns query classification, workflow selection, resolved features, and computed read/write file sets.",
    {
      query: z.string().describe("The natural language query to route"),
      project_type: z
        .enum(["code", "data", "analytical", "mixed", "general"])
        .default("general")
        .describe("Project type for context-aware routing"),
      stage: z
        .string()
        .default("scope")
        .describe("Pipeline stage: scope, act, or verify"),
    },
    async ({ query, project_type, stage }) => {
      const classification = classify(query, project_type, reg);
      const selection = selectWorkflow(classification.query_type, project_type);
      const resolution = resolveFeatures(selection.workflow_id, stage, reg);
      const fileSets = computeFileSets(resolution.features, classification.query_type, reg);
      // Cascade tables use "universal" where this tool's schema uses "general".
      const cascade = resolveCascade([], null, project_type === "general" ? "universal" : project_type);

      return jsonResult({
        query,
        project_type,
        stage,
        query_type: classification.query_type,
        confidence: classification.confidence,
        matched: classification.matched,
        workflow: selection,
        features: resolution.features,
        feature_source: resolution.source,
        read_set: fileSets.read_set,
        write_set: fileSets.write_set,
        cascade: cascade,
      });
    }
  );

  // -------------------------------------------------------------------------
  // Helper: list_cli_commands — expose CLI command metadata to agents
  // -------------------------------------------------------------------------

  server.tool(
    "list_cli_commands",
    "List wwa CLI commands from docs/internal/cli-commands.yaml so agents can safely discover and use them.",
    {
      category: z
        .enum([
          "setup",
          "workflow",
          "routable",
          "add",
          "analysis",
          "pipeline",
          "server",
          "completion",
          "any",
        ])
        .default("any")
        .describe("Filter commands by category (or 'any' for all)."),
      safe_only: z
        .boolean()
        .default(true)
        .describe("If true, only return commands marked safe_for_agent."),
    },
    async ({ category, safe_only }) => {
      const meta = loadCliCommandsMetadata();
      if (meta.error) {
        return jsonResult({ error: meta.error });
      }

      let commands = meta.commands;
      if (category !== "any") {
        commands = commands.filter((c) => c.category === category);
      }
      if (safe_only) {
        commands = commands.filter((c) => c.safe_for_agent);
      }

      return jsonResult({
        version: meta.version,
        count: commands.length,
        commands,
      });
    }
  );

  // -------------------------------------------------------------------------
  // Tool 2: validate_entry_point — S7 validation
  // -------------------------------------------------------------------------

  server.tool(
    "validate_entry_point",
    "Validate an entry point file (CLAUDE.md, .cursorrules, AGENT.md) against the methodology registry. Checks scoping coverage, cascade completeness, convention coverage, workflow references, and scale management.",
    {
      file_path: z.string().describe("Path to the entry point file"),
      project_type: z
        .enum(["code", "data", "analytical", "mixed", "general"])
        .default("general")
        .describe("Project type for validation context"),
    },
    async ({ file_path, project_type }) => {
      const result = validateEntryPoint(file_path, project_type, reg);
      return jsonResult(result);
    }
  );

  // -------------------------------------------------------------------------
  // Tool 3: detect_project — S8 detection
  // -------------------------------------------------------------------------

  server.tool(
    "detect_project",
    "Detect project type from directory contents. Analyzes file patterns, extensions, and directory structure to classify a project as code, data, analytical, mixed, or general.",
    {
      directory: z
        .string()
        .default(".")
        .describe("Directory to analyze"),
    },
    async ({ directory }) => {
      const result = detectProjectType(directory);
      return jsonResult(result);
    }
  );

  // -------------------------------------------------------------------------
  // Tool 4: resolve_cascade — S5 cascade resolution
  // -------------------------------------------------------------------------

  server.tool(
    "resolve_cascade",
    "Resolve the dependency cascade chain for a given trigger. Returns which files need updating when a specific change occurs (e.g., phase_completion, requirements_change, file_split).",
    {
      trigger: z
        .string()
        .describe(
          "Cascade trigger: phase_completion, requirements_change, new_decision, open_question_resolved, project_structure, new_domain, file_exceeds_300_lines, file_split, database_schema, api_route, new_module, schema_change, pipeline_stage, etc."
        ),
      project_type: z
        .enum(["code", "data", "analytical", "mixed", "universal"])
        .default("universal")
        .describe("Project type determines which cascade tables are included"),
    },
    async ({ trigger, project_type }) => {
      // Unlike route_query, this tool's enum already uses "universal" (not
      // "general"), so no remapping is needed before the cascade lookup.
      const result = resolveCascade([], trigger, project_type);
      return jsonResult({
        trigger,
        project_type,
        cascade_chain: result.cascade_files,
        depth: result.depth,
      });
    }
  );

  // -------------------------------------------------------------------------
  // Tool 5: check_capability — model tier query
  // -------------------------------------------------------------------------

  server.tool(
    "check_capability",
    "Check whether a model tier supports a specific feature. Returns support status and recommendations for the given model tier (lite/standard/full).",
    {
      model_tier: z
        .enum(["lite", "standard", "full"])
        .describe("Model tier: lite (Haiku), standard (Sonnet), full (Opus)"),
      feature_id: z
        .string()
        .describe("Feature ID to check (e.g., CTX-01, EXP-03, SCAN-02)"),
    },
    async ({ model_tier, feature_id }) => {
      const tier = TIER_CAPABILITIES[model_tier];
      const features = getFeatures(reg);
      const feature = features[feature_id];

      if (!feature) {
        return jsonResult({
          feature_id,
          error: `Feature not found: ${feature_id}`,
          available_features: Object.keys(features),
        });
      }

      // Domain is the ID prefix before the first dash (e.g. "CTX" of "CTX-01").
      const domain = feature_id.split("-")[0];
      const supported = tier.supported_domains.includes(domain);

      const recommendations = [];
      if (!supported) {
        recommendations.push(
          `Feature ${feature_id} (domain: ${domain}) is not supported at ${model_tier} tier.`
        );
        // First tier (in declaration order) that supports this domain.
        const requiredTier = Object.entries(TIER_CAPABILITIES).find(([, t]) =>
          t.supported_domains.includes(domain)
        );
        if (requiredTier) {
          recommendations.push(`Minimum tier required: ${requiredTier[0]}`);
        }
      }

      return jsonResult({
        feature_id,
        feature_name: feature.name,
        domain,
        model_tier,
        supported,
        tier_info: tier,
        recommendations,
      });
    }
  );

  // -------------------------------------------------------------------------
  // Tool 6: lookup_feature — registry feature lookup
  // -------------------------------------------------------------------------

  server.tool(
    "lookup_feature",
    "Look up a feature by ID or search by keyword. Returns the full feature specification including domain, reads, writes, and dependencies.",
    {
      query: z
        .string()
        .describe("Feature ID (e.g., CTX-01) or keyword to search for"),
    },
    async ({ query }) => {
      const features = getFeatures(reg);
      const queryUpper = query.toUpperCase();

      // Direct ID match wins over keyword search.
      if (features[queryUpper]) {
        return jsonResult({ match: "exact", feature: features[queryUpper] });
      }

      // Keyword search over ID, name, and domain.
      const queryLower = query.toLowerCase();
      const matches = [];
      for (const [id, feat] of Object.entries(features)) {
        const searchable = `${id} ${feat.name || ""} ${feat.domain || ""}`.toLowerCase();
        if (searchable.includes(queryLower)) {
          matches.push({ id, ...feat });
        }
      }

      return jsonResult({
        match: matches.length > 0 ? "keyword" : "none",
        query,
        results: matches,
        total_features: Object.keys(features).length,
      });
    }
  );

  // -------------------------------------------------------------------------
  // Tool 7: list_workflows — registry workflow listing
  // -------------------------------------------------------------------------

  server.tool(
    "list_workflows",
    "List all available methodology workflows, optionally filtered by project type. Returns workflow IDs, names, steps, and applicability.",
    {
      project_type: z
        .enum(["code", "data", "analytical", "mixed", "general", "all"])
        .default("all")
        .describe("Filter workflows by project type, or 'all' for complete list"),
    },
    async ({ project_type }) => {
      const workflows = getWorkflows(reg);
      let filtered = workflows;

      if (project_type !== "all") {
        filtered = workflows.filter((wf) => {
          // Workflows with no project_types default to universal; "mixed"
          // projects see every workflow.
          const types = wf.project_types || ["universal"];
          return (
            types.includes("universal") ||
            types.includes(project_type) ||
            project_type === "mixed"
          );
        });
      }

      const result = filtered.map((wf) => ({
        id: wf.id,
        name: wf.name,
        project_types: wf.project_types || ["universal"],
        steps: (wf.steps || []).map((s) => ({
          stage: s.stage,
          action: s.action,
          features: s.features || [],
        })),
      }));

      return jsonResult({
        project_type,
        workflow_count: result.length,
        workflows: result,
      });
    }
  );

  // -------------------------------------------------------------------------
  // Tool 8: refactor_markdown — CTX-06 scale analysis
  // -------------------------------------------------------------------------

  server.tool(
    "refactor_markdown",
    "Analyze a markdown file for scale management (CTX-06). Reports line count, whether it exceeds the 300-line threshold, section breakdown, and a split plan if refactoring is needed. Advisory only — does not modify the file.",
    {
      file_path: z.string().describe("Path to the markdown file to analyze"),
    },
    async ({ file_path }) => {
      const result = analyzeMarkdown(file_path);
      return jsonResult(result);
    }
  );

  return server;
}
|
|
602
|
+
|
|
603
|
+
// ---------------------------------------------------------------------------
|
|
604
|
+
// Standalone entry point — starts stdio transport
|
|
605
|
+
// ---------------------------------------------------------------------------
|
|
606
|
+
|
|
607
|
+
/**
 * Standalone entry point: build the MCP server and attach it to a stdio
 * transport.
 *
 * @param {string} [registryPath] - Optional registry path forwarded to createServer.
 * @returns {Promise<McpServer>} The connected server instance.
 */
export async function startServer(registryPath) {
  const mcp = createServer(registryPath);
  await mcp.connect(new StdioServerTransport());
  return mcp;
}
|