levante 0.3.6 → 0.3.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/mcp.js +206 -76
- package/package.json +1 -1
package/dist/mcp.js
CHANGED
@@ -28540,7 +28540,7 @@ class StdioServerTransport {
 // src/mcp.ts
 import { execSync } from "node:child_process";
 import { existsSync as existsSync5, readFileSync as readFileSync5 } from "node:fs";
-import { join as
+import { join as join6 } from "node:path";
 
 // src/agents/loadAgent.ts
 import { readFileSync, existsSync as existsSync2, readdirSync } from "node:fs";
@@ -28726,6 +28726,44 @@ function parseSections(body) {
   return sections;
 }
 
+// src/config/paths.ts
+import { join as join3 } from "node:path";
+function resolvePaths(config2, key) {
+  const root = getProjectRoot();
+  const workingDir = join3(root, config2.paths.workingDir);
+  const testsDir = join3(root, config2.paths.tests);
+  const recordingsDir = join3(root, config2.paths.recordings);
+  const transcriptsDir = join3(root, config2.paths.transcripts);
+  const tracesDir = join3(root, config2.paths.traces);
+  const qaDir = join3(root, config2.paths.qaOutput);
+  const keyDir = key ? join3(workingDir, key) : null;
+  const testDir = key ? join3(testsDir, key) : null;
+  const testFile = key ? join3(testsDir, key, `${key}.test.ts`) : null;
+  const scenarioDir = key ? join3(testsDir, key) : null;
+  const scenarioFile = key ? join3(testsDir, key, `${key}.yaml`) : null;
+  const qaFile = key ? join3(qaDir, `${key}.md`) : null;
+  const needsZephyr = config2.outputTarget === "zephyr" || config2.outputTarget === "both";
+  const zephyrJsonFile = needsZephyr && key ? join3(workingDir, key, `${key}-zephyr-test-case.json`) : null;
+  const zephyrXmlFile = needsZephyr && key ? join3(testsDir, key, `${key}-zephyr-import.xml`) : null;
+  return {
+    projectRoot: root,
+    workingDir,
+    keyDir,
+    testsDir,
+    testDir,
+    testFile,
+    scenarioDir,
+    scenarioFile,
+    recordingsDir,
+    transcriptsDir,
+    tracesDir,
+    qaDir,
+    qaFile,
+    zephyrJsonFile,
+    zephyrXmlFile
+  };
+}
+
 // src/scanner/scanner.ts
 import { readFileSync as readFileSync3, existsSync as existsSync3, mkdirSync, writeFileSync } from "node:fs";
 import { resolve as resolve2, relative as relative2 } from "node:path";
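The new resolvePaths helper centralizes every artifact location the pipeline touches. A minimal sketch of the mapping it produces, using node:path directly; the project root, directory names, and issue key below are hypothetical, since the real values come from getProjectRoot() and the levante config:

    import { join } from "node:path";

    // Hypothetical project root, config paths, and issue key (for illustration only).
    const root = "/repo";
    const key = "PROJ-101";
    const testsDir = join(root, "e2e/tests"); // config.paths.tests
    const qaDir = join(root, "e2e/qa");       // config.paths.qaOutput

    // Mirrors what resolvePaths computes for testFile, scenarioFile, and qaFile:
    const testFile = join(testsDir, key, `${key}.test.ts`);  // /repo/e2e/tests/PROJ-101/PROJ-101.test.ts
    const scenarioFile = join(testsDir, key, `${key}.yaml`); // /repo/e2e/tests/PROJ-101/PROJ-101.yaml
    const qaFile = join(qaDir, `${key}.md`);                 // /repo/e2e/qa/PROJ-101.md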
@@ -32218,10 +32256,10 @@ class TypeScriptParser {
 
 // src/scanner/extractors/routes.ts
 import { readdirSync as readdirSync2, statSync, readFileSync as readFileSync2 } from "node:fs";
-import { join as
+import { join as join4, relative, basename, dirname as dirname2 } from "node:path";
 function extractRoutes(scanDir) {
   const routes = [];
-  const appDirs = ["app", "src/app"].map((d) =>
+  const appDirs = ["app", "src/app"].map((d) => join4(scanDir, d));
   for (const appDir of appDirs) {
     try {
       if (statSync(appDir).isDirectory()) {
@@ -32229,7 +32267,7 @@ function extractRoutes(scanDir) {
       }
     } catch {}
   }
-  const pagesDirs = ["pages", "src/pages"].map((d) =>
+  const pagesDirs = ["pages", "src/pages"].map((d) => join4(scanDir, d));
   for (const pagesDir of pagesDirs) {
     try {
       if (statSync(pagesDir).isDirectory()) {
@@ -32242,7 +32280,7 @@ function extractRoutes(scanDir) {
 function extractAppRouterRoutes(dir, baseDir, routes) {
   const entries = readdirSync2(dir, { withFileTypes: true });
   for (const entry of entries) {
-    const fullPath =
+    const fullPath = join4(dir, entry.name);
     if (entry.isDirectory()) {
       extractAppRouterRoutes(fullPath, baseDir, routes);
       continue;
@@ -32278,7 +32316,7 @@ function extractAppRouterRoutes(dir, baseDir, routes) {
 function extractPagesRouterRoutes(dir, baseDir, routes) {
   const entries = readdirSync2(dir, { withFileTypes: true });
   for (const entry of entries) {
-    const fullPath =
+    const fullPath = join4(dir, entry.name);
     if (entry.isDirectory()) {
       if (entry.name === "api") {
         extractPagesApiRoutes(fullPath, baseDir, routes);
@@ -32304,7 +32342,7 @@ function extractPagesRouterRoutes(dir, baseDir, routes) {
 function extractPagesApiRoutes(dir, baseDir, routes) {
   const entries = readdirSync2(dir, { withFileTypes: true });
   for (const entry of entries) {
-    const fullPath =
+    const fullPath = join4(dir, entry.name);
     if (entry.isDirectory()) {
       extractPagesApiRoutes(fullPath, baseDir, routes);
       continue;
@@ -32332,7 +32370,7 @@ function findLayoutFile(dir, baseDir) {
   let current = dir;
   while (current.startsWith(baseDir)) {
     for (const ext of [".tsx", ".ts", ".jsx", ".js"]) {
-      const layoutPath =
+      const layoutPath = join4(current, `layout${ext}`);
       try {
         statSync(layoutPath);
         return layoutPath;
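These extractors all follow the same recursion: readdirSync with withFileTypes, join for each child path, and a recursive call for directories. A stripped-down sketch of that walk; the page.tsx filter and the route mapping below are illustrative, not copied from the package:

    import { readdirSync } from "node:fs";
    import { join, relative } from "node:path";

    // Collect App Router page files under baseDir, mirroring the readdirSync + join
    // recursion used by extractAppRouterRoutes above.
    function collectPageFiles(dir: string, baseDir: string, found: string[] = []): string[] {
      for (const entry of readdirSync(dir, { withFileTypes: true })) {
        const fullPath = join(dir, entry.name);
        if (entry.isDirectory()) {
          collectPageFiles(fullPath, baseDir, found);
          continue;
        }
        if (entry.name === "page.tsx") {
          // e.g. app/dashboard/[id]/page.tsx -> "dashboard/[id]"
          found.push(relative(baseDir, fullPath).replace(/\/page\.tsx$/, ""));
        }
      }
      return found;
    }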
@@ -32793,7 +32831,7 @@ function requireEnum(obj, field, allowed, context, errors3) {
 
 // src/utils/scan.ts
 import { readdirSync as readdirSync3, existsSync as existsSync4, readFileSync as readFileSync4 } from "node:fs";
-import { join as
+import { join as join5, relative as relative3 } from "node:path";
 async function scanCodebase(root) {
   const scan = {
     testFiles: [],
@@ -32812,7 +32850,7 @@ async function scanCodebase(root) {
     for (const entry of readdirSync3(dir, { withFileTypes: true })) {
       if (entry.name.startsWith(".") || entry.name === "node_modules" || entry.name === "dist")
         continue;
-      const full =
+      const full = join5(dir, entry.name);
       if (entry.isDirectory()) {
         files.push(...walk(full, depth + 1));
       } else {
@@ -32849,7 +32887,7 @@ async function scanCodebase(root) {
     }
   }
   for (const name of ["playwright.config.ts", "vitest.config.ts", "jest.config.ts", "tsconfig.json", "package.json"]) {
-    if (existsSync4(
+    if (existsSync4(join5(root, name)))
       scan.configFiles.push(name);
   }
   return scan;
@@ -32898,38 +32936,60 @@ You have access to Levante, an AI-powered E2E test automation tool. Follow this
 
 NEVER run multiple pipeline steps at once. Each step is a separate job with its own context.
 
+## Two Modes of Execution
+
+Steps fall into two categories:
+
+### 1. Tool Steps — use \`levante_execute_step\`
+These steps do NOT require an LLM. Run them via \`levante_execute_step\`:
+- \`record\` — opens Playwright browser codegen (interactive)
+- \`test\` — runs Playwright tests
+- \`push\` — pushes QA map to remote API
+- \`scan\` — runs the filesystem/AST scan
+
+### 2. Agent Steps — YOU are the AI, do these yourself
+These steps require LLM reasoning. Do NOT call \`levante_execute_step\` for them. Instead, load the agent prompt and act on it directly:
+
+| Step | Agent to load | Input to read | Output to write |
+|------|--------------|---------------|-----------------|
+| \`transcribe\` | \`transcript-agent\` | codegen file via \`levante_read_pipeline_file(key, "codegen")\` | transcript JSON via \`levante_write_pipeline_file\` |
+| \`scenario\` | \`scenario-agent\` | codegen + optional transcript | scenario YAML via \`levante_write_pipeline_file(key, "scenario", ...)\` |
+| \`generate\` | \`playwright-generator-agent\` | scenario YAML | test file via \`levante_write_pipeline_file(key, "test", ...)\` |
+| \`refine\` | \`refactor-agent\` | test file | updated test file (overwrite via \`levante_write_pipeline_file\`) |
+| \`heal\` | \`self-healing-agent\` | test file + error output | patched test file |
+| \`qa\` | \`qa-testcase-agent\` | test file + scenario | QA markdown via \`levante_write_pipeline_file(key, "qa", ...)\` |
+| \`analyze\` | \`feature-analyzer-agent\` | AST via \`levante_scan_ast_detail\` | QA map via \`levante_build_qa_map\` |
+
+**Workflow for each agent step:**
+1. Call \`levante_read_agent("<agent-name>")\` to load the system prompt and config.
+2. Call \`levante_read_pipeline_file(key, "<type>")\` to load the input artifact.
+3. Read \`.qai/levante/context.md\` if it exists — this is project context the agent needs.
+4. Apply the agent system prompt to your own reasoning and produce the output.
+5. Call \`levante_write_pipeline_file(key, "<type>", content)\` to save the output.
+
 ## Protocol
 
-1. **Plan first.** Call \`levante_plan_workflow\` with the user's goal
-2. **
-3. **
-
-
-
-
-   d. If the step fails, stop and discuss with the user before continuing.
-   e. Move to the next step only after the current one succeeds.
-5. **Use subagents when available.** If your AI platform supports subagents (e.g., Claude Code Agent tool), dispatch each step as a dedicated subagent to preserve context. Each subagent should:
-   - Receive only the context it needs (step name, key, relevant file paths)
-   - Call \`levante_execute_step\` to do its work
-   - Return the result to the orchestrator
+1. **Plan first.** Call \`levante_plan_workflow\` with the user's goal to get a structured step list.
+2. **Present the plan.** Show the user the ordered steps. Ask for confirmation before proceeding.
+3. **Execute one step at a time.** For each step:
+   a. Announce which step you're running and why.
+   b. Use **Tool Steps** via \`levante_execute_step\`, or handle **Agent Steps** yourself (see above).
+   c. Report the result. If a step fails, stop and discuss before continuing.
+4. **Use subagents when available.** If your AI platform supports subagents (Claude Code Agent tool), dispatch each step as a dedicated subagent. Each subagent should receive only the context it needs.
 
 ## Step Dependencies
 
-Steps produce artifacts
+Steps produce artifacts consumed by later steps. Do not skip steps unless the plan says a step can be skipped.
 
 ## Interactive Steps
 
-The \`record\` step opens a browser
-- Tell the user they need to interact with the browser window
-- The step will block until they close the codegen window
-- After recording completes, proceed with the next step
+The \`record\` step opens a browser. Tell the user to interact with it — the step blocks until they close the codegen window.
 
 ## When Things Fail
 
-- If \`test\` fails and \`heal\` is in the plan, that's expected —
-- If
-- For any other failure, stop and ask the user how to proceed
+- If \`test\` fails and \`heal\` is in the plan, that's expected — do the \`heal\` agent step yourself.
+- If healing fails after multiple attempts, stop and show the user the last error.
+- For any other failure, stop and ask the user how to proceed.
 
 ## Available Workflows
 
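The "Workflow for each agent step" above boils down to a read-reason-write loop driven by the host. A sketch of the transcribe step from the host's side, assuming the standard MCP TypeScript SDK client; the server launch command and the levante_read_agent argument name are assumptions, not taken from the package:

    import { Client } from "@modelcontextprotocol/sdk/client/index.js";
    import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

    const client = new Client({ name: "levante-host", version: "0.0.0" });
    // Launch command is a guess; point it at however the levante MCP server is started locally.
    await client.connect(new StdioClientTransport({ command: "node", args: ["node_modules/levante/dist/mcp.js"] }));

    const key = "PROJ-101"; // hypothetical issue key

    // 1. Load the agent prompt and config (argument name assumed).
    const agent = await client.callTool({ name: "levante_read_agent", arguments: { name: "transcript-agent" } });

    // 2. Load the input artifact: the raw Playwright codegen for this key.
    const codegen = await client.callTool({ name: "levante_read_pipeline_file", arguments: { key, type: "codegen" } });

    // 3-4. The host model applies the agent prompt to the codegen and produces transcript JSON.
    const transcriptJson = JSON.stringify({ steps: [] }); // placeholder for model output

    // 5. Persist the output artifact.
    await client.callTool({ name: "levante_write_pipeline_file", arguments: { key, type: "transcript", content: transcriptJson } });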
@@ -32945,7 +33005,7 @@ Always use \`levante_plan_workflow\` to determine the right steps — don't gues
 
 ## Scanner Analysis (Interactive QA Map)
 
-For deep codebase analysis and QA map generation, use the interactive scanner workflow
+For deep codebase analysis and QA map generation, use the interactive scanner workflow:
 
 1. **Load the protocol.** Call \`levante_read_agent("scanner-agent")\` — this returns the full interactive protocol.
 2. **Scan.** Call \`levante_scan_ast()\` to run the AST scanner and get a compact summary.
@@ -32954,8 +33014,6 @@ For deep codebase analysis and QA map generation, use the interactive scanner wo
 5. **Build.** Construct the QA map payload and validate with \`levante_build_qa_map({ dryRun: true })\`.
 6. **Write.** Once validated and approved, call \`levante_build_qa_map({ dryRun: false })\` to save.
 7. **Read existing.** Use \`levante_read_qa_map()\` to load a previously generated QA map for incremental updates.
-
-This approach is preferred over \`scan → analyze\` CLI steps because it allows interactive refinement with the user.
 `.trim();
 var TEST_PIPELINE_STEPS = [
   {
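Continuing with the connected client from the earlier sketch, the scanner workflow above could be driven roughly like this; the category value and the QA map payload are assumptions, since the real shapes come from levante_read_agent("scanner-agent") and the scan results:

    // 2. Scan: run the AST scanner and get a compact summary (scanDir defaults to "src").
    const summary = await client.callTool({ name: "levante_scan_ast", arguments: {} });

    // 3. Drill in: pull details for one category; "routes" is an assumed category value.
    const routes = await client.callTool({
      name: "levante_scan_ast_detail",
      arguments: { category: "routes", limit: 50 },
    });

    // 5-6. Build: validate the QA map payload first, then write it once it passes.
    // The payload fields are omitted here; only the dryRun flag is shown.
    const check = await client.callTool({ name: "levante_build_qa_map", arguments: { dryRun: true } });
    const write = await client.callTool({ name: "levante_build_qa_map", arguments: { dryRun: false } });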
@@ -33043,41 +33101,20 @@ var SCANNER_PIPELINE_STEPS = [
 var ALL_STEPS = [...TEST_PIPELINE_STEPS, ...SCANNER_PIPELINE_STEPS];
 var STEP_REQUIREMENTS = {
   record: { envVars: [] },
-  transcribe: { envVars: [
-  scenario: { envVars: [
-
-
-  ] },
-  generate: { envVars: [
-    { name: "OPENAI_API_KEY", reason: "LLM calls require OpenAI API key", onlyIf: () => getProvider() === "openai" },
-    { name: "ANTHROPIC_API_KEY", reason: "LLM calls require Anthropic API key", onlyIf: () => getProvider() === "anthropic" }
-  ] },
-  refine: { envVars: [
-    { name: "OPENAI_API_KEY", reason: "LLM calls require OpenAI API key", onlyIf: () => getProvider() === "openai" },
-    { name: "ANTHROPIC_API_KEY", reason: "LLM calls require Anthropic API key", onlyIf: () => getProvider() === "anthropic" }
-  ] },
+  transcribe: { envVars: [] },
+  scenario: { envVars: [] },
+  generate: { envVars: [] },
+  refine: { envVars: [] },
   test: { envVars: [] },
-  heal: { envVars: [
-
-    { name: "ANTHROPIC_API_KEY", reason: "LLM calls require Anthropic API key", onlyIf: () => getProvider() === "anthropic" }
-  ] },
-  qa: { envVars: [
-    { name: "OPENAI_API_KEY", reason: "LLM calls require OpenAI API key", onlyIf: () => getProvider() === "openai" },
-    { name: "ANTHROPIC_API_KEY", reason: "LLM calls require Anthropic API key", onlyIf: () => getProvider() === "anthropic" }
-  ] },
+  heal: { envVars: [] },
+  qa: { envVars: [] },
   scan: { envVars: [] },
-  analyze: { envVars: [
-    { name: "OPENAI_API_KEY", reason: "LLM calls require OpenAI API key", onlyIf: () => getProvider() === "openai" },
-    { name: "ANTHROPIC_API_KEY", reason: "LLM calls require Anthropic API key", onlyIf: () => getProvider() === "anthropic" }
-  ] },
+  analyze: { envVars: [] },
   push: { envVars: [
     { name: "E2E_AI_API_URL", reason: "Push requires API URL (set E2E_AI_API_URL or push.apiUrl in config)" },
    { name: "E2E_AI_API_KEY", reason: "Push requires API key (set E2E_AI_API_KEY or push.apiKey in config)" }
   ] }
 };
-function getProvider() {
-  return process.env.AI_PROVIDER ?? "openai";
-}
 function checkPrerequisites(stepNames) {
   const issueMap = new Map;
   for (const stepName of stepNames) {
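With getProvider and the onlyIf hooks gone, the prerequisite check reduces to "is every listed env var set". A compact sketch of that logic, using the same envVars shape as STEP_REQUIREMENTS above:

    type EnvRequirement = { name: string; reason: string };
    type StepRequirements = Record<string, { envVars: EnvRequirement[] } | undefined>;

    // Collect one issue per missing environment variable across the requested steps,
    // mirroring the simplified checkPrerequisites loop shown above.
    function findMissingEnv(steps: string[], requirements: StepRequirements): string[] {
      const issues = new Set<string>();
      for (const step of steps) {
        for (const envReq of requirements[step]?.envVars ?? []) {
          if (!process.env[envReq.name]) {
            issues.add(`${envReq.name}: ${envReq.reason}`);
          }
        }
      }
      return [...issues];
    }

    // Only "push" still declares env vars, so this reports E2E_AI_API_URL / E2E_AI_API_KEY if unset.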
@@ -33085,8 +33122,6 @@ function checkPrerequisites(stepNames) {
     if (!reqs)
       continue;
     for (const envReq of reqs.envVars) {
-      if (envReq.onlyIf && !envReq.onlyIf())
-        continue;
       if (!process.env[envReq.name]) {
         const key = `env:${envReq.name}`;
         if (issueMap.has(key)) {
@@ -33230,7 +33265,7 @@ function executeStep(stepName, options) {
     args.push(...options.extraArgs);
   }
   const pkgRoot = getPackageRoot();
-  const cliBin =
+  const cliBin = join6(pkgRoot, "dist", "cli.js");
   const command = `node ${cliBin} ${args.join(" ")}`;
   try {
     const stdout = execSync(command, {
@@ -33257,7 +33292,7 @@ ${stderr}`,
     };
   }
 }
-var server = new McpServer({ name: "levante", version: "1.
+var server = new McpServer({ name: "levante", version: "1.6.0" }, { instructions: SERVER_INSTRUCTIONS });
 server.registerTool("levante_scan_codebase", {
   title: "Scan Codebase",
   description: "Scan a project directory for test files, configs, fixtures, path aliases, and sample test content. Use this during project setup or to understand test infrastructure.",
@@ -33309,13 +33344,108 @@ server.registerTool("levante_read_agent", {
     };
   }
 });
+server.registerTool("levante_read_pipeline_file", {
+  title: "Read Pipeline File",
+  description: "Read a pipeline artifact for a given issue key. Use this before an agent step to load the input. " + 'Types: "codegen" (raw Playwright codegen), "transcript" (voice transcript JSON), ' + '"scenario" (YAML test scenario), "test" (Playwright .test.ts), "qa" (QA markdown), "context" (project context.md).',
+  inputSchema: exports_external.object({
+    key: exports_external.string().optional().describe('Issue key (e.g. PROJ-101). Not required for "context" type.'),
+    type: exports_external.enum(["codegen", "transcript", "scenario", "test", "qa", "context"]).describe("Which artifact to read")
+  })
+}, async ({ key, type }) => {
+  try {
+    let filePath = null;
+    if (type === "context") {
+      filePath = join6(process.cwd(), CONFIG_DIR, "context.md");
+    } else {
+      if (!key) {
+        return {
+          content: [{ type: "text", text: "Error: key is required for this file type" }],
+          isError: true
+        };
+      }
+      const config2 = await loadConfig();
+      const paths = resolvePaths(config2, key);
+      if (type === "codegen") {
+        filePath = paths.keyDir ? join6(paths.keyDir, `${key}.codegen.ts`) : null;
+      } else if (type === "transcript") {
+        filePath = paths.keyDir ? join6(paths.keyDir, `${key}.transcript.json`) : null;
+      } else if (type === "scenario") {
+        filePath = paths.scenarioFile;
+      } else if (type === "test") {
+        filePath = paths.testFile;
+      } else if (type === "qa") {
+        filePath = paths.qaFile;
+      }
+    }
+    if (!filePath) {
+      return {
+        content: [{ type: "text", text: JSON.stringify({ found: false, path: null, reason: "Could not resolve path" }) }]
+      };
+    }
+    if (!existsSync5(filePath)) {
+      return {
+        content: [{ type: "text", text: JSON.stringify({ found: false, path: filePath }) }]
+      };
+    }
+    const fileContent = readFileSync5(filePath, "utf-8");
+    return {
+      content: [{ type: "text", text: JSON.stringify({ found: true, path: filePath, content: fileContent }) }]
+    };
+  } catch (err) {
+    const message = err instanceof Error ? err.message : String(err);
+    return {
+      content: [{ type: "text", text: `Error: ${message}` }],
+      isError: true
+    };
+  }
+});
+server.registerTool("levante_write_pipeline_file", {
+  title: "Write Pipeline File",
+  description: "Write a pipeline artifact after completing an agent step. " + 'Types: "transcript" (voice transcript JSON), "scenario" (YAML test scenario), ' + '"test" (Playwright .test.ts), "qa" (QA markdown).',
+  inputSchema: exports_external.object({
+    key: exports_external.string().describe("Issue key (e.g. PROJ-101)"),
+    type: exports_external.enum(["transcript", "scenario", "test", "qa"]).describe("Which artifact to write"),
+    content: exports_external.string().describe("The file content to write")
+  })
+}, async ({ key, type, content: fileContent }) => {
+  try {
+    const config2 = await loadConfig();
+    const paths = resolvePaths(config2, key);
+    const fileMap = {
+      transcript: paths.keyDir ? join6(paths.keyDir, `${key}.transcript.json`) : null,
+      scenario: paths.scenarioFile,
+      test: paths.testFile,
+      qa: paths.qaFile
+    };
+    const filePath = fileMap[type];
+    if (!filePath) {
+      return {
+        content: [{ type: "text", text: `Error: Could not resolve path for type "${type}" with key "${key}"` }],
+        isError: true
+      };
+    }
+    const { mkdirSync: mkdirSync2, writeFileSync: writeFileSync2 } = await import("node:fs");
+    const { dirname: dirname4 } = await import("node:path");
+    mkdirSync2(dirname4(filePath), { recursive: true });
+    writeFileSync2(filePath, fileContent, "utf-8");
+    return {
+      content: [{ type: "text", text: JSON.stringify({ written: true, path: filePath }) }]
+    };
+  } catch (err) {
+    const message = err instanceof Error ? err.message : String(err);
+    return {
+      content: [{ type: "text", text: `Error: ${message}` }],
+      isError: true
+    };
+  }
+});
 server.registerTool("levante_get_example", {
   title: "Get Example Context",
   description: `Returns the full example context markdown file that shows the expected format for ${CONFIG_DIR}/context.md.`,
   inputSchema: exports_external.object({})
 }, async () => {
   try {
-    const examplePath =
+    const examplePath = join6(getPackageRoot(), "templates", "e2e-ai.context.example.md");
     const content = readFileSync5(examplePath, "utf-8");
     return {
       content: [{ type: "text", text: content }]
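levante_read_pipeline_file returns its result as JSON inside a single text content block (the JSON.stringify calls in the handler above). A small helper a host might use to unwrap it; the result type mirrors the found/path/content/reason fields the handler writes:

    type ReadPipelineFileResult = {
      found: boolean;
      path: string | null;
      content?: string;
      reason?: string;
    };

    // Unwrap the first text block of a tools/call result and parse the JSON the handler wrote.
    function unwrapReadResult(result: { content: Array<{ type: string; text?: string }> }): ReadPipelineFileResult {
      const text = result.content.find((block) => block.type === "text")?.text ?? "null";
      return JSON.parse(text) as ReadPipelineFileResult;
    }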
@@ -33411,7 +33541,7 @@ server.registerTool("levante_get_workflow_guide", {
   inputSchema: exports_external.object({})
 }, async () => {
   try {
-    const guidePath =
+    const guidePath = join6(getPackageRoot(), "templates", "workflow.md");
     if (!existsSync5(guidePath)) {
       return {
         content: [{ type: "text", text: "Error: workflow.md not found in templates" }],
@@ -33442,15 +33572,15 @@ server.registerTool("levante_scan_ast", {
     const cwd = process.cwd();
     const dir = scanDir ?? "src";
     const scanConfig = {
-      scanDir:
+      scanDir: join6(cwd, dir),
       include: include ?? ["**/*.ts", "**/*.tsx", "**/*.js", "**/*.jsx"],
       exclude: exclude ?? ["**/node_modules/**", "**/dist/**", "**/build/**", "**/.next/**", "**/*.test.*", "**/*.spec.*", "**/__tests__/**"],
-      cacheDir:
+      cacheDir: join6(cwd, CONFIG_DIR, "cache")
     };
     const ast = await runStage1(scanConfig);
-    const astPath =
+    const astPath = join6(cwd, CONFIG_DIR, "ast-scan.json");
     const { mkdirSync: mkdirSync2, writeFileSync: writeFileSync2 } = await import("node:fs");
-    mkdirSync2(
+    mkdirSync2(join6(cwd, CONFIG_DIR), { recursive: true });
     writeFileSync2(astPath, JSON.stringify(ast, null, 2));
     const summary2 = summarizeAST(ast, astPath);
     return {
@@ -33473,7 +33603,7 @@ server.registerTool("levante_scan_ast_detail", {
   })
 }, async ({ category, filter, limit }) => {
   try {
-    const astPath =
+    const astPath = join6(process.cwd(), CONFIG_DIR, "ast-scan.json");
     if (!existsSync5(astPath)) {
       return {
         content: [{ type: "text", text: "Error: No AST scan found. Run levante_scan_ast first." }],
@@ -33509,7 +33639,7 @@ server.registerTool("levante_build_qa_map", {
       payload.commitSha = sha;
     } catch {}
   }
-  const outputPath = output ??
+  const outputPath = output ?? join6(process.cwd(), CONFIG_DIR, "qa-map.json");
   let written = false;
   if (validation.valid && !dryRun) {
     const { mkdirSync: mkdirSync2, writeFileSync: writeFileSync2 } = await import("node:fs");
@@ -33553,7 +33683,7 @@ server.registerTool("levante_read_qa_map", {
   })
 }, async ({ path }) => {
   try {
-    const mapPath = path ??
+    const mapPath = path ?? join6(process.cwd(), CONFIG_DIR, "qa-map.json");
     if (!existsSync5(mapPath)) {
       return {
         content: [{ type: "text", text: JSON.stringify(null) }]