adapt-api 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +75 -0
- package/bin/apiadapt.js +9 -0
- package/package.json +46 -0
- package/src/cli.js +57 -0
- package/src/commands/config.js +48 -0
- package/src/commands/eval.js +39 -0
- package/src/commands/manual.js +57 -0
- package/src/engine/SystemPrompt.txt +77 -0
- package/src/engine/disposition.js +105 -0
- package/src/engine/evaluator.js +53 -0
- package/src/engine/heuristics.js +73 -0
- package/src/engine/llmClients.js +184 -0
- package/src/formatters/index.js +36 -0
- package/src/formatters/jsonFormatter.js +3 -0
- package/src/formatters/pdfFormatter.js +48 -0
- package/src/formatters/textFormatter.js +30 -0
- package/src/utils/configStore.js +80 -0
- package/src/utils/prompts.js +13 -0
- package/src/utils/specLoader.js +65 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Pankaj Kumar Jain and Jahnavi Thirumalasetty
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
# adapt-api
|
|
2
|
+
|
|
3
|
+
Production-oriented Node.js CLI for ADAPT API readiness assessment.
|
|
4
|
+
|
|
5
|
+
## Features
|
|
6
|
+
|
|
7
|
+
- `config` command for persistent defaults (`llm`, `model`, `format`, `api key`, `base url`)
|
|
8
|
+
- `eval <file_path>` command to assess OpenAPI/RAML files
|
|
9
|
+
- Modular ADAPT engine (spec loading, heuristic checks, LLM evaluation, disposition rules)
|
|
10
|
+
- Output factory for `json`, `text`, or `pdf`
|
|
11
|
+
- Spinner-based progress with `ora`
|
|
12
|
+
- Local config persistence via `conf` in the user config directory
|
|
13
|
+
|
|
14
|
+
## Install
|
|
15
|
+
|
|
16
|
+
From npm (after publish):
|
|
17
|
+
|
|
18
|
+
```bash
|
|
19
|
+
npm install -g adapt-api
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
For local development:
|
|
23
|
+
|
|
24
|
+
```bash
|
|
25
|
+
npm install
|
|
26
|
+
```
|
|
27
|
+
|
|
28
|
+
For local command testing:
|
|
29
|
+
|
|
30
|
+
```bash
|
|
31
|
+
npm link
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
## Usage
|
|
35
|
+
|
|
36
|
+
### Configure defaults
|
|
37
|
+
|
|
38
|
+
```bash
|
|
39
|
+
apiadapt config --llm openai --model gpt-4o-mini --format json --api-key <key>
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
Or interactive:
|
|
43
|
+
|
|
44
|
+
```bash
|
|
45
|
+
apiadapt config
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
### Evaluate a spec
|
|
49
|
+
|
|
50
|
+
```bash
|
|
51
|
+
apiadapt eval ./openapi.yaml
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
Write explicit output:
|
|
55
|
+
|
|
56
|
+
```bash
|
|
57
|
+
apiadapt eval ./openapi.yaml --format pdf --output ./report.pdf
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
Print output to terminal:
|
|
61
|
+
|
|
62
|
+
```bash
|
|
63
|
+
apiadapt eval ./openapi.yaml --format text --print
|
|
64
|
+
```
|
|
65
|
+
|
|
66
|
+
## Commands
|
|
67
|
+
|
|
68
|
+
- `apiadapt config [--llm] [--model] [--format] [--api-key] [--base-url]`
|
|
69
|
+
- `apiadapt eval <file_path> [--format] [--output] [--print]`
|
|
70
|
+
|
|
71
|
+
## Notes
|
|
72
|
+
|
|
73
|
+
- OpenAI and Anthropic providers require an API key (`--api-key` in config).
|
|
74
|
+
- Ollama defaults to `http://localhost:11434`; override with `--base-url`.
|
|
75
|
+
- If LLM evaluation fails, the CLI falls back to heuristic assessment and still returns output.
|
package/bin/apiadapt.js
ADDED
package/package.json
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "adapt-api",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "ADAPT API readiness assessment CLI",
|
|
5
|
+
"license": "MIT",
|
|
6
|
+
"type": "module",
|
|
7
|
+
"homepage": "https://www.npmjs.com/package/adapt-api",
|
|
8
|
+
"bugs": {
|
|
9
|
+
"url": "https://www.npmjs.com/package/adapt-api"
|
|
10
|
+
},
|
|
11
|
+
"bin": {
|
|
12
|
+
"apiadapt": "./bin/apiadapt.js"
|
|
13
|
+
},
|
|
14
|
+
"files": [
|
|
15
|
+
"bin/",
|
|
16
|
+
"src/",
|
|
17
|
+
"README.md",
|
|
18
|
+
"LICENSE"
|
|
19
|
+
],
|
|
20
|
+
"publishConfig": {
|
|
21
|
+
"access": "public"
|
|
22
|
+
},
|
|
23
|
+
"scripts": {
|
|
24
|
+
"start": "node ./bin/apiadapt.js",
|
|
25
|
+
"verify": "node ./bin/apiadapt.js --help && node ./bin/apiadapt.js manual > /dev/null",
|
|
26
|
+
"prepublishOnly": "npm run verify"
|
|
27
|
+
},
|
|
28
|
+
"keywords": [
|
|
29
|
+
"cli",
|
|
30
|
+
"adapt",
|
|
31
|
+
"openapi",
|
|
32
|
+
"raml",
|
|
33
|
+
"assessment"
|
|
34
|
+
],
|
|
35
|
+
"engines": {
|
|
36
|
+
"node": ">=18.17.0"
|
|
37
|
+
},
|
|
38
|
+
"dependencies": {
|
|
39
|
+
"commander": "^14.0.1",
|
|
40
|
+
"conf": "^14.0.0",
|
|
41
|
+
"fs-extra": "^11.3.2",
|
|
42
|
+
"js-yaml": "^4.1.0",
|
|
43
|
+
"ora": "^9.0.0",
|
|
44
|
+
"pdfkit": "^0.17.2"
|
|
45
|
+
}
|
|
46
|
+
}
|
package/src/cli.js
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
import { Command } from "commander";
|
|
2
|
+
import { runConfigCommand } from "./commands/config.js";
|
|
3
|
+
import { runEvalCommand } from "./commands/eval.js";
|
|
4
|
+
import { runManualCommand } from "./commands/manual.js";
|
|
5
|
+
|
|
6
|
+
/**
 * Build the `apiadapt` commander program and dispatch the requested subcommand.
 *
 * @param {string[]} argv - Raw process argv (node executable + script + args).
 * @returns {Promise<void>} Resolves after the chosen command's action finishes.
 */
export async function runCli(argv) {
  const program = new Command();

  program
    .name("apiadapt")
    .description("ADAPT engine CLI for API readiness assessments")
    // NOTE(review): version string is hard-coded; keep in sync with package.json.
    .version("1.0.0");
  // Append quick-start examples after the auto-generated --help output.
  program.addHelpText(
    "after",
    `
Quick examples:
apiadapt manual
apiadapt config --llm ollama --model llama3.2 --format text
apiadapt config --llm openai --model gpt-4o-mini --api-key <OPENAI_KEY> --format json
apiadapt eval ./openapi.yaml --format json --output ./report.json
apiadapt eval ./openapi.yaml --format pdf --output ./assessment.pdf
`,
  );

  // `config`: persist default provider/model/format/credentials.
  program
    .command("config")
    .description("Set default LLM provider/model/output format")
    .option("--llm <provider>", "LLM provider: ollama|openai|anthropic")
    .option("--model <name>", "Model name for provider")
    .option("--format <type>", "Default output format: json|text|pdf")
    .option("--api-key <key>", "Provider API key (for openai/anthropic)")
    .option("--base-url <url>", "Custom provider base URL")
    .action(async (options) => {
      await runConfigCommand(options);
    });

  // `eval`: run the ADAPT assessment against a spec file.
  program
    .command("eval")
    .description("Evaluate an API spec file with ADAPT engine")
    .argument("<file_path>", "Path to OpenAPI/RAML file")
    .option("--output <path>", "Output path (writes file when set)")
    .option("--format <type>", "Output format: json|text|pdf")
    .option("--print", "Print result to stdout", false)
    .action(async (filePath, options) => {
      await runEvalCommand(filePath, options);
    });

  // `manual` (alias `man`): print the long-form usage guide.
  program
    .command("manual")
    .alias("man")
    .description("Show detailed command guide with examples")
    .action(() => {
      runManualCommand();
    });

  await program.parseAsync(argv);
}
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
import { askQuestion } from "../utils/prompts.js";
|
|
2
|
+
import { getConfigPath, getSettings, saveSettings } from "../utils/configStore.js";
|
|
3
|
+
|
|
4
|
+
/**
 * Persist default CLI settings, either from command-line flags or via an
 * interactive prompt session when no flags were supplied.
 *
 * @param {{llm?: string, model?: string, format?: string, apiKey?: string, baseUrl?: string}} options
 *   Parsed commander options for the `config` command.
 * @returns {Promise<void>} Resolves after settings are saved and a summary is printed.
 */
export async function runConfigCommand(options) {
  const current = getSettings();

  // Map CLI flag names onto the stored settings schema.
  const fromFlags = {
    llmProvider: options.llm,
    model: options.model,
    outputFormat: options.format,
    apiKey: options.apiKey,
    baseUrl: options.baseUrl,
  };

  const anyFlagProvided = Object.values(fromFlags).some(
    (value) => value !== undefined,
  );

  let finalConfig;
  if (anyFlagProvided) {
    finalConfig = fromFlags;
  } else {
    // Interactive fallback: each answer defaults to the currently stored value.
    finalConfig = {
      llmProvider: await askQuestion(
        `LLM provider [ollama/openai/anthropic] (${current.llmProvider}): `,
        current.llmProvider,
      ),
      model: await askQuestion(`Model (${current.model}): `, current.model),
      outputFormat: await askQuestion(
        `Default format [text/json/pdf] (${current.outputFormat}): `,
        current.outputFormat,
      ),
      apiKey: await askQuestion(
        "API key (leave blank to keep current): ",
        current.apiKey,
      ),
      baseUrl: await askQuestion(
        "Base URL (leave blank for provider default): ",
        current.baseUrl,
      ),
    };
  }

  const saved = saveSettings(finalConfig);
  const summaryLines = [
    "Configuration saved.",
    `Config file: ${getConfigPath()}`,
    `Provider: ${saved.llmProvider}`,
    `Model: ${saved.model}`,
    `Default format: ${saved.outputFormat}`,
  ];
  process.stdout.write(summaryLines.join("\n") + "\n");
}
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
import ora from "ora";
|
|
2
|
+
import {
|
|
3
|
+
defaultOutputPath,
|
|
4
|
+
renderOutput,
|
|
5
|
+
writeOutputFile,
|
|
6
|
+
} from "../formatters/index.js";
|
|
7
|
+
import { evaluateSpecFile } from "../engine/evaluator.js";
|
|
8
|
+
import { getSettings } from "../utils/configStore.js";
|
|
9
|
+
|
|
10
|
+
/**
 * Run an ADAPT assessment for one spec file and emit the rendered result to
 * stdout and/or a file, based on flags and stored defaults.
 *
 * @param {string} filePath - Path to the OpenAPI/RAML file to assess.
 * @param {{format?: string, output?: string, print?: boolean}} options
 *   Parsed commander options for the `eval` command.
 * @returns {Promise<void>} Resolves after output has been printed/written.
 * @throws Re-throws any evaluation failure after stopping the spinner.
 */
export async function runEvalCommand(filePath, options) {
  const settings = getSettings();
  // Explicit --format wins, then the stored default, then "text".
  const format = String(
    options.format || settings.outputFormat || "text",
  ).toLowerCase();

  const spinner = ora("Running ADAPT assessment...").start();
  let report;
  try {
    report = await evaluateSpecFile(filePath, settings);
    spinner.succeed(`Assessment complete: ${report.assessment.disposition}`);
  } catch (error) {
    spinner.fail("Assessment failed.");
    throw error;
  }

  const payload = await renderOutput(report, format);

  // Print when explicitly requested, or by default when there is no file
  // target and the format is text-friendly (PDF is binary, never auto-printed).
  const printRequested =
    Boolean(options.print) || (!options.output && format !== "pdf");
  if (printRequested) {
    process.stdout.write(
      payload.isBinary
        ? "PDF output is binary; provide --output to save file.\n"
        : payload.content,
    );
  }

  // Write a file when a path was given; PDF always gets written somewhere.
  if (options.output || format === "pdf") {
    const destination = options.output || defaultOutputPath(filePath, format);
    await writeOutputFile(destination, payload);
    process.stdout.write(`Saved output to: ${destination}\n`);
  }
}
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
/**
 * Print the long-form usage manual (commands, provider setup, examples)
 * to stdout. Invoked by `apiadapt manual` / `apiadapt man`.
 */
export function runManualCommand() {
  const manual = `
apiadapt manual
==============

Overview
--------
apiadapt evaluates OpenAPI/RAML specs for ADAPT readiness.

Core Commands
-------------
1) Configure defaults
apiadapt config [--llm <provider>] [--model <name>] [--format <text|json|pdf>] [--api-key <key>] [--base-url <url>]

2) Evaluate a spec
apiadapt eval <file_path> [--format <text|json|pdf>] [--output <path>] [--print]

How to Configure LLM + Model
----------------------------
Use ollama (local):
apiadapt config --llm ollama --model llama3.2 --format text
# Optional custom host:
apiadapt config --llm ollama --model llama3.2 --base-url http://localhost:11434

Use OpenAI:
apiadapt config --llm openai --model gpt-4o-mini --api-key <OPENAI_API_KEY> --format json
# Optional custom endpoint:
apiadapt config --llm openai --model gpt-4o --base-url https://api.openai.com

Use Anthropic:
apiadapt config --llm anthropic --model claude-3-5-sonnet-latest --api-key <ANTHROPIC_API_KEY> --format json
# Optional custom endpoint:
apiadapt config --llm anthropic --model claude-3-5-sonnet-latest --base-url https://api.anthropic.com

Run Evaluations
---------------
Print text to terminal:
apiadapt eval ./openapi.yaml --format text --print

Save JSON report:
apiadapt eval ./openapi.yaml --format json --output ./reports/adapt-report.json

Save PDF report:
apiadapt eval ./openapi.yaml --format pdf --output ./reports/adapt-report.pdf

Use stored defaults (no extra flags):
apiadapt eval ./openapi.yaml

Tips
----
- Run "apiadapt config" with no flags for interactive setup.
- Use "apiadapt --help" for short help.
- Use "apiadapt manual" for full guide and examples.
`;

  process.stdout.write(manual);
}
|
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
You are an expert Enterprise API Architect and an AI Agent Evaluator. Your task is to analyze OpenAPI or RAML specifications and assess their readiness for Generative AI agent consumption using the protocol-agnostic Agent-Driven API Assessment and Transformation (ADAPT) framework.
|
|
2
|
+
|
|
3
|
+
Your entire reply must be exactly one JSON object—no other text, no markdown, no bullet points, no section headers (e.g. no "Pillar 1: Interpretability"), no prose. Responses that are not valid JSON will be rejected.
|
|
4
|
+
|
|
5
|
+
You must evaluate the provided API specification against 4 Pillars, 17 Constructs (five under Interpretability, Operability, and Governance; two under Performance & Economics), and identify applicable gaps from the ADAPT taxonomy of twenty named deficiency types.
|
|
6
|
+
|
|
7
|
+
# THE ADAPT FRAMEWORK EVALUATION CRITERIA
|
|
8
|
+
|
|
9
|
+
## Pillar 1: Interpretability
|
|
10
|
+
Check if the agent can understand what this API does. Look for:
|
|
11
|
+
* I-01: Semantic Opacity — vague endpoint names, missing descriptions, or unclear parameter purposes (classify severity from evidence in the spec).
|
|
12
|
+
* I-02: Discovery Blindness (Severity: Major) — lack of proper tags, missing entry points, or unlinked operations.
|
|
13
|
+
* I-03: Schema Permissiveness (Severity: Critical when schemas are clearly underspecified) — missing strict data types, lack of enums, or overly broad formats causing hallucination risk.
|
|
14
|
+
* I-04: Spec-Reality Divergence (Severity: Critical when evidence is strong) — indications the spec may not match runtime behavior (e.g., catch-all/proxy paths, known parser-invalid composition).
|
|
15
|
+
* I-05: Intent Misalignment (Severity: Major) — under Intent-Oriented Design: the contract makes it hard for an agent to infer the correct task-level goal or to map user intent to the right operations (e.g., misleading summaries, verb/noun semantics at odds with behavior, generic CRUD surfaces that obscure real business actions, operations grouped or named such that tool selection would systematically misfire).
|
|
16
|
+
|
|
17
|
+
## Pillar 2: Operability
|
|
18
|
+
Check if the agent can invoke this API safely and reliably. Look for:
|
|
19
|
+
* O-01: Non-Idempotency Exposure (Severity: Critical) — destructive or mutating operations lacking idempotency keys or safe retry mechanisms.
|
|
20
|
+
* O-02: Response Inconsistency (Severity: Major) — varying return shapes or unpredictable payload structures.
|
|
21
|
+
* O-03: Opaque Error Signaling (Severity: Major) — generic errors without standardized, machine-readable codes or mitigations.
|
|
22
|
+
* O-04: Stateful Session Dependency (Severity: Critical / Override) — reliance on cookies, sticky sessions, MQTT/event connection state, or multi-step stateful flows rather than stateless token-style access.
|
|
23
|
+
* O-05: Operation Dependency Opacity (Severity: Major) — hidden prerequisites (e.g., endpoint A before B) not documented in the spec.
|
|
24
|
+
|
|
25
|
+
## Pillar 3: Governance
|
|
26
|
+
Check if the agent can be safely controlled, audited, or governed. Look for:
|
|
27
|
+
* G-01: Authorization Opacity (Severity: from evidence) — unclear or complex auth schemes not suited to standard agent tool calling.
|
|
28
|
+
* G-02: Human Approval Bypass Risk (Severity: Critical / Override) — high-stakes operations without documented approval, confirmation, or safeguard when the spec implies such risk.
|
|
29
|
+
* G-03: Observability Absence (Severity: Major) — missing correlation IDs or tracking headers for audit trails.
|
|
30
|
+
* G-04: Version Instability (Severity: Major) — unclear versioning risking agent breakage on updates.
|
|
31
|
+
* G-05: Regulated Data Exposure (Severity: Critical) — unfiltered PII, PCI, or PHI in responses.
|
|
32
|
+
* G-06: Excessive Permission Surface (Severity: Critical) — coarse-grained scopes instead of least-privilege agent scopes.
|
|
33
|
+
* G-07: Prompt Injection Susceptibility (Severity: Critical) — raw user input fields passed to sensitive execution without validation.
|
|
34
|
+
|
|
35
|
+
## Pillar 4: Performance & Economics
|
|
36
|
+
Check if the agent can operate within practical constraints. Look for:
|
|
37
|
+
* P-01: Context Window Overflow (Severity: Major) — oversized payloads, missing pagination, or missing filters that can overwhelm context.
|
|
38
|
+
* P-02: Cost Opacity (Severity: Major) — missing rate-limit or cost signals.
|
|
39
|
+
* P-03: Infrastructure Fragility (Severity: Critical) — shared legacy backends with no isolation path for high-velocity agent traffic.
|
|
40
|
+
|
|
41
|
+
# SEVERITY AND OVERRIDE DISCIPLINE
|
|
42
|
+
- Use Critical only with clear evidence in the spec; prefer Major or Minor when uncertain.
|
|
43
|
+
- O-04: Report when the spec documents cookie/session auth, broker persistence, or other session-like coupling that conflicts with stateless agent invocation. For cookie-only specs, report O-04 with overrides_disposition true and avoid piling on redundant Criticals for the same session issue alone.
|
|
44
|
+
- G-02: Report only for clearly high-impact operations lacking documented safeguards—not routine CRUD.
|
|
45
|
+
- I-04: Report for catch-all/proxy segments or strong hints the contract does not match behavior.
|
|
46
|
+
- I-05: Report when naming, descriptions, or operation grouping suggest an agent would mis-select tools or misunderstand what each operation is for relative to likely user tasks—not mere missing prose where I-01 applies.
|
|
47
|
+
- G-07: Report when fields suggest unvalidated execution (e.g. system_command_override, raw_command, exec_payload).
|
|
48
|
+
|
|
49
|
+
# DISPOSITION LOGIC (for your reasoning; the server recomputes disposition from gaps)
|
|
50
|
+
The assessment assigns one of five transformation dispositions:
|
|
51
|
+
1. Agent-Ready: Zero Critical, Zero Major, only Minor gaps with no operational-monitoring concern.
|
|
52
|
+
2. Expose and Monitor: Zero Critical, Zero Major, Minor gaps that warrant monitoring (e.g. payment workflows, implicit dependencies, undocumented rate limits).
|
|
53
|
+
3. Fix & Expose: Zero Critical, Major gaps only, addressable via specification enrichment; Majors limited and confined to Interpretability or Performance & Economics per rules engine.
|
|
54
|
+
4. Wrap & Expose: Override gaps (O-04, G-02), Major count threshold, Critical in Operability or Governance, or other wrap rules.
|
|
55
|
+
5. Rebuild for Agents: Critical gaps in three or more pillars simultaneously, or Critical P-03 on a shared backend without isolation.
|
|
56
|
+
|
|
57
|
+
Set root-level "minor_operational_risk" to true only when Critical and Major counts are both zero, at least one Minor gap is present, and some Minor gap(s) imply operational monitoring (financial/approval workflows, undocumented limits, risky implicit ordering). Otherwise false.
|
|
58
|
+
|
|
59
|
+
# OUTPUT FORMAT (STRICT, JSON-ONLY)
|
|
60
|
+
Your entire response must be exactly one JSON object. Any other format will be rejected.
|
|
61
|
+
|
|
62
|
+
- Do NOT use markdown, code fences, headers, or prose outside the JSON.
|
|
63
|
+
- The response MUST be strict JSON, ideally one line.
|
|
64
|
+
- Root object fields:
|
|
65
|
+
- "pillars": exactly ["Interpretability", "Operability", "Governance", "Performance & Economics"]
|
|
66
|
+
- "minor_operational_risk": boolean
|
|
67
|
+
- "gaps": array of {"gap_id": "X-NN", "description": "...", "pillar": "...", "severity": "Critical|Major|Minor", "overrides_disposition": false}
|
|
68
|
+
- Override: overrides_disposition true ONLY for O-04 and G-02 when those gaps are reported.
|
|
69
|
+
|
|
70
|
+
- If truly no ADAPT gaps apply, return:
|
|
71
|
+
{"pillars": ["Interpretability", "Operability", "Governance", "Performance & Economics"], "minor_operational_risk": false, "gaps": []}
|
|
72
|
+
|
|
73
|
+
Example (one gap, Major):
|
|
74
|
+
{"pillars": ["Interpretability", "Operability", "Governance", "Performance & Economics"], "minor_operational_risk": false, "gaps": [{"gap_id": "I-01", "description": "Vague naming", "pillar": "Interpretability", "severity": "Major", "overrides_disposition": false}]}
|
|
75
|
+
|
|
76
|
+
Example (two Majors, Fix & Expose path):
|
|
77
|
+
{"pillars": ["Interpretability", "Operability", "Governance", "Performance & Economics"], "minor_operational_risk": false, "gaps": [{"gap_id": "I-02", "description": "Lack of tags or entry points", "pillar": "Interpretability", "severity": "Major", "overrides_disposition": false}, {"gap_id": "P-01", "description": "List endpoint has no pagination", "pillar": "Performance & Economics", "severity": "Major", "overrides_disposition": false}]}
|
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
// Gap IDs that force the "Wrap & Expose" disposition regardless of severity mix.
const OVERRIDE_GAP_IDS = new Set(["O-04", "G-02"]);
|
|
2
|
+
|
|
3
|
+
/**
 * Coerce raw gap records (possibly straight from an LLM) into a uniform
 * shape: trimmed strings, upper-cased gap IDs, "Minor" default severity,
 * and a strict boolean override flag. Null/undefined input yields [].
 *
 * @param {Array<object>|null|undefined} gaps - Raw gap entries.
 * @returns {Array<object>} Normalized gap objects.
 */
function normalizeGaps(gaps) {
  const source = gaps || [];
  const normalized = [];
  for (const entry of source) {
    const rawId = String(entry.gap_id || entry.id || "");
    normalized.push({
      gap_id: rawId.trim().toUpperCase(),
      severity: String(entry.severity || "Minor").trim(),
      pillar: String(entry.pillar || "").trim(),
      description: String(entry.description || ""),
      overrides_disposition: Boolean(entry.overrides_disposition),
    });
  }
  return normalized;
}
|
|
12
|
+
|
|
13
|
+
/**
 * Tally gaps by severity (case-insensitive). Any severity that is neither
 * "critical" nor "major" is counted as minor.
 *
 * @param {Array<{severity: string}>} gaps - Normalized gap records.
 * @returns {{critical: number, major: number, minor: number}} Severity counts.
 */
function countBySeverity(gaps) {
  const counts = { critical: 0, major: 0, minor: 0 };
  for (const { severity } of gaps) {
    const level = severity.toLowerCase();
    if (level === "critical") {
      counts.critical += 1;
    } else if (level === "major") {
      counts.major += 1;
    } else {
      counts.minor += 1;
    }
  }
  return counts;
}
|
|
25
|
+
|
|
26
|
+
/**
 * Collect the distinct pillar names that contain at least one Critical gap.
 *
 * @param {Array<{severity: string, pillar: string}>} gaps - Normalized gaps.
 * @returns {Set<string>} Pillars with one or more Critical gaps.
 */
function criticalPillars(gaps) {
  const criticalOnly = gaps.filter(
    (gap) => gap.severity.toLowerCase() === "critical",
  );
  return new Set(criticalOnly.map((gap) => gap.pillar));
}
|
|
35
|
+
|
|
36
|
+
/**
 * True when any gap triggers the disposition override path: either a gap
 * explicitly flagged with overrides_disposition, or one whose ID is in the
 * hard override set (OVERRIDE_GAP_IDS: O-04, G-02).
 *
 * @param {Array<{gap_id: string, overrides_disposition: boolean}>} gaps
 * @returns {boolean}
 */
function hasOverrideGaps(gaps) {
  for (const gap of gaps) {
    if (gap.overrides_disposition || OVERRIDE_GAP_IDS.has(gap.gap_id)) {
      return true;
    }
  }
  return false;
}
|
|
41
|
+
|
|
42
|
+
/**
 * True when any Critical gap sits in the Operability or Governance pillar —
 * one of the "Wrap & Expose" triggers in the rules engine.
 *
 * @param {Array<{severity: string, pillar: string}>} gaps - Normalized gaps.
 * @returns {boolean}
 */
function hasCriticalOperabilityOrGovernance(gaps) {
  return gaps.some((gap) => {
    if (gap.severity.toLowerCase() !== "critical") {
      return false;
    }
    return gap.pillar === "Operability" || gap.pillar === "Governance";
  });
}
|
|
48
|
+
|
|
49
|
+
/**
 * True when every Major gap is confined to the Interpretability or
 * Performance & Economics pillars (the pillars eligible for "Fix & Expose").
 * Gaps of other severities are ignored; no Majors at all yields true.
 *
 * @param {Array<{severity: string, pillar: string}>} gaps - Normalized gaps.
 * @returns {boolean}
 */
function majorOnlyIP(gaps) {
  const majors = gaps.filter((gap) => gap.severity.toLowerCase() === "major");
  return majors.every(
    (gap) =>
      gap.pillar === "Interpretability" ||
      gap.pillar === "Performance & Economics",
  );
}
|
|
57
|
+
|
|
58
|
+
/**
 * True when a Critical P-03 (infrastructure fragility) gap is present —
 * a direct "Rebuild for Agents" trigger.
 *
 * @param {Array<{gap_id: string, severity: string}>} gaps - Normalized gaps.
 * @returns {boolean}
 */
function hasP03Critical(gaps) {
  for (const gap of gaps) {
    if (gap.gap_id === "P-03" && gap.severity.toLowerCase() === "critical") {
      return true;
    }
  }
  return false;
}
|
|
63
|
+
|
|
64
|
+
/**
 * Map a gap profile onto one of the five ADAPT transformation dispositions.
 *
 * Precedence:
 *   1. Rebuild for Agents — Critical gaps span 3+ pillars, or a Critical P-03.
 *   2. Wrap & Expose      — override gaps (O-04/G-02 or flagged), 3+ Majors,
 *                           or any Critical in Operability/Governance.
 *   3. Agent-Ready / Expose and Monitor — no Critical and no Major gaps
 *                           (monitoring variant when Minor gaps warrant it).
 *   4. Fix & Expose       — no Critical, at most 2 Majors, and every Major
 *                           confined to Interpretability or Performance & Economics.
 *   5. Wrap & Expose      — everything else.
 *
 * @param {Array<object>} rawGaps - Gap records (normalized internally).
 * @param {boolean} [minorOperationalRisk=false] - Whether Minor gaps warrant monitoring.
 * @returns {string} Disposition label.
 */
export function computeDisposition(rawGaps, minorOperationalRisk = false) {
  const gaps = normalizeGaps(rawGaps);
  const { critical, major, minor } = countBySeverity(gaps);
  const cPillars = criticalPillars(gaps);

  if (cPillars.size >= 3 || hasP03Critical(gaps)) {
    return "Rebuild for Agents";
  }
  if (hasOverrideGaps(gaps) || major >= 3 || hasCriticalOperabilityOrGovernance(gaps)) {
    return "Wrap & Expose";
  }
  if (critical === 0 && major === 0) {
    return minor > 0 && minorOperationalRisk ? "Expose and Monitor" : "Agent-Ready";
  }
  // Fix & Expose requires Majors to be confined to Interpretability or
  // Performance & Economics. A previous fallback branch returned
  // "Fix & Expose" for any (critical === 0 && major <= 2) profile, which made
  // the majorOnlyIP() check dead code and contradicted the documented rules
  // engine; such profiles now fall through to "Wrap & Expose".
  if (critical === 0 && major <= 2 && majorOnlyIP(gaps)) {
    return "Fix & Expose";
  }
  return "Wrap & Expose";
}
|
|
86
|
+
|
|
87
|
+
/**
 * Produce a one-sentence human-readable explanation for a disposition,
 * including severity counts derived from the (re-normalized) gap list.
 *
 * @param {Array<object>} rawGaps - Gap records (normalized internally).
 * @param {string} disposition - Label produced by computeDisposition().
 * @returns {string} Explanatory summary sentence.
 */
export function computeDispositionSummary(rawGaps, disposition) {
  const gaps = normalizeGaps(rawGaps);
  const { critical, major, minor } = countBySeverity(gaps);

  // Per-disposition rationale; unknown labels get the generic fallback.
  const reasons = {
    "Rebuild for Agents": "critical gaps are broad enough to require structural rework",
    "Wrap & Expose": "gap profile requires a protective wrapper before agent exposure",
    "Fix & Expose": "critical risk is absent and major issues are fixable in place",
    "Expose and Monitor": "only minor gaps exist, but they require active monitoring",
    "Agent-Ready": "no blocking critical or major gaps were detected",
  };
  const reason =
    reasons[disposition] || "risk posture aligns with current ADAPT findings";

  return `Disposition is ${disposition} because ${reason}. Observed ${critical} critical, ${major} major, and ${minor} minor gaps.`;
}
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
import { loadSpecFromFile } from "../utils/specLoader.js";
|
|
2
|
+
import { runStaticHeuristics } from "./heuristics.js";
|
|
3
|
+
import { evaluateWithLlm } from "./llmClients.js";
|
|
4
|
+
import { computeDisposition, computeDispositionSummary } from "./disposition.js";
|
|
5
|
+
|
|
6
|
+
/**
 * Run the full ADAPT evaluation pipeline for one spec file.
 *
 * Pipeline: load/parse the spec, compute a heuristic baseline assessment,
 * attempt an LLM assessment (preferred when it yields gaps), then derive
 * the disposition and summary from whichever assessment won.
 *
 * @param {string} filePath - Path to the OpenAPI/RAML file.
 * @param {object} settings - Stored CLI settings (provider, model, keys).
 * @returns {Promise<object>} Report with `spec`, `assessment`, and `meta` sections.
 */
export async function evaluateSpecFile(filePath, settings) {
  const startedAt = Date.now();
  const spec = await loadSpecFromFile(filePath);

  // Heuristic baseline; replaced below only if the LLM produces usable gaps.
  let assessment = runStaticHeuristics(spec.parsed);
  let llmError = "";

  try {
    const llmAssessment = await evaluateWithLlm(settings, spec.rawText);
    if (Array.isArray(llmAssessment?.gaps) && llmAssessment.gaps.length > 0) {
      assessment = llmAssessment;
    } else {
      // LLM responded but reported no gaps: keep heuristics, carry over notes
      // and the raw response for debugging.
      assessment.notes = llmAssessment?.notes || assessment.notes;
      assessment.raw_llm_response = llmAssessment?.raw_llm_response || "";
    }
  } catch (error) {
    // LLM failure is non-fatal: fall back to the heuristic assessment and
    // record the error in both notes and meta.
    llmError = error.message;
    assessment.notes = `${assessment.notes} LLM call failed: ${llmError}`.trim();
  }

  // Disposition is always recomputed locally from the gap list, regardless of
  // anything the LLM may claim.
  const disposition = computeDisposition(
    assessment.gaps,
    Boolean(assessment.minor_operational_risk),
  );
  const summary = computeDispositionSummary(assessment.gaps, disposition);

  return {
    spec: {
      filePath: spec.filePath,
      fileName: spec.fileName,
      specType: spec.specType,
      sizeBytes: spec.sizeBytes,
      sizeKb: Number((spec.sizeBytes / 1024).toFixed(2)),
    },
    assessment: {
      ...assessment,
      disposition,
      summary,
    },
    meta: {
      provider: settings.llmProvider,
      model: settings.model,
      generatedAt: new Date().toISOString(),
      durationMs: Date.now() - startedAt,
      llmError,
    },
  };
}
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
// Canonical ADAPT pillar names, in the order the report schema expects.
const PILLARS = [
  "Interpretability",
  "Operability",
  "Governance",
  "Performance & Economics",
];
|
|
7
|
+
|
|
8
|
+
/**
 * Append a gap record to the accumulator, keeping only the known report
 * fields and coercing the override flag to a strict boolean.
 *
 * @param {Array<object>} gaps - Mutable accumulator to push onto.
 * @param {object} gap - Raw gap descriptor.
 */
function addGap(gaps, gap) {
  const { gap_id, description, pillar, severity } = gap;
  gaps.push({
    gap_id,
    description,
    pillar,
    severity,
    overrides_disposition: Boolean(gap.overrides_disposition),
  });
}
|
|
17
|
+
|
|
18
|
+
/**
 * Cheap, offline ADAPT checks based purely on spec structure. Serves as a
 * fallback assessment when no LLM evaluation is available.
 *
 * @param {object|undefined} parsedSpec - Parsed OpenAPI/RAML document.
 * @returns {{pillars: string[], minor_operational_risk: boolean, gaps: Array<object>, notes: string}}
 */
export function runStaticHeuristics(parsedSpec) {
  const info = parsedSpec?.info || {};
  const paths = parsedSpec?.paths || {};
  const security = parsedSpec?.security || [];
  const servers = parsedSpec?.servers || [];

  // Each check pairs a structural predicate with the gap it implies.
  const checks = [
    {
      failed: !info.title || !info.description,
      gap: {
        gap_id: "I-01",
        pillar: "Interpretability",
        severity: "Major",
        description:
          "API metadata is incomplete. Include clear title and description for agent context.",
      },
    },
    {
      // NOTE(review): missing paths is reported as I-03 here; I-02 (Discovery
      // Blindness / missing entry points) may fit the ADAPT taxonomy better —
      // confirm before changing, as it would alter report contents.
      failed: Object.keys(paths).length === 0,
      gap: {
        gap_id: "I-03",
        pillar: "Interpretability",
        severity: "Critical",
        description: "No API paths were found. Agents cannot discover callable operations.",
      },
    },
    {
      failed: !Array.isArray(security) || security.length === 0,
      gap: {
        gap_id: "G-01",
        pillar: "Governance",
        severity: "Major",
        description:
          "No global security requirement found. Define auth expectations to reduce misuse risk.",
      },
    },
    {
      failed: !Array.isArray(servers) || servers.length === 0,
      gap: {
        gap_id: "O-02",
        pillar: "Operability",
        severity: "Minor",
        description:
          "No server environment declared. Add server definitions to improve runtime portability.",
      },
    },
  ];

  const gaps = [];
  for (const { failed, gap } of checks) {
    if (failed) {
      addGap(gaps, gap);
    }
  }

  return {
    pillars: PILLARS,
    minor_operational_risk: gaps.some((gap) => gap.severity === "Minor"),
    gaps,
    notes:
      "Heuristic-only assessment. Configure an LLM provider for a full ADAPT semantic evaluation.",
  };
}
|
|
72
|
+
|
|
73
|
+
export { PILLARS };
|
|
@@ -0,0 +1,184 @@
|
|
|
1
|
+
import { PILLARS } from "./heuristics.js";
|
|
2
|
+
import { readFileSync } from "node:fs";
|
|
3
|
+
|
|
4
|
+
// ADAPT evaluation instructions sent as the system message to every LLM
// provider. Loaded once at module import time; resolved relative to this
// file so it works regardless of the process working directory.
const SYSTEM_PROMPT = readFileSync(
  new URL("./SystemPrompt.txt", import.meta.url),
  "utf8",
);
|
|
8
|
+
|
|
9
|
+
/**
 * Parse JSON without throwing.
 *
 * @param {string} text - Candidate JSON text.
 * @returns {*} The parsed value, or null when parsing fails.
 */
function safeJsonParse(text) {
  let parsed = null;
  try {
    parsed = JSON.parse(text);
  } catch {
    // Malformed JSON is routine for free-form LLM output; null signals failure.
  }
  return parsed;
}

/**
 * Best-effort extraction of a JSON object from raw LLM output.
 * Tries, in order: the whole payload, a fenced ``` code block, and finally
 * the outermost `{...}` span embedded in surrounding prose.
 *
 * @param {string} text - Raw model response.
 * @returns {*} Parsed JSON value, or null when nothing parseable is found.
 */
function parseJsonFromText(text) {
  // 1) The entire payload may already be valid JSON.
  const whole = safeJsonParse(text);
  if (whole) return whole;

  // 2) JSON wrapped in a fenced code block (``` or ```json).
  const fence = /```(?:json)?\s*([\s\S]*?)```/i.exec(text);
  if (fence?.[1]) {
    const fenced = safeJsonParse(fence[1].trim());
    if (fenced) return fenced;
  }

  // 3) Last resort: slice from the first "{" to the last "}".
  const open = text.indexOf("{");
  const close = text.lastIndexOf("}");
  if (open >= 0 && close > open) {
    return safeJsonParse(text.slice(open, close + 1));
  }
  return null;
}
|
|
35
|
+
|
|
36
|
+
/**
 * Coerce an arbitrary LLM-produced object into the canonical ADAPT
 * assessment shape, applying defaults for any missing or malformed fields.
 *
 * @param {object|null} obj - Parsed LLM response (may be null or partial).
 * @param {string} [rawResponse] - Original model text, kept for debugging.
 * @returns {{pillars: string[], gaps: object[], minor_operational_risk: boolean, notes: string, raw_llm_response: string}}
 */
function normalizeAssessment(obj, rawResponse = "") {
  // Every gap is forced into a fully-populated record with string fields.
  const toGapRecord = (entry) => ({
    gap_id: String(entry.gap_id || "GAP-UNK").toUpperCase(),
    description: String(entry.description || ""),
    pillar: String(entry.pillar || "Interpretability"),
    severity: String(entry.severity || "Minor"),
    overrides_disposition: Boolean(entry.overrides_disposition),
  });

  const rawGaps = Array.isArray(obj?.gaps) ? obj.gaps : [];
  const gaps = rawGaps.filter((entry) => entry && typeof entry === "object").map(toGapRecord);

  // Models sometimes emit the boolean as a string ("true"/"yes"/"1").
  const riskValue = obj?.minor_operational_risk;
  const minorOperationalRisk =
    typeof riskValue === "string"
      ? ["true", "1", "yes"].includes(riskValue.toLowerCase())
      : Boolean(riskValue);

  let notes = "";
  if (typeof obj?.notes === "string" && obj.notes.trim()) {
    notes = obj.notes.trim();
  } else if (gaps.length === 0) {
    // Empty gaps with no notes usually means the model response was unusable.
    notes = "LLM response did not produce valid ADAPT gaps; empty gap list returned.";
  }

  return {
    pillars: PILLARS,
    gaps,
    minor_operational_risk: minorOperationalRisk,
    notes,
    raw_llm_response: rawResponse || "",
  };
}
|
|
68
|
+
|
|
69
|
+
/**
 * POST a JSON payload to an LLM endpoint and return the parsed JSON body.
 *
 * @param {string} url - Absolute endpoint URL.
 * @param {object} payload - Request body, serialized as JSON.
 * @param {object} headers - Extra headers merged over the JSON content-type.
 * @returns {Promise<object>} Parsed response body.
 * @throws {Error} On timeout, network failure, or non-2xx response.
 */
async function requestJson(url, payload, headers) {
  // Fix: the previous `Number(env || 20000)` produced NaN for malformed env
  // values (and 0 for "0"), which makes AbortSignal.timeout() throw or abort
  // immediately. Only accept a finite, positive override; otherwise use 20s.
  const configured = Number(process.env.APIADAPT_LLM_TIMEOUT_MS);
  const timeoutMs = Number.isFinite(configured) && configured > 0 ? configured : 20000;

  let response;
  try {
    response = await fetch(url, {
      method: "POST",
      headers: {
        "content-type": "application/json",
        ...headers,
      },
      body: JSON.stringify(payload),
      signal: AbortSignal.timeout(timeoutMs),
    });
  } catch (error) {
    // Translate the abort reason into an actionable message; rethrow others.
    if (error?.name === "TimeoutError") {
      throw new Error(`LLM request timed out after ${timeoutMs}ms.`);
    }
    throw error;
  }

  if (!response.ok) {
    // Include a bounded slice of the body so provider errors are diagnosable.
    const text = await response.text();
    throw new Error(`LLM request failed (${response.status}): ${text.slice(0, 300)}`);
  }

  return response.json();
}
|
|
96
|
+
|
|
97
|
+
/**
 * Evaluate a spec with a local Ollama chat model.
 *
 * @param {string} model - Ollama model name.
 * @param {string} specText - Raw spec text (truncated to 12k chars).
 * @param {string} [baseUrl] - Ollama host; defaults to http://localhost:11434.
 * @returns {Promise<object>} Normalized ADAPT assessment.
 */
async function runOllama(model, specText, baseUrl) {
  const root = (baseUrl || "http://localhost:11434").replace(/\/$/, "");
  const payload = {
    model,
    stream: false,
    messages: [
      { role: "system", content: SYSTEM_PROMPT },
      // Local models get a smaller context slice than the hosted providers.
      { role: "user", content: specText.slice(0, 12000) },
    ],
    options: { temperature: 0 },
  };

  const data = await requestJson(`${root}/api/chat`, payload, {});
  const content = String(data?.message?.content || "");
  return normalizeAssessment(parseJsonFromText(content), content);
}
|
|
117
|
+
|
|
118
|
+
/**
 * Evaluate a spec with the OpenAI chat-completions API.
 *
 * @param {string} model - Model identifier.
 * @param {string} specText - Raw spec text (truncated to 24k chars).
 * @param {string} apiKey - OpenAI API key (required).
 * @param {string} [baseUrl] - API root; defaults to https://api.openai.com.
 * @returns {Promise<object>} Normalized ADAPT assessment.
 * @throws {Error} When no API key is configured.
 */
async function runOpenAI(model, specText, apiKey, baseUrl) {
  if (!apiKey) {
    throw new Error("Missing OpenAI API key. Set via `apiadapt config --api-key`.");
  }

  const root = (baseUrl || "https://api.openai.com").replace(/\/$/, "");
  const payload = {
    model,
    temperature: 0,
    // Ask the API to enforce a JSON object response.
    response_format: { type: "json_object" },
    messages: [
      { role: "system", content: SYSTEM_PROMPT },
      { role: "user", content: specText.slice(0, 24000) },
    ],
  };

  const data = await requestJson(`${root}/v1/chat/completions`, payload, {
    Authorization: `Bearer ${apiKey}`,
  });
  const content = String(data?.choices?.[0]?.message?.content || "");
  return normalizeAssessment(parseJsonFromText(content), content);
}
|
|
141
|
+
|
|
142
|
+
/**
 * Evaluate a spec with the Anthropic Messages API.
 *
 * @param {string} model - Model identifier.
 * @param {string} specText - Raw spec text (truncated to 24k chars).
 * @param {string} apiKey - Anthropic API key (required).
 * @param {string} [baseUrl] - API root; defaults to https://api.anthropic.com.
 * @returns {Promise<object>} Normalized ADAPT assessment.
 * @throws {Error} When no API key is configured.
 */
async function runAnthropic(model, specText, apiKey, baseUrl) {
  if (!apiKey) {
    throw new Error("Missing Anthropic API key. Set via `apiadapt config --api-key`.");
  }

  const root = (baseUrl || "https://api.anthropic.com").replace(/\/$/, "");
  const payload = {
    model,
    max_tokens: 1800,
    temperature: 0,
    system: SYSTEM_PROMPT,
    messages: [{ role: "user", content: specText.slice(0, 24000) }],
  };
  const authHeaders = {
    "x-api-key": apiKey,
    "anthropic-version": "2023-06-01",
  };

  const data = await requestJson(`${root}/v1/messages`, payload, authHeaders);

  // Anthropic returns an array of content blocks; take the first text block.
  let content = "";
  if (Array.isArray(data?.content)) {
    const textBlock = data.content.find((item) => item.type === "text");
    content = String(textBlock?.text || "");
  }
  return normalizeAssessment(parseJsonFromText(content), content);
}
|
|
168
|
+
|
|
169
|
+
/**
 * Dispatch an ADAPT evaluation to the configured LLM provider.
 *
 * @param {{llmProvider?: string, model?: string, apiKey?: string, baseUrl?: string}} settings
 * @param {string} specText - Raw spec text to evaluate.
 * @returns {Promise<object>} Normalized ADAPT assessment.
 * @throws {Error} For unknown providers.
 */
export async function evaluateWithLlm(settings, specText) {
  const model = settings.model || "llama3.2";

  switch (String(settings.llmProvider || "").toLowerCase()) {
    case "ollama":
      return runOllama(model, specText, settings.baseUrl);
    case "openai":
      return runOpenAI(model, specText, settings.apiKey, settings.baseUrl);
    case "anthropic":
      return runAnthropic(model, specText, settings.apiKey, settings.baseUrl);
    default:
      throw new Error(`Unsupported LLM provider "${settings.llmProvider}".`);
  }
}
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
import path from "node:path";
|
|
2
|
+
import fs from "fs-extra";
|
|
3
|
+
import { formatJson } from "./jsonFormatter.js";
|
|
4
|
+
import { formatText } from "./textFormatter.js";
|
|
5
|
+
import { formatPdf } from "./pdfFormatter.js";
|
|
6
|
+
|
|
7
|
+
/**
 * Map an output format to its file extension (".txt" for anything unknown).
 *
 * @param {string} format - "json", "pdf", or anything else (treated as text).
 * @returns {string} File extension including the leading dot.
 */
function extensionFor(format) {
  switch (format) {
    case "json":
      return ".json";
    case "pdf":
      return ".pdf";
    default:
      return ".txt";
  }
}

/**
 * Derive the default report path next to the input spec file,
 * e.g. "api.yaml" -> "api.adapt-report.json".
 *
 * @param {string} inputFilePath - Path to the spec that was assessed.
 * @param {string} format - Output format used to pick the extension.
 * @returns {string} Sibling path with the ".adapt-report" suffix.
 */
export function defaultOutputPath(inputFilePath, format) {
  const { dir, name } = path.parse(inputFilePath);
  return path.join(dir, `${name}.adapt-report${extensionFor(format)}`);
}
|
|
17
|
+
|
|
18
|
+
/**
 * Render a report into the requested output format.
 *
 * @param {object} report - Assembled ADAPT report object.
 * @param {string} [format] - "json", "pdf", or anything else for plain text.
 * @returns {Promise<{content: (string|Buffer), isBinary: boolean}>}
 *   Rendered payload; `isBinary` is true only for PDF buffers.
 */
export async function renderOutput(report, format) {
  switch (String(format || "text").toLowerCase()) {
    case "json":
      return { content: formatJson(report), isBinary: false };
    case "pdf":
      // PDF rendering is async and yields a Buffer rather than a string.
      return { content: await formatPdf(report), isBinary: true };
    default:
      return { content: formatText(report), isBinary: false };
  }
}
|
|
28
|
+
|
|
29
|
+
/**
 * Persist a rendered payload to disk, creating parent directories as needed.
 *
 * @param {string} outputPath - Destination file path.
 * @param {{content: (string|Buffer), isBinary: boolean}} payload - Output from renderOutput().
 * @returns {Promise<void>}
 */
export async function writeOutputFile(outputPath, payload) {
  await fs.ensureDir(path.dirname(outputPath));
  if (payload.isBinary) {
    // Binary content (PDF Buffer) must be written without an encoding.
    await fs.writeFile(outputPath, payload.content);
    return;
  }
  await fs.writeFile(outputPath, payload.content, "utf8");
}
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
import PDFDocument from "pdfkit";
|
|
2
|
+
|
|
3
|
+
/**
 * Accumulate a streaming PDF document into a single Buffer.
 *
 * @param {import('stream').Readable} doc - Stream emitting "data"/"end"/"error".
 * @returns {Promise<Buffer>} Resolves with the concatenated bytes on "end".
 */
function collectPdfBuffer(doc) {
  return new Promise((resolve, reject) => {
    const pieces = [];
    doc.on("data", (piece) => {
      pieces.push(piece);
    });
    doc.on("end", () => {
      resolve(Buffer.concat(pieces));
    });
    doc.on("error", reject);
  });
}
|
|
11
|
+
|
|
12
|
+
/**
 * Render an ADAPT report as a PDF Buffer using pdfkit.
 *
 * The buffer collector must be attached before any drawing so no "data"
 * events are missed; the promise resolves only after doc.end().
 *
 * @param {object} report - Report with `meta`, `spec`, and `assessment` sections.
 * @returns {Promise<Buffer>} Complete PDF bytes.
 */
export async function formatPdf(report) {
  const doc = new PDFDocument({ size: "LETTER", margin: 50 });
  // Start collecting immediately, before drawing begins.
  const bufferPromise = collectPdfBuffer(doc);

  // Header and metadata lines.
  doc.fontSize(18).text("ADAPT API Assessment Report");
  doc.moveDown(0.5);
  doc.fontSize(11).text(`Date: ${new Date(report.meta.generatedAt).toISOString()}`);
  doc.text(`Spec: ${report.spec.fileName}`);
  doc.text(`Type: ${report.spec.specType}`);
  doc.text(`Disposition: ${report.assessment.disposition}`);
  doc.moveDown(0.5);
  doc.fontSize(12).text("Summary", { underline: true });
  doc.fontSize(10).text(report.assessment.summary || "No summary.");
  doc.moveDown(0.5);
  doc.fontSize(12).text("Gaps", { underline: true });
  doc.fontSize(10);

  // One bullet line per gap; explicit placeholder when there are none.
  if (!report.assessment.gaps.length) {
    doc.text("No gaps identified.");
  } else {
    for (const gap of report.assessment.gaps) {
      doc.text(
        `- [${gap.severity}] ${gap.gap_id} (${gap.pillar}): ${gap.description}`,
        { lineGap: 2 },
      );
    }
  }

  // Optional trailing notes section.
  if (report.assessment.notes) {
    doc.moveDown(0.5);
    doc.fontSize(12).text("Notes", { underline: true });
    doc.fontSize(10).text(report.assessment.notes);
  }

  // Finish the stream; this triggers "end" and resolves the collector.
  doc.end();
  return bufferPromise;
}
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
/**
 * Render a single gap as a numbered bullet line.
 *
 * @param {object} gap - Gap record with severity, gap_id, pillar, description.
 * @param {number} index - Zero-based position (displayed one-based).
 * @returns {string} One formatted line.
 */
function formatGap(gap, index) {
  return `${index + 1}. [${gap.severity}] ${gap.gap_id} (${gap.pillar}) - ${gap.description}`;
}

/**
 * Render an ADAPT report as plain text, ending with a trailing newline.
 *
 * @param {object} report - Report with `spec`, `meta`, and `assessment` sections.
 * @returns {string} Multi-line text report.
 */
export function formatText(report) {
  const { spec, meta, assessment } = report;

  const header = [
    "ADAPT API Assessment",
    "====================",
    `Spec: ${spec.fileName} (${spec.specType}, ${spec.sizeKb} KB)`,
    `Provider/Model: ${meta.provider}/${meta.model}`,
    `Disposition: ${assessment.disposition}`,
    `Summary: ${assessment.summary}`,
    "",
    "Gaps:",
  ];

  // Show a placeholder bullet when no gaps were found.
  const gapLines = assessment.gaps.length
    ? assessment.gaps.map((gap, index) => formatGap(gap, index))
    : ["- No gaps identified."];

  const footer = assessment.notes ? ["", `Notes: ${assessment.notes}`] : [];

  return `${[...header, ...gapLines, ...footer].join("\n")}\n`;
}
|
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
import Conf from "conf";
|
|
2
|
+
import os from "node:os";
|
|
3
|
+
import path from "node:path";
|
|
4
|
+
|
|
5
|
+
const configDir =
|
|
6
|
+
process.env.APIADAPT_CONFIG_DIR || path.join(os.homedir(), ".config", "apiadapt");
|
|
7
|
+
|
|
8
|
+
let configInstance = null;
|
|
9
|
+
|
|
10
|
+
const VALID_PROVIDERS = new Set(["ollama", "openai", "anthropic"]);
|
|
11
|
+
const VALID_FORMATS = new Set(["json", "text", "pdf"]);
|
|
12
|
+
|
|
13
|
+
/**
 * Lazily create and cache the module-wide Conf store.
 *
 * The store lives under `configDir` (overridable via APIADAPT_CONFIG_DIR)
 * and seeds sensible defaults on first use. All accessors in this module
 * share the single cached instance.
 *
 * @returns {Conf} The shared settings store.
 */
function getConfig() {
  if (!configInstance) {
    configInstance = new Conf({
      projectName: "apiadapt",
      configName: "settings",
      cwd: configDir,
      defaults: {
        llmProvider: "ollama",
        model: "llama3.2",
        outputFormat: "text",
        apiKey: "",
        baseUrl: "",
      },
    });
  }
  return configInstance;
}
|
|
30
|
+
|
|
31
|
+
/**
 * Read every persisted setting as a plain object snapshot.
 *
 * @returns {{llmProvider: string, model: string, outputFormat: string, apiKey: string, baseUrl: string}}
 */
export function getSettings() {
  const config = getConfig();
  const snapshot = {};
  for (const key of ["llmProvider", "model", "outputFormat", "apiKey", "baseUrl"]) {
    snapshot[key] = config.get(key);
  }
  return snapshot;
}
|
|
41
|
+
|
|
42
|
+
/**
 * Validate and persist any provided settings, leaving others untouched.
 *
 * Provider and format are normalized (trimmed, lowercased) and validated
 * against the allowed sets; model/apiKey/baseUrl accept any string and may
 * be cleared by passing "".
 *
 * @param {{llmProvider?: string, outputFormat?: string, model?: string, apiKey?: string, baseUrl?: string}} partialSettings
 * @returns {object} The full settings snapshot after the update.
 * @throws {Error} When an invalid provider or format value is supplied.
 */
export function saveSettings(partialSettings) {
  const config = getConfig();

  if (partialSettings.llmProvider) {
    const provider = String(partialSettings.llmProvider).trim().toLowerCase();
    if (!VALID_PROVIDERS.has(provider)) {
      throw new Error(
        `Invalid --llm value "${partialSettings.llmProvider}". Use ollama, openai, or anthropic.`,
      );
    }
    config.set("llmProvider", provider);
  }

  if (partialSettings.outputFormat) {
    const format = String(partialSettings.outputFormat).trim().toLowerCase();
    if (!VALID_FORMATS.has(format)) {
      throw new Error(
        `Invalid --format value "${partialSettings.outputFormat}". Use json, text, or pdf.`,
      );
    }
    config.set("outputFormat", format);
  }

  // Free-form fields: persist trimmed values, including the empty string.
  for (const key of ["model", "apiKey", "baseUrl"]) {
    if (partialSettings[key] !== undefined) {
      config.set(key, String(partialSettings[key]).trim());
    }
  }

  return getSettings();
}
|
|
76
|
+
|
|
77
|
+
/**
 * Absolute path of the settings file on disk (useful for diagnostics).
 *
 * @returns {string} Filesystem path of the persisted config.
 */
export function getConfigPath() {
  return getConfig().path;
}
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import { createInterface } from "node:readline/promises";
|
|
2
|
+
import { stdin as input, stdout as output } from "node:process";
|
|
3
|
+
|
|
4
|
+
/**
 * Prompt the user on stdin/stdout and return a trimmed answer.
 *
 * @param {string} question - Prompt text shown to the user.
 * @param {string} [fallback=""] - Returned when the answer is blank.
 * @returns {Promise<string>} Trimmed answer, or the fallback.
 */
export async function askQuestion(question, fallback = "") {
  const rl = createInterface({ input, output });
  try {
    const raw = (await rl.question(question)).trim();
    return raw === "" ? fallback : raw;
  } finally {
    // Always release the readline interface, even if the prompt throws.
    rl.close();
  }
}
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
import path from "node:path";
|
|
2
|
+
import fs from "fs-extra";
|
|
3
|
+
import yaml from "js-yaml";
|
|
4
|
+
|
|
5
|
+
/**
 * Guess the spec dialect from the file extension and leading content.
 * RAML is detected by either extension or the "#%RAML" header comment.
 *
 * @param {string} filePath - Path used for extension sniffing.
 * @param {string} rawText - File contents, used for the RAML header check.
 * @returns {"RAML"|"OpenAPI (JSON)"|"OpenAPI (YAML)"|"Unknown"}
 */
function inferSpecType(filePath, rawText) {
  const name = filePath.toLowerCase();

  if (name.endsWith(".raml") || rawText.trimStart().startsWith("#%RAML")) {
    return "RAML";
  }
  if (name.endsWith(".json")) {
    return "OpenAPI (JSON)";
  }
  if (name.endsWith(".yaml") || name.endsWith(".yml")) {
    return "OpenAPI (YAML)";
  }
  return "Unknown";
}
|
|
20
|
+
|
|
21
|
+
/**
 * Parse spec text according to its detected dialect.
 * RAML is parsed as YAML (the "#%RAML" header line is a YAML comment).
 * Unknown dialects fall back to JSON first, then YAML.
 *
 * @param {string} rawText - Spec file contents.
 * @param {string} specType - Result of inferSpecType().
 * @returns {*} Parsed document (may throw on invalid syntax).
 */
function parseSpec(rawText, specType) {
  switch (specType) {
    case "OpenAPI (JSON)":
      return JSON.parse(rawText);
    case "OpenAPI (YAML)":
    case "RAML":
      return yaml.load(rawText);
    default: {
      // Unknown extension: JSON is the stricter parser, so try it first.
      try {
        return JSON.parse(rawText);
      } catch {
        return yaml.load(rawText);
      }
    }
  }
}
|
|
35
|
+
|
|
36
|
+
/**
 * Load, type-sniff, and parse an API spec from disk.
 *
 * @param {string} filePath - Relative or absolute path to the spec file.
 * @returns {Promise<{filePath: string, fileName: string, specType: string, sizeBytes: number, rawText: string, parsed: object}>}
 * @throws {Error} When the file is missing, syntactically invalid, or does
 *   not parse to an object.
 */
export async function loadSpecFromFile(filePath) {
  const absolutePath = path.resolve(filePath);

  if (!(await fs.pathExists(absolutePath))) {
    throw new Error(`Spec file not found: ${filePath}`);
  }

  const rawText = await fs.readFile(absolutePath, "utf8");
  const specType = inferSpecType(absolutePath, rawText);

  let parsed;
  try {
    parsed = parseSpec(rawText, specType);
  } catch (error) {
    // Surface parser failures with a uniform, user-facing message.
    throw new Error(`Invalid API spec syntax: ${error.message}`);
  }

  // Scalars and null parse "successfully" but are not usable specs.
  if (!parsed || typeof parsed !== "object") {
    throw new Error("Invalid API spec: expected JSON or YAML object.");
  }

  return {
    filePath: absolutePath,
    fileName: path.basename(absolutePath),
    specType,
    sizeBytes: Buffer.byteLength(rawText, "utf8"),
    rawText,
    parsed,
  };
}
|