@delegance/claude-autopilot 5.2.2 → 6.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +1027 -1
- package/README.md +104 -17
- package/dist/src/adapters/council/claude.js +2 -1
- package/dist/src/adapters/council/openai.js +14 -7
- package/dist/src/adapters/deploy/_http.d.ts +43 -0
- package/dist/src/adapters/deploy/_http.js +99 -0
- package/dist/src/adapters/deploy/fly.d.ts +206 -0
- package/dist/src/adapters/deploy/fly.js +696 -0
- package/dist/src/adapters/deploy/generic.d.ts +39 -0
- package/dist/src/adapters/deploy/generic.js +98 -0
- package/dist/src/adapters/deploy/index.d.ts +15 -0
- package/dist/src/adapters/deploy/index.js +78 -0
- package/dist/src/adapters/deploy/render.d.ts +181 -0
- package/dist/src/adapters/deploy/render.js +550 -0
- package/dist/src/adapters/deploy/types.d.ts +221 -0
- package/dist/src/adapters/deploy/types.js +15 -0
- package/dist/src/adapters/deploy/vercel.d.ts +143 -0
- package/dist/src/adapters/deploy/vercel.js +426 -0
- package/dist/src/adapters/pricing.d.ts +36 -0
- package/dist/src/adapters/pricing.js +40 -0
- package/dist/src/adapters/review-engine/claude.js +2 -1
- package/dist/src/adapters/review-engine/codex.js +12 -8
- package/dist/src/adapters/review-engine/gemini.js +2 -1
- package/dist/src/adapters/review-engine/openai-compatible.js +2 -1
- package/dist/src/adapters/sdk-loader.d.ts +15 -0
- package/dist/src/adapters/sdk-loader.js +77 -0
- package/dist/src/cli/autopilot.d.ts +71 -0
- package/dist/src/cli/autopilot.js +735 -0
- package/dist/src/cli/brainstorm.d.ts +23 -0
- package/dist/src/cli/brainstorm.js +131 -0
- package/dist/src/cli/costs.d.ts +15 -1
- package/dist/src/cli/costs.js +99 -10
- package/dist/src/cli/deploy.d.ts +71 -0
- package/dist/src/cli/deploy.js +539 -0
- package/dist/src/cli/fix.d.ts +18 -0
- package/dist/src/cli/fix.js +105 -11
- package/dist/src/cli/help-text.d.ts +52 -0
- package/dist/src/cli/help-text.js +400 -0
- package/dist/src/cli/implement.d.ts +91 -0
- package/dist/src/cli/implement.js +196 -0
- package/dist/src/cli/index.js +784 -222
- package/dist/src/cli/json-envelope.d.ts +187 -0
- package/dist/src/cli/json-envelope.js +270 -0
- package/dist/src/cli/json-mode.d.ts +33 -0
- package/dist/src/cli/json-mode.js +201 -0
- package/dist/src/cli/migrate.d.ts +111 -0
- package/dist/src/cli/migrate.js +305 -0
- package/dist/src/cli/plan.d.ts +81 -0
- package/dist/src/cli/plan.js +149 -0
- package/dist/src/cli/pr.d.ts +106 -0
- package/dist/src/cli/pr.js +191 -19
- package/dist/src/cli/preflight.js +102 -1
- package/dist/src/cli/review.d.ts +27 -0
- package/dist/src/cli/review.js +126 -0
- package/dist/src/cli/runs-watch-renderer.d.ts +45 -0
- package/dist/src/cli/runs-watch-renderer.js +275 -0
- package/dist/src/cli/runs-watch.d.ts +41 -0
- package/dist/src/cli/runs-watch.js +395 -0
- package/dist/src/cli/runs.d.ts +122 -0
- package/dist/src/cli/runs.js +902 -0
- package/dist/src/cli/scan.d.ts +93 -0
- package/dist/src/cli/scan.js +166 -40
- package/dist/src/cli/spec.d.ts +66 -0
- package/dist/src/cli/spec.js +132 -0
- package/dist/src/cli/validate.d.ts +29 -0
- package/dist/src/cli/validate.js +131 -0
- package/dist/src/core/config/schema.d.ts +43 -0
- package/dist/src/core/config/schema.js +25 -0
- package/dist/src/core/config/types.d.ts +17 -0
- package/dist/src/core/council/runner.d.ts +10 -1
- package/dist/src/core/council/runner.js +25 -3
- package/dist/src/core/council/types.d.ts +7 -0
- package/dist/src/core/errors.d.ts +1 -1
- package/dist/src/core/errors.js +12 -0
- package/dist/src/core/logging/redaction.d.ts +13 -0
- package/dist/src/core/logging/redaction.js +20 -0
- package/dist/src/core/migrate/detector-rules.js +6 -0
- package/dist/src/core/migrate/schema-validator.js +22 -1
- package/dist/src/core/phases/static-rules.d.ts +5 -1
- package/dist/src/core/phases/static-rules.js +2 -5
- package/dist/src/core/run-state/budget.d.ts +88 -0
- package/dist/src/core/run-state/budget.js +141 -0
- package/dist/src/core/run-state/cli-internal.d.ts +21 -0
- package/dist/src/core/run-state/cli-internal.js +174 -0
- package/dist/src/core/run-state/events.d.ts +59 -0
- package/dist/src/core/run-state/events.js +504 -0
- package/dist/src/core/run-state/lock.d.ts +61 -0
- package/dist/src/core/run-state/lock.js +206 -0
- package/dist/src/core/run-state/phase-context.d.ts +60 -0
- package/dist/src/core/run-state/phase-context.js +108 -0
- package/dist/src/core/run-state/phase-registry.d.ts +137 -0
- package/dist/src/core/run-state/phase-registry.js +162 -0
- package/dist/src/core/run-state/phase-runner.d.ts +80 -0
- package/dist/src/core/run-state/phase-runner.js +447 -0
- package/dist/src/core/run-state/provider-readback.d.ts +130 -0
- package/dist/src/core/run-state/provider-readback.js +426 -0
- package/dist/src/core/run-state/replay-decision.d.ts +69 -0
- package/dist/src/core/run-state/replay-decision.js +144 -0
- package/dist/src/core/run-state/resolve-engine.d.ts +100 -0
- package/dist/src/core/run-state/resolve-engine.js +190 -0
- package/dist/src/core/run-state/resume-preflight.d.ts +66 -0
- package/dist/src/core/run-state/resume-preflight.js +116 -0
- package/dist/src/core/run-state/run-phase-with-lifecycle.d.ts +73 -0
- package/dist/src/core/run-state/run-phase-with-lifecycle.js +186 -0
- package/dist/src/core/run-state/runs.d.ts +57 -0
- package/dist/src/core/run-state/runs.js +288 -0
- package/dist/src/core/run-state/snapshot.d.ts +14 -0
- package/dist/src/core/run-state/snapshot.js +114 -0
- package/dist/src/core/run-state/state.d.ts +40 -0
- package/dist/src/core/run-state/state.js +164 -0
- package/dist/src/core/run-state/types.d.ts +278 -0
- package/dist/src/core/run-state/types.js +13 -0
- package/dist/src/core/run-state/ulid.d.ts +11 -0
- package/dist/src/core/run-state/ulid.js +95 -0
- package/dist/src/core/schema-alignment/extractor/index.d.ts +1 -1
- package/dist/src/core/schema-alignment/extractor/index.js +2 -2
- package/dist/src/core/schema-alignment/extractor/prisma.d.ts +13 -1
- package/dist/src/core/schema-alignment/extractor/prisma.js +65 -10
- package/dist/src/core/schema-alignment/git-history.d.ts +19 -0
- package/dist/src/core/schema-alignment/git-history.js +53 -0
- package/dist/src/core/static-rules/rules/brand-tokens.js +2 -2
- package/dist/src/core/static-rules/rules/schema-alignment.js +14 -4
- package/package.json +9 -5
- package/scripts/autoregress.ts +3 -2
- package/skills/claude-autopilot.md +1 -1
- package/skills/make-interfaces-feel-better/SKILL.md +104 -0
- package/skills/migrate/SKILL.md +193 -47
- package/skills/simplify-ui/SKILL.md +103 -0
- package/skills/ui/SKILL.md +117 -0
- package/skills/ui-ux-pro-max/SKILL.md +90 -0
|
@@ -0,0 +1,426 @@
|
|
|
1
|
+
// src/adapters/deploy/vercel.ts
|
|
2
|
+
//
|
|
3
|
+
// First-class Vercel deploy adapter. Phase 1 of the v5.4 spec.
|
|
4
|
+
//
|
|
5
|
+
// Implements `deploy()` (POST + poll until terminal) and `status()` (one-shot
|
|
6
|
+
// GET). Log streaming is Phase 2; rollback is Phase 3.
|
|
7
|
+
//
|
|
8
|
+
// All HTTP calls go through an injectable `fetchImpl` so unit tests never hit
|
|
9
|
+
// the real Vercel API.
|
|
10
|
+
//
|
|
11
|
+
// Spec: docs/specs/v5.4-vercel-adapter.md
|
|
12
|
+
import { GuardrailError } from "../../core/errors.js";
|
|
13
|
+
import { redactLogLines } from "../../core/logging/redaction.js";
|
|
14
|
+
import { fetchWithRetry, safeReadBody } from "./_http.js";
|
|
15
|
+
// Base URL for all Vercel REST API calls; methods append versioned paths
// (/v13/deployments, /v6/deployments, /v2/deployments/<id>/events).
const VERCEL_API_BASE = 'https://api.vercel.com';
|
|
16
|
+
/**
|
|
17
|
+
* Vercel deploy adapter.
|
|
18
|
+
*
|
|
19
|
+
* Construct once per pipeline run. The adapter is stateless across calls — all
|
|
20
|
+
* configuration (token, project, team) is captured at construction time.
|
|
21
|
+
*/
|
|
22
|
+
export class VercelDeployAdapter {
    // Provider identifier surfaced in error metadata and results.
    name = 'vercel';
    // API token (from opts.token or the VERCEL_TOKEN env var).
    token;
    // Project ID or slug; required at construction time.
    project;
    // Optional team ID — appended as ?teamId= to every API URL when set.
    team;
    // Deployment target passed on create; defaults to 'production'.
    target;
    // Delay between status polls in pollUntilTerminal (default 2000ms).
    pollIntervalMs;
    // Overall polling budget before deploy() gives up as 'in-progress'
    // (default 15 minutes).
    maxPollMs;
    // Injectable fetch — tests supply a fake so no real API is hit.
    fetchImpl;
    // Injectable sleep/now for deterministic tests.
    sleep;
    now;
    // Optional extra patterns forwarded to redactLogLines.
    redactionPatterns;
    /**
     * @param opts - token?, project (required), team?, target?,
     *   pollIntervalMs?, maxPollMs?, fetchImpl?, sleepImpl?, nowImpl?,
     *   redactionPatterns?
     * @throws GuardrailError code 'auth' when no token is available;
     *   code 'invalid_config' when `project` is missing.
     */
    constructor(opts) {
        const token = opts.token ?? process.env.VERCEL_TOKEN;
        if (!token) {
            throw new GuardrailError('Vercel deploy adapter requires VERCEL_TOKEN. Create one at https://vercel.com/account/tokens', { code: 'auth', provider: 'vercel' });
        }
        if (!opts.project) {
            throw new GuardrailError('Vercel deploy adapter requires `project` (project ID or slug)', { code: 'invalid_config', provider: 'vercel' });
        }
        this.token = token;
        this.project = opts.project;
        this.team = opts.team;
        this.target = opts.target ?? 'production';
        this.pollIntervalMs = opts.pollIntervalMs ?? 2000;
        this.maxPollMs = opts.maxPollMs ?? 15 * 60 * 1000;
        this.fetchImpl = opts.fetchImpl ?? globalThis.fetch;
        this.sleep = opts.sleepImpl ?? ((ms) => new Promise((r) => setTimeout(r, ms)));
        this.now = opts.nowImpl ?? Date.now;
        this.redactionPatterns = opts.redactionPatterns;
    }
    /**
     * Create a deployment (POST /v13/deployments) and poll until it reaches a
     * terminal state (READY / ERROR / CANCELED) or the maxPollMs budget runs out.
     *
     * @param input - { commitSha?, ref?, meta?, signal?, onDeployStart? }
     * @returns the shaped result from pollUntilTerminal.
     * @throws GuardrailError on non-OK API responses or a missing deployment id.
     */
    async deploy(input) {
        const start = this.now();
        const url = this.urlWithTeam(`${VERCEL_API_BASE}/v13/deployments`);
        const body = {
            name: this.project,
            target: this.target,
            meta: input.meta,
        };
        // Only include gitSource when we have a commitSha — Vercel requires the
        // full {type, repoId, ref} contract for git deploys, which we can't
        // synthesize from a SHA alone in Phase 1. Callers using `commitSha` should
        // also have `VERCEL_PROJECT_ID` linked via `vc link` so Vercel resolves
        // the repo from the linked project.
        if (input.commitSha) {
            body.gitSource = { type: 'github', sha: input.commitSha, ref: input.ref };
        }
        else if (input.ref) {
            body.gitSource = { type: 'github', ref: input.ref };
        }
        const res = await fetchWithRetry(this.fetchImpl, url, {
            method: 'POST',
            headers: this.headers(),
            body: JSON.stringify(body),
            signal: input.signal,
        }, { sleepImpl: this.sleep, provider: 'vercel' });
        await this.assertOkOrThrow(res, 'create deployment');
        const created = (await res.json());
        if (!created.id) {
            throw new GuardrailError(`Vercel returned no deployment id (got: ${JSON.stringify(created).slice(0, 200)})`, { code: 'adapter_bug', provider: 'vercel' });
        }
        // Phase 2: fire onDeployStart so callers (e.g. --watch) can subscribe
        // to logs in parallel with polling. Wrap in try/catch — a buggy callback
        // must not crash the deploy.
        try {
            input.onDeployStart?.(created.id);
        }
        catch {
            /* swallow — observability concern only */
        }
        return this.pollUntilTerminal(created.id, start, input.signal);
    }
    /**
     * One-shot status check (GET /v13/deployments/<id>) — no polling.
     *
     * @param input - { deployId, signal? }
     * @returns the shaped result for the deployment's current state, with
     *   `deployId` echoed back.
     */
    async status(input) {
        const start = this.now();
        const url = this.urlWithTeam(`${VERCEL_API_BASE}/v13/deployments/${encodeURIComponent(input.deployId)}`);
        const res = await fetchWithRetry(this.fetchImpl, url, {
            method: 'GET',
            headers: this.headers(),
            signal: input.signal,
        }, { sleepImpl: this.sleep, provider: 'vercel' });
        await this.assertOkOrThrow(res, 'get deployment');
        const data = (await res.json());
        // Vercel responses expose the state under either `readyState` or
        // `state` depending on endpoint/version — accept both.
        const state = data.readyState ?? data.state;
        const result = this.shapeResult(input.deployId, data, state, this.now() - start);
        return { ...result, deployId: input.deployId };
    }
    /**
     * Phase 2 — subscribe to real-time build logs for a deployment.
     *
     * Streams `GET /v2/deployments/<id>/events?builds=1&follow=1` and yields a
     * `DeployLogLine` for each `stdout` / `stderr` event. Lifecycle events
     * (`state`, `complete`) are filtered out — the polling loop in `deploy()`
     * already handles them. Malformed JSON lines are skipped silently rather
     * than crashing a long-running stream.
     *
     * Cancellation: pass `input.signal`. Once aborted, the underlying fetch
     * is torn down and the iterator returns.
     */
    async *streamLogs(input) {
        const url = this.urlWithTeam(`${VERCEL_API_BASE}/v2/deployments/${encodeURIComponent(input.deployId)}/events?builds=1&follow=1`);
        const res = await this.fetchEventsWithRetry(url, input.signal);
        await this.assertOkOrThrow(res, 'stream logs');
        if (!res.body) {
            throw new GuardrailError(`Vercel events response had no body for ${input.deployId}`, { code: 'adapter_bug', provider: 'vercel' });
        }
        const reader = res.body.getReader();
        const decoder = new TextDecoder('utf-8');
        // Accumulates undecoded partial lines across reads; lines are split
        // on '\n' as chunks arrive.
        let buf = '';
        try {
            while (true) {
                if (input.signal?.aborted)
                    return;
                const { done, value } = await reader.read();
                if (done) {
                    // Flush a trailing partial line if present.
                    if (buf.length > 0) {
                        const line = parseEventLine(buf);
                        if (line)
                            yield this.redactLine(line);
                    }
                    return;
                }
                buf += decoder.decode(value, { stream: true });
                let nl = buf.indexOf('\n');
                while (nl !== -1) {
                    const raw = buf.slice(0, nl);
                    buf = buf.slice(nl + 1);
                    const line = parseEventLine(raw);
                    if (line)
                        yield this.redactLine(line);
                    nl = buf.indexOf('\n');
                }
            }
        }
        finally {
            try {
                reader.releaseLock();
            }
            catch { /* ignore */ }
        }
    }
    /**
     * Phase 3 — promote a previously-built deployment to production.
     *
     * Two modes:
     *   - `input.to` set → promote that deploy ID directly. Cheapest path.
     *   - `input.to` omitted → look up the previous prod deploy via
     *     `listDeployments(5)` and promote it. Throws `no_previous_deploy`
     *     when there's nothing to roll back to (project with one deploy,
     *     or every prior deploy is in ERROR/CANCELED state).
     *
     * Always-query is intentional: we never cache deploy IDs locally, so a
     * promote performed from the Vercel dashboard between our deploy and
     * our rollback is still observable.
     */
    async rollback(input) {
        const start = this.now();
        let targetId = input.to;
        if (!targetId) {
            const prev = await this.findPreviousProdDeployment(input.signal);
            if (!prev) {
                throw new GuardrailError(`No previous production deployment found for project "${this.project}" to roll back to`, { code: 'no_previous_deploy', provider: 'vercel' });
            }
            targetId = prev.id;
        }
        const url = this.urlWithTeam(`${VERCEL_API_BASE}/v13/deployments/${encodeURIComponent(targetId)}/promote`);
        const res = await fetchWithRetry(this.fetchImpl, url, {
            method: 'POST',
            headers: this.headers(),
            body: '{}',
            signal: input.signal,
        }, { sleepImpl: this.sleep, provider: 'vercel' });
        await this.assertOkOrThrow(res, 'promote deployment');
        // Promote responses are typically empty (204) or echo the deployment.
        // Be defensive — parse if there's a body, otherwise carry on with the
        // ID alone.
        let data;
        try {
            data = (await res.json());
        }
        catch {
            data = undefined;
        }
        return {
            status: 'pass',
            deployId: targetId,
            rolledBackTo: targetId,
            deployUrl: data?.url ? `https://${data.url}` : undefined,
            buildLogsUrl: this.buildLogsUrl(targetId),
            durationMs: this.now() - start,
            output: redactLogLines(`Vercel deployment ${targetId} promoted to production`, this.redactionPatterns),
        };
    }
    /**
     * Phase 3 — list recent production deployments for the configured
     * project. Used by the `deploy status` CLI subcommand and by the
     * `findPreviousProdDeployment()` helper backing `rollback()`.
     *
     * The list endpoint is `/v6/deployments` (v13 is for individual
     * deployments). `target=production` filters out preview builds; `limit`
     * caps the result set — Vercel returns newest-first so a small limit
     * is sufficient for both rollback target detection and the CLI status
     * display (defaults to 5).
     */
    async listDeployments(limit = 5, signal) {
        const url = this.urlWithTeam(`${VERCEL_API_BASE}/v6/deployments` +
            `?projectId=${encodeURIComponent(this.project)}` +
            `&limit=${encodeURIComponent(String(limit))}` +
            `&target=production`);
        const res = await fetchWithRetry(this.fetchImpl, url, {
            method: 'GET',
            headers: this.headers(),
            signal,
        }, { sleepImpl: this.sleep, provider: 'vercel' });
        await this.assertOkOrThrow(res, 'list deployments');
        const data = (await res.json());
        // Defensive: tolerate a response without a `deployments` array.
        return Array.isArray(data.deployments) ? data.deployments : [];
    }
    // ─────────────────────────────────────────────────────────────────────────────
    // private helpers
    // ─────────────────────────────────────────────────────────────────────────────
    /**
     * Apply the adapter's redaction patterns to a streamed log line's `text`.
     * Mirrors the Fly/Render redactLine helpers introduced in v5.6 so secrets
     * never escape the adapter via streamed log entries.
     */
    redactLine(line) {
        return { ...line, text: redactLogLines(line.text, this.redactionPatterns) };
    }
    /**
     * Returns the deployment immediately preceding the current production
     * deployment, or `null` if no rollback target exists.
     *
     * "Preceding" means: among deployments with `state === 'READY'` (so we
     * never roll back to a known-broken build), sorted newest-first by
     * `createdAt`, the second entry. The first entry is the current prod
     * deploy and we drop it.
     */
    async findPreviousProdDeployment(signal) {
        const items = await this.listDeployments(5, signal);
        const ready = items.filter((d) => d.state === 'READY');
        ready.sort((a, b) => (b.createdAt ?? 0) - (a.createdAt ?? 0));
        if (ready.length < 2)
            return null;
        return ready[1] ?? null;
    }
    /**
     * Poll GET /v13/deployments/<id> every pollIntervalMs until the state is
     * READY/ERROR/CANCELED, the signal aborts, or maxPollMs elapses.
     * Abort and timeout both return an 'in-progress' result rather than
     * throwing; API errors still throw via assertOkOrThrow.
     */
    async pollUntilTerminal(deployId, start, signal) {
        const url = this.urlWithTeam(`${VERCEL_API_BASE}/v13/deployments/${encodeURIComponent(deployId)}`);
        while (true) {
            if (signal?.aborted) {
                return { status: 'in-progress', deployId, durationMs: this.now() - start };
            }
            if (this.now() - start > this.maxPollMs) {
                return {
                    status: 'in-progress',
                    deployId,
                    durationMs: this.now() - start,
                    buildLogsUrl: this.buildLogsUrl(deployId),
                    output: redactLogLines(`Deployment still in progress after ${this.maxPollMs}ms — check ${this.buildLogsUrl(deployId)}`, this.redactionPatterns),
                };
            }
            const res = await fetchWithRetry(this.fetchImpl, url, {
                method: 'GET',
                headers: this.headers(),
                signal,
            }, { sleepImpl: this.sleep, provider: 'vercel' });
            await this.assertOkOrThrow(res, 'poll deployment');
            const data = (await res.json());
            // Accept both `readyState` and `state` — see status().
            const state = data.readyState ?? data.state;
            if (state === 'READY' || state === 'ERROR' || state === 'CANCELED') {
                return this.shapeResult(deployId, data, state, this.now() - start);
            }
            await this.sleep(this.pollIntervalMs);
        }
    }
    /**
     * Map a raw Vercel deployment payload + state into the adapter's result
     * shape: READY → 'pass', ERROR/CANCELED → 'fail', anything else →
     * 'in-progress'.
     */
    shapeResult(deployId, data, state, durationMs) {
        const status = state === 'READY' ? 'pass' : state === 'ERROR' || state === 'CANCELED' ? 'fail' : 'in-progress';
        return {
            status,
            deployId,
            deployUrl: data.url ? `https://${data.url}` : undefined,
            buildLogsUrl: this.buildLogsUrl(deployId),
            durationMs,
            output: state
                ? redactLogLines(`Vercel deployment ${deployId}: state=${state}`, this.redactionPatterns)
                : undefined,
        };
    }
    // Standard auth + JSON headers for every API call.
    headers() {
        return {
            Authorization: `Bearer ${this.token}`,
            'Content-Type': 'application/json',
        };
    }
    // Append ?teamId=/&teamId= when a team is configured; pass through otherwise.
    urlWithTeam(base) {
        if (!this.team)
            return base;
        const sep = base.includes('?') ? '&' : '?';
        return `${base}${sep}teamId=${encodeURIComponent(this.team)}`;
    }
    // Dashboard URL for a deployment's build logs ('me' when no team is set).
    buildLogsUrl(deployId) {
        const teamSlug = this.team ?? 'me';
        return `https://vercel.com/${encodeURIComponent(teamSlug)}/${encodeURIComponent(this.project)}/${encodeURIComponent(deployId)}`;
    }
    /**
     * Throw a classified GuardrailError for non-OK responses:
     * 401/403 → 'auth', 404 → 'invalid_config', everything else →
     * 'adapter_bug'. No-op when res.ok.
     */
    async assertOkOrThrow(res, step) {
        if (res.ok)
            return;
        const bodyText = await safeReadBody(res);
        if (res.status === 401 || res.status === 403) {
            throw new GuardrailError(`Vercel auth failed (${res.status}) on ${step} — check VERCEL_TOKEN scope for project "${this.project}"${this.team ? ` (team ${this.team})` : ''}: ${bodyText}`, { code: 'auth', provider: 'vercel', step, details: { status: res.status } });
        }
        if (res.status === 404) {
            throw new GuardrailError(`Vercel project "${this.project}" not found (${res.status}) on ${step}: ${bodyText}`, { code: 'invalid_config', provider: 'vercel', step, details: { status: res.status } });
        }
        throw new GuardrailError(`Vercel API error (${res.status}) on ${step}: ${bodyText}`, { code: 'adapter_bug', provider: 'vercel', step, details: { status: res.status } });
    }
    /**
     * Like `fetchWithRetry` but tuned for the events endpoint:
     * - 404 right after a deploy POST is a known race (the deploy hasn't yet
     *   propagated to the events service). Retry up to N times with backoff.
     * - 5xx behaves the same as `fetchWithRetry`.
     * - Cancels cleanly on AbortError.
     * - Returns the last `Response` so the caller can `assertOkOrThrow` on a
     *   final non-OK status (e.g. 401 still bubbles immediately on attempt 1).
     */
    async fetchEventsWithRetry(url, signal, attempts = 3, baseMs = 500) {
        let lastRes;
        for (let i = 0; i < attempts; i++) {
            let res;
            try {
                res = await this.fetchImpl(url, {
                    method: 'GET',
                    headers: { ...this.headers(), Accept: 'text/event-stream' },
                    signal,
                });
            }
            catch (err) {
                if (err instanceof Error && err.name === 'AbortError')
                    throw err;
                if (i < attempts - 1) {
                    // Exponential backoff: baseMs, 2*baseMs, 4*baseMs, …
                    await this.sleep(baseMs * 2 ** i);
                    continue;
                }
                throw new GuardrailError(`Vercel events endpoint unreachable after ${attempts} attempts: ${err?.message ?? String(err)}`, { code: 'transient_network', provider: 'vercel' });
            }
            lastRes = res;
            // 404 after create-deployment is the known race — retry.
            if (res.status === 404 && i < attempts - 1) {
                await this.sleep(baseMs * 2 ** i);
                continue;
            }
            // 5xx is transient — retry.
            if (res.status >= 500 && res.status < 600 && i < attempts - 1) {
                await this.sleep(baseMs * 2 ** i);
                continue;
            }
            return res;
        }
        return lastRes;
    }
}
|
|
383
|
+
/**
|
|
384
|
+
* Parse a single line from Vercel's events endpoint into a `DeployLogLine`.
|
|
385
|
+
*
|
|
386
|
+
* Accepts both raw NDJSON and classic SSE `data: {...}` lines. Returns
|
|
387
|
+
* `null` for events we don't surface (state changes, completes, heartbeats,
|
|
388
|
+
* and any line that fails to JSON-parse) — silently skipping a malformed
|
|
389
|
+
* event is preferable to crashing a long-running stream.
|
|
390
|
+
*/
|
|
391
|
+
/**
 * Parse one line from Vercel's deployment events endpoint into a
 * `DeployLogLine`-shaped object, or `null` when the line carries nothing we
 * surface.
 *
 * Handles both raw NDJSON lines and classic SSE framing (`data: {...}`
 * payloads, `event:`/`id:`/`retry:` fields, `:` heartbeat comments).
 * Lifecycle events (anything other than `stdout`/`stderr`), empty payloads,
 * and lines that fail to JSON-parse all yield `null` — silently skipping a
 * malformed event is preferable to crashing a long-running stream.
 */
function parseEventLine(raw) {
    const line = raw.trim();
    // Blank lines and SSE heartbeat comments (": ...") carry no data.
    if (line === '' || line[0] === ':')
        return null;
    // SSE framing fields that are not data payloads — skip.
    for (const prefix of ['event:', 'id:', 'retry:']) {
        if (line.startsWith(prefix))
            return null;
    }
    // Classic SSE wraps the JSON in a "data: " prefix; NDJSON does not.
    const payload = line.startsWith('data:') ? line.slice(5).trim() : line;
    if (payload === '')
        return null;
    let ev;
    try {
        ev = JSON.parse(payload);
    }
    catch {
        return null; // malformed JSON — skip silently
    }
    if (ev === null || typeof ev !== 'object')
        return null;
    // Only stdout/stderr carry build output; lifecycle events ('state',
    // 'complete', …) are handled by the deploy() polling loop instead.
    const kind = ev.type;
    if (kind !== 'stdout' && kind !== 'stderr')
        return null;
    const text = typeof ev.payload?.text === 'string' ? ev.payload.text : '';
    if (text === '')
        return null;
    // Timestamp preference: event's `created`, then `date`, then wall clock.
    let timestamp;
    if (typeof ev.created === 'number') {
        timestamp = ev.created;
    }
    else if (typeof ev.date === 'number') {
        timestamp = ev.date;
    }
    else {
        timestamp = Date.now();
    }
    return { timestamp, level: kind, text };
}
|
|
426
|
+
//# sourceMappingURL=vercel.js.map
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Canonical per-model pricing for cost-ledger / cost-estimate / cost-cap features.
|
|
3
|
+
*
|
|
4
|
+
* Computed client-side because the OpenAI Responses API and most provider APIs
|
|
5
|
+
* return token counts but no $-cost field. Without these constants every codex
|
|
6
|
+
* run logged costUSD=0 even though tokens were tracked correctly.
|
|
7
|
+
*
|
|
8
|
+
* Units: USD per 1,000,000 tokens.
|
|
9
|
+
*
|
|
10
|
+
* `cachedInputPer1M` is the price for cached-input tokens (OpenAI's prompt-cache
|
|
11
|
+
* read tier — typically ~1/8 of `inputPer1M`). Set to `null` when the provider
|
|
12
|
+
* doesn't surface a cached tier or we don't have a confirmed published number.
|
|
13
|
+
*
|
|
14
|
+
* Adding a model here does NOT auto-wire it into any adapter — the per-adapter
|
|
15
|
+
* COST_PER_M_INPUT/OUTPUT constants in src/adapters/{council,review-engine}/*.ts
|
|
16
|
+
* remain the actual source of truth for runtime cost computation, and stay
|
|
17
|
+
* env-overridable. This table is the documentation/single-source-of-truth for
|
|
18
|
+
* "what should the defaults be" + drives the cost-ledger config defaults.
|
|
19
|
+
*
|
|
20
|
+
* Keep entries sorted: oldest → newest within each provider, providers
|
|
21
|
+
* alphabetical.
|
|
22
|
+
*/
|
|
23
|
+
export interface ModelPricing {
    /** USD per 1,000,000 input (prompt) tokens. */
    inputPer1M: number;
    /** USD per 1,000,000 output (completion) tokens. */
    outputPer1M: number;
    /**
     * USD per 1,000,000 cached-input tokens, or `null` when the provider has
     * no confirmed published cached tier. `null` is NOT `0` — `0` would mean
     * "cached tokens are free".
     */
    cachedInputPer1M: number | null;
}
/** Canonical model → pricing table; see the module header for maintenance rules. */
export declare const MODEL_PRICING: Record<string, ModelPricing>;
/**
 * Look up canonical pricing for a model. Returns `undefined` for unknown
 * models — callers should fall back to env-var defaults rather than throwing,
 * because this table is intentionally non-exhaustive (adapters work with any
 * model the underlying SDK accepts).
 */
export declare function getModelPricing(model: string): ModelPricing | undefined;
|
|
36
|
+
//# sourceMappingURL=pricing.d.ts.map
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
/**
 * Canonical per-model pricing table (USD per 1,000,000 tokens). Keep entries
 * sorted oldest → newest within each provider, providers alphabetical.
 * `cachedInputPer1M` is `null` (never 0) until a published number exists —
 * 0 would make consumers silently compute $0 for cached usage.
 */
export const MODEL_PRICING = {
    // OpenAI ----------------------------------------------------------------
    // gpt-5.3-codex: legacy default (pre-2026-04-23). Kept for back-compat —
    // users may have pinned via CODEX_MODEL env var.
    'gpt-5.3-codex': {
        inputPer1M: 1.25,
        outputPer1M: 10.0,
        cachedInputPer1M: null,
    },
    // gpt-5.4: superseded by gpt-5.5 on 2026-04-23. Kept for back-compat.
    'gpt-5.4': {
        inputPer1M: 2.5,
        outputPer1M: 15.0,
        cachedInputPer1M: null,
    },
    // gpt-5.5 (codename Spud, released 2026-04-23): current default for codex
    // adapter + council openai adapter. Better at coding than 5.4 with fewer
    // tokens, but ~2× more expensive per token. Available via standard
    // Responses/Chat Completions API at `gpt-5.5` (no `-codex` suffix).
    // Bugbot MEDIUM PR #93: `cachedInputPer1M` is `null` (NOT 0) until we have
    // a confirmed published number. The interface contract treats `0` as
    // "cached tokens are free" — using it would make consumers silently
    // compute $0 for cached usage. Heuristic is ~1/8 of input (~$0.625/1M)
    // per OpenAI's prompt-cache pattern, but no definitive source yet.
    'gpt-5.5': {
        inputPer1M: 5.0,
        outputPer1M: 30.0,
        cachedInputPer1M: null,
    },
};
/**
 * Look up canonical pricing for a model. Returns `undefined` for unknown
 * models — callers should fall back to env-var defaults rather than throwing,
 * because this table is intentionally non-exhaustive (adapters work with any
 * model the underlying SDK accepts).
 */
export function getModelPricing(model) {
    // Own-property guard: MODEL_PRICING is a plain object, so a bare index
    // lookup would return inherited Object.prototype members for model names
    // like 'toString' or 'constructor' instead of `undefined`, violating the
    // "undefined for unknown models" contract above.
    return Object.prototype.hasOwnProperty.call(MODEL_PRICING, model)
        ? MODEL_PRICING[model]
        : undefined;
}
|
|
40
|
+
//# sourceMappingURL=pricing.js.map
|
|
@@ -1,7 +1,7 @@
|
|
|
1
|
-
import Anthropic from '@anthropic-ai/sdk';
|
|
2
1
|
import { GuardrailError } from "../../core/errors.js";
|
|
3
2
|
import { parseReviewOutput } from "./parse-output.js";
|
|
4
3
|
import { buildSystemPrompt, classifyError } from "./prompt-builder.js";
|
|
4
|
+
import { loadAnthropic } from "../sdk-loader.js";
|
|
5
5
|
const DEFAULT_MODEL = 'claude-opus-4-7';
|
|
6
6
|
const MAX_OUTPUT_TOKENS = 4096;
|
|
7
7
|
// Cost per million tokens (USD) — opus-4-7 pricing
|
|
@@ -47,6 +47,7 @@ export const claudeAdapter = {
|
|
|
47
47
|
}
|
|
48
48
|
const model = input.context?.['model'] ?? DEFAULT_MODEL;
|
|
49
49
|
const systemPrompt = buildSystemPrompt(input, SYSTEM_PROMPT_TEMPLATE);
|
|
50
|
+
const Anthropic = await loadAnthropic();
|
|
50
51
|
const client = new Anthropic({ apiKey });
|
|
51
52
|
let response;
|
|
52
53
|
try {
|
|
@@ -1,15 +1,18 @@
|
|
|
1
|
-
import OpenAI from 'openai';
|
|
2
1
|
import { parseReviewOutput } from "./parse-output.js";
|
|
3
2
|
import { GuardrailError } from "../../core/errors.js";
|
|
4
3
|
import { buildSystemPrompt, classifyError } from "./prompt-builder.js";
|
|
5
|
-
|
|
4
|
+
import { loadOpenAI } from "../sdk-loader.js";
|
|
5
|
+
import { getModelPricing } from "../pricing.js";
|
|
6
|
+
const DEFAULT_MODEL = process.env.CODEX_MODEL ?? 'gpt-5.5';
|
|
6
7
|
const MAX_OUTPUT_TOKENS = 4096;
|
|
7
|
-
// Per-million-token rates
|
|
8
|
-
//
|
|
9
|
-
//
|
|
10
|
-
//
|
|
11
|
-
|
|
12
|
-
const
|
|
8
|
+
// Per-million-token rates. Bugbot LOW PR #93: wired to read from the
|
|
9
|
+
// canonical MODEL_PRICING table so the table is no longer dead code.
|
|
10
|
+
// Resolution order: env override → MODEL_PRICING entry for DEFAULT_MODEL →
|
|
11
|
+
// numeric fallback (gpt-5.5 published rates). Costs are computed client-side
|
|
12
|
+
// because the OpenAI Responses API returns token counts but no $-cost field.
|
|
13
|
+
const _pricing = getModelPricing(DEFAULT_MODEL);
|
|
14
|
+
const COST_PER_M_INPUT = Number(process.env.CODEX_COST_INPUT_PER_M ?? _pricing?.inputPer1M ?? 5.0);
|
|
15
|
+
const COST_PER_M_OUTPUT = Number(process.env.CODEX_COST_OUTPUT_PER_M ?? _pricing?.outputPer1M ?? 30.0);
|
|
13
16
|
const SYSTEM_PROMPT_TEMPLATE = `You are a senior software architect providing feedback on designs, proposals, and ideas.
|
|
14
17
|
|
|
15
18
|
The codebase context:
|
|
@@ -48,6 +51,7 @@ export const codexAdapter = {
|
|
|
48
51
|
throw new GuardrailError('OPENAI_API_KEY not set', { code: 'auth', provider: 'codex' });
|
|
49
52
|
}
|
|
50
53
|
const systemPrompt = buildSystemPrompt(input, SYSTEM_PROMPT_TEMPLATE);
|
|
54
|
+
const OpenAI = await loadOpenAI();
|
|
51
55
|
const client = new OpenAI({ apiKey });
|
|
52
56
|
let response;
|
|
53
57
|
try {
|
|
@@ -1,7 +1,7 @@
|
|
|
1
|
-
import { GoogleGenerativeAI } from '@google/generative-ai';
|
|
2
1
|
import { parseReviewOutput } from "./parse-output.js";
|
|
3
2
|
import { GuardrailError } from "../../core/errors.js";
|
|
4
3
|
import { buildSystemPrompt, classifyError } from "./prompt-builder.js";
|
|
4
|
+
import { loadGoogleGenerativeAI } from "../sdk-loader.js";
|
|
5
5
|
const DEFAULT_MODEL = 'gemini-2.5-pro-preview-05-06';
|
|
6
6
|
const MAX_OUTPUT_TOKENS = 4096;
|
|
7
7
|
// Cost per million tokens (USD) — gemini-2.5-pro pricing (<200k context)
|
|
@@ -55,6 +55,7 @@ export const geminiAdapter = {
|
|
|
55
55
|
}
|
|
56
56
|
const model = input.context?.['model'] ?? DEFAULT_MODEL;
|
|
57
57
|
const prompt = buildSystemPrompt(input, PROMPT_TEMPLATE).replace('{CONTENT}', input.content);
|
|
58
|
+
const GoogleGenerativeAI = await loadGoogleGenerativeAI();
|
|
58
59
|
const genAI = new GoogleGenerativeAI(apiKey);
|
|
59
60
|
const genModel = genAI.getGenerativeModel({
|
|
60
61
|
model,
|
|
@@ -1,7 +1,7 @@
|
|
|
1
|
-
import OpenAI from 'openai';
|
|
2
1
|
import { parseReviewOutput } from "./parse-output.js";
|
|
3
2
|
import { GuardrailError } from "../../core/errors.js";
|
|
4
3
|
import { buildSystemPrompt, classifyError } from "./prompt-builder.js";
|
|
4
|
+
import { loadOpenAI } from "../sdk-loader.js";
|
|
5
5
|
const MAX_OUTPUT_TOKENS = 4096;
|
|
6
6
|
const SYSTEM_PROMPT_TEMPLATE = `You are a senior software architect reviewing code changes for quality, security, and correctness.
|
|
7
7
|
|
|
@@ -49,6 +49,7 @@ export const openaiCompatibleAdapter = {
|
|
|
49
49
|
throw new GuardrailError('openai-compatible adapter requires options.model to be set in guardrail.config.yaml', { code: 'invalid_config', provider: 'openai-compatible' });
|
|
50
50
|
}
|
|
51
51
|
const systemPrompt = buildSystemPrompt(input, SYSTEM_PROMPT_TEMPLATE);
|
|
52
|
+
const OpenAI = await loadOpenAI();
|
|
52
53
|
const client = new OpenAI({ apiKey, ...(baseURL ? { baseURL } : {}) });
|
|
53
54
|
let response;
|
|
54
55
|
try {
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
import type AnthropicNS from '@anthropic-ai/sdk';
|
|
2
|
+
import type OpenAINS from 'openai';
|
|
3
|
+
import type { GoogleGenerativeAI as GoogleGenAINS } from '@google/generative-ai';
|
|
4
|
+
type AnthropicCtor = typeof AnthropicNS;
|
|
5
|
+
type OpenAICtor = typeof OpenAINS;
|
|
6
|
+
type GoogleGenerativeAICtor = typeof GoogleGenAINS;
|
|
7
|
+
export declare function loadAnthropic(): Promise<AnthropicCtor>;
|
|
8
|
+
export declare function loadOpenAI(): Promise<OpenAICtor>;
|
|
9
|
+
export declare function loadGoogleGenerativeAI(): Promise<GoogleGenerativeAICtor>;
|
|
10
|
+
/**
|
|
11
|
+
* Quick non-throwing check — used by `doctor` to report install state.
|
|
12
|
+
*/
|
|
13
|
+
export declare function isSdkInstalled(pkg: string): Promise<boolean>;
|
|
14
|
+
export {};
|
|
15
|
+
//# sourceMappingURL=sdk-loader.d.ts.map
|