@delegance/claude-autopilot 5.2.1 → 5.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +97 -0
- package/README.md +49 -17
- package/dist/src/adapters/council/claude.js +2 -1
- package/dist/src/adapters/council/openai.js +2 -1
- package/dist/src/adapters/deploy/generic.d.ts +39 -0
- package/dist/src/adapters/deploy/generic.js +98 -0
- package/dist/src/adapters/deploy/index.d.ts +13 -0
- package/dist/src/adapters/deploy/index.js +45 -0
- package/dist/src/adapters/deploy/types.d.ts +157 -0
- package/dist/src/adapters/deploy/types.js +15 -0
- package/dist/src/adapters/deploy/vercel.d.ts +127 -0
- package/dist/src/adapters/deploy/vercel.js +446 -0
- package/dist/src/adapters/review-engine/claude.js +2 -1
- package/dist/src/adapters/review-engine/codex.js +2 -1
- package/dist/src/adapters/review-engine/gemini.js +2 -1
- package/dist/src/adapters/review-engine/openai-compatible.js +2 -1
- package/dist/src/adapters/sdk-loader.d.ts +15 -0
- package/dist/src/adapters/sdk-loader.js +77 -0
- package/dist/src/cli/costs.js +4 -2
- package/dist/src/cli/deploy.d.ts +71 -0
- package/dist/src/cli/deploy.js +514 -0
- package/dist/src/cli/index.js +91 -3
- package/dist/src/cli/pr.js +8 -2
- package/dist/src/cli/preflight.js +76 -1
- package/dist/src/core/config/schema.d.ts +34 -0
- package/dist/src/core/config/schema.js +18 -0
- package/dist/src/core/config/types.d.ts +6 -0
- package/dist/src/core/errors.d.ts +1 -1
- package/dist/src/core/errors.js +1 -0
- package/dist/src/core/migrate/detector-rules.js +6 -0
- package/dist/src/core/migrate/schema-validator.js +7 -0
- package/dist/src/core/persist/cost-log.js +8 -0
- package/package.json +8 -5
- package/scripts/autoregress.ts +2 -1
- package/skills/migrate/SKILL.md +193 -47
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
import type { DeployAdapter, DeployInput, DeployLogLine, DeployResult, DeployRollbackInput, DeployStatusInput, DeployStatusResult, DeployStreamLogsInput } from './types.ts';
/** Vercel deployment states. The first three are terminal; the rest are interim. */
type VercelState = 'READY' | 'ERROR' | 'CANCELED' | 'BUILDING' | 'INITIALIZING' | 'QUEUED' | 'DEPLOYING' | 'ANALYZING';
/**
 * Single entry from `GET /v6/deployments`. Vercel's list response wraps
 * these under `{ deployments: [...] }`. We only surface the fields the
 * rollback + status flows need; callers MUST treat all fields beyond `id`
 * as best-effort (older deployments occasionally omit `createdAt` and
 * `state` is not always populated for super-recent in-flight builds).
 */
export interface VercelDeployListItem {
    id: string;
    url?: string;
    state?: VercelState;
    /** Milliseconds since epoch — Vercel returns this as a number. */
    createdAt?: number;
}
/** Construction-time configuration for {@link VercelDeployAdapter}. */
export interface VercelDeployAdapterOptions {
    /** Personal access token. Falls back to `process.env.VERCEL_TOKEN`. */
    token?: string;
    /** Vercel project ID or slug. Required. */
    project: string;
    /** Vercel team ID. Optional — required only for team accounts. */
    team?: string;
    /** Deploy target. Default: `production`. */
    target?: 'production' | 'preview';
    /** Polling interval (ms) when waiting for the build to reach a terminal state. Default: 2000. */
    pollIntervalMs?: number;
    /** Maximum total time to poll before returning `in-progress`. Default: 15 minutes. */
    maxPollMs?: number;
    /** Injected fetch implementation — defaults to `globalThis.fetch`. Tests pass a mock. */
    fetchImpl?: typeof fetch;
    /** Injected sleep implementation — tests pass a no-op so they don't actually wait. */
    sleepImpl?: (ms: number) => Promise<void>;
    /** Wall-clock source — tests pass a controllable counter. */
    nowImpl?: () => number;
}
/**
 * Vercel deploy adapter.
 *
 * Construct once per pipeline run. The adapter is stateless across calls — all
 * configuration (token, project, team) is captured at construction time.
 */
export declare class VercelDeployAdapter implements DeployAdapter {
    /** Adapter identifier used by the deploy registry. */
    readonly name = "vercel";
    private readonly token;
    private readonly project;
    private readonly team;
    private readonly target;
    private readonly pollIntervalMs;
    private readonly maxPollMs;
    private readonly fetchImpl;
    private readonly sleep;
    private readonly now;
    /**
     * @throws GuardrailError (`auth`) when neither `opts.token` nor
     *   `VERCEL_TOKEN` is set; (`invalid_config`) when `opts.project` is empty.
     */
    constructor(opts: VercelDeployAdapterOptions);
    /** Phase 1 — create a deployment and poll until it reaches a terminal state. */
    deploy(input: DeployInput): Promise<DeployResult>;
    /** Phase 1 — one-shot status lookup for an existing deployment. */
    status(input: DeployStatusInput): Promise<DeployStatusResult>;
    /**
     * Phase 2 — subscribe to real-time build logs for a deployment.
     *
     * Streams `GET /v2/deployments/<id>/events?builds=1&follow=1` and yields a
     * `DeployLogLine` for each `stdout` / `stderr` event. Lifecycle events
     * (`state`, `complete`) are filtered out — the polling loop in `deploy()`
     * already handles them. Malformed JSON lines are skipped silently rather
     * than crashing a long-running stream.
     *
     * Cancellation: pass `input.signal`. Once aborted, the underlying fetch
     * is torn down and the iterator returns.
     */
    streamLogs(input: DeployStreamLogsInput): AsyncGenerator<DeployLogLine>;
    /**
     * Phase 3 — promote a previously-built deployment to production.
     *
     * Two modes:
     * - `input.to` set → promote that deploy ID directly. Cheapest path.
     * - `input.to` omitted → look up the previous prod deploy via
     *   `listDeployments(5)` and promote it. Throws `no_previous_deploy`
     *   when there's nothing to roll back to (project with one deploy,
     *   or every prior deploy is in ERROR/CANCELED state).
     *
     * Always-query is intentional: we never cache deploy IDs locally, so a
     * promote performed from the Vercel dashboard between our deploy and
     * our rollback is still observable.
     */
    rollback(input: DeployRollbackInput): Promise<DeployResult>;
    /**
     * Phase 3 — list recent production deployments for the configured
     * project. Used by the `deploy status` CLI subcommand and by the
     * `findPreviousProdDeployment()` helper backing `rollback()`.
     *
     * The list endpoint is `/v6/deployments` (v13 is for individual
     * deployments). `target=production` filters out preview builds; `limit`
     * caps the result set — Vercel returns newest-first so a small limit
     * is sufficient for both rollback target detection and the CLI status
     * display (defaults to 5).
     */
    listDeployments(limit?: number, signal?: AbortSignal): Promise<VercelDeployListItem[]>;
    /**
     * Returns the deployment immediately preceding the current production
     * deployment, or `null` if no rollback target exists.
     *
     * "Preceding" means: among deployments with `state === 'READY'` (so we
     * never roll back to a known-broken build), sorted newest-first by
     * `createdAt`, the second entry. The first entry is the current prod
     * deploy and we drop it.
     */
    private findPreviousProdDeployment;
    private pollUntilTerminal;
    private shapeResult;
    private headers;
    private urlWithTeam;
    private buildLogsUrl;
    private assertOkOrThrow;
    private fetchWithRetry;
    /**
     * Like `fetchWithRetry` but tuned for the events endpoint:
     * - 404 right after a deploy POST is a known race (the deploy hasn't yet
     *   propagated to the events service). Retry up to N times with backoff.
     * - 5xx behaves the same as `fetchWithRetry`.
     * - Cancels cleanly on AbortError.
     * - Returns the last `Response` so the caller can `assertOkOrThrow` on a
     *   final non-OK status (e.g. 401 still bubbles immediately on attempt 1).
     */
    private fetchEventsWithRetry;
}
export {};
//# sourceMappingURL=vercel.d.ts.map
|
|
@@ -0,0 +1,446 @@
|
|
|
1
|
+
// src/adapters/deploy/vercel.ts
|
|
2
|
+
//
|
|
3
|
+
// First-class Vercel deploy adapter. Phase 1 of the v5.4 spec.
|
|
4
|
+
//
|
|
5
|
+
// Implements `deploy()` (POST + poll until terminal) and `status()` (one-shot
|
|
6
|
+
// GET). Log streaming is Phase 2; rollback is Phase 3.
|
|
7
|
+
//
|
|
8
|
+
// All HTTP calls go through an injectable `fetchImpl` so unit tests never hit
|
|
9
|
+
// the real Vercel API.
|
|
10
|
+
//
|
|
11
|
+
// Spec: docs/specs/v5.4-vercel-adapter.md
|
|
12
|
+
import { GuardrailError } from "../../core/errors.js";
|
|
13
|
+
// Base URL for every Vercel REST call below; versioned endpoint paths
// (/v13/deployments, /v6/deployments, /v2/deployments/<id>/events) are appended per call.
const VERCEL_API_BASE = 'https://api.vercel.com';
|
|
14
|
+
/**
|
|
15
|
+
* Vercel deploy adapter.
|
|
16
|
+
*
|
|
17
|
+
* Construct once per pipeline run. The adapter is stateless across calls — all
|
|
18
|
+
* configuration (token, project, team) is captured at construction time.
|
|
19
|
+
*/
|
|
20
|
+
export class VercelDeployAdapter {
    /** Adapter identifier used by the deploy registry. */
    name = 'vercel';
    // All fields below are fixed at construction; the adapter holds no
    // per-call mutable state.
    token;
    project;
    team;
    target;
    pollIntervalMs;
    maxPollMs;
    fetchImpl;
    sleep;
    now;
    /**
     * @param opts See VercelDeployAdapterOptions in the .d.ts.
     * @throws GuardrailError (`auth`) when neither `opts.token` nor
     *   `VERCEL_TOKEN` is available; (`invalid_config`) when `opts.project`
     *   is missing/empty.
     */
    constructor(opts) {
        const token = opts.token ?? process.env.VERCEL_TOKEN;
        if (!token) {
            throw new GuardrailError('Vercel deploy adapter requires VERCEL_TOKEN. Create one at https://vercel.com/account/tokens', { code: 'auth', provider: 'vercel' });
        }
        if (!opts.project) {
            throw new GuardrailError('Vercel deploy adapter requires `project` (project ID or slug)', { code: 'invalid_config', provider: 'vercel' });
        }
        this.token = token;
        this.project = opts.project;
        this.team = opts.team;
        this.target = opts.target ?? 'production';
        this.pollIntervalMs = opts.pollIntervalMs ?? 2000;
        this.maxPollMs = opts.maxPollMs ?? 15 * 60 * 1000;
        // Injection points so unit tests never hit the network or real timers.
        this.fetchImpl = opts.fetchImpl ?? globalThis.fetch;
        this.sleep = opts.sleepImpl ?? ((ms) => new Promise((r) => setTimeout(r, ms)));
        this.now = opts.nowImpl ?? Date.now;
    }
    /**
     * Create a deployment (`POST /v13/deployments`) and poll it until it
     * reaches a terminal state (READY / ERROR / CANCELED) or `maxPollMs`
     * elapses (then resolves with status 'in-progress').
     *
     * @param input May carry `commitSha`/`ref` (git source), `meta`,
     *   `signal` (cancellation) and an optional `onDeployStart` callback.
     * @throws GuardrailError on auth/config/API failures (see assertOkOrThrow),
     *   or `adapter_bug` if the create response lacks a deployment id.
     */
    async deploy(input) {
        const start = this.now();
        const url = this.urlWithTeam(`${VERCEL_API_BASE}/v13/deployments`);
        const body = {
            name: this.project,
            target: this.target,
            meta: input.meta,
        };
        // Only include gitSource when we have a commitSha — Vercel requires the
        // full {type, repoId, ref} contract for git deploys, which we can't
        // synthesize from a SHA alone in Phase 1. Callers using `commitSha` should
        // also have `VERCEL_PROJECT_ID` linked via `vc link` so Vercel resolves
        // the repo from the linked project.
        if (input.commitSha) {
            body.gitSource = { type: 'github', sha: input.commitSha, ref: input.ref };
        }
        else if (input.ref) {
            body.gitSource = { type: 'github', ref: input.ref };
        }
        const res = await this.fetchWithRetry(url, {
            method: 'POST',
            headers: this.headers(),
            body: JSON.stringify(body),
            signal: input.signal,
        });
        await this.assertOkOrThrow(res, 'create deployment');
        const created = (await res.json());
        if (!created.id) {
            throw new GuardrailError(`Vercel returned no deployment id (got: ${JSON.stringify(created).slice(0, 200)})`, { code: 'adapter_bug', provider: 'vercel' });
        }
        // Phase 2: fire onDeployStart so callers (e.g. --watch) can subscribe
        // to logs in parallel with polling. Wrap in try/catch — a buggy callback
        // must not crash the deploy.
        try {
            input.onDeployStart?.(created.id);
        }
        catch {
            /* swallow — observability concern only */
        }
        return this.pollUntilTerminal(created.id, start, input.signal);
    }
    /**
     * One-shot status lookup (`GET /v13/deployments/<id>`). Does not poll.
     * Returns the shaped result plus the echoed `deployId`.
     */
    async status(input) {
        const start = this.now();
        const url = this.urlWithTeam(`${VERCEL_API_BASE}/v13/deployments/${encodeURIComponent(input.deployId)}`);
        const res = await this.fetchWithRetry(url, {
            method: 'GET',
            headers: this.headers(),
            signal: input.signal,
        });
        await this.assertOkOrThrow(res, 'get deployment');
        const data = (await res.json());
        // v13 responses use `readyState`; fall back to `state` defensively.
        const state = data.readyState ?? data.state;
        const result = this.shapeResult(input.deployId, data, state, this.now() - start);
        return { ...result, deployId: input.deployId };
    }
    /**
     * Phase 2 — subscribe to real-time build logs for a deployment.
     *
     * Streams `GET /v2/deployments/<id>/events?builds=1&follow=1` and yields a
     * `DeployLogLine` for each `stdout` / `stderr` event. Lifecycle events
     * (`state`, `complete`) are filtered out — the polling loop in `deploy()`
     * already handles them. Malformed JSON lines are skipped silently rather
     * than crashing a long-running stream.
     *
     * Cancellation: pass `input.signal`. Once aborted, the underlying fetch
     * is torn down and the iterator returns.
     */
    async *streamLogs(input) {
        const url = this.urlWithTeam(`${VERCEL_API_BASE}/v2/deployments/${encodeURIComponent(input.deployId)}/events?builds=1&follow=1`);
        const res = await this.fetchEventsWithRetry(url, input.signal);
        await this.assertOkOrThrow(res, 'stream logs');
        if (!res.body) {
            throw new GuardrailError(`Vercel events response had no body for ${input.deployId}`, { code: 'adapter_bug', provider: 'vercel' });
        }
        const reader = res.body.getReader();
        const decoder = new TextDecoder('utf-8');
        // Rolling buffer of undelivered bytes; complete lines are split off at
        // each '\n' and parsed individually.
        let buf = '';
        try {
            while (true) {
                if (input.signal?.aborted)
                    return;
                const { done, value } = await reader.read();
                if (done) {
                    // Flush a trailing partial line if present.
                    if (buf.length > 0) {
                        const line = parseEventLine(buf);
                        if (line)
                            yield line;
                    }
                    return;
                }
                // stream:true keeps multi-byte UTF-8 sequences split across
                // chunks intact.
                buf += decoder.decode(value, { stream: true });
                let nl = buf.indexOf('\n');
                while (nl !== -1) {
                    const raw = buf.slice(0, nl);
                    buf = buf.slice(nl + 1);
                    const line = parseEventLine(raw);
                    if (line)
                        yield line;
                    nl = buf.indexOf('\n');
                }
            }
        }
        finally {
            // NOTE(review): only releases the lock — the underlying stream is
            // not cancelled here; teardown relies on the abort signal. Confirm
            // no connection lingers when the generator is dropped un-aborted.
            try {
                reader.releaseLock();
            }
            catch { /* ignore */ }
        }
    }
    /**
     * Phase 3 — promote a previously-built deployment to production.
     *
     * Two modes:
     * - `input.to` set → promote that deploy ID directly. Cheapest path.
     * - `input.to` omitted → look up the previous prod deploy via
     *   `listDeployments(5)` and promote it. Throws `no_previous_deploy`
     *   when there's nothing to roll back to (project with one deploy,
     *   or every prior deploy is in ERROR/CANCELED state).
     *
     * Always-query is intentional: we never cache deploy IDs locally, so a
     * promote performed from the Vercel dashboard between our deploy and
     * our rollback is still observable.
     */
    async rollback(input) {
        const start = this.now();
        let targetId = input.to;
        if (!targetId) {
            const prev = await this.findPreviousProdDeployment(input.signal);
            if (!prev) {
                throw new GuardrailError(`No previous production deployment found for project "${this.project}" to roll back to`, { code: 'no_previous_deploy', provider: 'vercel' });
            }
            targetId = prev.id;
        }
        const url = this.urlWithTeam(`${VERCEL_API_BASE}/v13/deployments/${encodeURIComponent(targetId)}/promote`);
        const res = await this.fetchWithRetry(url, {
            method: 'POST',
            headers: this.headers(),
            body: '{}',
            signal: input.signal,
        });
        await this.assertOkOrThrow(res, 'promote deployment');
        // Promote responses are typically empty (204) or echo the deployment.
        // Be defensive — parse if there's a body, otherwise carry on with the
        // ID alone.
        let data;
        try {
            data = (await res.json());
        }
        catch {
            data = undefined;
        }
        return {
            status: 'pass',
            deployId: targetId,
            rolledBackTo: targetId,
            deployUrl: data?.url ? `https://${data.url}` : undefined,
            buildLogsUrl: this.buildLogsUrl(targetId),
            durationMs: this.now() - start,
            output: `Vercel deployment ${targetId} promoted to production`,
        };
    }
    /**
     * Phase 3 — list recent production deployments for the configured
     * project. Used by the `deploy status` CLI subcommand and by the
     * `findPreviousProdDeployment()` helper backing `rollback()`.
     *
     * The list endpoint is `/v6/deployments` (v13 is for individual
     * deployments). `target=production` filters out preview builds; `limit`
     * caps the result set — Vercel returns newest-first so a small limit
     * is sufficient for both rollback target detection and the CLI status
     * display (defaults to 5).
     */
    async listDeployments(limit = 5, signal) {
        const url = this.urlWithTeam(`${VERCEL_API_BASE}/v6/deployments` +
            `?projectId=${encodeURIComponent(this.project)}` +
            `&limit=${encodeURIComponent(String(limit))}` +
            `&target=production`);
        const res = await this.fetchWithRetry(url, {
            method: 'GET',
            headers: this.headers(),
            signal,
        });
        await this.assertOkOrThrow(res, 'list deployments');
        const data = (await res.json());
        return Array.isArray(data.deployments) ? data.deployments : [];
    }
    // ─────────────────────────────────────────────────────────────────────────────
    // private helpers
    // ─────────────────────────────────────────────────────────────────────────────
    /**
     * Returns the deployment immediately preceding the current production
     * deployment, or `null` if no rollback target exists.
     *
     * "Preceding" means: among deployments with `state === 'READY'` (so we
     * never roll back to a known-broken build), sorted newest-first by
     * `createdAt`, the second entry. The first entry is the current prod
     * deploy and we drop it.
     */
    async findPreviousProdDeployment(signal) {
        const items = await this.listDeployments(5, signal);
        const ready = items.filter((d) => d.state === 'READY');
        // Entries missing createdAt sort as oldest (0).
        ready.sort((a, b) => (b.createdAt ?? 0) - (a.createdAt ?? 0));
        if (ready.length < 2)
            return null;
        return ready[1] ?? null;
    }
    /**
     * GETs the deployment every `pollIntervalMs` until it reaches
     * READY/ERROR/CANCELED. Two non-error exits: abort signal fires, or
     * `maxPollMs` elapses — both resolve with status 'in-progress'.
     */
    async pollUntilTerminal(deployId, start, signal) {
        const url = this.urlWithTeam(`${VERCEL_API_BASE}/v13/deployments/${encodeURIComponent(deployId)}`);
        while (true) {
            if (signal?.aborted) {
                return { status: 'in-progress', deployId, durationMs: this.now() - start };
            }
            if (this.now() - start > this.maxPollMs) {
                return {
                    status: 'in-progress',
                    deployId,
                    durationMs: this.now() - start,
                    buildLogsUrl: this.buildLogsUrl(deployId),
                    output: `Deployment still in progress after ${this.maxPollMs}ms — check ${this.buildLogsUrl(deployId)}`,
                };
            }
            const res = await this.fetchWithRetry(url, {
                method: 'GET',
                headers: this.headers(),
                signal,
            });
            await this.assertOkOrThrow(res, 'poll deployment');
            const data = (await res.json());
            const state = data.readyState ?? data.state;
            if (state === 'READY' || state === 'ERROR' || state === 'CANCELED') {
                return this.shapeResult(deployId, data, state, this.now() - start);
            }
            await this.sleep(this.pollIntervalMs);
        }
    }
    /**
     * Maps a Vercel state onto the adapter result shape:
     * READY → 'pass', ERROR/CANCELED → 'fail', anything else → 'in-progress'.
     */
    shapeResult(deployId, data, state, durationMs) {
        const status = state === 'READY' ? 'pass' : state === 'ERROR' || state === 'CANCELED' ? 'fail' : 'in-progress';
        return {
            status,
            deployId,
            deployUrl: data.url ? `https://${data.url}` : undefined,
            buildLogsUrl: this.buildLogsUrl(deployId),
            durationMs,
            output: state ? `Vercel deployment ${deployId}: state=${state}` : undefined,
        };
    }
    /** Common auth + JSON headers for every API call. */
    headers() {
        return {
            Authorization: `Bearer ${this.token}`,
            'Content-Type': 'application/json',
        };
    }
    /** Appends `teamId=<team>` to `base` when a team is configured. */
    urlWithTeam(base) {
        if (!this.team)
            return base;
        const sep = base.includes('?') ? '&' : '?';
        return `${base}${sep}teamId=${encodeURIComponent(this.team)}`;
    }
    /**
     * Human-facing dashboard URL for a deployment.
     * NOTE(review): uses the configured team ID (or 'me') as the URL's team
     * segment — the dashboard path appears to expect a team *slug*; confirm.
     */
    buildLogsUrl(deployId) {
        const teamSlug = this.team ?? 'me';
        return `https://vercel.com/${encodeURIComponent(teamSlug)}/${encodeURIComponent(this.project)}/${encodeURIComponent(deployId)}`;
    }
    /**
     * Throws a classified GuardrailError for non-OK responses:
     * 401/403 → 'auth', 404 → 'invalid_config', anything else → 'adapter_bug'.
     * Body text (capped at 500 chars) is included for diagnostics.
     */
    async assertOkOrThrow(res, step) {
        if (res.ok)
            return;
        const bodyText = await safeReadBody(res);
        if (res.status === 401 || res.status === 403) {
            throw new GuardrailError(`Vercel auth failed (${res.status}) on ${step} — check VERCEL_TOKEN scope for project "${this.project}"${this.team ? ` (team ${this.team})` : ''}: ${bodyText}`, { code: 'auth', provider: 'vercel', step, details: { status: res.status } });
        }
        if (res.status === 404) {
            throw new GuardrailError(`Vercel project "${this.project}" not found (${res.status}) on ${step}: ${bodyText}`, { code: 'invalid_config', provider: 'vercel', step, details: { status: res.status } });
        }
        throw new GuardrailError(`Vercel API error (${res.status}) on ${step}: ${bodyText}`, { code: 'adapter_bug', provider: 'vercel', step, details: { status: res.status } });
    }
    /**
     * fetch with exponential backoff (baseMs * 2^attempt). Retries network
     * errors and 5xx; returns 4xx immediately; rethrows AbortError without
     * retry. Throws 'transient_network' after all attempts fail.
     */
    async fetchWithRetry(url, init, attempts = 3, baseMs = 500) {
        let lastErr;
        for (let i = 0; i < attempts; i++) {
            try {
                const res = await this.fetchImpl(url, init);
                // 5xx is transient — retry. 4xx is the caller's problem — fail fast.
                if (res.status >= 500 && res.status < 600 && i < attempts - 1) {
                    lastErr = new Error(`HTTP ${res.status}`);
                    await this.sleep(baseMs * 2 ** i);
                    continue;
                }
                return res;
            }
            catch (err) {
                lastErr = err;
                // AbortError is intentional cancellation — surface it directly without retry.
                if (err instanceof Error && err.name === 'AbortError')
                    throw err;
                if (i < attempts - 1) {
                    await this.sleep(baseMs * 2 ** i);
                    continue;
                }
            }
        }
        throw new GuardrailError(`Vercel API unreachable after ${attempts} attempts: ${lastErr?.message ?? String(lastErr)}`, { code: 'transient_network', provider: 'vercel' });
    }
    /**
     * Like `fetchWithRetry` but tuned for the events endpoint:
     * - 404 right after a deploy POST is a known race (the deploy hasn't yet
     *   propagated to the events service). Retry up to N times with backoff.
     * - 5xx behaves the same as `fetchWithRetry`.
     * - Cancels cleanly on AbortError.
     * - Returns the last `Response` so the caller can `assertOkOrThrow` on a
     *   final non-OK status (e.g. 401 still bubbles immediately on attempt 1).
     */
    async fetchEventsWithRetry(url, signal, attempts = 3, baseMs = 500) {
        let lastRes;
        for (let i = 0; i < attempts; i++) {
            let res;
            try {
                res = await this.fetchImpl(url, {
                    method: 'GET',
                    headers: { ...this.headers(), Accept: 'text/event-stream' },
                    signal,
                });
            }
            catch (err) {
                if (err instanceof Error && err.name === 'AbortError')
                    throw err;
                if (i < attempts - 1) {
                    await this.sleep(baseMs * 2 ** i);
                    continue;
                }
                throw new GuardrailError(`Vercel events endpoint unreachable after ${attempts} attempts: ${err?.message ?? String(err)}`, { code: 'transient_network', provider: 'vercel' });
            }
            lastRes = res;
            // 404 after create-deployment is the known race — retry.
            if (res.status === 404 && i < attempts - 1) {
                await this.sleep(baseMs * 2 ** i);
                continue;
            }
            // 5xx is transient — retry.
            if (res.status >= 500 && res.status < 600 && i < attempts - 1) {
                await this.sleep(baseMs * 2 ** i);
                continue;
            }
            return res;
        }
        return lastRes;
    }
}
|
|
395
|
+
async function safeReadBody(res) {
|
|
396
|
+
try {
|
|
397
|
+
return (await res.text()).slice(0, 500);
|
|
398
|
+
}
|
|
399
|
+
catch {
|
|
400
|
+
return '<no body>';
|
|
401
|
+
}
|
|
402
|
+
}
|
|
403
|
+
/**
 * Parse one line from Vercel's events endpoint into a `DeployLogLine`.
 *
 * Handles both raw NDJSON and classic SSE `data: {...}` framing. Returns
 * `null` for anything we don't surface: blank lines, SSE comments and
 * heartbeats, SSE `event:`/`id:`/`retry:` fields, lines that fail to
 * JSON-parse, non-object payloads, lifecycle events (`state`, `complete`,
 * …), and events with no text. Dropping a malformed event beats crashing
 * a long-running stream.
 */
function parseEventLine(raw) {
    const line = raw.trim();
    // Blank lines and SSE framing (':' comments/heartbeats, event/id/retry
    // fields) carry no log data.
    if (line === '' || /^(:|event:|id:|retry:)/.test(line))
        return null;
    // Classic SSE wraps the JSON in a 'data: ' prefix — strip it.
    const jsonText = line.startsWith('data:') ? line.slice('data:'.length).trim() : line;
    if (jsonText === '')
        return null;
    let ev;
    try {
        ev = JSON.parse(jsonText);
    }
    catch {
        return null;
    }
    if (ev === null || typeof ev !== 'object')
        return null;
    // Only 'stdout'/'stderr' carry build output; lifecycle events such as
    // 'state'/'complete' are handled by the deploy() polling loop.
    if (ev.type !== 'stdout' && ev.type !== 'stderr')
        return null;
    const text = typeof ev.payload?.text === 'string' ? ev.payload.text : '';
    if (text === '')
        return null;
    const timestamp = typeof ev.created === 'number'
        ? ev.created
        : typeof ev.date === 'number'
            ? ev.date
            : Date.now();
    return { timestamp, level: ev.type, text };
}
|
|
446
|
+
//# sourceMappingURL=vercel.js.map
|
|
@@ -1,7 +1,7 @@
|
|
|
1
|
-
import Anthropic from '@anthropic-ai/sdk';
|
|
2
1
|
import { GuardrailError } from "../../core/errors.js";
|
|
3
2
|
import { parseReviewOutput } from "./parse-output.js";
|
|
4
3
|
import { buildSystemPrompt, classifyError } from "./prompt-builder.js";
|
|
4
|
+
import { loadAnthropic } from "../sdk-loader.js";
|
|
5
5
|
const DEFAULT_MODEL = 'claude-opus-4-7';
|
|
6
6
|
const MAX_OUTPUT_TOKENS = 4096;
|
|
7
7
|
// Cost per million tokens (USD) — opus-4-7 pricing
|
|
@@ -47,6 +47,7 @@ export const claudeAdapter = {
|
|
|
47
47
|
}
|
|
48
48
|
const model = input.context?.['model'] ?? DEFAULT_MODEL;
|
|
49
49
|
const systemPrompt = buildSystemPrompt(input, SYSTEM_PROMPT_TEMPLATE);
|
|
50
|
+
const Anthropic = await loadAnthropic();
|
|
50
51
|
const client = new Anthropic({ apiKey });
|
|
51
52
|
let response;
|
|
52
53
|
try {
|
|
@@ -1,7 +1,7 @@
|
|
|
1
|
-
import OpenAI from 'openai';
|
|
2
1
|
import { parseReviewOutput } from "./parse-output.js";
|
|
3
2
|
import { GuardrailError } from "../../core/errors.js";
|
|
4
3
|
import { buildSystemPrompt, classifyError } from "./prompt-builder.js";
|
|
4
|
+
import { loadOpenAI } from "../sdk-loader.js";
|
|
5
5
|
const DEFAULT_MODEL = process.env.CODEX_MODEL ?? 'gpt-5.3-codex';
|
|
6
6
|
const MAX_OUTPUT_TOKENS = 4096;
|
|
7
7
|
// Per-million-token rates for gpt-5.3-codex (override via env for other models).
|
|
@@ -48,6 +48,7 @@ export const codexAdapter = {
|
|
|
48
48
|
throw new GuardrailError('OPENAI_API_KEY not set', { code: 'auth', provider: 'codex' });
|
|
49
49
|
}
|
|
50
50
|
const systemPrompt = buildSystemPrompt(input, SYSTEM_PROMPT_TEMPLATE);
|
|
51
|
+
const OpenAI = await loadOpenAI();
|
|
51
52
|
const client = new OpenAI({ apiKey });
|
|
52
53
|
let response;
|
|
53
54
|
try {
|
|
@@ -1,7 +1,7 @@
|
|
|
1
|
-
import { GoogleGenerativeAI } from '@google/generative-ai';
|
|
2
1
|
import { parseReviewOutput } from "./parse-output.js";
|
|
3
2
|
import { GuardrailError } from "../../core/errors.js";
|
|
4
3
|
import { buildSystemPrompt, classifyError } from "./prompt-builder.js";
|
|
4
|
+
import { loadGoogleGenerativeAI } from "../sdk-loader.js";
|
|
5
5
|
const DEFAULT_MODEL = 'gemini-2.5-pro-preview-05-06';
|
|
6
6
|
const MAX_OUTPUT_TOKENS = 4096;
|
|
7
7
|
// Cost per million tokens (USD) — gemini-2.5-pro pricing (<200k context)
|
|
@@ -55,6 +55,7 @@ export const geminiAdapter = {
|
|
|
55
55
|
}
|
|
56
56
|
const model = input.context?.['model'] ?? DEFAULT_MODEL;
|
|
57
57
|
const prompt = buildSystemPrompt(input, PROMPT_TEMPLATE).replace('{CONTENT}', input.content);
|
|
58
|
+
const GoogleGenerativeAI = await loadGoogleGenerativeAI();
|
|
58
59
|
const genAI = new GoogleGenerativeAI(apiKey);
|
|
59
60
|
const genModel = genAI.getGenerativeModel({
|
|
60
61
|
model,
|
|
@@ -1,7 +1,7 @@
|
|
|
1
|
-
import OpenAI from 'openai';
|
|
2
1
|
import { parseReviewOutput } from "./parse-output.js";
|
|
3
2
|
import { GuardrailError } from "../../core/errors.js";
|
|
4
3
|
import { buildSystemPrompt, classifyError } from "./prompt-builder.js";
|
|
4
|
+
import { loadOpenAI } from "../sdk-loader.js";
|
|
5
5
|
const MAX_OUTPUT_TOKENS = 4096;
|
|
6
6
|
const SYSTEM_PROMPT_TEMPLATE = `You are a senior software architect reviewing code changes for quality, security, and correctness.
|
|
7
7
|
|
|
@@ -49,6 +49,7 @@ export const openaiCompatibleAdapter = {
|
|
|
49
49
|
throw new GuardrailError('openai-compatible adapter requires options.model to be set in guardrail.config.yaml', { code: 'invalid_config', provider: 'openai-compatible' });
|
|
50
50
|
}
|
|
51
51
|
const systemPrompt = buildSystemPrompt(input, SYSTEM_PROMPT_TEMPLATE);
|
|
52
|
+
const OpenAI = await loadOpenAI();
|
|
52
53
|
const client = new OpenAI({ apiKey, ...(baseURL ? { baseURL } : {}) });
|
|
53
54
|
let response;
|
|
54
55
|
try {
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
// Type-only imports: these keep the provider SDKs out of the runtime module
// graph (the adapters in this package load them on demand via loadX()).
import type AnthropicNS from '@anthropic-ai/sdk';
import type OpenAINS from 'openai';
import type { GoogleGenerativeAI as GoogleGenAINS } from '@google/generative-ai';
type AnthropicCtor = typeof AnthropicNS;
type OpenAICtor = typeof OpenAINS;
type GoogleGenerativeAICtor = typeof GoogleGenAINS;
/** Dynamically load the Anthropic SDK's client constructor. */
export declare function loadAnthropic(): Promise<AnthropicCtor>;
/** Dynamically load the OpenAI SDK's client constructor. */
export declare function loadOpenAI(): Promise<OpenAICtor>;
/** Dynamically load the Google Generative AI client constructor. */
export declare function loadGoogleGenerativeAI(): Promise<GoogleGenerativeAICtor>;
/**
 * Quick non-throwing check — used by `doctor` to report install state.
 */
export declare function isSdkInstalled(pkg: string): Promise<boolean>;
export {};
//# sourceMappingURL=sdk-loader.d.ts.map
|