run402 1.48.0 → 1.50.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/core-dist/config.js +10 -0
- package/core-dist/wallet-auth.js +62 -0
- package/core-dist/wallet.js +25 -0
- package/lib/deploy-v2.mjs +359 -0
- package/lib/deploy.mjs +14 -0
- package/lib/functions.mjs +16 -1
- package/package.json +1 -1
- package/sdk/core-dist/config.js +10 -0
- package/sdk/core-dist/wallet-auth.js +62 -0
- package/sdk/core-dist/wallet.js +25 -0
- package/sdk/dist/errors.d.ts +41 -0
- package/sdk/dist/errors.d.ts.map +1 -1
- package/sdk/dist/errors.js +23 -0
- package/sdk/dist/errors.js.map +1 -1
- package/sdk/dist/index.d.ts +24 -1
- package/sdk/dist/index.d.ts.map +1 -1
- package/sdk/dist/index.js +24 -1
- package/sdk/dist/index.js.map +1 -1
- package/sdk/dist/namespaces/apps.d.ts +14 -0
- package/sdk/dist/namespaces/apps.d.ts.map +1 -1
- package/sdk/dist/namespaces/apps.js +162 -19
- package/sdk/dist/namespaces/apps.js.map +1 -1
- package/sdk/dist/namespaces/deploy.d.ts +116 -0
- package/sdk/dist/namespaces/deploy.d.ts.map +1 -0
- package/sdk/dist/namespaces/deploy.js +1127 -0
- package/sdk/dist/namespaces/deploy.js.map +1 -0
- package/sdk/dist/namespaces/deploy.types.d.ts +430 -0
- package/sdk/dist/namespaces/deploy.types.d.ts.map +1 -0
- package/sdk/dist/namespaces/deploy.types.js +11 -0
- package/sdk/dist/namespaces/deploy.types.js.map +1 -0
- package/sdk/dist/namespaces/functions.d.ts +11 -3
- package/sdk/dist/namespaces/functions.d.ts.map +1 -1
- package/sdk/dist/namespaces/functions.js +11 -3
- package/sdk/dist/namespaces/functions.js.map +1 -1
- package/sdk/dist/namespaces/functions.types.d.ts +21 -4
- package/sdk/dist/namespaces/functions.types.d.ts.map +1 -1
- package/sdk/dist/node/canonicalize.d.ts +12 -5
- package/sdk/dist/node/canonicalize.d.ts.map +1 -1
- package/sdk/dist/node/canonicalize.js +12 -5
- package/sdk/dist/node/canonicalize.js.map +1 -1
- package/sdk/dist/node/files.d.ts +38 -0
- package/sdk/dist/node/files.d.ts.map +1 -0
- package/sdk/dist/node/files.js +88 -0
- package/sdk/dist/node/files.js.map +1 -0
- package/sdk/dist/node/index.d.ts +5 -3
- package/sdk/dist/node/index.d.ts.map +1 -1
- package/sdk/dist/node/index.js +2 -1
- package/sdk/dist/node/index.js.map +1 -1
- package/sdk/dist/node/sites-node.d.ts +34 -107
- package/sdk/dist/node/sites-node.d.ts.map +1 -1
- package/sdk/dist/node/sites-node.js +84 -353
- package/sdk/dist/node/sites-node.js.map +1 -1
|
@@ -0,0 +1,1127 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* `deploy` namespace — the canonical unified deploy primitive.
|
|
3
|
+
*
|
|
4
|
+
* Three layers exposed:
|
|
5
|
+
* - `apply(spec, opts?)` — one-shot, awaits to completion or terminal failure.
|
|
6
|
+
* - `start(spec, opts?)` — returns a `DeployOperation` with `events()` + `result()`.
|
|
7
|
+
* - `plan` / `upload` / `commit` — low-level steps for CLI and tests.
|
|
8
|
+
*
|
|
9
|
+
* All bytes ride through the CAS content service via presigned PUTs to S3.
|
|
10
|
+
* The wire body to `POST /deploy/v2/plans` carries `ContentRef` objects only —
|
|
11
|
+
* never inline file bytes. When the normalized spec exceeds 5 MB JSON, the
|
|
12
|
+
* SDK uploads the manifest itself as a CAS object and references it.
|
|
13
|
+
*
|
|
14
|
+
* Idempotency is keyed on the gateway-computed manifest digest, not the
|
|
15
|
+
* SDK's local digest. The SDK does not canonicalize for correctness — the
|
|
16
|
+
* gateway is authoritative.
|
|
17
|
+
*
|
|
18
|
+
* See `unified-deploy` and `cas-content` capability specs for normative
|
|
19
|
+
* behavior; this file is the implementation.
|
|
20
|
+
*/
|
|
21
|
+
import { ApiError, Run402DeployError, } from "../errors.js";
|
|
22
|
+
// ─── Constants ───────────────────────────────────────────────────────────────
// Largest JSON body sent inline to POST /deploy/v2/plans; a normalized spec
// that serializes bigger than this is uploaded to CAS and referenced via
// `manifest_ref` instead (see planInternal).
const PLAN_BODY_LIMIT_BYTES = 5 * 1024 * 1024;
// Operation polling cadence: start polling every 1s…
const COMMIT_POLL_INITIAL_MS = 1_000;
// …never poll slower than every 30s…
const COMMIT_POLL_MAX_MS = 30_000;
// …and only start growing the interval (×1.5 per poll) once polling has
// already been running this long.
const COMMIT_POLL_BACKOFF_AFTER_MS = 30_000;
// Give up polling an operation after 10 minutes and surface a retryable error.
const COMMIT_POLL_TIMEOUT_MS = 10 * 60 * 1000;
// Presigned-URL refresh threshold (50 minutes).
// NOTE(review): not referenced anywhere in this chunk — confirm it is used
// later in the file before relying on (or removing) it.
const URL_REFRESH_AT_MS = 50 * 60 * 1000;
// Content type declared when the SDK uploads the manifest itself as a CAS
// object (the oversized-spec path in planInternal).
const MANIFEST_CONTENT_TYPE = "application/vnd.run402.deploy-manifest+json";
// Operation statuses at which polling stops.
const TERMINAL_STATUSES = [
    "ready",
    "failed",
    "rolled_back",
    "needs_repair",
];
// The single terminal status that counts as success.
const SUCCESS_STATUS = "ready";
|
|
37
|
+
// ─── Public class ────────────────────────────────────────────────────────────
|
|
38
|
+
export class Deploy {
    /** Kernel client used for every gateway request this namespace issues. */
    client;
    /**
     * @param {object} client - kernel client exposing `request()` and `fetch`.
     */
    constructor(client) {
        this.client = client;
    }
    /**
     * One-shot deploy: normalize byte sources, plan, upload missing content,
     * commit, then poll the resulting operation until terminal. Throws
     * {@link Run402DeployError} on any state-machine failure.
     */
    async apply(spec, opts = {}) {
        const notify = makeEmitter(opts.onEvent);
        notify({ type: "plan.started" });
        const { plan: planRes, byteReaders: readers } = await planInternal(this.client, spec, opts.idempotencyKey);
        notify({ type: "plan.diff", diff: planRes.diff });
        if (planRes.payment_required) {
            // The kernel's x402-wrapped fetch (Node) handles 402 transparently
            // when the commit happens, so we do not block here. Agents using a
            // sandbox provider without payment auto-handling can intercept the
            // event and resolve before we hit upload.
            const { amount, asset, payTo, reason } = planRes.payment_required;
            notify({ type: "payment.required", amount, asset, payTo, reason });
        }
        await uploadMissing(this.client, spec.project, planRes.missing_content, readers, notify);
        notify({ type: "commit.phase", phase: "validate", status: "started" });
        const committed = await commitInternal(this.client, planRes.plan_id, opts.idempotencyKey);
        return await pollUntilReady(this.client, committed, planRes.diff, notify, spec.project);
    }
    /**
     * Start a resumable deploy operation. Returns a handle exposing the
     * operation id, an event async-iterable, and a result promise.
     */
    start(spec, opts = {}) {
        return startInternal(this.client, spec, opts);
    }
    /**
     * Low-level plan step: normalize the spec (uploading the manifest to CAS
     * when it exceeds the inline limit) and call `POST /deploy/v2/plans`.
     * Returns the plan response plus a byte-reader map keyed by sha256 for
     * use with `upload()`.
     */
    async plan(spec, opts = {}) {
        return planInternal(this.client, spec, opts.idempotencyKey);
    }
    /**
     * Low-level upload step: push every ref the gateway reported missing for
     * `opts.project` into CAS (content-plan → presigned PUTs → finalize).
     * The project id is required so the apikey-gated CAS routes can
     * authenticate.
     */
    async upload(plan, opts) {
        const notify = makeEmitter(opts.onEvent);
        await uploadMissing(this.client, opts.project, plan.missing_content, opts.byteReaders, notify);
    }
    /**
     * Low-level commit step: `POST /deploy/v2/plans/:id/commit`, then poll
     * `/operations/:id` until terminal. Pass `opts.project` so the polling
     * can authenticate — the operations endpoint requires apikey auth even
     * though the plan/commit endpoints accept SIWX.
     */
    async commit(planId, opts = {}) {
        const notify = makeEmitter(opts.onEvent);
        const committed = await commitInternal(this.client, planId, opts.idempotencyKey);
        return await pollUntilReady(this.client, committed, {}, notify, opts.project);
    }
    /**
     * Resume an operation in `schema_settling` or `activation_pending`. The
     * gateway re-runs only the failed phase forward — it never replays SQL.
     * Polls to terminal when `opts.project` is provided (the follow-up
     * polling needs the project's apikey); without it, this returns once the
     * gateway accepts the resume request.
     */
    async resume(operationId, opts = {}) {
        const notify = makeEmitter(opts.onEvent);
        const accepted = await this.client.request(`/deploy/v2/operations/${operationId}/resume`, { method: "POST", context: "resuming deploy operation" });
        return await pollSnapshotUntilReady(this.client, accepted, {}, notify, opts.project);
    }
    /**
     * Snapshot a deploy operation. Requires `opts.project` for apikey auth;
     * when omitted, the request is sent without an apikey header and the
     * gateway will return 401.
     */
    async status(operationId, opts = {}) {
        const headers = opts.project
            ? await apikeyHeaders(this.client, opts.project)
            : {};
        return this.client.request(`/deploy/v2/operations/${operationId}`, { headers, context: "fetching deploy operation" });
    }
    /**
     * Fetch a release by id. (Endpoint may not be live in early v2 builds —
     * the gateway's standard 404 handling applies in that case.)
     */
    async getRelease(releaseId) {
        return this.client.request(`/deploy/v2/releases/${releaseId}`, {
            context: "fetching release",
        });
    }
    /**
     * Diff two releases. (Endpoint may not be live in early v2 builds.)
     */
    async diff(opts) {
        const query = new URLSearchParams({ from: opts.from, to: opts.to });
        return this.client.request(`/deploy/v2/releases/diff?${query}`, {
            context: "diffing releases",
        });
    }
}
|
|
150
|
+
// ─── Internal pipeline ───────────────────────────────────────────────────────
|
|
151
|
+
/**
 * Normalize the spec and create a deploy plan via `POST /deploy/v2/plans`.
 *
 * The wire body carries ContentRef objects only — never inline file bytes.
 * When the inline body would exceed PLAN_BODY_LIMIT_BYTES of JSON, the
 * normalized manifest is uploaded to CAS first and referenced through
 * `manifest_ref`; the body then keeps a minimal `spec` stub (just the
 * project) so the gateway has it for auth + plan persistence.
 *
 * @returns {{ plan: object, byteReaders: Map }} gateway plan response plus
 *   the sha256 → deferred-reader map that `upload` streams from.
 */
async function planInternal(client, spec, idempotencyKey) {
    validateSpec(spec);
    const { normalized, byteReaders } = await normalizeReleaseSpec(client, spec);
    // Appends the optional idempotency key to a request body in place.
    const withKey = (candidate) => {
        if (idempotencyKey) {
            candidate.idempotency_key = idempotencyKey;
        }
        return candidate;
    };
    const encoder = new TextEncoder();
    const inlineBody = withKey({ spec: normalized });
    let body;
    if (encoder.encode(JSON.stringify(inlineBody)).byteLength <= PLAN_BODY_LIMIT_BYTES) {
        body = inlineBody;
    }
    else {
        // Oversized: park the normalized manifest in CAS and reference it.
        const manifestBytes = encoder.encode(JSON.stringify(normalized));
        const manifestRef = await uploadInlineCas(client, spec.project, manifestBytes, MANIFEST_CONTENT_TYPE);
        body = withKey({ spec: { project: spec.project }, manifest_ref: manifestRef });
    }
    let plan;
    try {
        plan = await client.request("/deploy/v2/plans", {
            method: "POST",
            body,
            context: "planning deploy",
        });
    }
    catch (err) {
        throw translateDeployError(err, "plan", null, null);
    }
    return { plan, byteReaders };
}
|
|
190
|
+
/**
 * `POST /deploy/v2/plans/:planId/commit` and return the gateway's commit
 * response. Gateway failures are rethrown as deploy-domain errors via
 * `translateDeployError` with phase "commit".
 *
 * @param {object} client - gateway client exposing `request()`.
 * @param {string} planId - plan to commit; URL-encoded into the path.
 * @param {string|undefined} idempotencyKey - optional commit idempotency key.
 */
async function commitInternal(client, planId, idempotencyKey) {
    const path = `/deploy/v2/plans/${encodeURIComponent(planId)}/commit`;
    const payload = idempotencyKey ? { idempotency_key: idempotencyKey } : {};
    try {
        return await client.request(path, {
            method: "POST",
            body: payload,
            context: "committing deploy",
        });
    }
    catch (err) {
        throw translateDeployError(err, "commit", planId, null);
    }
}
|
|
202
|
+
/**
 * Ensure every ref the deploy plan reported as missing for this project has
 * bytes in CAS. Issues a content-plan, PUTs bytes to the returned presigned
 * URLs, completes each upload session, and finalizes the content plan.
 *
 * Per-session completion (the /storage/v1/uploads/:id/complete call) promotes
 * the staged object to CAS; the final /content/v1/plans/:id/commit call is
 * the plan-level idempotency anchor.
 */
async function uploadMissing(client, projectId, presence, byteReaders, emit) {
    // Only refs the gateway reported as absent need bytes.
    const missingRefs = presence.filter((entry) => !entry.present);
    if (missingRefs.length === 0) {
        return;
    }
    // CAS routes are apikey-gated; resolve the project's key once and reuse
    // it for every request below.
    const headers = await apikeyHeaders(client, projectId);
    // Ask the content service for an upload session per missing ref.
    const contentPlan = await client.request("/content/v1/plans", {
        method: "POST",
        headers,
        body: {
            content: missingRefs.map((entry) => ({
                sha256: entry.sha256,
                size: entry.size,
                content_type: byteReaders.get(entry.sha256)?.contentType,
            })),
        },
        context: "planning content upload",
    });
    const total = contentPlan.missing.length;
    let uploaded = 0;
    for (const session of contentPlan.missing) {
        const reader = byteReaders.get(session.sha256);
        if (!reader) {
            throw new Run402DeployError(`internal: no local byte reader for sha ${session.sha256.slice(0, 12)}…`, {
                code: "CONTENT_UPLOAD_FAILED",
                phase: "upload",
                retryable: false,
                context: "uploading deploy bytes",
            });
        }
        await uploadOne(client.fetch, session, await reader());
        // Per-session completion — promotes the staged object to CAS through
        // the /storage/v1/uploads/:id/complete handler, which understands
        // `kind='cas'` sessions and writes the internal.content_objects +
        // internal.plan_claims rows the deploy commit's FK constraints rely
        // on.
        // TODO: collect part ETags during uploadOne so true multipart
        // completion (mode === "multipart" with >1 part) can send them here;
        // for the common single-PUT case the body is empty.
        await client.request(`/storage/v1/uploads/${encodeURIComponent(session.upload_id)}/complete`, {
            method: "POST",
            headers,
            body: {},
            context: "completing content upload session",
        });
        uploaded += 1;
        emit({
            type: "content.upload.progress",
            label: reader.label ?? session.sha256,
            sha256: session.sha256,
            done: uploaded,
            total,
        });
    }
    // Plan-level finalize — marks the plan committed; per-session promotion
    // already happened in the loop above.
    await client.request(`/content/v1/plans/${encodeURIComponent(contentPlan.plan_id)}/commit`, { method: "POST", headers, body: {}, context: "committing content upload" });
}
|
|
273
|
+
/**
 * PUT one upload session's bytes to its presigned part URL(s).
 *
 * Single mode sends the whole object in one PUT with the known full-object
 * sha256 (hex → base64); multipart mode hashes each part's byte range
 * independently. Part byte ranges are inclusive, hence the `+ 1`.
 */
async function uploadOne(fetchFn, entry, bytes) {
    // Byte ranges from the gateway are inclusive on both ends.
    const sliceFor = (part) => bytes.subarray(part.byte_start, part.byte_end + 1);
    if (entry.mode !== "single") {
        // Multipart: each part carries its own checksum over its own range.
        for (const part of entry.parts) {
            const chunk = sliceFor(part);
            await putToS3(fetchFn, part.url, chunk, await sha256Base64(chunk), part.part_number);
        }
        return;
    }
    if (entry.parts.length !== 1) {
        throw new Run402DeployError(`internal: single-mode upload for ${entry.sha256.slice(0, 12)}… returned ${entry.parts.length} parts`, {
            code: "CONTENT_UPLOAD_FAILED",
            phase: "upload",
            retryable: false,
            context: "uploading deploy bytes",
        });
    }
    // Single PUT: the whole-object checksum is the hex digest we already
    // know, re-encoded as base64.
    const [onlyPart] = entry.parts;
    await putToS3(fetchFn, onlyPart.url, sliceFor(onlyPart), base64FromHex(entry.sha256), onlyPart.part_number);
}
|
|
295
|
+
/**
 * PUT one blob (or one multipart part) to a presigned S3 URL.
 *
 * @param {Function} fetchFn - fetch implementation (the kernel's wrapped fetch).
 * @param {string} url - presigned PUT URL from the content service.
 * @param {Uint8Array} body - the bytes for this part.
 * @param {string} checksumBase64 - base64 SHA-256 of `body`.
 * @param {number} partNumber - 1-based part index, used in error messages.
 * @throws {Run402DeployError} CONTENT_UPLOAD_FAILED — retryable on network
 *   failure; on non-2xx, retryable for 5xx and 403.
 */
async function putToS3(fetchFn, url, body, checksumBase64, partNumber) {
    // The gateway issues SigV4 presigned URLs with `ChecksumSHA256` set on
    // PutObjectCommand / UploadPartCommand. The AWS SDK (v3) encodes that
    // value as the `x-amz-checksum-sha256` query parameter and only signs
    // `host` + `content-length`. If we ALSO send it as a request header, S3
    // returns 403 "There were headers present in the request which were not
    // signed: x-amz-checksum-sha256" because the header isn't in the
    // SigV4-signed list.
    //
    // So: only send the header when the URL doesn't already encode the
    // checksum as a query param. This keeps us compatible with both
    // signing styles (query-param-encoded, the default for AWS SDK v3, and
    // header-signed, which an older signer might still produce).
    let urlHasChecksum = false;
    try {
        urlHasChecksum = new URL(url).searchParams.has("x-amz-checksum-sha256");
    }
    catch {
        // Unparseable URL — fall through and send the header; the PUT below
        // will surface any real problem.
    }
    const headers = {};
    if (!urlHasChecksum) {
        headers["x-amz-checksum-sha256"] = checksumBase64;
    }
    let res;
    try {
        res = await fetchFn(url, {
            method: "PUT",
            headers,
            body: body,
        });
    }
    catch (err) {
        throw new Run402DeployError(`S3 PUT failed for part ${partNumber}: ${err.message}`, {
            code: "CONTENT_UPLOAD_FAILED",
            phase: "upload",
            retryable: true,
            context: "uploading deploy bytes",
        });
    }
    if (!res.ok) {
        const text = await res.text().catch(() => "");
        throw new Run402DeployError(`S3 PUT failed for part ${partNumber} (HTTP ${res.status})${text ? ": " + text.slice(0, 200) : ""}`, {
            code: "CONTENT_UPLOAD_FAILED",
            phase: "upload",
            // NOTE(review): 403 is treated as retryable — presumably to cover
            // expired presigned URLs; confirm against the gateway retry policy.
            retryable: res.status >= 500 || res.status === 403,
            status: res.status,
            body: text,
            context: "uploading deploy bytes",
        });
    }
}
|
|
349
|
+
/**
 * Resolve a commit response to a terminal DeployResult.
 *
 * A commit may come back already terminal ("failed" throws, "ready" returns
 * immediately); any other status means the operation is still running, so we
 * fetch its snapshot and hand off to the polling loop.
 *
 * @param {string|undefined} projectId - when set, polling authenticates with
 *   this project's apikey (the operations endpoint requires it).
 */
async function pollUntilReady(client, commit, diff, emit, projectId) {
    switch (commit.status) {
        case "failed":
            throw translateGatewayError(commit.error, "commit", null, commit.operation_id);
        case "ready": {
            // Synchronous success — no polling required.
            if (!commit.release_id || !commit.urls) {
                throw new Run402DeployError("Commit returned ready but no release_id/urls", {
                    code: "INTERNAL_ERROR",
                    phase: "ready",
                    retryable: false,
                    operationId: commit.operation_id,
                    context: "committing deploy",
                });
            }
            emit({ type: "ready", releaseId: commit.release_id, urls: commit.urls });
            return {
                release_id: commit.release_id,
                operation_id: commit.operation_id,
                urls: commit.urls,
                diff,
            };
        }
        default: {
            // Non-terminal — fetch the operation snapshot and poll it.
            const headers = projectId ? await apikeyHeaders(client, projectId) : {};
            const firstSnapshot = await client.request(`/deploy/v2/operations/${encodeURIComponent(commit.operation_id)}`, { headers, context: "fetching deploy operation" });
            return await pollSnapshotUntilReady(client, firstSnapshot, diff, emit, projectId);
        }
    }
}
|
|
375
|
+
/**
 * Poll `/deploy/v2/operations/:id` until the operation reaches a terminal
 * status. Emits a `commit.phase` event each time the status changes (when
 * the status maps to a user-facing phase), a `ready` event on success, and
 * throws on failure statuses or timeout.
 *
 * Polling starts at COMMIT_POLL_INITIAL_MS and grows ×1.5 (capped at
 * COMMIT_POLL_MAX_MS) once COMMIT_POLL_BACKOFF_AFTER_MS has elapsed; the
 * whole loop gives up after COMMIT_POLL_TIMEOUT_MS.
 */
async function pollSnapshotUntilReady(client, initial, diff, emit, projectId) {
    const opHeaders = projectId ? await apikeyHeaders(client, projectId) : {};
    // Map gateway operation statuses onto user-facing commit.phase events.
    const phaseEventFor = (status) => {
        switch (status) {
            case "staging":
                return { type: "commit.phase", phase: "stage", status: "started" };
            case "gating":
                return { type: "commit.phase", phase: "migrate-gate", status: "started" };
            case "migrating":
                return { type: "commit.phase", phase: "migrate", status: "started" };
            case "schema_settling":
                return { type: "commit.phase", phase: "schema-settle", status: "started" };
            case "activating":
                return { type: "commit.phase", phase: "activate", status: "started" };
            case "activation_pending":
                return { type: "commit.phase", phase: "activate", status: "failed" };
            default:
                return null;
        }
    };
    const startedAt = Date.now();
    let waitMs = COMMIT_POLL_INITIAL_MS;
    let lastStatusSeen = null;
    let snapshot = initial;
    for (;;) {
        // Announce each status transition at most once.
        if (lastStatusSeen !== snapshot.status) {
            const phaseEvent = phaseEventFor(snapshot.status);
            if (phaseEvent) {
                emit(phaseEvent);
            }
            lastStatusSeen = snapshot.status;
        }
        if (snapshot.status === SUCCESS_STATUS) {
            if (!snapshot.release_id || !snapshot.urls) {
                throw new Run402DeployError("Operation reached ready but no release_id/urls available", {
                    code: "INTERNAL_ERROR",
                    phase: "ready",
                    retryable: false,
                    operationId: snapshot.operation_id,
                    context: "polling deploy",
                });
            }
            emit({ type: "ready", releaseId: snapshot.release_id, urls: snapshot.urls });
            return {
                release_id: snapshot.release_id,
                operation_id: snapshot.operation_id,
                urls: snapshot.urls,
                diff,
            };
        }
        if (TERMINAL_STATUSES.includes(snapshot.status)) {
            throw translateGatewayError(snapshot.error, snapshot.status, snapshot.plan_id, snapshot.operation_id);
        }
        if (Date.now() - startedAt > COMMIT_POLL_TIMEOUT_MS) {
            throw new Run402DeployError(`Timed out waiting for operation ${snapshot.operation_id} to reach ready`, {
                code: "INTERNAL_ERROR",
                phase: snapshot.status,
                retryable: true,
                operationId: snapshot.operation_id,
                status: 504,
                context: "polling deploy",
            });
        }
        await sleep(waitMs);
        // Only begin backing off once polling has been running for a while.
        if (Date.now() - startedAt > COMMIT_POLL_BACKOFF_AFTER_MS) {
            waitMs = Math.min(Math.floor(waitMs * 1.5), COMMIT_POLL_MAX_MS);
        }
        snapshot = await client.request(`/deploy/v2/operations/${encodeURIComponent(snapshot.operation_id)}`, { headers: opHeaders, context: "polling deploy operation" });
    }
}
|
|
445
|
+
// ─── start() implementation ──────────────────────────────────────────────────
|
|
446
|
+
/**
 * Implementation behind `Deploy.start()`: kick off a deploy and return a
 * handle with `id`, `snapshot()`, `result()` and an `events()`
 * async-iterable.
 *
 * Planning runs eagerly (this function awaits the plan before returning);
 * upload/commit/poll run in a background promise exposed via `result()`.
 * Every event is buffered so `events()` iterators created later still replay
 * the full history from `plan.started` onward.
 */
async function startInternal(client, spec, opts) {
    // Full event history, in order — replayed to every new iterator.
    const buffered = [];
    // Live sinks, one per active events() iterator.
    const subscribers = [];
    const emit = (event) => {
        buffered.push(event);
        if (opts.onEvent) {
            try {
                opts.onEvent(event);
            }
            catch {
                /* swallow */
            }
        }
        // Subscriber callbacks must never break the pipeline.
        for (const fn of subscribers) {
            try {
                fn(event);
            }
            catch {
                /* swallow */
            }
        }
    };
    emit({ type: "plan.started" });
    const { plan, byteReaders } = await planInternal(client, spec, opts.idempotencyKey);
    emit({ type: "plan.diff", diff: plan.diff });
    if (plan.payment_required) {
        emit({
            type: "payment.required",
            amount: plan.payment_required.amount,
            asset: plan.payment_required.asset,
            payTo: plan.payment_required.payTo,
            reason: plan.payment_required.reason,
        });
    }
    // Upload + commit + poll run in the background; result() returns this
    // same promise to the caller.
    const resultPromise = (async () => {
        await uploadMissing(client, spec.project, plan.missing_content, byteReaders, emit);
        emit({ type: "commit.phase", phase: "validate", status: "started" });
        const commit = await commitInternal(client, plan.plan_id, opts.idempotencyKey);
        return await pollUntilReady(client, commit, plan.diff, emit, spec.project);
    })();
    // Avoid an unhandled-rejection at construction time. Consumers must call
    // .result() to actually observe the error.
    resultPromise.catch(() => { });
    // snapshot() caches the last fetched snapshot once it is terminal, so
    // repeated calls after completion stop hitting the gateway.
    let snapshot = null;
    const startHeaders = await apikeyHeaders(client, spec.project);
    const fetchSnapshot = async () => {
        if (snapshot && TERMINAL_STATUSES.includes(snapshot.status))
            return snapshot;
        snapshot = await client.request(`/deploy/v2/operations/${encodeURIComponent(plan.operation_id)}`, { headers: startHeaders, context: "fetching deploy operation" });
        return snapshot;
    };
    return {
        id: plan.operation_id,
        async snapshot() {
            return fetchSnapshot();
        },
        async result() {
            return resultPromise;
        },
        events() {
            return {
                [Symbol.asyncIterator]() {
                    // Each iterator starts from the buffered history, then
                    // follows live events via its subscriber.
                    const queue = [...buffered];
                    // Pending next() resolver, set only while the queue is
                    // empty and the iterator is not done.
                    let resolveNext = null;
                    let done = false;
                    const subscriber = (ev) => {
                        // Hand the event straight to a pending waiter if one
                        // exists; otherwise queue it for the next next() call.
                        const waiter = resolveNext;
                        if (waiter) {
                            resolveNext = null;
                            waiter({ value: ev, done: false });
                        }
                        else {
                            queue.push(ev);
                        }
                        if (ev.type === "ready") {
                            done = true;
                            // The next() loop checks `done` after queue drain, so a
                            // pending waiter that was just satisfied above will see
                            // `done` on its next call. No second wake-up needed here.
                        }
                    };
                    subscribers.push(subscriber);
                    // Surface terminal failure as iterator end.
                    resultPromise.catch(() => {
                        done = true;
                        if (resolveNext) {
                            const r = resolveNext;
                            resolveNext = null;
                            r({ value: undefined, done: true });
                        }
                    });
                    return {
                        next() {
                            // Drain history/queued events before consulting `done`.
                            if (queue.length > 0) {
                                return Promise.resolve({ value: queue.shift(), done: false });
                            }
                            if (done) {
                                return Promise.resolve({
                                    value: undefined,
                                    done: true,
                                });
                            }
                            // Nothing buffered — park until the subscriber or the
                            // failure handler wakes us.
                            return new Promise((resolve) => {
                                resolveNext = resolve;
                            });
                        },
                        return() {
                            // Early exit (break / explicit .return()): detach this
                            // iterator from live events.
                            done = true;
                            const idx = subscribers.indexOf(subscriber);
                            if (idx >= 0)
                                subscribers.splice(idx, 1);
                            return Promise.resolve({
                                value: undefined,
                                done: true,
                            });
                        },
                    };
                },
            };
        },
    };
}
|
|
568
|
+
/**
 * Guard the user-supplied ReleaseSpec before any network work happens.
 *
 * @throws {Run402DeployError} INVALID_SPEC when the spec is not an object or
 *   lacks a string `project`; SUBDOMAIN_MULTI_NOT_SUPPORTED when more than
 *   one subdomain is requested.
 */
function validateSpec(spec) {
    // All validation failures share retryable=false + the same context.
    const fail = (message, extra) => {
        throw new Run402DeployError(message, {
            retryable: false,
            context: "validating spec",
            ...extra,
        });
    };
    if (!spec || typeof spec !== "object") {
        fail("ReleaseSpec must be an object", { code: "INVALID_SPEC" });
    }
    if (typeof spec.project !== "string" || !spec.project) {
        fail("ReleaseSpec.project is required", { code: "INVALID_SPEC" });
    }
    const requested = spec.subdomains?.set;
    if (requested && requested.length > 1) {
        fail("subdomains.set accepts at most one subdomain per project; multi-subdomain support is not yet available", {
            code: "SUBDOMAIN_MULTI_NOT_SUPPORTED",
            resource: "subdomains.set",
        });
    }
}
|
|
592
|
+
/**
 * Turn a user-facing ReleaseSpec into the wire-format spec the gateway
 * expects, resolving byte sources into ContentRefs and remembering a reader
 * for each distinct sha256 so `upload` can stream the bytes later.
 *
 * @returns {{ normalized: object, byteReaders: Map }} wire spec plus a map
 *   of sha256 → deferred reader (carrying optional contentType/label).
 */
async function normalizeReleaseSpec(client, spec) {
    const byteReaders = new Map();
    // Registers a resolved byte source and returns its ContentRef. Callers may
    // set ref.contentType *after* resolveContent returns (normalizeFileSet
    // derives it from the path extension), so the reader's content-type is
    // back-filled here, at remember time.
    const remember = (resolved) => {
        const { ref, reader } = resolved;
        if (ref.contentType && !reader.contentType) {
            reader.contentType = ref.contentType;
        }
        if (!byteReaders.has(ref.sha256)) {
            byteReaders.set(ref.sha256, reader);
        }
        else {
            // Duplicate sha — keep the first reader, but back-fill a newly
            // learned content type onto it.
            const existing = byteReaders.get(ref.sha256);
            if (ref.contentType && !existing.contentType) {
                existing.contentType = ref.contentType;
            }
        }
        return ref;
    };
    const normalized = { project: spec.project };
    // Pass-through sections, copied verbatim when present (stable key order).
    for (const key of ["base", "subdomains", "routes", "checks", "secrets"]) {
        if (spec[key]) {
            normalized[key] = spec[key];
        }
    }
    if (spec.database) {
        const db = {};
        if (spec.database.expose) {
            db.expose = spec.database.expose;
        }
        // Explicit boolean check so `zero_downtime: false` is preserved.
        if (typeof spec.database.zero_downtime === "boolean") {
            db.zero_downtime = spec.database.zero_downtime;
        }
        const migrations = spec.database.migrations;
        if (migrations && migrations.length > 0) {
            db.migrations = await Promise.all(migrations.map((m) => normalizeMigration(client, spec.project, m, remember)));
        }
        normalized.database = db;
    }
    if (spec.functions) {
        const fns = {};
        if (spec.functions.replace) {
            fns.replace = await normalizeFunctionMap(spec.functions.replace, remember);
        }
        if (spec.functions.patch) {
            fns.patch = {};
            if (spec.functions.patch.set) {
                fns.patch.set = await normalizeFunctionMap(spec.functions.patch.set, remember);
            }
            if (spec.functions.patch.delete) {
                fns.patch.delete = spec.functions.patch.delete;
            }
        }
        normalized.functions = fns;
    }
    if (spec.site) {
        if ("replace" in spec.site && spec.site.replace) {
            normalized.site = { replace: await normalizeFileSet(spec.site.replace, remember) };
        }
        else if ("patch" in spec.site && spec.site.patch) {
            const patch = {};
            if (spec.site.patch.put) {
                patch.put = await normalizeFileSet(spec.site.patch.put, remember);
            }
            if (spec.site.patch.delete) {
                patch.delete = spec.site.patch.delete;
            }
            normalized.site = { patch };
        }
    }
    return { normalized, byteReaders };
}
|
|
670
|
+
/**
 * Normalize every entry of a name → FunctionSpec map.
 * Entries are processed sequentially so `remember` observes byte
 * sources in a deterministic order.
 */
async function normalizeFunctionMap(map, remember) {
    const normalized = {};
    for (const [fnName, spec] of Object.entries(map)) {
        normalized[fnName] = await normalizeFunction(spec, remember);
    }
    return normalized;
}
|
|
677
|
+
/**
 * Normalize a single FunctionSpec: apply the default runtime, copy the
 * optional scalar fields, and resolve any byte sources (the source
 * bundle and extra files) into ContentRefs via `remember`.
 */
async function normalizeFunction(fn, remember) {
    const normalized = { runtime: fn.runtime ?? "node22" };
    if (fn.config) {
        normalized.config = fn.config;
    }
    if (fn.schedule !== undefined) {
        normalized.schedule = fn.schedule;
    }
    if (fn.entrypoint) {
        normalized.entrypoint = fn.entrypoint;
    }
    if (fn.source !== undefined) {
        const resolved = await resolveContent(fn.source, "function source");
        normalized.source = remember(resolved);
    }
    if (fn.files) {
        normalized.files = await normalizeFileSet(fn.files, remember);
    }
    return normalized;
}
|
|
696
|
+
/**
 * Normalize a path → ByteSource map into a path → ContentRef map,
 * guessing a content type from the file extension when the resolved
 * source did not carry one.
 */
async function normalizeFileSet(set, remember) {
    const refs = {};
    for (const [filePath, source] of Object.entries(set)) {
        const resolved = await resolveContent(source, filePath);
        if (!resolved.ref.contentType) {
            resolved.ref.contentType = guessContentType(filePath);
        }
        refs[filePath] = remember(resolved);
    }
    return refs;
}
|
|
707
|
+
/**
 * Normalize a MigrationSpec into its wire form
 * `{ id, checksum, sql_ref, transaction? }`, registering inline SQL
 * bytes with `remember` so the upload phase can push them to CAS.
 *
 * @throws Run402DeployError (INVALID_SPEC) when `id` is missing or when
 *   neither `sql` nor `sql_ref` is provided.
 */
async function normalizeMigration(client, projectId, m, remember) {
    // client / projectId are reserved for a future content-presence
    // preflight. The original placed these `void` statements AFTER the
    // return, making them unreachable; they must run before returning to
    // actually mark the parameters as intentionally unused.
    void client;
    void projectId;
    if (!m.id) {
        throw new Run402DeployError("MigrationSpec.id is required", {
            code: "INVALID_SPEC",
            resource: "database.migrations",
            retryable: false,
            context: "validating spec",
        });
    }
    let sql_ref;
    let checksum;
    if (m.sql_ref) {
        // Pre-resolved ref: trust the caller's checksum, defaulting to the
        // ref's own sha256.
        sql_ref = m.sql_ref;
        checksum = m.checksum ?? m.sql_ref.sha256;
    }
    else if (m.sql !== undefined) {
        // Inline SQL: hash the UTF-8 bytes and remember a reader so the
        // upload phase can send them if the gateway reports them missing.
        const bytes = new TextEncoder().encode(m.sql);
        const sha256 = await sha256Hex(bytes);
        const ref = { sha256, size: bytes.byteLength, contentType: "application/sql" };
        remember({ ref, reader: makeBytesReader(bytes, `migration:${m.id}`) });
        sql_ref = ref;
        checksum = m.checksum ?? sha256;
    }
    else {
        throw new Run402DeployError(`MigrationSpec ${m.id} must include sql or sql_ref`, {
            code: "INVALID_SPEC",
            resource: `database.migrations.${m.id}`,
            retryable: false,
            context: "validating spec",
        });
    }
    const out = { id: m.id, checksum, sql_ref };
    if (m.transaction)
        out.transaction = m.transaction;
    return out;
}
|
|
746
|
+
// ─── Content source resolution ───────────────────────────────────────────────
|
|
747
|
+
/**
 * Resolve any accepted ByteSource shape into `{ ref, reader }`:
 * a ContentRef (sha256/size/contentType) plus an async reader that
 * yields the underlying bytes on demand.
 *
 * Dispatch order matters: the ContentRef and `{ data }` wrapper checks
 * must run before the raw-bytes checks, since those objects would
 * otherwise fall through to the final "unsupported" error.
 *
 * @param {*} source - string | Uint8Array | ArrayBuffer | Blob |
 *   ReadableStream | FsFileSource | ContentRef | { data, contentType? }.
 * @param {string} label - human-readable name used in error messages
 *   and reader labels.
 * @throws Run402DeployError (INVALID_SPEC) for unrecognized sources.
 */
async function resolveContent(source, label) {
    // Pre-resolved ContentRef — pass through, no reader needed (caller is
    // responsible for ensuring the bytes are already in CAS).
    if (isContentRef(source)) {
        return {
            ref: { ...source },
            reader: makeUnreadableReader(source.sha256, label),
        };
    }
    // { data, contentType } wrapper — recurse into data, override contentType.
    // Every other object shape is explicitly excluded first so a Blob or
    // stream that happens to have a `data` property is not misclassified.
    if (typeof source === "object" &&
        source !== null &&
        !Array.isArray(source) &&
        !(source instanceof Uint8Array) &&
        !(source instanceof ArrayBuffer) &&
        !(typeof Blob !== "undefined" && source instanceof Blob) &&
        !isReadableStream(source) &&
        !isFsFileSource(source) &&
        "data" in source) {
        const inner = await resolveContent(source.data, label);
        if (source.contentType) {
            inner.ref.contentType = source.contentType;
        }
        return inner;
    }
    if (isFsFileSource(source)) {
        return await resolveFsFile(source, label);
    }
    // Strings are hashed as their UTF-8 encoding.
    if (typeof source === "string") {
        const bytes = new TextEncoder().encode(source);
        return makeMemResolved(bytes, undefined, label);
    }
    if (source instanceof Uint8Array) {
        return makeMemResolved(source, undefined, label);
    }
    if (source instanceof ArrayBuffer) {
        return makeMemResolved(new Uint8Array(source), undefined, label);
    }
    if (typeof Blob !== "undefined" && source instanceof Blob) {
        const bytes = new Uint8Array(await source.arrayBuffer());
        // Blob.type is "" when unknown — normalize that to undefined.
        const ct = source.type && source.type.length > 0 ? source.type : undefined;
        return makeMemResolved(bytes, ct, label);
    }
    // Streams are fully buffered: the ref needs the total size and hash.
    if (isReadableStream(source)) {
        const bytes = await readStreamFully(source);
        return makeMemResolved(bytes, undefined, label);
    }
    throw new Run402DeployError(`Unsupported byte source for ${label}`, {
        code: "INVALID_SPEC",
        resource: label,
        retryable: false,
        context: "normalizing byte sources",
    });
}
|
|
801
|
+
/**
 * Build a resolved entry for bytes already held in memory: hash them
 * into a ContentRef and pair it with a reader that returns the same
 * bytes.
 */
async function makeMemResolved(bytes, contentType, label) {
    const digest = await sha256Hex(bytes);
    const ref = { sha256: digest, size: bytes.byteLength };
    if (contentType) {
        ref.contentType = contentType;
    }
    const reader = makeBytesReader(bytes, label);
    return { ref, reader };
}
|
|
808
|
+
/**
 * Resolve an FsFileSource by reading it from disk: hash the bytes for
 * the ContentRef and return a reader that re-reads the file on demand
 * (so large files are not pinned in memory between phases).
 *
 * @throws Run402DeployError (INVALID_SPEC) when `node:fs/promises`
 *   cannot be imported, i.e. outside a Node runtime.
 */
async function resolveFsFile(source, label) {
    // Lazy import — keeps the root SDK V8-isolate-safe. fileSetFromDir lives
    // in `@run402/sdk/node`, so any `FsFileSource` we see here must be in a
    // Node runtime where `node:fs/promises` resolves.
    let fsMod;
    try {
        fsMod = (await import("node:fs/promises"));
    }
    catch {
        throw new Run402DeployError("FsFileSource is only supported in Node runtimes (received in a non-Node environment)", {
            code: "INVALID_SPEC",
            resource: label,
            retryable: false,
            context: "normalizing byte sources",
        });
    }
    // Wrap the Buffer in a Uint8Array view without copying.
    const readBytes = async () => {
        const buf = await fsMod.readFile(source.path);
        return new Uint8Array(buf.buffer, buf.byteOffset, buf.byteLength);
    };
    const bytes = await readBytes();
    const ref = { sha256: await sha256Hex(bytes), size: bytes.byteLength };
    if (source.contentType) {
        ref.contentType = source.contentType;
    }
    return { ref, reader: Object.assign(readBytes, { label }) };
}
|
|
838
|
+
/**
 * Wrap in-memory bytes in an async reader function tagged with a
 * human-readable `label` and, optionally, a `contentType`.
 */
function makeBytesReader(bytes, label, contentType) {
    const read = async () => bytes;
    const tags = contentType ? { label, contentType } : { label };
    return Object.assign(read, tags);
}
|
|
845
|
+
/**
 * Reader for a pre-resolved ContentRef: there are no local bytes, so
 * invoking it means the gateway asked us to upload content we never
 * had. Always throws a non-retryable CONTENT_UPLOAD_FAILED error.
 */
function makeUnreadableReader(sha256, label) {
    const fail = async () => {
        throw new Run402DeployError(`ContentRef ${sha256.slice(0, 12)}… was passed pre-resolved but the gateway reports it missing — provide bytes inline instead`, {
            code: "CONTENT_UPLOAD_FAILED",
            resource: label,
            retryable: false,
            context: "uploading deploy bytes",
        });
    };
    fail.label = label;
    return fail;
}
|
|
857
|
+
// ─── Manifest-ref CAS upload (bypasses the upload phase loop) ────────────────
|
|
858
|
+
/**
 * Upload a single in-memory blob straight to CAS, bypassing the normal
 * deploy upload-phase loop: plan the content, and only if the gateway
 * reports it missing, upload + complete the session + commit the plan.
 *
 * @returns {{sha256: string, size: number, contentType: *}} a ContentRef
 *   for the (now present) bytes.
 */
async function uploadInlineCas(client, projectId, bytes, contentType) {
    const sha256 = await sha256Hex(bytes);
    // /content/v1/plans is apikey-gated (see apikeyHeaders).
    const headers = await apikeyHeaders(client, projectId);
    const planRes = await client.request("/content/v1/plans", {
        method: "POST",
        headers,
        body: {
            content: [{ sha256, size: bytes.byteLength, content_type: contentType }],
        },
        context: "planning content upload",
    });
    // A non-empty `missing` list means CAS doesn't have the blob yet;
    // with one content item there is at most one session.
    if (planRes.missing.length > 0) {
        const session = planRes.missing[0];
        await uploadOne(client.fetch, session, bytes);
        // Per-session promotion to CAS (see uploadMissing for the rationale).
        await client.request(`/storage/v1/uploads/${encodeURIComponent(session.upload_id)}/complete`, {
            method: "POST",
            headers,
            body: {},
            context: "completing content upload session",
        });
        await client.request(`/content/v1/plans/${encodeURIComponent(planRes.plan_id)}/commit`, { method: "POST", headers, body: {}, context: "committing content upload" });
    }
    return { sha256, size: bytes.byteLength, contentType };
}
|
|
883
|
+
// ─── Helpers ─────────────────────────────────────────────────────────────────
|
|
884
|
+
/**
 * Build the apikey header set for a project. The v1.34 gateway's
 * `/deploy/v2/operations/:id*` and `/content/v1/plans*` routes require
 * `apikey: <project.anon_key>` (apikeyAuth middleware); plan + commit on
 * `/deploy/v2/plans*` use SIWX, which the kernel's getAuth provides
 * automatically, so only the apikey-gated paths need this helper.
 *
 * When the credentials provider doesn't know the project, an empty
 * header set is returned; the request then goes out without an apikey
 * and the gateway rejects it with 401, matching the failure mode of the
 * other apikey-auth tools for unconfigured projects.
 */
async function apikeyHeaders(client, projectId) {
    const project = await client.getProject(projectId);
    return project ? { apikey: project.anon_key } : {};
}
|
|
902
|
+
/**
 * Wrap an optional progress callback so emitting is always safe: a
 * missing callback becomes a no-op, and a throwing callback never
 * propagates into the deploy state machine.
 */
function makeEmitter(cb) {
    if (!cb)
        return () => { };
    const safeEmit = (event) => {
        try {
            cb(event);
        }
        catch {
            // Progress callbacks must never break the deploy.
        }
    };
    return safeEmit;
}
|
|
914
|
+
/**
 * True when `source` already looks like a ContentRef ({ sha256, size })
 * rather than raw bytes. Objects carrying `data` or `__source` are
 * wrappers / file sources, never refs.
 */
function isContentRef(source) {
    if (typeof source !== "object" || source === null) {
        return false;
    }
    if ("data" in source || "__source" in source) {
        return false;
    }
    return typeof source.sha256 === "string" && typeof source.size === "number";
}
|
|
922
|
+
/**
 * True when `source` is the Node-only fs-file marker object
 * (`{ __source: "fs-file", path }`).
 */
function isFsFileSource(source) {
    if (typeof source !== "object" || source === null) {
        return false;
    }
    return source.__source === "fs-file" && typeof source.path === "string";
}
|
|
928
|
+
/**
 * Duck-type check for a WHATWG ReadableStream: any object exposing both
 * `getReader` and `tee` functions.
 */
function isReadableStream(source) {
    if (typeof source !== "object" || source === null) {
        return false;
    }
    return typeof source.getReader === "function" && typeof source.tee === "function";
}
|
|
934
|
+
/**
 * Drain a ReadableStream of Uint8Array chunks into one contiguous
 * Uint8Array.
 */
async function readStreamFully(stream) {
    const reader = stream.getReader();
    const parts = [];
    let size = 0;
    for (;;) {
        const { value, done } = await reader.read();
        if (done) {
            break;
        }
        if (value) {
            parts.push(value);
            size += value.byteLength;
        }
    }
    const merged = new Uint8Array(size);
    let offset = 0;
    for (const part of parts) {
        merged.set(part, offset);
        offset += part.byteLength;
    }
    return merged;
}
|
|
955
|
+
async function sha256Hex(bytes) {
|
|
956
|
+
const buf = await crypto.subtle.digest("SHA-256", bytes);
|
|
957
|
+
return Array.from(new Uint8Array(buf))
|
|
958
|
+
.map((b) => b.toString(16).padStart(2, "0"))
|
|
959
|
+
.join("");
|
|
960
|
+
}
|
|
961
|
+
/** Base64-encoded SHA-256 digest of `bytes`, computed via WebCrypto. */
async function sha256Base64(bytes) {
    const digest = await crypto.subtle.digest("SHA-256", bytes);
    const raw = new Uint8Array(digest);
    return base64FromBytes(raw);
}
|
|
965
|
+
/**
 * Convert a hex digest string to its base64 encoding.
 * Assumes `hex` has even length and contains only hex digits.
 */
function base64FromHex(hex) {
    const bytes = new Uint8Array(hex.length / 2);
    for (let i = 0; i < bytes.byteLength; i++) {
        // String.prototype.substr is deprecated (Annex B) — slice the two
        // hex digits instead.
        bytes[i] = parseInt(hex.slice(i * 2, i * 2 + 2), 16);
    }
    return base64FromBytes(bytes);
}
|
|
972
|
+
/**
 * Base64-encode raw bytes. Uses Node's Buffer when present, falling
 * back to btoa for browser / isolate runtimes.
 */
function base64FromBytes(bytes) {
    if (typeof Buffer !== "undefined") {
        return Buffer.from(bytes).toString("base64");
    }
    const chars = Array.from(bytes, (b) => String.fromCharCode(b));
    return btoa(chars.join(""));
}
|
|
981
|
+
/** Resolve after approximately `ms` milliseconds. */
function sleep(ms) {
    return new Promise((done) => {
        setTimeout(done, ms);
    });
}
|
|
984
|
+
// Extension → MIME type table for static-site uploads. Frozen: it is a
// shared module-level constant and nothing in this module mutates it.
const CONTENT_TYPE_BY_EXT = Object.freeze({
    html: "text/html; charset=utf-8",
    htm: "text/html; charset=utf-8",
    css: "text/css; charset=utf-8",
    js: "text/javascript; charset=utf-8",
    mjs: "text/javascript; charset=utf-8",
    cjs: "text/javascript; charset=utf-8",
    json: "application/json",
    svg: "image/svg+xml",
    png: "image/png",
    jpg: "image/jpeg",
    jpeg: "image/jpeg",
    gif: "image/gif",
    webp: "image/webp",
    ico: "image/x-icon",
    woff: "font/woff",
    woff2: "font/woff2",
    ttf: "font/ttf",
    otf: "font/otf",
    txt: "text/plain; charset=utf-8",
    md: "text/markdown; charset=utf-8",
    xml: "application/xml",
    pdf: "application/pdf",
    wasm: "application/wasm",
    sql: "application/sql",
});
/**
 * Guess a Content-Type from a path's extension; defaults to
 * application/octet-stream for unknown or missing extensions.
 *
 * Uses Object.hasOwn so inherited prototype properties can never leak
 * out of the lookup: the original `CONTENT_TYPE_BY_EXT[ext]` returned
 * Object.prototype members (e.g. the `constructor` function) for file
 * names like "x.constructor", and `??` does not catch those because
 * they are not nullish.
 */
function guessContentType(path) {
    const dot = path.lastIndexOf(".");
    if (dot < 0) {
        return "application/octet-stream";
    }
    const ext = path.slice(dot + 1).toLowerCase();
    return Object.hasOwn(CONTENT_TYPE_BY_EXT, ext)
        ? CONTENT_TYPE_BY_EXT[ext]
        : "application/octet-stream";
}
|
|
1017
|
+
// ─── Error translation ──────────────────────────────────────────────────────
|
|
1018
|
+
/**
 * Normalize any error thrown during a deploy into a Run402DeployError,
 * preserving gateway-provided structure when available.
 *
 * @param {*} err - whatever was thrown.
 * @param {string} phase - the local deploy phase, used as fallback
 *   phase/context when the gateway didn't supply one.
 * @param {*} planId / operationId - correlation IDs attached verbatim.
 * @returns {Run402DeployError} always returns (never throws).
 */
function translateDeployError(err, phase, planId, operationId) {
    // Already translated — pass through untouched.
    if (err instanceof Run402DeployError)
        return err;
    if (err instanceof ApiError) {
        const body = err.body && typeof err.body === "object"
            ? err.body
            : null;
        // Prefer the gateway's structured error when the body carries one.
        const gw = body && typeof body === "object" ? extractGatewayError(body) : null;
        if (gw) {
            return translateGatewayError(gw, phase, planId, operationId);
        }
        // Unstructured ApiError: 5xx is considered retryable, everything
        // else (including status === null) is not.
        return new Run402DeployError(err.message, {
            code: "INTERNAL_ERROR",
            phase,
            retryable: err.status !== null && err.status >= 500,
            operationId,
            planId,
            status: err.status,
            body: err.body,
            context: err.context,
        });
    }
    // Any other Error — including other Run402Error subclasses such as
    // PaymentRequired / Unauthorized / NetworkError — is wrapped into a
    // non-retryable Run402DeployError here.
    // NOTE(review): an earlier comment claimed those subclasses were
    // re-thrown as-is for consumers to handle at a different layer; the
    // code wraps them instead — confirm which behavior is intended.
    if (err instanceof Error) {
        return new Run402DeployError(err.message, {
            code: "INTERNAL_ERROR",
            phase,
            retryable: false,
            operationId,
            planId,
            context: phase,
        });
    }
    // Non-Error throwables (strings, objects) are stringified.
    return new Run402DeployError(String(err), {
        code: "INTERNAL_ERROR",
        phase,
        retryable: false,
        operationId,
        planId,
        context: phase,
    });
}
|
|
1062
|
+
/**
 * Pull a structured gateway error out of a response body. Accepted
 * shapes:
 *   { error: { code, message?, phase?, ... } } — nested
 *   { code, message?, phase?, ... }            — top-level
 *   { error: "<message>", code: "..." }        — legacy, error as string
 * Only `code` is required; a missing message is synthesized as
 * "Deploy error: <code>". Returns null when no code can be found.
 */
function extractGatewayError(body) {
    const nested = body.error;
    if (nested && typeof nested === "object" && typeof nested.code === "string") {
        return nested;
    }
    if (typeof body.code !== "string") {
        return null;
    }
    const out = { code: body.code };
    if (typeof body.message === "string") {
        out.message = body.message;
    }
    else if (typeof body.error === "string") {
        out.message = body.error;
    }
    else {
        out.message = `Deploy error: ${body.code}`;
    }
    // Optional metadata is copied through only when well-typed.
    if (typeof body.phase === "string") {
        out.phase = body.phase;
    }
    if (typeof body.resource === "string") {
        out.resource = body.resource;
    }
    if (typeof body.retryable === "boolean") {
        out.retryable = body.retryable;
    }
    if (body.fix !== undefined) {
        out.fix = body.fix;
    }
    if (Array.isArray(body.logs)) {
        out.logs = body.logs;
    }
    if (typeof body.rolled_back === "boolean") {
        out.rolled_back = body.rolled_back;
    }
    return out;
}
|
|
1102
|
+
/**
 * Convert an extracted gateway error (or its absence) into a
 * Run402DeployError, preferring the gateway's own phase / resource /
 * retryable metadata and falling back to the local deploy phase.
 */
function translateGatewayError(gw, phase, planId, operationId) {
    if (!gw) {
        return new Run402DeployError("Deploy failed without a structured error", {
            code: "INTERNAL_ERROR",
            phase,
            retryable: false,
            operationId,
            planId,
            context: phase,
        });
    }
    const message = gw.message ?? `Deploy failed: ${gw.code}`;
    const details = {
        code: gw.code,
        phase: gw.phase ?? phase,
        resource: gw.resource ?? null,
        retryable: gw.retryable ?? false,
        operationId,
        planId,
        fix: (gw.fix ?? null),
        logs: gw.logs ?? null,
        rolledBack: gw.rolled_back ?? false,
        body: gw,
        context: phase,
    };
    return new Run402DeployError(message, details);
}
|
|
1127
|
+
//# sourceMappingURL=deploy.js.map
|