run402 1.49.0 → 1.50.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/core-dist/config.js +10 -0
  2. package/lib/deploy-v2.mjs +359 -0
  3. package/lib/deploy.mjs +14 -0
  4. package/package.json +1 -1
  5. package/sdk/core-dist/config.js +10 -0
  6. package/sdk/dist/errors.d.ts +41 -0
  7. package/sdk/dist/errors.d.ts.map +1 -1
  8. package/sdk/dist/errors.js +23 -0
  9. package/sdk/dist/errors.js.map +1 -1
  10. package/sdk/dist/index.d.ts +24 -1
  11. package/sdk/dist/index.d.ts.map +1 -1
  12. package/sdk/dist/index.js +24 -1
  13. package/sdk/dist/index.js.map +1 -1
  14. package/sdk/dist/namespaces/apps.d.ts +14 -0
  15. package/sdk/dist/namespaces/apps.d.ts.map +1 -1
  16. package/sdk/dist/namespaces/apps.js +175 -20
  17. package/sdk/dist/namespaces/apps.js.map +1 -1
  18. package/sdk/dist/namespaces/blobs.d.ts.map +1 -1
  19. package/sdk/dist/namespaces/blobs.js +20 -6
  20. package/sdk/dist/namespaces/blobs.js.map +1 -1
  21. package/sdk/dist/namespaces/blobs.types.d.ts +20 -5
  22. package/sdk/dist/namespaces/blobs.types.d.ts.map +1 -1
  23. package/sdk/dist/namespaces/deploy.d.ts +116 -0
  24. package/sdk/dist/namespaces/deploy.d.ts.map +1 -0
  25. package/sdk/dist/namespaces/deploy.js +1251 -0
  26. package/sdk/dist/namespaces/deploy.js.map +1 -0
  27. package/sdk/dist/namespaces/deploy.types.d.ts +438 -0
  28. package/sdk/dist/namespaces/deploy.types.d.ts.map +1 -0
  29. package/sdk/dist/namespaces/deploy.types.js +11 -0
  30. package/sdk/dist/namespaces/deploy.types.js.map +1 -0
  31. package/sdk/dist/node/canonicalize.d.ts +12 -5
  32. package/sdk/dist/node/canonicalize.d.ts.map +1 -1
  33. package/sdk/dist/node/canonicalize.js +12 -5
  34. package/sdk/dist/node/canonicalize.js.map +1 -1
  35. package/sdk/dist/node/files.d.ts +38 -0
  36. package/sdk/dist/node/files.d.ts.map +1 -0
  37. package/sdk/dist/node/files.js +88 -0
  38. package/sdk/dist/node/files.js.map +1 -0
  39. package/sdk/dist/node/index.d.ts +5 -3
  40. package/sdk/dist/node/index.d.ts.map +1 -1
  41. package/sdk/dist/node/index.js +2 -1
  42. package/sdk/dist/node/index.js.map +1 -1
  43. package/sdk/dist/node/sites-node.d.ts +34 -107
  44. package/sdk/dist/node/sites-node.d.ts.map +1 -1
  45. package/sdk/dist/node/sites-node.js +91 -353
  46. package/sdk/dist/node/sites-node.js.map +1 -1
@@ -0,0 +1,1251 @@
1
+ /**
2
+ * `deploy` namespace — the canonical unified deploy primitive.
3
+ *
4
+ * Three layers exposed:
5
+ * - `apply(spec, opts?)` — one-shot, awaits to completion or terminal failure.
6
+ * - `start(spec, opts?)` — returns a `DeployOperation` with `events()` + `result()`.
7
+ * - `plan` / `upload` / `commit` — low-level steps for CLI and tests.
8
+ *
9
+ * All bytes ride through the CAS content service via presigned PUTs to S3.
10
+ * The wire body to `POST /deploy/v2/plans` carries `ContentRef` objects only —
11
+ * never inline file bytes. When the normalized spec exceeds 5 MB JSON, the
12
+ * SDK uploads the manifest itself as a CAS object and references it.
13
+ *
14
+ * Idempotency is keyed on the gateway-computed manifest digest, not the
15
+ * SDK's local digest. The SDK does not canonicalize for correctness — the
16
+ * gateway is authoritative.
17
+ *
18
+ * See `unified-deploy` and `cas-content` capability specs for normative
19
+ * behavior; this file is the implementation.
20
+ */
21
+ import { ApiError, NetworkError, Run402DeployError, } from "../errors.js";
22
+ // ─── Constants ───────────────────────────────────────────────────────────────
23
+ const PLAN_BODY_LIMIT_BYTES = 5 * 1024 * 1024;
24
+ const COMMIT_POLL_INITIAL_MS = 1_000;
25
+ const COMMIT_POLL_MAX_MS = 30_000;
26
+ const COMMIT_POLL_BACKOFF_AFTER_MS = 30_000;
27
+ const COMMIT_POLL_TIMEOUT_MS = 10 * 60 * 1000;
28
+ const URL_REFRESH_AT_MS = 50 * 60 * 1000;
29
+ const MANIFEST_CONTENT_TYPE = "application/vnd.run402.deploy-manifest+json";
30
+ const TERMINAL_STATUSES = [
31
+ "ready",
32
+ "failed",
33
+ "rolled_back",
34
+ "needs_repair",
35
+ ];
36
+ const SUCCESS_STATUS = "ready";
37
+ // ─── Public class ────────────────────────────────────────────────────────────
38
+ export class Deploy {
39
+ client;
40
+ constructor(client) {
41
+ this.client = client;
42
+ }
43
+ /**
44
+ * One-shot deploy. Normalizes byte sources, plans, uploads missing
45
+ * content, commits, and polls until terminal. Throws
46
+ * {@link Run402DeployError} on any state-machine failure.
47
+ */
48
+ async apply(spec, opts = {}) {
49
+ const emit = makeEmitter(opts.onEvent);
50
+ emit({ type: "plan.started" });
51
+ const { plan, byteReaders } = await planInternal(this.client, spec, opts.idempotencyKey);
52
+ emit({ type: "plan.diff", diff: plan.diff });
53
+ if (plan.payment_required) {
54
+ emit({
55
+ type: "payment.required",
56
+ amount: plan.payment_required.amount,
57
+ asset: plan.payment_required.asset,
58
+ payTo: plan.payment_required.payTo,
59
+ reason: plan.payment_required.reason,
60
+ });
61
+ // The kernel's x402-wrapped fetch (Node) handles 402 transparently
62
+ // when the commit happens; we don't block here. Agents using a
63
+ // sandbox provider without payment auto-handling can intercept the
64
+ // event and resolve before we hit upload.
65
+ }
66
+ await uploadMissing(this.client, spec.project, plan.missing_content, byteReaders, emit);
67
+ emit({ type: "commit.phase", phase: "validate", status: "started" });
68
+ const commit = await commitInternal(this.client, plan.plan_id, opts.idempotencyKey);
69
+ return await pollUntilReady(this.client, commit, plan.diff, emit, spec.project);
70
+ }
71
+ /**
72
+ * Start a resumable deploy operation. Returns an object exposing the
73
+ * operation id, an event async-iterable, and a result promise.
74
+ */
75
+ start(spec, opts = {}) {
76
+ return startInternal(this.client, spec, opts);
77
+ }
78
+ /**
79
+ * Low-level plan: normalize the spec, upload the manifest as CAS if over
80
+ * the inline limit, and call `POST /deploy/v2/plans`. Returns the plan
81
+ * response and a byte-reader map keyed by sha256 (used by `upload`).
82
+ */
83
+ async plan(spec, opts = {}) {
84
+ return planInternal(this.client, spec, opts.idempotencyKey);
85
+ }
86
+ /**
87
+ * Low-level upload: ensure every ref the gateway reported as missing for
88
+ * this project has bytes in CAS. Issues a content-plan, PUTs bytes to
89
+ * the returned presigned URLs, and finalizes the content plan. Caller
90
+ * passes the project id so the apikey-gated CAS routes can authenticate.
91
+ */
92
+ async upload(plan, opts) {
93
+ const emit = makeEmitter(opts.onEvent);
94
+ await uploadMissing(this.client, opts.project, plan.missing_content, opts.byteReaders, emit);
95
+ }
96
+ /**
97
+ * Low-level commit: `POST /deploy/v2/plans/:id/commit`, then poll
98
+ * `/operations/:id` until terminal. Pass the project id whose anon_key
99
+ * should authenticate the polling — the operations endpoint requires
100
+ * apikey auth even though the plan/commit endpoints accept SIWX.
101
+ */
102
+ async commit(planId, opts = {}) {
103
+ const emit = makeEmitter(opts.onEvent);
104
+ const commit = await commitInternal(this.client, planId, opts.idempotencyKey);
105
+ return await pollUntilReady(this.client, commit, {}, emit, opts.project);
106
+ }
107
+ /**
108
+ * Resume an operation in `schema_settling` or `activation_pending`. The
109
+ * gateway re-runs only the failed phase forward — never replays SQL.
110
+ * Returns the resulting snapshot, polling until terminal. The resume
111
+ * endpoint accepts wallet (SIWX) auth; the polling that follows requires
112
+ * the project's apikey, so pass `project` to enable polling. (Without
113
+ * `project`, this method returns once the gateway accepts the resume
114
+ * request — successful resumes typically reach `ready` synchronously
115
+ * via the auto-resume worker.)
116
+ */
117
+ async resume(operationId, opts = {}) {
118
+ if (!operationId || !operationId.startsWith("op_")) {
119
+ throw new Run402DeployError(`Invalid operation id: "${operationId}"`, {
120
+ code: "OPERATION_NOT_FOUND",
121
+ retryable: false,
122
+ context: "resuming deploy operation",
123
+ });
124
+ }
125
+ const emit = makeEmitter(opts.onEvent);
126
+ let snapshot;
127
+ try {
128
+ snapshot = await this.client.request(`/deploy/v2/operations/${encodeURIComponent(operationId)}/resume`, { method: "POST", context: "resuming deploy operation" });
129
+ }
130
+ catch (err) {
131
+ throw translateDeployError(err, "resume", null, operationId);
132
+ }
133
+ return await pollSnapshotUntilReady(this.client, snapshot, {}, emit, opts.project);
134
+ }
135
+ /**
136
+ * Snapshot a deploy operation. The endpoint requires `apikey` auth, so
137
+ * pass the project that owns the operation. (When omitted, the request
138
+ * is sent without an apikey header and the gateway will return 401.)
139
+ */
140
+ async status(operationId, opts = {}) {
141
+ const headers = opts.project ? await apikeyHeaders(this.client, opts.project) : {};
142
+ return this.client.request(`/deploy/v2/operations/${operationId}`, { headers, context: "fetching deploy operation" });
143
+ }
144
+ /**
145
+ * Fetch a release by id. (Endpoint may not be live in early v2 builds —
146
+ * falls through to the gateway's standard 404 handling in that case.)
147
+ */
148
+ async getRelease(releaseId) {
149
+ return this.client.request(`/deploy/v2/releases/${releaseId}`, {
150
+ context: "fetching release",
151
+ });
152
+ }
153
+ /**
154
+ * Diff two releases. (Endpoint may not be live in early v2 builds.)
155
+ */
156
+ async diff(opts) {
157
+ const qs = new URLSearchParams({ from: opts.from, to: opts.to });
158
+ return this.client.request(`/deploy/v2/releases/diff?${qs}`, {
159
+ context: "diffing releases",
160
+ });
161
+ }
162
+ }
163
+ // ─── Internal pipeline ───────────────────────────────────────────────────────
164
+ async function planInternal(client, spec, idempotencyKey) {
165
+ validateSpec(spec);
166
+ const { normalized, byteReaders } = await normalizeReleaseSpec(client, spec);
167
+ // The gateway expects { spec, manifest_ref?, idempotency_key? } with
168
+ // ReleaseSpec.project (singular). For oversized specs the SDK uploads
169
+ // the manifest JSON to CAS first and references it; the gateway still
170
+ // needs `spec` in the body (with at least the project), so we keep a
171
+ // minimal stub there.
172
+ const inlineBody = { spec: normalized };
173
+ if (idempotencyKey)
174
+ inlineBody.idempotency_key = idempotencyKey;
175
+ const inlineBytes = new TextEncoder().encode(JSON.stringify(inlineBody)).byteLength;
176
+ let body;
177
+ if (inlineBytes <= PLAN_BODY_LIMIT_BYTES) {
178
+ body = inlineBody;
179
+ }
180
+ else {
181
+ // Upload the normalized manifest itself as a CAS object so the gateway
182
+ // can pick it up via `manifest_ref`. The body still carries a minimal
183
+ // `spec` so the gateway has the project for auth + plan persistence.
184
+ const manifestBytes = new TextEncoder().encode(JSON.stringify(normalized));
185
+ const ref = await uploadInlineCas(client, spec.project, manifestBytes, MANIFEST_CONTENT_TYPE);
186
+ body = { spec: { project: spec.project }, manifest_ref: ref };
187
+ if (idempotencyKey)
188
+ body.idempotency_key = idempotencyKey;
189
+ }
190
+ let plan;
191
+ try {
192
+ plan = await client.request("/deploy/v2/plans", {
193
+ method: "POST",
194
+ body,
195
+ context: "planning deploy",
196
+ });
197
+ }
198
+ catch (err) {
199
+ throw translateDeployError(err, "plan", null, null);
200
+ }
201
+ return { plan, byteReaders };
202
+ }
203
+ async function commitInternal(client, planId, idempotencyKey) {
204
+ try {
205
+ return await client.request(`/deploy/v2/plans/${encodeURIComponent(planId)}/commit`, {
206
+ method: "POST",
207
+ body: idempotencyKey ? { idempotency_key: idempotencyKey } : {},
208
+ context: "committing deploy",
209
+ });
210
+ }
211
+ catch (err) {
212
+ throw translateDeployError(err, "commit", planId, null);
213
+ }
214
+ }
215
+ async function uploadMissing(client, projectId, presence, byteReaders, emit) {
216
+ // Surface CAS dedup hits so agents can distinguish "N files were already
217
+ // present" from "nothing happened". The gateway reports both present and
218
+ // missing refs in `missing_content`; emit a skipped event for each present
219
+ // one before short-circuiting on a fully-deduped plan. (#124, #134)
220
+ const skipped = presence.filter((p) => p.present);
221
+ for (const p of skipped) {
222
+ const reader = byteReaders.get(p.sha256);
223
+ emit({
224
+ type: "content.upload.skipped",
225
+ label: reader?.label ?? p.sha256,
226
+ sha256: p.sha256,
227
+ reason: "present",
228
+ });
229
+ }
230
+ // Filter to refs the gateway reported as missing for this project.
231
+ const needsUpload = presence.filter((p) => !p.present);
232
+ if (needsUpload.length === 0)
233
+ return;
234
+ // Hand off to the CAS content service: hand it the list of missing
235
+ // refs, it issues an upload session per ref with presigned PUT URLs,
236
+ // then we PUT the bytes and commit the content plan.
237
+ const headers = await apikeyHeaders(client, projectId);
238
+ const contentRequest = needsUpload.map((p) => {
239
+ const reader = byteReaders.get(p.sha256);
240
+ return {
241
+ sha256: p.sha256,
242
+ size: p.size,
243
+ content_type: reader?.contentType,
244
+ };
245
+ });
246
+ const planRes = await client.request("/content/v1/plans", {
247
+ method: "POST",
248
+ headers,
249
+ body: { content: contentRequest },
250
+ context: "planning content upload",
251
+ });
252
+ const total = planRes.missing.length;
253
+ let done = 0;
254
+ for (const session of planRes.missing) {
255
+ const reader = byteReaders.get(session.sha256);
256
+ if (!reader) {
257
+ throw new Run402DeployError(`internal: no local byte reader for sha ${session.sha256.slice(0, 12)}…`, {
258
+ code: "CONTENT_UPLOAD_FAILED",
259
+ phase: "upload",
260
+ retryable: false,
261
+ context: "uploading deploy bytes",
262
+ });
263
+ }
264
+ const bytes = await reader();
265
+ await uploadOneWithRetry(client.fetch, session, bytes);
266
+ // Per-session completion — promotes the staged object to CAS via
267
+ // services/cas-promote.ts. The plan-level `/content/v1/plans/:id/commit`
268
+ // call below is the plan-level finalize; per-session promotion happens
269
+ // here through the existing /storage/v1/uploads/:id/complete handler,
270
+ // which knows how to handle `kind='cas'` sessions and write the
271
+ // internal.content_objects + internal.plan_claims rows the deploy
272
+ // commit's FK constraints rely on.
273
+ const completeBody = {};
274
+ if (session.mode === "multipart" && session.parts.length > 1) {
275
+ // Multipart completion needs per-part ETags. The SDK doesn't capture
276
+ // ETags during the PUT loop today (it would need a multi-PUT
277
+ // helper); for the common single-PUT case below this is empty.
278
+ // TODO: collect part ETags during uploadOne for true multipart.
279
+ }
280
+ await client.request(`/storage/v1/uploads/${encodeURIComponent(session.upload_id)}/complete`, {
281
+ method: "POST",
282
+ headers,
283
+ body: completeBody,
284
+ context: "completing content upload session",
285
+ });
286
+ done += 1;
287
+ emit({
288
+ type: "content.upload.progress",
289
+ label: reader.label ?? session.sha256,
290
+ sha256: session.sha256,
291
+ done,
292
+ total,
293
+ });
294
+ }
295
+ // Plan-level finalize — marks the plan committed in the deploy_plans
296
+ // table. Per-session promotion to CAS already happened in the loop
297
+ // above; this call is the plan-level idempotency anchor.
298
+ await client.request(`/content/v1/plans/${encodeURIComponent(planRes.plan_id)}/commit`, { method: "POST", headers, body: {}, context: "committing content upload" });
299
+ }
300
+ // Wrap `uploadOne` with exponential backoff for retryable failures.
301
+ // `putToS3` raises Run402DeployError(retryable: true) for transient network
302
+ // drops and 5xx/403 responses; one network blip should not fail the entire
303
+ // deploy. Cap at 3 attempts (1 initial + 2 retries) with delays 1s, 2s.
304
+ // Non-retryable errors (4xx other than 403, internal SDK invariants) bubble
305
+ // up on the first attempt. See GH-140.
306
+ async function uploadOneWithRetry(fetchFn, session, bytes) {
307
+ const MAX_ATTEMPTS = 3;
308
+ for (let attempt = 1;; attempt++) {
309
+ try {
310
+ await uploadOne(fetchFn, session, bytes);
311
+ return;
312
+ }
313
+ catch (err) {
314
+ const retryable = err instanceof Run402DeployError && err.retryable;
315
+ if (!retryable || attempt >= MAX_ATTEMPTS)
316
+ throw err;
317
+ await sleep(1000 * Math.pow(2, attempt - 1)); // 1s, 2s
318
+ }
319
+ }
320
+ }
321
+ async function uploadOne(fetchFn, entry, bytes) {
322
+ if (entry.mode === "single") {
323
+ if (entry.parts.length !== 1) {
324
+ throw new Run402DeployError(`internal: single-mode upload for ${entry.sha256.slice(0, 12)}… returned ${entry.parts.length} parts`, {
325
+ code: "CONTENT_UPLOAD_FAILED",
326
+ phase: "upload",
327
+ retryable: false,
328
+ context: "uploading deploy bytes",
329
+ });
330
+ }
331
+ const part = entry.parts[0];
332
+ const slice = bytes.subarray(part.byte_start, part.byte_end + 1);
333
+ const checksum = base64FromHex(entry.sha256);
334
+ await putToS3(fetchFn, part.url, slice, checksum, part.part_number);
335
+ return;
336
+ }
337
+ for (const part of entry.parts) {
338
+ const slice = bytes.subarray(part.byte_start, part.byte_end + 1);
339
+ const checksum = await sha256Base64(slice);
340
+ await putToS3(fetchFn, part.url, slice, checksum, part.part_number);
341
+ }
342
+ }
343
+ async function putToS3(fetchFn, url, body, checksumBase64, partNumber) {
344
+ // The gateway issues SigV4 presigned URLs with `ChecksumSHA256` set on
345
+ // PutObjectCommand / UploadPartCommand. The AWS SDK (v3) encodes that
346
+ // value as the `x-amz-checksum-sha256` query parameter and only signs
347
+ // `host` + `content-length`. If we ALSO send it as a request header, S3
348
+ // returns 403 "There were headers present in the request which were not
349
+ // signed: x-amz-checksum-sha256" because the header isn't in the
350
+ // SigV4-signed list.
351
+ //
352
+ // So: only send the header when the URL doesn't already encode the
353
+ // checksum as a query param. This keeps us compatible with both
354
+ // signing styles (query-param-encoded, the default for AWS SDK v3, and
355
+ // header-signed, which an older signer might still produce).
356
+ const headers = {};
357
+ const urlHasChecksum = (() => {
358
+ try {
359
+ return new URL(url).searchParams.has("x-amz-checksum-sha256");
360
+ }
361
+ catch {
362
+ return false;
363
+ }
364
+ })();
365
+ if (!urlHasChecksum) {
366
+ headers["x-amz-checksum-sha256"] = checksumBase64;
367
+ }
368
+ void checksumBase64; // silence unused-var if both branches skip the header
369
+ let res;
370
+ try {
371
+ res = await fetchFn(url, {
372
+ method: "PUT",
373
+ headers,
374
+ body: body,
375
+ });
376
+ }
377
+ catch (err) {
378
+ throw new Run402DeployError(`S3 PUT failed for part ${partNumber}: ${err.message}`, {
379
+ code: "CONTENT_UPLOAD_FAILED",
380
+ phase: "upload",
381
+ retryable: true,
382
+ context: "uploading deploy bytes",
383
+ });
384
+ }
385
+ if (!res.ok) {
386
+ const text = await res.text().catch(() => "");
387
+ throw new Run402DeployError(`S3 PUT failed for part ${partNumber} (HTTP ${res.status})${text ? ": " + text.slice(0, 200) : ""}`, {
388
+ code: "CONTENT_UPLOAD_FAILED",
389
+ phase: "upload",
390
+ retryable: res.status >= 500 || res.status === 403,
391
+ status: res.status,
392
+ body: text,
393
+ context: "uploading deploy bytes",
394
+ });
395
+ }
396
+ }
397
+ async function pollUntilReady(client, commit, diff, emit, projectId) {
398
+ if (commit.status === "failed") {
399
+ throw translateGatewayError(commit.error, "commit", null, commit.operation_id);
400
+ }
401
+ if (commit.status === "ready") {
402
+ if (!commit.release_id || !commit.urls) {
403
+ throw new Run402DeployError("Commit returned ready but no release_id/urls", {
404
+ code: "INTERNAL_ERROR",
405
+ phase: "ready",
406
+ retryable: false,
407
+ operationId: commit.operation_id,
408
+ context: "committing deploy",
409
+ });
410
+ }
411
+ emit({ type: "ready", releaseId: commit.release_id, urls: commit.urls });
412
+ return {
413
+ release_id: commit.release_id,
414
+ operation_id: commit.operation_id,
415
+ urls: commit.urls,
416
+ diff,
417
+ };
418
+ }
419
+ const opHeaders = projectId ? await apikeyHeaders(client, projectId) : {};
420
+ const initialSnapshot = await client.request(`/deploy/v2/operations/${encodeURIComponent(commit.operation_id)}`, { headers: opHeaders, context: "fetching deploy operation" });
421
+ return await pollSnapshotUntilReady(client, initialSnapshot, diff, emit, projectId);
422
+ }
423
+ async function pollSnapshotUntilReady(client, initial, diff, emit, projectId) {
424
+ let snapshot = initial;
425
+ const opHeaders = projectId ? await apikeyHeaders(client, projectId) : {};
426
+ let lastPhaseEmitted = null;
427
+ const start = Date.now();
428
+ let interval = COMMIT_POLL_INITIAL_MS;
429
+ const phaseFor = (status) => {
430
+ const map = {
431
+ staging: { type: "commit.phase", phase: "stage", status: "started" },
432
+ gating: { type: "commit.phase", phase: "migrate-gate", status: "started" },
433
+ migrating: { type: "commit.phase", phase: "migrate", status: "started" },
434
+ schema_settling: {
435
+ type: "commit.phase",
436
+ phase: "schema-settle",
437
+ status: "started",
438
+ },
439
+ activating: { type: "commit.phase", phase: "activate", status: "started" },
440
+ activation_pending: {
441
+ type: "commit.phase",
442
+ phase: "activate",
443
+ status: "failed",
444
+ },
445
+ };
446
+ return map[status] ?? null;
447
+ };
448
+ // Close out the previously-emitted phase as `done` (or `failed`) before
449
+ // emitting the next phase's `started` event. Skips when there's no prior
450
+ // phase, when the prior emission wasn't a `started` event (e.g. the
451
+ // `activation_pending` path which already emits `failed`), or when the
452
+ // prior phase string equals the next phase. (#135)
453
+ const closePreviousPhase = (nextPhase, closeStatus = "done") => {
454
+ if (lastPhaseEmitted === null)
455
+ return;
456
+ const prev = phaseFor(lastPhaseEmitted);
457
+ if (!prev || prev.type !== "commit.phase")
458
+ return;
459
+ if (prev.status !== "started")
460
+ return;
461
+ if (nextPhase !== undefined && prev.phase === nextPhase)
462
+ return;
463
+ emit({ type: "commit.phase", phase: prev.phase, status: closeStatus });
464
+ };
465
+ while (true) {
466
+ if (lastPhaseEmitted !== snapshot.status) {
467
+ const ev = phaseFor(snapshot.status);
468
+ if (ev) {
469
+ if (ev.type === "commit.phase")
470
+ closePreviousPhase(ev.phase);
471
+ emit(ev);
472
+ lastPhaseEmitted = snapshot.status;
473
+ }
474
+ // If `ev` is null (status not in the phase map, e.g. "ready"), leave
475
+ // lastPhaseEmitted pointing at the prior in-flight phase so the
476
+ // terminal-success closePreviousPhase() below can emit its `done`.
477
+ }
478
+ if (snapshot.status === SUCCESS_STATUS) {
479
+ if (!snapshot.release_id || !snapshot.urls) {
480
+ throw new Run402DeployError("Operation reached ready but no release_id/urls available", {
481
+ code: "INTERNAL_ERROR",
482
+ phase: "ready",
483
+ retryable: false,
484
+ operationId: snapshot.operation_id,
485
+ context: "polling deploy",
486
+ });
487
+ }
488
+ closePreviousPhase();
489
+ emit({ type: "ready", releaseId: snapshot.release_id, urls: snapshot.urls });
490
+ return {
491
+ release_id: snapshot.release_id,
492
+ operation_id: snapshot.operation_id,
493
+ urls: snapshot.urls,
494
+ diff,
495
+ };
496
+ }
497
+ if (TERMINAL_STATUSES.includes(snapshot.status)) {
498
+ closePreviousPhase(undefined, "failed");
499
+ throw translateGatewayError(snapshot.error, snapshot.status, snapshot.plan_id, snapshot.operation_id);
500
+ }
501
+ if (Date.now() - start > COMMIT_POLL_TIMEOUT_MS) {
502
+ throw new Run402DeployError(`Timed out waiting for operation ${snapshot.operation_id} to reach ready`, {
503
+ code: "INTERNAL_ERROR",
504
+ phase: snapshot.status,
505
+ retryable: true,
506
+ operationId: snapshot.operation_id,
507
+ status: 504,
508
+ context: "polling deploy",
509
+ });
510
+ }
511
+ await sleep(interval);
512
+ if (Date.now() - start > COMMIT_POLL_BACKOFF_AFTER_MS) {
513
+ interval = Math.min(Math.floor(interval * 1.5), COMMIT_POLL_MAX_MS);
514
+ }
515
+ snapshot = await client.request(`/deploy/v2/operations/${encodeURIComponent(snapshot.operation_id)}`, { headers: opHeaders, context: "polling deploy operation" });
516
+ }
517
+ }
518
+ // ─── start() implementation ──────────────────────────────────────────────────
519
+ async function startInternal(client, spec, opts) {
520
+ const buffered = [];
521
+ const subscribers = [];
522
+ const emit = (event) => {
523
+ buffered.push(event);
524
+ if (opts.onEvent) {
525
+ try {
526
+ opts.onEvent(event);
527
+ }
528
+ catch {
529
+ /* swallow */
530
+ }
531
+ }
532
+ for (const fn of subscribers) {
533
+ try {
534
+ fn(event);
535
+ }
536
+ catch {
537
+ /* swallow */
538
+ }
539
+ }
540
+ };
541
+ emit({ type: "plan.started" });
542
+ const { plan, byteReaders } = await planInternal(client, spec, opts.idempotencyKey);
543
+ emit({ type: "plan.diff", diff: plan.diff });
544
+ if (plan.payment_required) {
545
+ emit({
546
+ type: "payment.required",
547
+ amount: plan.payment_required.amount,
548
+ asset: plan.payment_required.asset,
549
+ payTo: plan.payment_required.payTo,
550
+ reason: plan.payment_required.reason,
551
+ });
552
+ }
553
+ const resultPromise = (async () => {
554
+ await uploadMissing(client, spec.project, plan.missing_content, byteReaders, emit);
555
+ emit({ type: "commit.phase", phase: "validate", status: "started" });
556
+ const commit = await commitInternal(client, plan.plan_id, opts.idempotencyKey);
557
+ return await pollUntilReady(client, commit, plan.diff, emit, spec.project);
558
+ })();
559
+ // Avoid an unhandled-rejection at construction time. Consumers must call
560
+ // .result() to actually observe the error.
561
+ resultPromise.catch(() => { });
562
+ let snapshot = null;
563
+ const startHeaders = await apikeyHeaders(client, spec.project);
564
+ const fetchSnapshot = async () => {
565
+ if (snapshot && TERMINAL_STATUSES.includes(snapshot.status))
566
+ return snapshot;
567
+ snapshot = await client.request(`/deploy/v2/operations/${encodeURIComponent(plan.operation_id)}`, { headers: startHeaders, context: "fetching deploy operation" });
568
+ return snapshot;
569
+ };
570
+ return {
571
+ id: plan.operation_id,
572
+ async snapshot() {
573
+ return fetchSnapshot();
574
+ },
575
+ async result() {
576
+ return resultPromise;
577
+ },
578
+ events() {
579
+ return {
580
+ [Symbol.asyncIterator]() {
581
+ const queue = [...buffered];
582
+ let resolveNext = null;
583
+ let done = false;
584
+ // If a terminal event was already buffered before this iterator
585
+ // attached (e.g. iteration starts after `await op.result()`
586
+ // resolved), we'll go done after the queue drains. Without this,
587
+ // late iteration would hang forever waiting for an emit that
588
+ // will never come.
589
+ const terminalAlreadyBuffered = buffered.some((ev) => ev.type === "ready");
590
+ const subscriber = (ev) => {
591
+ const waiter = resolveNext;
592
+ if (waiter) {
593
+ resolveNext = null;
594
+ waiter({ value: ev, done: false });
595
+ }
596
+ else {
597
+ queue.push(ev);
598
+ }
599
+ if (ev.type === "ready") {
600
+ done = true;
601
+ // The next() loop checks `done` after queue drain, so a
602
+ // pending waiter that was just satisfied above will see
603
+ // `done` on its next call. No second wake-up needed here.
604
+ }
605
+ };
606
+ subscribers.push(subscriber);
607
+ // Wake up on either success or failure of the result promise
608
+ // so an iterator attached after termination always exits.
609
+ const finalize = () => {
610
+ done = true;
611
+ if (resolveNext) {
612
+ const r = resolveNext;
613
+ resolveNext = null;
614
+ r({ value: undefined, done: true });
615
+ }
616
+ };
617
+ resultPromise.then(finalize, finalize);
618
+ return {
619
+ next() {
620
+ if (queue.length > 0) {
621
+ return Promise.resolve({ value: queue.shift(), done: false });
622
+ }
623
+ // After queue drain, if we already saw the terminal event
624
+ // in the initial buffer (or the result promise has
625
+ // resolved/rejected), we're done.
626
+ if (done || terminalAlreadyBuffered) {
627
+ return Promise.resolve({
628
+ value: undefined,
629
+ done: true,
630
+ });
631
+ }
632
+ return new Promise((resolve) => {
633
+ resolveNext = resolve;
634
+ });
635
+ },
636
+ return() {
637
+ done = true;
638
+ const idx = subscribers.indexOf(subscriber);
639
+ if (idx >= 0)
640
+ subscribers.splice(idx, 1);
641
+ return Promise.resolve({
642
+ value: undefined,
643
+ done: true,
644
+ });
645
+ },
646
+ };
647
+ },
648
+ };
649
+ },
650
+ };
651
+ }
652
+ function validateSpec(spec) {
653
+ if (!spec || typeof spec !== "object") {
654
+ throw new Run402DeployError("ReleaseSpec must be an object", {
655
+ code: "INVALID_SPEC",
656
+ phase: "validate",
657
+ resource: "spec",
658
+ retryable: false,
659
+ fix: { action: "set_field", path: "" },
660
+ context: "validating spec",
661
+ });
662
+ }
663
+ if (!spec.project || typeof spec.project !== "string") {
664
+ throw new Run402DeployError("ReleaseSpec.project is required", {
665
+ code: "INVALID_SPEC",
666
+ phase: "validate",
667
+ resource: "spec.project",
668
+ retryable: false,
669
+ fix: { action: "set_field", path: "project" },
670
+ context: "validating spec",
671
+ });
672
+ }
673
+ if (spec.subdomains?.set && spec.subdomains.set.length > 1) {
674
+ throw new Run402DeployError("subdomains.set accepts at most one subdomain per project; multi-subdomain support is not yet available", {
675
+ code: "SUBDOMAIN_MULTI_NOT_SUPPORTED",
676
+ phase: "validate",
677
+ resource: "subdomains.set",
678
+ retryable: false,
679
+ fix: { action: "set_field", path: "subdomains.set" },
680
+ context: "validating spec",
681
+ });
682
+ }
683
+ }
684
// Normalize a user-authored release spec into the wire shape, resolving every
// inline byte source into a ContentRef and collecting one deferred byte reader
// per unique sha256 for the later CAS upload phase.
// Returns { normalized, byteReaders } where byteReaders maps sha256 → reader.
async function normalizeReleaseSpec(client, spec) {
    // sha256 (hex) → deferred reader, deduplicated across the entire spec so
    // identical content is only uploaded once.
    const byteReaders = new Map();
    // Registers a resolved source's reader and returns its ContentRef for
    // embedding into the normalized spec.
    const remember = (resolved) => {
        // Propagate the final content-type onto the deferred reader so the CAS
        // upload session can declare it correctly. Callers may set
        // ref.contentType *after* resolveContent returns (e.g. normalizeFileSet
        // sets it from the path extension), so do this at remember time.
        if (resolved.ref.contentType && !resolved.reader.contentType) {
            resolved.reader.contentType = resolved.ref.contentType;
        }
        if (!byteReaders.has(resolved.ref.sha256)) {
            byteReaders.set(resolved.ref.sha256, resolved.reader);
        }
        else {
            // Already remembered — but if the existing reader has no contentType
            // and we just learned it, fill it in.
            const existing = byteReaders.get(resolved.ref.sha256);
            if (resolved.ref.contentType && !existing.contentType) {
                existing.contentType = resolved.ref.contentType;
            }
        }
        return resolved.ref;
    };
    // Copy only the sections the caller actually provided — absent keys must
    // stay absent on the wire.
    const normalized = { project: spec.project };
    if (spec.base)
        normalized.base = spec.base;
    if (spec.subdomains)
        normalized.subdomains = spec.subdomains;
    if (spec.routes)
        normalized.routes = spec.routes;
    if (spec.checks)
        normalized.checks = spec.checks;
    if (spec.secrets)
        normalized.secrets = spec.secrets;
    if (spec.database) {
        const db = {};
        if (spec.database.expose)
            db.expose = spec.database.expose;
        if (typeof spec.database.zero_downtime === "boolean") {
            db.zero_downtime = spec.database.zero_downtime;
        }
        if (spec.database.migrations && spec.database.migrations.length > 0) {
            // Migrations normalize concurrently; `remember` dedupes any shared SQL.
            db.migrations = await Promise.all(spec.database.migrations.map(async (m) => normalizeMigration(client, spec.project, m, remember)));
        }
        normalized.database = db;
    }
    if (spec.functions) {
        const fns = {};
        if (spec.functions.replace) {
            fns.replace = await normalizeFunctionMap(spec.functions.replace, remember);
        }
        if (spec.functions.patch) {
            fns.patch = {};
            if (spec.functions.patch.set) {
                fns.patch.set = await normalizeFunctionMap(spec.functions.patch.set, remember);
            }
            if (spec.functions.patch.delete)
                fns.patch.delete = spec.functions.patch.delete;
        }
        normalized.functions = fns;
    }
    if (spec.site) {
        // site is an exclusive union: full replace wins over patch when both
        // keys are somehow present.
        if ("replace" in spec.site && spec.site.replace) {
            const map = await normalizeFileSet(spec.site.replace, remember);
            normalized.site = { replace: map };
        }
        else if ("patch" in spec.site && spec.site.patch) {
            const patch = {};
            if (spec.site.patch.put) {
                patch.put = await normalizeFileSet(spec.site.patch.put, remember);
            }
            if (spec.site.patch.delete)
                patch.delete = spec.site.patch.delete;
            normalized.site = { patch };
        }
    }
    return { normalized, byteReaders };
}
762
// Normalize every function spec in a name → FunctionSpec map, preserving the
// original iteration order of the keys. Sources are registered via `remember`.
async function normalizeFunctionMap(map, remember) {
    const normalized = {};
    for (const [name, fnSpec] of Object.entries(map)) {
        normalized[name] = await normalizeFunction(fnSpec, remember);
    }
    return normalized;
}
769
// Normalize a single function spec: default the runtime to node22, copy the
// optional config/schedule/entrypoint fields the caller set, and resolve any
// inline source / file-set bytes into ContentRefs via `remember`.
async function normalizeFunction(fn, remember) {
    const normalized = { runtime: fn.runtime ?? "node22" };
    if (fn.config) {
        normalized.config = fn.config;
    }
    // schedule may legitimately be null (clear the schedule), so only skip it
    // when it is truly absent.
    if (fn.schedule !== undefined) {
        normalized.schedule = fn.schedule;
    }
    if (fn.entrypoint) {
        normalized.entrypoint = fn.entrypoint;
    }
    if (fn.source !== undefined) {
        normalized.source = remember(await resolveContent(fn.source, "function source"));
    }
    if (fn.files) {
        normalized.files = await normalizeFileSet(fn.files, remember);
    }
    return normalized;
}
788
// Normalize a path → byte-source map into a path → ContentRef map. Any entry
// whose source did not declare a content type gets one guessed from the path's
// file extension.
async function normalizeFileSet(set, remember) {
    const refs = {};
    for (const [path, source] of Object.entries(set)) {
        const resolved = await resolveContent(source, path);
        if (!resolved.ref.contentType) {
            resolved.ref.contentType = guessContentType(path);
        }
        refs[path] = remember(resolved);
    }
    return refs;
}
799
// Normalize a MigrationSpec into the wire shape { id, checksum, sql_ref,
// transaction? }. Inline SQL text is hashed, registered with `remember` for
// the CAS upload phase, and referenced by its digest; a pre-resolved sql_ref
// passes through unchanged. Throws Run402DeployError (INVALID_SPEC) when the
// spec has no id or neither sql nor sql_ref.
async function normalizeMigration(client, projectId, m, remember) {
    // client / projectId are reserved for a future content-presence preflight.
    // (Fix: these `void` statements previously sat *after* the return and were
    // unreachable dead code; referencing them here keeps the params "used".)
    void client;
    void projectId;
    if (!m.id) {
        throw new Run402DeployError("MigrationSpec.id is required", {
            code: "INVALID_SPEC",
            phase: "validate",
            resource: "database.migrations",
            retryable: false,
            fix: { action: "set_field", path: "database.migrations[].id" },
            context: "validating spec",
        });
    }
    let sql_ref;
    let checksum;
    if (m.sql_ref) {
        // Pre-resolved ref: the checksum defaults to the content digest.
        sql_ref = m.sql_ref;
        checksum = m.checksum ?? m.sql_ref.sha256;
    }
    else if (m.sql !== undefined) {
        // Inline SQL: hash it, register the bytes for upload, reference by digest.
        const bytes = new TextEncoder().encode(m.sql);
        const sha256 = await sha256Hex(bytes);
        const ref = { sha256, size: bytes.byteLength, contentType: "application/sql" };
        remember({ ref, reader: makeBytesReader(bytes, `migration:${m.id}`) });
        sql_ref = ref;
        checksum = m.checksum ?? sha256;
    }
    else {
        throw new Run402DeployError(`MigrationSpec ${m.id} must include sql or sql_ref`, {
            code: "INVALID_SPEC",
            phase: "validate",
            resource: `database.migrations.${m.id}`,
            retryable: false,
            fix: {
                action: "set_field",
                path: `database.migrations.${m.id}.sql`,
            },
            context: "validating spec",
        });
    }
    const out = { id: m.id, checksum, sql_ref };
    if (m.transaction)
        out.transaction = m.transaction;
    return out;
}
845
+ // ─── Content source resolution ───────────────────────────────────────────────
846
// Resolve any supported byte source into { ref: ContentRef, reader } — the
// single dispatch point for strings, Uint8Array/ArrayBuffer, Blob,
// ReadableStream, fs-file descriptors, { data, contentType } wrappers, and
// already-resolved ContentRefs. `label` is used only for error/diagnostic
// messages. Throws Run402DeployError (INVALID_SPEC) for unrecognized shapes.
async function resolveContent(source, label) {
    // Pre-resolved ContentRef — pass through, no reader needed (caller is
    // responsible for ensuring the bytes are already in CAS).
    if (isContentRef(source)) {
        return {
            ref: { ...source },
            reader: makeUnreadableReader(source.sha256, label),
        };
    }
    // { data, contentType } wrapper — recurse into data, override contentType.
    // The long negative-guard chain is deliberate: every other object-typed
    // source shape must be excluded before "data" in source can safely decide.
    if (typeof source === "object" &&
        source !== null &&
        !Array.isArray(source) &&
        !(source instanceof Uint8Array) &&
        !(source instanceof ArrayBuffer) &&
        !(typeof Blob !== "undefined" && source instanceof Blob) &&
        !isReadableStream(source) &&
        !isFsFileSource(source) &&
        "data" in source) {
        const inner = await resolveContent(source.data, label);
        if (source.contentType) {
            inner.ref.contentType = source.contentType;
        }
        return inner;
    }
    if (isFsFileSource(source)) {
        return await resolveFsFile(source, label);
    }
    // Plain string — UTF-8 encode; no content type inferred here (callers like
    // normalizeFileSet may fill one in from the path).
    if (typeof source === "string") {
        const bytes = new TextEncoder().encode(source);
        return makeMemResolved(bytes, undefined, label);
    }
    if (source instanceof Uint8Array) {
        return makeMemResolved(source, undefined, label);
    }
    if (source instanceof ArrayBuffer) {
        return makeMemResolved(new Uint8Array(source), undefined, label);
    }
    if (typeof Blob !== "undefined" && source instanceof Blob) {
        const bytes = new Uint8Array(await source.arrayBuffer());
        // Blob.type is "" when unknown — treat empty as "no content type".
        const ct = source.type && source.type.length > 0 ? source.type : undefined;
        return makeMemResolved(bytes, ct, label);
    }
    // Streams are buffered fully in memory before hashing/uploading.
    if (isReadableStream(source)) {
        const bytes = await readStreamFully(source);
        return makeMemResolved(bytes, undefined, label);
    }
    throw new Run402DeployError(`Unsupported byte source for ${label}`, {
        code: "INVALID_SPEC",
        resource: label,
        retryable: false,
        context: "normalizing byte sources",
    });
}
900
// Build a resolved entry for bytes already held in memory: hash them into a
// ContentRef and pair it with a reader that simply hands the bytes back.
async function makeMemResolved(bytes, contentType, label) {
    const ref = { sha256: await sha256Hex(bytes), size: bytes.byteLength };
    if (contentType) {
        ref.contentType = contentType;
    }
    return { ref, reader: makeBytesReader(bytes, label) };
}
907
// Resolve an FsFileSource by reading the file from disk, hashing it, and
// returning a ContentRef plus a reader that RE-READS the file on demand
// (the bytes are not retained in memory between hash time and upload time,
// so the file must not change in between — TODO confirm that assumption
// holds for callers).
async function resolveFsFile(source, label) {
    // Lazy import — keeps the root SDK V8-isolate-safe. fileSetFromDir lives
    // in `@run402/sdk/node`, so any `FsFileSource` we see here must be in a
    // Node runtime where `node:fs/promises` resolves.
    let fsMod;
    try {
        fsMod = (await import("node:fs/promises"));
    }
    catch {
        throw new Run402DeployError("FsFileSource is only supported in Node runtimes (received in a non-Node environment)", {
            code: "INVALID_SPEC",
            resource: label,
            retryable: false,
            context: "normalizing byte sources",
        });
    }
    const buf = await fsMod.readFile(source.path);
    // Zero-copy Uint8Array view over the Buffer's backing ArrayBuffer.
    const bytes = new Uint8Array(buf.buffer, buf.byteOffset, buf.byteLength);
    const sha256 = await sha256Hex(bytes);
    const ref = { sha256, size: bytes.byteLength };
    if (source.contentType)
        ref.contentType = source.contentType;
    return {
        ref,
        // Deferred reader: loads the file again at upload time.
        reader: Object.assign(async () => {
            const buf2 = await fsMod.readFile(source.path);
            return new Uint8Array(buf2.buffer, buf2.byteOffset, buf2.byteLength);
        }, { label }),
    };
}
937
// Build a deferred reader over in-memory bytes. The reader is an async
// function resolving to the bytes, tagged with a diagnostic `label` and,
// when known, the `contentType` the CAS upload should declare.
function makeBytesReader(bytes, label, contentType) {
    const props = contentType ? { label, contentType } : { label };
    return Object.assign(async () => bytes, props);
}
944
// Placeholder reader for a pre-resolved ContentRef: the caller promised the
// bytes are already in CAS, so actually invoking the reader is an error —
// it throws a CONTENT_UPLOAD_FAILED Run402DeployError on call.
function makeUnreadableReader(sha256, label) {
    const fail = async () => {
        throw new Run402DeployError(`ContentRef ${sha256.slice(0, 12)}… was passed pre-resolved but the gateway reports it missing — provide bytes inline instead`, {
            code: "CONTENT_UPLOAD_FAILED",
            resource: label,
            retryable: false,
            context: "uploading deploy bytes",
        });
    };
    return Object.assign(fail, { label });
}
956
+ // ─── Manifest-ref CAS upload (bypasses the upload phase loop) ────────────────
957
// Upload a single in-memory payload straight into CAS, bypassing the normal
// upload-phase loop: plan → (if missing) upload → complete session → commit
// plan. Returns the ContentRef for the bytes regardless of whether an upload
// was needed. `uploadOne` is defined earlier in this module (outside this
// excerpt).
async function uploadInlineCas(client, projectId, bytes, contentType) {
    const sha256 = await sha256Hex(bytes);
    const headers = await apikeyHeaders(client, projectId);
    const planRes = await client.request("/content/v1/plans", {
        method: "POST",
        headers,
        body: {
            content: [{ sha256, size: bytes.byteLength, content_type: contentType }],
        },
        context: "planning content upload",
    });
    // Only one content item was planned, so at most one session can be missing;
    // an empty `missing` means the bytes are already in CAS and we skip straight
    // to returning the ref.
    if (planRes.missing.length > 0) {
        const session = planRes.missing[0];
        await uploadOne(client.fetch, session, bytes);
        // Per-session promotion to CAS (see uploadMissing for the rationale).
        await client.request(`/storage/v1/uploads/${encodeURIComponent(session.upload_id)}/complete`, {
            method: "POST",
            headers,
            body: {},
            context: "completing content upload session",
        });
        await client.request(`/content/v1/plans/${encodeURIComponent(planRes.plan_id)}/commit`, { method: "POST", headers, body: {}, context: "committing content upload" });
    }
    return { sha256, size: bytes.byteLength, contentType };
}
982
+ // ─── Helpers ─────────────────────────────────────────────────────────────────
983
+ /**
984
+ * Build the apikey header set for a project. The v1.34 gateway's
985
+ * `/deploy/v2/operations/:id*` and `/content/v1/plans*` routes require
986
+ * `apikey: <project.anon_key>` (apikeyAuth middleware). Plan + commit on
987
+ * `/deploy/v2/plans*` use SIWX, which the kernel's getAuth provides
988
+ * automatically — only the apikey-gated paths need this helper.
989
+ *
990
+ * Returns an empty object when the credentials provider doesn't know the
991
+ * project (the request will then go out without an apikey and the gateway
992
+ * will reject with 401 — matches the failure mode for unconfigured
993
+ * projects in any of today's other apikey-auth tools).
994
+ */
995
// Resolve the apikey header set for a project; {} when the credentials
// provider does not know the project (see the JSDoc above for why).
async function apikeyHeaders(client, projectId) {
    const project = await client.getProject(projectId);
    return project ? { apikey: project.anon_key } : {};
}
1001
// Wrap an optional progress callback so emitting an event can never throw
// into the deploy state machine. Returns a no-op when no callback was given.
function makeEmitter(cb) {
    const noop = () => { };
    if (!cb)
        return noop;
    return (event) => {
        try {
            cb(event);
        }
        catch {
            // Listener errors are deliberately swallowed — progress reporting
            // must never abort a deploy.
        }
    };
}
1013
// Duck-type check for a pre-resolved ContentRef: an object carrying a string
// sha256 and numeric size, without the marker keys of the other source shapes
// ("data" wrapper, "__source" fs-file descriptor).
function isContentRef(source) {
    if (typeof source !== "object" || source === null)
        return false;
    if ("data" in source || "__source" in source)
        return false;
    return typeof source.sha256 === "string" && typeof source.size === "number";
}
1021
// Duck-type check for an FsFileSource descriptor: tagged with
// __source === "fs-file" and carrying a string path.
function isFsFileSource(source) {
    if (typeof source !== "object" || source === null)
        return false;
    return source.__source === "fs-file" && typeof source.path === "string";
}
1027
// Duck-type check for a WHATWG ReadableStream: any object exposing both
// getReader() and tee() functions counts.
function isReadableStream(source) {
    if (typeof source !== "object" || source === null)
        return false;
    return typeof source.getReader === "function" && typeof source.tee === "function";
}
1033
// Drain a ReadableStream of Uint8Array chunks into one contiguous Uint8Array.
// The whole payload is buffered in memory.
async function readStreamFully(stream) {
    const reader = stream.getReader();
    const parts = [];
    for (;;) {
        const { done, value } = await reader.read();
        if (done)
            break;
        if (value)
            parts.push(value);
    }
    const total = parts.reduce((n, part) => n + part.byteLength, 0);
    const merged = new Uint8Array(total);
    let offset = 0;
    for (const part of parts) {
        merged.set(part, offset);
        offset += part.byteLength;
    }
    return merged;
}
1054
+ async function sha256Hex(bytes) {
1055
+ const buf = await crypto.subtle.digest("SHA-256", bytes);
1056
+ return Array.from(new Uint8Array(buf))
1057
+ .map((b) => b.toString(16).padStart(2, "0"))
1058
+ .join("");
1059
+ }
1060
// SHA-256 digest of `bytes`, base64-encoded.
async function sha256Base64(bytes) {
    const digest = await crypto.subtle.digest("SHA-256", bytes);
    return base64FromBytes(new Uint8Array(digest));
}
1064
// Convert a lowercase/uppercase hex string to its base64 encoding.
// Assumes well-formed even-length hex input.
// (Fix: replaced the deprecated String.prototype.substr with slice, and the
// global parseInt with Number.parseInt.)
function base64FromHex(hex) {
    const bytes = new Uint8Array(hex.length / 2);
    for (let i = 0; i < bytes.byteLength; i++) {
        bytes[i] = Number.parseInt(hex.slice(i * 2, i * 2 + 2), 16);
    }
    return base64FromBytes(bytes);
}
1071
// Base64-encode raw bytes. Prefers Node's Buffer when available; otherwise
// falls back to btoa over a binary string (browser/edge runtimes).
function base64FromBytes(bytes) {
    if (typeof Buffer !== "undefined") {
        return Buffer.from(bytes).toString("base64");
    }
    const chars = [];
    for (const byte of bytes) {
        chars.push(String.fromCharCode(byte));
    }
    return btoa(chars.join(""));
}
1080
// Promise-based delay of `ms` milliseconds.
function sleep(ms) {
    return new Promise((resolve) => {
        setTimeout(resolve, ms);
    });
}
1083
// File-extension → MIME-type table consumed by guessContentType. Text formats
// carry an explicit utf-8 charset; extensions not listed here fall back to
// application/octet-stream at the call site.
const CONTENT_TYPE_BY_EXT = {
    html: "text/html; charset=utf-8",
    htm: "text/html; charset=utf-8",
    css: "text/css; charset=utf-8",
    js: "text/javascript; charset=utf-8",
    mjs: "text/javascript; charset=utf-8",
    cjs: "text/javascript; charset=utf-8",
    json: "application/json",
    svg: "image/svg+xml",
    png: "image/png",
    jpg: "image/jpeg",
    jpeg: "image/jpeg",
    gif: "image/gif",
    webp: "image/webp",
    ico: "image/x-icon",
    woff: "font/woff",
    woff2: "font/woff2",
    ttf: "font/ttf",
    otf: "font/otf",
    txt: "text/plain; charset=utf-8",
    md: "text/markdown; charset=utf-8",
    xml: "application/xml",
    pdf: "application/pdf",
    wasm: "application/wasm",
    sql: "application/sql",
};
1109
// Guess a MIME type from a path's final extension (case-insensitive);
// application/octet-stream when the path has no dot or the extension is
// unknown.
function guessContentType(path) {
    const dot = path.lastIndexOf(".");
    if (dot === -1) {
        return "application/octet-stream";
    }
    const extension = path.slice(dot + 1).toLowerCase();
    return CONTENT_TYPE_BY_EXT[extension] ?? "application/octet-stream";
}
1116
+ // ─── Error translation ──────────────────────────────────────────────────────
1117
// Translate any thrown value from the deploy pipeline into a
// Run402DeployError, preserving gateway-supplied structure when present.
// `phase`, `planId`, `operationId` are caller-side fallbacks used when the
// error body does not identify them itself.
function translateDeployError(err, phase, planId, operationId) {
    // Already translated — pass through untouched.
    if (err instanceof Run402DeployError)
        return err;
    if (err instanceof ApiError) {
        const body = err.body && typeof err.body === "object"
            ? err.body
            : null;
        const gw = body && typeof body === "object" ? extractGatewayError(body) : null;
        if (gw) {
            // Structured gateway error — delegate to the richer translation.
            return translateGatewayError(gw, phase, planId, operationId);
        }
        // Unstructured API error: only 5xx responses are considered retryable.
        return new Run402DeployError(err.message, {
            code: "INTERNAL_ERROR",
            phase,
            retryable: err.status !== null && err.status >= 500,
            operationId,
            planId,
            status: err.status,
            body: err.body,
            context: err.context,
        });
    }
    // Transport failures are always safe to retry.
    if (err instanceof NetworkError) {
        return new Run402DeployError(err.message, {
            code: "NETWORK_ERROR",
            phase,
            retryable: true,
            operationId,
            planId,
            context: phase,
        });
    }
    // NOTE(review): the original comment here claimed other Run402Error
    // subclasses (PaymentRequired, Unauthorized, etc.) are re-thrown as-is,
    // but this branch actually WRAPS every remaining Error in a
    // Run402DeployError — confirm which behavior is intended.
    if (err instanceof Error) {
        return new Run402DeployError(err.message, {
            code: "INTERNAL_ERROR",
            phase,
            retryable: false,
            operationId,
            planId,
            context: phase,
        });
    }
    // Non-Error throw (string, object, …) — stringify and wrap.
    return new Run402DeployError(String(err), {
        code: "INTERNAL_ERROR",
        phase,
        retryable: false,
        operationId,
        planId,
        context: phase,
    });
}
1171
// Pull a structured gateway error out of a response body, or return null when
// the body carries none. Gateway bodies come in three shapes:
//   { error: { code, message?, phase?, ... } }   — nested object (returned as-is)
//   { code, message?, phase?, ... }              — top-level fields
//   { error: "<message>", code: "..." }          — older shape, error as string
// Only `code` is required; a missing message is synthesized from the code.
function extractGatewayError(body) {
    // Nested shape: hand back the gateway's own object untouched.
    if (body.error &&
        typeof body.error === "object" &&
        typeof body.error.code === "string") {
        return body.error;
    }
    if (typeof body.code !== "string") {
        return null;
    }
    // Top-level shape: assemble a normalized error object field by field.
    const message = typeof body.message === "string"
        ? body.message
        : typeof body.error === "string"
            ? body.error
            : `Deploy error: ${body.code}`;
    const gw = { code: body.code, message };
    for (const key of ["phase", "resource"]) {
        if (typeof body[key] === "string")
            gw[key] = body[key];
    }
    if (typeof body.retryable === "boolean")
        gw.retryable = body.retryable;
    if (body.fix !== undefined)
        gw.fix = body.fix;
    if (Array.isArray(body.logs))
        gw.logs = body.logs;
    if (typeof body.rolled_back === "boolean")
        gw.rolled_back = body.rolled_back;
    for (const key of ["operation_id", "plan_id"]) {
        if (typeof body[key] === "string")
            gw[key] = body[key];
    }
    return gw;
}
1215
// Convert a structured gateway error (from extractGatewayError) into a
// Run402DeployError. `phase`, `planId`, `operationId` are caller-side
// fallbacks used only when the gateway body omits the corresponding field.
// (Fix: dropped the redundant `(gw && …)` null-guards on operation_id/plan_id
// — `gw` is already known non-null past the early return below.)
function translateGatewayError(gw, phase, planId, operationId) {
    if (!gw) {
        return new Run402DeployError("Deploy failed without a structured error", {
            code: "INTERNAL_ERROR",
            phase,
            retryable: false,
            operationId,
            planId,
            context: phase,
        });
    }
    // Normalize the gateway code to the SCREAMING_SNAKE_CASE convention used
    // by `Run402DeployErrorCode`. Some gateway routes return lowercase
    // (`operation_not_found`) while services return uppercase
    // (`OPERATION_NOT_FOUND`); consumers expect the canonical uppercase form.
    const normalizedCode = gw.code.toUpperCase();
    // Prefer body-supplied ids — the gateway is the authoritative source for
    // which operation/plan an error belongs to. The caller-provided arguments
    // are only used as a fallback (e.g., commit failures where the call site
    // already knows the plan id but the body omits it).
    const opId = gw.operation_id ?? operationId;
    const pId = gw.plan_id ?? planId;
    return new Run402DeployError(gw.message ?? `Deploy failed: ${gw.code}`, {
        code: normalizedCode,
        phase: gw.phase ?? phase,
        resource: gw.resource ?? null,
        retryable: gw.retryable ?? false,
        operationId: opId,
        planId: pId,
        fix: (gw.fix ?? null),
        logs: gw.logs ?? null,
        rolledBack: gw.rolled_back ?? false,
        body: gw,
        context: phase,
    });
}
1251
+ //# sourceMappingURL=deploy.js.map