@cloudflare/sandbox 0.9.1 → 0.9.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bridge/index.js +2 -2
- package/dist/{contexts-D6kt6WyG.d.ts → contexts-D_shbnJs.d.ts} +32 -2
- package/dist/contexts-D_shbnJs.d.ts.map +1 -0
- package/dist/errors-CBi-O-pF.js +227 -0
- package/dist/errors-CBi-O-pF.js.map +1 -0
- package/dist/index.d.ts +25 -4
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +3 -3
- package/dist/openai/index.d.ts +1 -1
- package/dist/opencode/index.d.ts +2 -2
- package/dist/opencode/index.d.ts.map +1 -1
- package/dist/opencode/index.js +1 -1
- package/dist/{sandbox-PAYx1CcU.js → sandbox-BAuU-2a0.js} +553 -118
- package/dist/sandbox-BAuU-2a0.js.map +1 -0
- package/dist/{sandbox-Bb3n0SeC.d.ts → sandbox-CW4QeITP.d.ts} +221 -117
- package/dist/sandbox-CW4QeITP.d.ts.map +1 -0
- package/package.json +1 -1
- package/dist/contexts-D6kt6WyG.d.ts.map +0 -1
- package/dist/errors-LE3HHcRb.js +0 -169
- package/dist/errors-LE3HHcRb.js.map +0 -1
- package/dist/sandbox-Bb3n0SeC.d.ts.map +0 -1
- package/dist/sandbox-PAYx1CcU.js.map +0 -1
package/dist/{sandbox-PAYx1CcU.js → sandbox-BAuU-2a0.js}

@@ -1,5 +1,5 @@
 import { _ as GitLogger, b as getEnvString, c as parseSSEFrames, d as createNoOpLogger, f as TraceContext, g as DEFAULT_GIT_CLONE_TIMEOUT_MS, h as ResultImpl, i as isWSStreamChunk, l as shellEscape, m as Execution, n as isWSError, p as logCanonicalEvent, r as isWSResponse, t as generateRequestId, u as createLogger, v as extractRepoName, x as partitionEnvVars, y as filterEnvVars } from "./dist-B_eXrP83.js";
-import { n as ErrorCode, t as
+import { n as getHttpStatus, r as ErrorCode, t as getSuggestion } from "./errors-CBi-O-pF.js";
 import { Container, getContainer, switchPort } from "@cloudflare/containers";
 import { AwsClient } from "aws4fetch";
 import { RpcSession } from "capnweb";
@@ -11,8 +11,8 @@ import path from "node:path/posix";
 * Preserves all error information from container
 */
 var SandboxError = class extends Error {
-constructor(errorResponse) {
-super(errorResponse.message);
+constructor(errorResponse, options) {
+super(errorResponse.message, options);
 this.errorResponse = errorResponse;
 this.name = "SandboxError";
 }
@@ -664,6 +664,30 @@ var DesktopInvalidCoordinatesError = class extends SandboxError {
 this.name = "DesktopInvalidCoordinatesError";
 }
 };
+/**
+* Raised when the capnweb WebSocket session itself fails on the SDK side.
+* Unlike the rest of the SandboxError tree, the container never produces
+* this error — it is synthesised by `translateRPCError` from the plain
+* Errors capnweb / DeferredTransport raise when the connection dies.
+*
+* `kind` distinguishes the failure mode (peer close, upgrade failed, etc.)
+* so callers can branch on a structured code instead of substring-matching
+* on the message.
+*
+* Always retryable: the SDK opens a fresh connection on the next call.
+*/
+var RPCTransportError = class extends SandboxError {
+constructor(errorResponse, options) {
+super(errorResponse, options);
+this.name = "RPCTransportError";
+}
+get kind() {
+return this.errorResponse.context.kind;
+}
+get originalMessage() {
+return this.errorResponse.context.originalMessage;
+}
+};
 
 //#endregion
 //#region src/errors/adapter.ts
@@ -671,7 +695,7 @@ var DesktopInvalidCoordinatesError = class extends SandboxError {
 * Convert ErrorResponse to appropriate Error class
 * Simple switch statement - we trust the container sends correct context
 */
-function createErrorFromResponse(errorResponse) {
+function createErrorFromResponse(errorResponse, options) {
 switch (errorResponse.code) {
 case ErrorCode.FILE_NOT_FOUND: return new FileNotFoundError(errorResponse);
 case ErrorCode.FILE_EXISTS: return new FileExistsError(errorResponse);
@@ -727,6 +751,7 @@ function createErrorFromResponse(errorResponse) {
 case ErrorCode.DESKTOP_PROCESS_CRASHED: return new DesktopProcessCrashedError(errorResponse);
 case ErrorCode.DESKTOP_INVALID_OPTIONS: return new DesktopInvalidOptionsError(errorResponse);
 case ErrorCode.DESKTOP_INVALID_COORDINATES: return new DesktopInvalidCoordinatesError(errorResponse);
+case ErrorCode.RPC_TRANSPORT_ERROR: return new RPCTransportError(errorResponse, options);
 case ErrorCode.VALIDATION_FAILED: return new ValidationFailedError(errorResponse);
 case ErrorCode.INVALID_JSON_RESPONSE:
 case ErrorCode.UNKNOWN_ERROR:
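The new RPCTransportError carries a structured `kind`, so callers can branch on the failure mode instead of substring-matching messages. A minimal consumer sketch (TypeScript; `getSandbox`/`exec` follow the SDK's public API, `DurableObjectNamespace` comes from workers-types globals, and the single-retry policy is purely illustrative):

```ts
import { getSandbox, RPCTransportError } from "@cloudflare/sandbox";

export async function execWithRetry(ns: DurableObjectNamespace, cmd: string) {
  const sandbox = getSandbox(ns, "my-sandbox");
  try {
    return await sandbox.exec(cmd);
  } catch (err) {
    if (err instanceof RPCTransportError) {
      // Transport failures are always retryable: the SDK opens a fresh
      // connection on the next call.
      console.warn(`RPC transport failed (${err.kind}): ${err.originalMessage}`);
      return await sandbox.exec(cmd);
    }
    throw err;
  }
}
```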
@@ -1470,10 +1495,10 @@ var WebSocketTransport = class extends BaseTransport {
 //#endregion
 //#region src/clients/transport/factory.ts
 /**
-* Create a transport instance based on mode
+* Create a route-based compatibility transport instance based on mode.
 *
-*
-*
+* Selects the HTTP or custom WebSocket transport for the route-based client
+* layer.
 *
 * @example
 * ```typescript
@@ -1492,23 +1517,23 @@ var WebSocketTransport = class extends BaseTransport {
 */
 function createTransport(options) {
 switch (options.mode) {
+case "http": return new HttpTransport(options);
 case "websocket": return new WebSocketTransport(options);
-default: return new HttpTransport(options);
 }
 }
 
 //#endregion
 //#region src/clients/base-client.ts
 /**
-* Abstract base class
+* Abstract base class for route-based HTTP/WebSocket compatibility clients.
 *
-*
-* - HTTP and WebSocket modes transparently
-* - Automatic retry for 503 errors
-* - Streaming responses
+* Requests go through the Transport abstraction layer, which handles:
+* - HTTP and WebSocket route-based modes transparently
+* - Automatic retry for 503 errors while the container is starting
+* - Streaming responses for the existing route API
 *
-*
-*
+* DO-to-container control-channel capabilities live in `container-control/`.
+* This layer supports the route-based compatibility API.
 */
 var BaseHttpClient = class {
 options;
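Note the behavioral change in `createTransport`: with the `default:` branch removed, an unrecognized mode no longer silently falls back to HTTP. A self-contained sketch, with stub classes standing in for the SDK's internal transports:

```ts
interface TransportOptions { mode: "http" | "websocket" }

class HttpTransport { constructor(readonly options: TransportOptions) {} }
class WebSocketTransport { constructor(readonly options: TransportOptions) {} }

function createTransport(options: TransportOptions) {
  switch (options.mode) {
    // Both modes are explicit cases now; the old `default:` fallback to HTTP
    // is gone, so a value outside the union would yield undefined rather
    // than silently degrading to HTTP.
    case "http": return new HttpTransport(options);
    case "websocket": return new WebSocketTransport(options);
  }
}

const transport = createTransport({ mode: "websocket" }); // WebSocketTransport
```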
@@ -1629,7 +1654,7 @@ var BaseHttpClient = class {
 /**
 * Stream request handler
 *
-*
+* HTTP mode uses doFetch + handleStreamResponse for typed error handling.
 * For WebSocket mode, uses Transport's streaming support.
 *
 * @param path - The API path to call
@@ -1673,6 +1698,7 @@ var BackupClient = class extends BaseHttpClient {
 archivePath,
 gitignore: options?.gitignore ?? false,
 excludes: options?.excludes ?? [],
+compression: options?.compression,
 sessionId
 };
 return await this.post("/api/backup/create", data);
@@ -1691,6 +1717,12 @@ var BackupClient = class extends BaseHttpClient {
 };
 return await this.post("/api/backup/restore", data);
 }
+async uploadParts(request, sessionId) {
+return this.post("/api/backup/upload-parts", {
+...request,
+sessionId: sessionId ?? request.sessionId
+});
+}
 };
 
 //#endregion
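The request/response shapes for the new `/api/backup/upload-parts` route are not shown in this diff; the following interfaces are inferred from the `uploadBackupMultipart` caller later in the file, so treat them as a sketch rather than the authoritative types (those live in the package's .d.ts files):

```ts
interface UploadPartSpec {
  partNumber: number; // 1-based S3 part number
  url: string;        // presigned PUT URL for this part
  offset: number;     // byte offset into the archive
  size: number;       // bytes to upload from that offset
}

interface UploadPartsRequest {
  archivePath: string; // archive path inside the container
  parts: UploadPartSpec[];
  sessionId?: string;
}

interface UploadPartsResult {
  success: boolean;
  parts: { partNumber: number; etag: string }[]; // etags feed CompleteMultipartUpload
}
```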
@@ -2593,13 +2625,13 @@ var WatchClient = class extends BaseHttpClient {
 //#endregion
 //#region src/clients/sandbox-client.ts
 /**
-*
-*
+* Route-based compatibility sandbox client that composes all domain-specific
+* HTTP API clients.
 *
-*
-*
-* -
-*
+* This client supports the route-based HTTP and custom WebSocket transports.
+* The primary DO-to-container control path is ContainerControlClient under
+* `container-control/`. This client supports route-based compatibility,
+* debugging, local development, and fallback behavior.
 */
 var SandboxClient = class {
 backup;
@@ -2676,9 +2708,9 @@ var SandboxClient = class {
 /**
 * Stream a file directly to the container over a binary RPC channel.
 *
-* Requires the
-* method with the HTTP or WebSocket transports throws an error because
-* transports do not support binary streaming.
+* Requires the container-control path (`transport: 'rpc'`). Calling this
+* method with the HTTP or WebSocket route transports throws an error because
+* those transports do not support binary streaming.
 */
 writeFileStream(_path, _content, _sessionId) {
 throw new Error("writeFileStream requires the RPC transport. Enable it with transport: \"rpc\" in sandbox options.");
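Per the error message, binary streaming requires opting into the RPC control path. A sketch of the opt-in: the `transport: "rpc"` option name comes straight from the message above, while passing it through `getSandbox`'s options argument is an assumption about the public API surface:

```ts
import { getSandbox } from "@cloudflare/sandbox";

export async function writeLargeFile(ns: DurableObjectNamespace) {
  // Assumption: transport selection rides on getSandbox's options argument.
  const sandbox = getSandbox(ns, "my-sandbox", { transport: "rpc" });
  const content = new ReadableStream<Uint8Array>({
    start(controller) {
      controller.enqueue(new TextEncoder().encode("hello from the worker"));
      controller.close();
    },
  });
  // With the HTTP or WebSocket route transports this call throws instead.
  await sandbox.writeFileStream("/tmp/hello.bin", content);
}
```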
@@ -2712,9 +2744,17 @@ const BACKUP_ALLOWED_PREFIXES = [
 "/var/tmp",
 "/app"
 ];
+function normalizeBackupExcludePattern(pattern) {
+let normalized = pattern;
+while (normalized.startsWith("**/")) normalized = normalized.slice(3);
+while (normalized.includes("/**/")) normalized = normalized.replace(/\/\*\*\//g, "/");
+if (normalized.endsWith("/**")) normalized = normalized.slice(0, -3);
+if (!normalized || normalized === "**") return null;
+return normalized;
+}
 
 //#endregion
-//#region src/container-connection.ts
+//#region src/container-control/connection.ts
 const DEFAULT_CONNECT_TIMEOUT_MS = 3e4;
 /**
 * Manages a capnweb WebSocket RPC session to the container.
@@ -2723,7 +2763,7 @@ const DEFAULT_CONNECT_TIMEOUT_MS = 3e4;
 * transport. Calls made before `connect()` completes are queued in the
 * transport and flushed once the WebSocket is established.
 */
-var ContainerConnection = class {
+var ContainerControlConnection = class {
 stub;
 session;
 transport;
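The globstar normalization above is subtle, so here it is restated standalone with worked examples (mksquashfs has no globstar support, per the warning text later in this diff):

```ts
function normalizeBackupExcludePattern(pattern: string): string | null {
  let normalized = pattern;
  while (normalized.startsWith("**/")) normalized = normalized.slice(3);
  while (normalized.includes("/**/")) normalized = normalized.replace(/\/\*\*\//g, "/");
  if (normalized.endsWith("/**")) normalized = normalized.slice(0, -3);
  if (!normalized || normalized === "**") return null;
  return normalized;
}

normalizeBackupExcludePattern("**/node_modules"); // "node_modules"
normalizeBackupExcludePattern("src/**/dist");     // "src/dist"
normalizeBackupExcludePattern("build/**");        // "build"
normalizeBackupExcludePattern("**");              // null (skipped with a warning)
```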
@@ -2807,7 +2847,7 @@ var ContainerConnection = class {
 ws.addEventListener("close", () => {
 this.connected = false;
 this.ws = null;
-this.logger.debug("
+this.logger.debug("ContainerControlConnection WebSocket closed");
 });
 ws.addEventListener("error", () => {
 this.connected = false;
@@ -2816,12 +2856,12 @@ var ContainerConnection = class {
 this.ws = ws;
 this.transport.activate(ws);
 this.connected = true;
-this.logger.debug("
+this.logger.debug("ContainerControlConnection established", { port: this.port });
 } catch (error) {
 clearTimeout(timeout);
 this.connected = false;
 this.transport.abort(error);
-this.logger.error("
+this.logger.error("ContainerControlConnection failed", error instanceof Error ? error : new Error(String(error)));
 throw error;
 }
 }
@@ -2847,12 +2887,13 @@ var DeferredTransport = class {
 this.#receiveResolver = void 0;
 this.#receiveRejecter = void 0;
 } else this.#receiveQueue.push(event.data);
+else this.#fail(/* @__PURE__ */ new TypeError("Received non-string message from WebSocket."));
 });
 ws.addEventListener("close", (event) => {
 this.#fail(/* @__PURE__ */ new Error(`Peer closed WebSocket: ${event.code} ${event.reason}`));
 });
 ws.addEventListener("error", () => {
-this.#fail(/* @__PURE__ */ new Error("WebSocket connection failed"));
+this.#fail(/* @__PURE__ */ new Error("WebSocket connection failed."));
 });
 for (const msg of this.#sendQueue) ws.send(msg);
 this.#sendQueue = [];
@@ -2886,7 +2927,7 @@ var DeferredTransport = class {
 };
 
 //#endregion
-//#region src/
+//#region src/container-control/client.ts
 /** Close the idle capnweb WebSocket promptly so the DO can sleep. */
 const DEFAULT_IDLE_DISCONNECT_MS = 1e3;
 /**
@@ -2924,19 +2965,65 @@ const IDLE_EXPORT_THRESHOLD = 1;
 * string: `{"code":"...","message":"...","context":{...}}`.
 */
 function translateRPCError(error) {
-if (error instanceof Error)
-
-
+if (error instanceof Error) {
+let payload;
+try {
+payload = JSON.parse(error.message);
+} catch {}
+if (payload && typeof payload.code === "string" && typeof payload.message === "string") throw createErrorFromResponse({
 code: payload.code,
 message: payload.message,
 context: payload.context ?? {},
 httpStatus: getHttpStatus(payload.code),
 timestamp: (/* @__PURE__ */ new Date()).toISOString()
 });
-
-if (e instanceof Error && e !== error) throw e;
+throw createErrorFromResponse(buildTransportErrorResponse(error), { cause: error });
 }
-throw error;
+throw createErrorFromResponse(buildTransportErrorResponse(new Error(String(error))), { cause: error });
+}
+/**
+* Inspect a transport-level Error's message and produce the ErrorResponse
+* that becomes an RPCTransportError. Pattern strings are pinned to the exact
+* messages emitted by capnweb's WebSocketTransport (see capnweb's
+* src/websocket.ts) and our DeferredTransport in container-control/connection.ts —
+* notably the trailing period in `WebSocket connection failed.` matches
+* capnweb verbatim. The DeferredTransport tests in
+* tests/container-connection.test.ts pin the literal strings.
+*/
+function buildTransportErrorResponse(error) {
+const message = error.message;
+const errorName = error.name;
+let kind = "unknown";
+let closeCode;
+let closeReason;
+if (errorName === "TypeError") kind = "invalid_frame";
+else if (errorName === "SyntaxError") kind = "protocol_error";
+else {
+const peerCloseMatch = message.match(/^Peer closed WebSocket: (\d+) ?(.*)$/);
+if (peerCloseMatch) {
+kind = "peer_closed";
+closeCode = Number(peerCloseMatch[1]);
+closeReason = peerCloseMatch[2] || void 0;
+} else if (message === "WebSocket connection failed.") kind = "connection_failed";
+else if (message.startsWith("WebSocket upgrade failed")) kind = "upgrade_failed";
+else if (message === "No WebSocket in upgrade response") kind = "upgrade_failed";
+else if (message === "RPC session was shut down by disposing the main stub" || message === "RPC was canceled because the RpcPromise was disposed.") kind = "session_disposed";
+}
+const context = {
+kind,
+originalMessage: message,
+errorName,
+...closeCode !== void 0 ? { closeCode } : {},
+...closeReason !== void 0 ? { closeReason } : {}
+};
+return {
+code: ErrorCode.RPC_TRANSPORT_ERROR,
+message,
+context,
+httpStatus: getHttpStatus(ErrorCode.RPC_TRANSPORT_ERROR),
+suggestion: getSuggestion(ErrorCode.RPC_TRANSPORT_ERROR, context),
+timestamp: (/* @__PURE__ */ new Date()).toISOString()
+};
 }
 /**
 * Wrap a capnweb RPC stub so that every method call translates errors
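How the classifier above maps raw transport errors to `kind` values, written as test-style expectations (the `: 503` suffix on the upgrade message is illustrative; the other message strings are the literals matched by the code):

```ts
const cases: Array<[Error, string]> = [
  [new Error("Peer closed WebSocket: 1006 abnormal"), "peer_closed"],     // plus closeCode 1006, closeReason "abnormal"
  [new Error("WebSocket connection failed."), "connection_failed"],       // trailing period matches capnweb verbatim
  [new Error("WebSocket upgrade failed: 503"), "upgrade_failed"],         // prefix match; suffix illustrative
  [new TypeError("Received non-string message from WebSocket."), "invalid_frame"], // classified by error name
  [new SyntaxError("Unexpected token"), "protocol_error"],                // classified by error name
  [new Error("anything else"), "unknown"],                                // originalMessage preserved for debugging
];
```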
@@ -2944,7 +3031,7 @@ function translateRPCError(error) {
 * activity at call start.
 *
 * `onCallStarted` fires synchronously when an RPC method is invoked. The
-*
+* ContainerControlClient uses this to renew the DO's activity timeout
 * immediately, so even a call that completes entirely between two
 * busy-poll ticks still pushes the sleepAfter deadline forward.
 *
@@ -2970,20 +3057,19 @@ function wrapStub(stub, onCallStarted) {
 } });
 }
 /**
-* SandboxClient backed by direct capnweb RPC.
+* SandboxClient-compatible facade backed by direct capnweb RPC.
 *
-*
-*
-* bypassing the HTTP handler/router layer entirely.
+* All operations call the container's SandboxAPI control interface directly
+* over capnweb, bypassing the HTTP handler/router layer entirely.
 *
-* Manages its own WebSocket lifecycle: a fresh `
+* Manages its own WebSocket lifecycle: a fresh `ContainerControlConnection` is
 * created on demand and torn down after `idleDisconnectMs` of inactivity.
 * Busy/idle detection relies on `RpcSession.getStats()` which tracks all
 * in-flight RPC calls and stream exports — including long-lived streaming
 * RPCs that would be invisible to a simple per-call request counter (see
 * the file-level comment for the full rationale).
 */
-var RPCSandboxClient = class {
+var ContainerControlClient = class {
 connOptions;
 idleDisconnectMs;
 busyPollIntervalMs;
@@ -3017,13 +3103,12 @@ var RPCSandboxClient = class {
 this.onSessionIdle = options.onSessionIdle;
 }
 /**
-* Return the current connection, creating
-*
-* timer the first time a connection is materialized.
+* Return the current connection, creating one when the client is disconnected.
+* Starts the busy-poll timer the first time a connection is materialized.
 */
 getConnection() {
 if (!this.conn) {
-this.conn = new
+this.conn = new ContainerControlConnection(this.connOptions);
 this.startBusyPoll();
 }
 return this.conn;
@@ -4111,7 +4196,7 @@ function isLocalhostPattern(hostname) {
 * This file is auto-updated by .github/changeset-version.ts during releases
 * DO NOT EDIT MANUALLY - Changes will be overwritten on the next version bump
 */
-const SDK_VERSION = "0.9.1";
+const SDK_VERSION = "0.9.3";
 
 //#endregion
 //#region src/sandbox.ts
@@ -4122,6 +4207,63 @@ const BACKUP_CONTAINER_DIR = "/var/backups";
 const BACKUP_STORAGE_PREFIX = "backups";
 const BACKUP_ARCHIVE_OBJECT_NAME = "data.sqsh";
 const BACKUP_METADATA_OBJECT_NAME = "meta.json";
+const BACKUP_DEFAULT_COMPRESSION = "lz4";
+const BACKUP_DEFAULT_COMPRESS_THREADS = 8;
+const BACKUP_MULTIPART_MIN_SIZE = 10 * 1024 * 1024;
+const BACKUP_MULTIPART_TARGET_PARTS = 16;
+const BACKUP_MULTIPART_MIN_PART_SIZE = 5 * 1024 * 1024;
+const BACKUP_MULTIPART_MAX_PARTS = 64;
+const BACKUP_DOWNLOAD_PARALLEL_PARTS = 8;
+const BACKUP_DOWNLOAD_PARALLEL_MIN_SIZE = 10 * 1024 * 1024;
+const BACKUP_DOWNLOAD_MAX_PARTS = 64;
+/**
+* Calculate the optimal number of parts for multipart upload/download
+* based on archive size. Larger archives benefit from more parallelism.
+*/
+function calculatePartCount(sizeBytes, defaultParts, maxParts) {
+if (sizeBytes < 100 * 1024 * 1024) return defaultParts;
+if (sizeBytes < 1024 * 1024 * 1024) return Math.min(32, defaultParts * 2);
+return maxParts;
+}
+/**
+* Tagged template literal that shell-escapes every interpolated value.
+* Use for composing in-container scripts where the template body is
+* trusted shell and the interpolations are untrusted strings.
+*/
+function sh(strings, ...values) {
+let out = strings[0];
+for (let i = 0; i < values.length; i++) out += shellEscape(String(values[i])) + strings[i + 1];
+return out;
+}
+/**
+* Hex string of `bytes` random bytes (length = bytes * 2). Used for short
+* non-cryptographic identifiers — e.g. tempfile suffixes.
+*/
+function randomHex(bytes) {
+const buf = new Uint8Array(bytes);
+crypto.getRandomValues(buf);
+return Array.from(buf, (b) => b.toString(16).padStart(2, "0")).join("");
+}
+/**
+* Parse an array of `key=value` / bare-flag s3fs options into a Record.
+* Bare flags become `{ flag: true }`. Later entries overwrite earlier ones.
+*/
+function parseS3fsOptions(entries) {
+const result = {};
+for (const entry of entries) {
+const eq = entry.indexOf("=");
+if (eq === -1) result[entry] = true;
+else result[entry.slice(0, eq)] = entry.slice(eq + 1);
+}
+return result;
+}
+/**
+* Serialise an s3fs options Record into the comma-separated `-o` argument.
+* Boolean true emits the bare flag; false drops it.
+*/
+function serializeS3fsOptions(options) {
+return Object.entries(options).filter(([, v]) => v !== false).map(([k, v]) => v === true ? k : `${k}=${v}`).join(",");
+}
 function getNamespaceConfigurationCache(namespace) {
 const existing = sandboxConfigurationCache.get(namespace);
 if (existing) return existing;
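The `sh` tag and the s3fs option helpers above, exercised standalone; `shellEscape` here is a stand-in for the bundle's import, assuming conventional single-quote escaping:

```ts
const shellEscape = (s: string) => `'${s.replace(/'/g, `'\\''`)}'`;

function sh(strings: TemplateStringsArray, ...values: unknown[]): string {
  let out = strings[0];
  for (let i = 0; i < values.length; i++) out += shellEscape(String(values[i])) + strings[i + 1];
  return out;
}

sh`mkdir -p ${"/mnt/my bucket; rm -rf /"}`;
// → "mkdir -p '/mnt/my bucket; rm -rf /'"  (interpolations escaped, body trusted)

function parseS3fsOptions(entries: string[]): Record<string, string | boolean> {
  const result: Record<string, string | boolean> = {};
  for (const entry of entries) {
    const eq = entry.indexOf("=");
    if (eq === -1) result[entry] = true;       // bare flag
    else result[entry.slice(0, eq)] = entry.slice(eq + 1);
  }
  return result;
}

parseS3fsOptions(["use_path_request_style", "url=https://a.example", "url=https://b.example"]);
// → { use_path_request_style: true, url: "https://b.example" }  (later entries win)

function serializeS3fsOptions(options: Record<string, string | boolean>): string {
  return Object.entries(options)
    .filter(([, v]) => v !== false)
    .map(([k, v]) => (v === true ? k : `${k}=${v}`))
    .join(",");
}

serializeS3fsOptions({ use_path_request_style: true, ro: false, url: "https://a.example" });
// → "use_path_request_style,url=https://a.example"  (false flags dropped)
```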
@@ -4357,7 +4499,8 @@ var Sandbox = class Sandbox extends Container {
 return Math.max(12e4, startupBudgetMs + 3e4);
 }
 /**
-* Create
+* Create the route-based compatibility client with current HTTP/WebSocket
+* transport settings.
 */
 createSandboxClient() {
 return new SandboxClient({
@@ -4373,12 +4516,15 @@ var Sandbox = class Sandbox extends Container {
 });
 }
 /**
-* Create the appropriate client for
+* Create the appropriate client for the configured control path.
+*
+* `rpc` currently selects the primary container-control client. `http` and
+* `websocket` select the route-based compatibility client.
 */
 createClientForTransport(transport) {
 if (transport === "rpc") {
 const self = this;
-return new
+return new ContainerControlClient({
 stub: this,
 port: 3e3,
 logger: this.logger,
@@ -4663,6 +4809,7 @@ var Sandbox = class Sandbox extends Container {
 let mountError;
 let passwordFilePath;
 let provider = null;
+let dirExisted = true;
 try {
 this.validateMountOptions(bucket, mountPath, {
 ...options,
@@ -4694,6 +4841,7 @@ var Sandbox = class Sandbox extends Container {
 };
 this.activeMounts.set(mountPath, mountInfo);
 await this.createPasswordFile(passwordFilePath, bucket, credentials);
+dirExisted = (await this.execInternal(`test -d ${shellEscape(mountPath)}`)).exitCode === 0;
 await this.execInternal(`mkdir -p ${shellEscape(mountPath)}`);
 await this.executeS3FSMount(s3fsSource, mountPath, options, provider, passwordFilePath);
 mountInfo.mounted = true;
@@ -4701,6 +4849,12 @@ var Sandbox = class Sandbox extends Container {
 } catch (error) {
 mountError = error instanceof Error ? error : new Error(String(error));
 if (passwordFilePath) await this.deletePasswordFile(passwordFilePath);
+try {
+await this.execInternal(`mountpoint -q ${shellEscape(mountPath)} && fusermount -u ${shellEscape(mountPath)}`);
+} catch {}
+if (!dirExisted) try {
+await this.execInternal(`rmdir ${shellEscape(mountPath)} 2>/dev/null`);
+} catch {}
 this.activeMounts.delete(mountPath);
 throw error;
 } finally {
@@ -4817,16 +4971,31 @@ var Sandbox = class Sandbox extends Container {
 * Execute S3FS mount command
 */
 async executeS3FSMount(bucket, mountPath, options, provider, passwordFilePath, sessionId) {
-const
-
-
-
-
-
-
-
-const
-
+const s3fsOptions = {
+logfile: `/tmp/.s3fs-log-${randomHex(4)}`,
+...parseS3fsOptions(resolveS3fsOptions(provider)),
+...parseS3fsOptions(options.s3fsOptions ?? []),
+passwd_file: passwordFilePath,
+url: options.endpoint,
+...options.readOnly ? { ro: true } : {}
+};
+const logFile = s3fsOptions.logfile;
+const script = sh`(
+s3fs ${bucket} ${mountPath} -o ${serializeS3fsOptions(s3fsOptions)} >${logFile} 2>&1
+rc=$?
+if [ "$rc" -ne 0 ]; then tail -n 20 ${logFile} 2>/dev/null || true; exit 2; fi
+for _ in $(seq 1 60); do
+if mountpoint -q ${mountPath}; then exit 0; fi
+sleep 0.1
+done
+tail -n 20 ${logFile} 2>/dev/null || true
+exit 3
+)`;
+const result = await (sessionId ? (cmd) => this.execWithSession(cmd, sessionId, { origin: "internal" }) : (cmd) => this.execInternal(cmd))(script);
+if (result.exitCode === 0) return;
+const detail = result.stdout?.trim() || result.stderr?.trim() || "";
+if (result.exitCode === 2) throw new S3FSMountError(`S3FS mount failed: ${detail || "Unknown error"}`);
+throw new S3FSMountError(`S3FS mount failed: FUSE filesystem never appeared at ${mountPath}. ${detail ? `s3fs log: ${detail}` : "No s3fs log output captured. The s3fs daemon may have exited before writing logs."}`);
 }
 /**
 * In-flight `destroy()` promise. While set, concurrent callers coalesce
@@ -6362,6 +6531,58 @@ var Sandbox = class Sandbox extends Container {
 });
 return this.backupBucket;
 }
+normalizeBackupExcludes(excludes) {
+const normalizedExcludes = [];
+for (const pattern of excludes) {
+const normalized = normalizeBackupExcludePattern(pattern);
+if (normalized === null) {
+this.logger.warn("Exclude pattern reduced to empty after globstar normalization; skipping", { original: pattern });
+continue;
+}
+if (normalized !== pattern) this.logger.warn("Exclude pattern contained ** (globstar) which mksquashfs does not support; normalized automatically", {
+original: pattern,
+normalized
+});
+normalizedExcludes.push(normalized);
+}
+return normalizedExcludes;
+}
+resolveBackupCompression(compression) {
+if (compression !== void 0) {
+if (typeof compression !== "object" || compression === null) throw new InvalidBackupConfigError({
+message: "BackupOptions.compression must be an object",
+code: ErrorCode.INVALID_BACKUP_CONFIG,
+httpStatus: 400,
+context: { reason: "compression must be an object" },
+timestamp: (/* @__PURE__ */ new Date()).toISOString()
+});
+}
+const compressionOptions = compression;
+const format = compressionOptions?.format ?? BACKUP_DEFAULT_COMPRESSION;
+const threads = compressionOptions?.threads ?? BACKUP_DEFAULT_COMPRESS_THREADS;
+if (typeof format !== "string" || ![
+"gzip",
+"lz4",
+"zstd"
+].includes(format)) throw new InvalidBackupConfigError({
+message: "BackupOptions.compression.format must be one of: gzip, lz4, zstd",
+code: ErrorCode.INVALID_BACKUP_CONFIG,
+httpStatus: 400,
+context: { reason: "compression.format must be one of: gzip, lz4, zstd" },
+timestamp: (/* @__PURE__ */ new Date()).toISOString()
+});
+if (typeof threads !== "number" || !Number.isInteger(threads) || threads < 1) throw new InvalidBackupConfigError({
+message: "BackupOptions.compression.threads must be a positive integer",
+code: ErrorCode.INVALID_BACKUP_CONFIG,
+httpStatus: 400,
+context: { reason: "compression.threads must be a positive integer" },
+timestamp: (/* @__PURE__ */ new Date()).toISOString()
+});
+return {
+format,
+threads
+};
+}
 static PRESIGNED_URL_EXPIRY_SECONDS = 3600;
 /**
 * Create a unique, dedicated session for a single backup operation.
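A sketch of the new compression knob as validated by `resolveBackupCompression`; the public `createBackup` call shape is assumed from the option names in this diff. When the option is omitted, defaults are lz4 with 8 compressor threads:

```ts
import { getSandbox } from "@cloudflare/sandbox";

export async function backupWorkspace(ns: DurableObjectNamespace) {
  const sandbox = getSandbox(ns, "my-sandbox");
  return sandbox.createBackup({
    dir: "/app",
    excludes: ["**/node_modules"],               // globstars are normalized away with a warning
    compression: { format: "zstd", threads: 4 }, // format must be gzip | lz4 | zstd
  });
}
```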
@@ -6403,6 +6624,18 @@ var Sandbox = class Sandbox extends Container {
 };
 }
 /**
+* Generate a presigned GET URL for downloading an object from R2.
+* The container can curl this URL directly without credentials.
+*/
+async generatePresignedGetUrl(r2Key) {
+const { client, accountId, bucketName } = this.requirePresignedUrlSupport();
+const encodedBucket = encodeURIComponent(bucketName);
+const encodedKey = r2Key.split("/").map((seg) => encodeURIComponent(seg)).join("/");
+const url = new URL(`https://${accountId}.r2.cloudflarestorage.com/${encodedBucket}/${encodedKey}`);
+url.searchParams.set("X-Amz-Expires", String(Sandbox.PRESIGNED_URL_EXPIRY_SECONDS));
+return (await client.sign(new Request(url), { aws: { signQuery: true } })).url;
+}
+/**
 * Generate a presigned PUT URL for uploading an object to R2.
 * The container can curl PUT to this URL directly without credentials.
 */
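The presigning pattern shared by the GET and PUT variants, standalone: aws4fetch's `AwsClient.sign` with `signQuery: true` moves the SigV4 signature into the query string, so the container can curl the URL without holding credentials. Account ID, bucket, and key are placeholders:

```ts
import { AwsClient } from "aws4fetch";

const client = new AwsClient({
  accessKeyId: "<r2-access-key-id>",
  secretAccessKey: "<r2-secret-access-key>",
});

async function presignGet(accountId: string, bucket: string, key: string) {
  const url = new URL(
    `https://${accountId}.r2.cloudflarestorage.com/${encodeURIComponent(bucket)}/` +
      key.split("/").map(encodeURIComponent).join("/")
  );
  url.searchParams.set("X-Amz-Expires", "3600"); // 1 hour, matching PRESIGNED_URL_EXPIRY_SECONDS
  const signed = await client.sign(new Request(url), { aws: { signQuery: true } });
  return signed.url;
}
```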
@@ -6462,51 +6695,248 @@ var Sandbox = class Sandbox extends Container {
 }
 }
 /**
-*
+* Generate a presigned PUT URL for a single part in a multipart upload.
 */
-async
-const { accountId, bucketName } = this.requirePresignedUrlSupport();
-const
-const
-const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+async generatePresignedPartUrl(r2Key, uploadId, partNumber) {
+const { client, accountId, bucketName } = this.requirePresignedUrlSupport();
+const encodedBucket = encodeURIComponent(bucketName);
+const encodedKey = r2Key.split("/").map((seg) => encodeURIComponent(seg)).join("/");
+const url = new URL(`https://${accountId}.r2.cloudflarestorage.com/${encodedBucket}/${encodedKey}`);
+url.searchParams.set("X-Amz-Expires", String(Sandbox.PRESIGNED_URL_EXPIRY_SECONDS));
+url.searchParams.set("partNumber", String(partNumber));
+url.searchParams.set("uploadId", uploadId);
+return (await client.sign(new Request(url, { method: "PUT" }), { aws: { signQuery: true } })).url;
+}
+/**
+* Upload a backup archive to R2 using parallel multipart upload.
+* Uses the S3-compatible API exclusively for create/complete/abort so that
+* the uploadId is in the same namespace as the presigned part PUT URLs.
+*/
+async uploadBackupMultipart(archivePath, r2Key, sizeBytes, backupId, dir, backupSession) {
+const targetParts = calculatePartCount(sizeBytes, BACKUP_MULTIPART_TARGET_PARTS, BACKUP_MULTIPART_MAX_PARTS);
+const numParts = Math.min(targetParts, Math.floor(sizeBytes / BACKUP_MULTIPART_MIN_PART_SIZE));
+if (numParts <= 1) return this.uploadBackupPresigned(archivePath, r2Key, sizeBytes, backupId, dir, backupSession);
+const { client, accountId, bucketName } = this.requirePresignedUrlSupport();
+const objectUrl = `https://${accountId}.r2.cloudflarestorage.com/${encodeURIComponent(bucketName)}/${r2Key.split("/").map((seg) => encodeURIComponent(seg)).join("/")}`;
+const createResp = await client.fetch(`${objectUrl}?uploads`, { method: "POST" });
+if (!createResp.ok) throw new BackupCreateError({
+message: `Failed to initiate multipart upload: HTTP ${createResp.status}`,
+code: ErrorCode.BACKUP_CREATE_FAILED,
+httpStatus: 500,
+context: {
+dir,
+backupId
+},
+timestamp: (/* @__PURE__ */ new Date()).toISOString()
+});
+const uploadId = (await createResp.text()).match(/<UploadId>([^<]+)<\/UploadId>/)?.[1];
+if (!uploadId) throw new BackupCreateError({
+message: "Multipart upload response did not contain an UploadId",
+code: ErrorCode.BACKUP_CREATE_FAILED,
+httpStatus: 500,
+context: {
+dir,
+backupId
+},
+timestamp: (/* @__PURE__ */ new Date()).toISOString()
 });
-const
-
-bucket: s3fsSource,
-mountPath,
-endpoint,
-provider: "r2",
-passwordFilePath,
-mounted: false
+const abortMultipart = async () => {
+await client.fetch(`${objectUrl}?uploadId=${encodeURIComponent(uploadId)}`, { method: "DELETE" }).catch(() => {});
 };
-this.activeMounts.set(mountPath, mountInfo);
 try {
-
-await
-
-
+const partSize = Math.ceil(sizeBytes / numParts);
+const parts = await Promise.all(Array.from({ length: numParts }, (_, i) => ({
+partNumber: i + 1,
+url: "",
+offset: i * partSize,
+size: i === numParts - 1 ? sizeBytes - i * partSize : partSize
+})).map(async (part) => ({
+...part,
+url: await this.generatePresignedPartUrl(r2Key, uploadId, part.partNumber)
+})));
+let uploadResult;
+try {
+uploadResult = await this.client.backup.uploadParts({
+archivePath,
+parts,
+sessionId: backupSession
+});
+} catch (err) {
+if (err instanceof SandboxError && err.errorResponse.httpStatus === 404) {
+await abortMultipart();
+return this.uploadBackupPresigned(archivePath, r2Key, sizeBytes, backupId, dir, backupSession);
+}
+throw err;
+}
+if (!uploadResult.success || uploadResult.parts.length !== numParts) throw new BackupCreateError({
+message: `Multipart upload returned ${uploadResult.parts.length} of ${numParts} parts`,
+code: ErrorCode.BACKUP_CREATE_FAILED,
+httpStatus: 500,
+context: {
+dir,
+backupId
+},
+timestamp: (/* @__PURE__ */ new Date()).toISOString()
+});
+const completeXml = [
+"<CompleteMultipartUpload>",
+...uploadResult.parts.map((p) => `<Part><PartNumber>${p.partNumber}</PartNumber><ETag>${p.etag}</ETag></Part>`),
+"</CompleteMultipartUpload>"
+].join("");
+const completeResp = await client.fetch(`${objectUrl}?uploadId=${encodeURIComponent(uploadId)}`, {
+method: "POST",
+headers: { "Content-Type": "application/xml" },
+body: completeXml
+});
+if (!completeResp.ok) {
+const body = await completeResp.text().catch(() => "");
+throw new BackupCreateError({
+message: `Multipart upload completion failed: HTTP ${completeResp.status} ${body}`,
+code: ErrorCode.BACKUP_CREATE_FAILED,
+httpStatus: 500,
+context: {
+dir,
+backupId
+},
+timestamp: (/* @__PURE__ */ new Date()).toISOString()
+});
+}
+const head = await this.requireBackupBucket().head(r2Key);
+if (!head || head.size !== sizeBytes) throw new BackupCreateError({
+message: `Multipart upload verification failed: expected ${sizeBytes} bytes, got ${head?.size ?? 0}`,
+code: ErrorCode.BACKUP_CREATE_FAILED,
+httpStatus: 500,
+context: {
+dir,
+backupId
+},
+timestamp: (/* @__PURE__ */ new Date()).toISOString()
+});
 } catch (error) {
-await
-this.activeMounts.delete(mountPath);
+await abortMultipart();
 throw error;
 }
 }
 /**
+* Download a backup archive from R2 via presigned GET URL.
+* For archives >= BACKUP_DOWNLOAD_PARALLEL_MIN_SIZE, uses BACKUP_DOWNLOAD_PARALLEL_PARTS
+* concurrent curl processes (each downloading a byte-range) to maximise both
+* network and disk-write throughput. Parts are written into a pre-sized file
+* with dd using byte offsets, then atomically moved to the final path.
+*/
+async downloadBackupParallel(archivePath, r2Key, expectedSize, backupId, dir, backupSession) {
+const presignedUrl = await this.generatePresignedGetUrl(r2Key);
+await this.execWithSession(`mkdir -p ${BACKUP_CONTAINER_DIR}`, backupSession, { origin: "internal" });
+const tmpPath = `${archivePath}.tmp`;
+if (expectedSize < BACKUP_DOWNLOAD_PARALLEL_MIN_SIZE) {
+const curlCmd = [
+"curl -sSf",
+"--connect-timeout 10",
+"--max-time 1800",
+"--retry 2",
+"--retry-max-time 60",
+`-o ${shellEscape(tmpPath)}`,
+shellEscape(presignedUrl)
+].join(" ");
+const result = await this.execWithSession(curlCmd, backupSession, {
+timeout: 181e4,
+origin: "internal"
+});
+if (result.exitCode !== 0) {
+await this.execWithSession(`rm -f ${shellEscape(tmpPath)}`, backupSession, { origin: "internal" }).catch(() => {});
+throw new BackupRestoreError({
+message: `Presigned URL download failed (exit code ${result.exitCode}): ${result.stderr}`,
+code: ErrorCode.BACKUP_RESTORE_FAILED,
+httpStatus: 500,
+context: {
+dir,
+backupId
+},
+timestamp: (/* @__PURE__ */ new Date()).toISOString()
+});
+}
+} else {
+const numParts = calculatePartCount(expectedSize, BACKUP_DOWNLOAD_PARALLEL_PARTS, BACKUP_DOWNLOAD_MAX_PARTS);
+const partSize = Math.floor(expectedSize / numParts);
+const startLines = Array.from({ length: numParts }, (_, i) => {
+const start = i * partSize;
+return {
+start,
+range: `${start}-${i < numParts - 1 ? start + partSize - 1 : expectedSize - 1}`
+};
+}).map(({ start, range }) => [
+"curl -sSf",
+"--connect-timeout 10",
+"--max-time 1800",
+`-H ${shellEscape(`Range: bytes=${range}`)}`,
+shellEscape(presignedUrl),
+"|",
+"dd",
+`of=${shellEscape(tmpPath)}`,
+"oflag=seek_bytes",
+`seek=${start}`,
+"conv=notrunc",
+"2>/dev/null"
+].join(" ")).map((cmd, i) => `(set -o pipefail; ${cmd}) & J${i}=$!`);
+const waitLines = Array.from({ length: numParts }, (_, i) => `wait $J${i}; E${i}=$?`);
+const exitVars = Array.from({ length: numParts }, (_, i) => `$E${i}`);
+const script = [
+`rm -f ${shellEscape(tmpPath)}`,
+`truncate -s ${expectedSize} ${shellEscape(tmpPath)}`,
+...startLines,
+...waitLines,
+`FAILED=$(( ${exitVars.join(" + ")} ))`,
+`if [ "$FAILED" -ne 0 ]; then rm -f ${shellEscape(tmpPath)}; exit 1; fi`
+].join("; ");
+const result = await this.execWithSession(script, backupSession, {
+timeout: 181e4,
+origin: "internal"
+});
+if (result.exitCode !== 0) {
+await this.execWithSession(`rm -f ${shellEscape(tmpPath)}`, backupSession, { origin: "internal" }).catch(() => {});
+throw new BackupRestoreError({
+message: `Parallel download failed (exit code ${result.exitCode}): ${result.stderr}`,
+code: ErrorCode.BACKUP_RESTORE_FAILED,
+httpStatus: 500,
+context: {
+dir,
+backupId
+},
+timestamp: (/* @__PURE__ */ new Date()).toISOString()
+});
+}
+}
+const sizeCheck = await this.execWithSession(`stat -c %s ${shellEscape(tmpPath)}`, backupSession, { origin: "internal" });
+const actualSize = parseInt(sizeCheck.stdout.trim(), 10);
+if (actualSize !== expectedSize) {
+await this.execWithSession(`rm -f ${shellEscape(tmpPath)}`, backupSession, { origin: "internal" }).catch(() => {});
+throw new BackupRestoreError({
+message: `Downloaded archive size mismatch: expected ${expectedSize}, got ${actualSize}`,
+code: ErrorCode.BACKUP_RESTORE_FAILED,
+httpStatus: 500,
+context: {
+dir,
+backupId
+},
+timestamp: (/* @__PURE__ */ new Date()).toISOString()
+});
+}
+const mvResult = await this.execWithSession(`mv ${shellEscape(tmpPath)} ${shellEscape(archivePath)}`, backupSession, { origin: "internal" });
+if (mvResult.exitCode !== 0) {
+await this.execWithSession(`rm -f ${shellEscape(tmpPath)}`, backupSession, { origin: "internal" }).catch(() => {});
+throw new BackupRestoreError({
+message: `Failed to finalize downloaded archive: ${mvResult.stderr}`,
+code: ErrorCode.BACKUP_RESTORE_FAILED,
+httpStatus: 500,
+context: {
+dir,
+backupId
+},
+timestamp: (/* @__PURE__ */ new Date()).toISOString()
+});
+}
+}
+/**
 * Serialize backup operations on this sandbox instance.
 * Concurrent backup/restore calls are queued so the multi-step
 * create-archive → read → upload (or mount → extract) flow
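The part-sizing heuristic restated with worked values (upload path: `defaultParts` is BACKUP_MULTIPART_TARGET_PARTS, 16, and `maxParts` is 64):

```ts
function calculatePartCount(sizeBytes: number, defaultParts: number, maxParts: number): number {
  if (sizeBytes < 100 * 1024 * 1024) return defaultParts;                      // < 100 MiB
  if (sizeBytes < 1024 * 1024 * 1024) return Math.min(32, defaultParts * 2);   // < 1 GiB
  return maxParts;
}

const MiB = 1024 * 1024;
calculatePartCount(50 * MiB, 16, 64);   // 16
calculatePartCount(500 * MiB, 16, 64);  // 32
calculatePartCount(2048 * MiB, 16, 64); // 64

// uploadBackupMultipart then clamps by the 5 MiB minimum part size: a 12 MiB
// archive gets min(16, floor(12 / 5)) = 2 parts, and archives under the
// 10 MiB multipart threshold use a single presigned PUT instead.
```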
@@ -6546,7 +6976,7 @@ var Sandbox = class Sandbox extends Container {
 async doCreateBackup(options) {
 const bucket = this.requireBackupBucket();
 this.requirePresignedUrlSupport();
-const { dir, name, ttl = BACKUP_DEFAULT_TTL_SECONDS, gitignore = false, excludes = [] } = options;
+const { dir, name, ttl = BACKUP_DEFAULT_TTL_SECONDS, gitignore = false, excludes = [], compression, multipart = true } = options;
 const backupStartTime = Date.now();
 let backupId;
 let sizeBytes;
@@ -6592,12 +7022,15 @@ var Sandbox = class Sandbox extends Container {
 context: { reason: "excludes must be an array of strings" },
 timestamp: (/* @__PURE__ */ new Date()).toISOString()
 });
+const resolvedCompression = this.resolveBackupCompression(compression);
+const normalizedExcludes = this.normalizeBackupExcludes(excludes);
 backupSession = await this.ensureBackupSession();
 backupId = crypto.randomUUID();
 const archivePath = `${BACKUP_CONTAINER_DIR}/${backupId}.sqsh`;
 const createResult = await this.client.backup.createArchive(dir, archivePath, backupSession, {
 gitignore,
-excludes
+excludes: normalizedExcludes,
+compression: resolvedCompression
 });
 if (!createResult.success) throw new BackupCreateError({
 message: "Container failed to create backup archive",
@@ -6612,7 +7045,8 @@ var Sandbox = class Sandbox extends Container {
 sizeBytes = createResult.sizeBytes;
 const r2Key = `${BACKUP_STORAGE_PREFIX}/${backupId}/${BACKUP_ARCHIVE_OBJECT_NAME}`;
 const metaKey = `${BACKUP_STORAGE_PREFIX}/${backupId}/${BACKUP_METADATA_OBJECT_NAME}`;
-await this.
+if (multipart && createResult.sizeBytes >= BACKUP_MULTIPART_MIN_SIZE) await this.uploadBackupMultipart(archivePath, r2Key, createResult.sizeBytes, backupId, dir, backupSession);
+else await this.uploadBackupPresigned(archivePath, r2Key, createResult.sizeBytes, backupId, dir, backupSession);
 const metadata = {
 id: backupId,
 dir,
@@ -6659,7 +7093,7 @@ var Sandbox = class Sandbox extends Container {
 * Archive format is identical to production (squashfs + meta.json).
 */
 async doCreateBackupLocal(options) {
-const { dir, name, ttl = BACKUP_DEFAULT_TTL_SECONDS, gitignore = false, excludes = [] } = options;
+const { dir, name, ttl = BACKUP_DEFAULT_TTL_SECONDS, gitignore = false, excludes = [], compression } = options;
 const backupStartTime = Date.now();
 let backupId;
 let sizeBytes;
@@ -6713,12 +7147,15 @@ var Sandbox = class Sandbox extends Container {
 context: { reason: "excludes must be an array of strings" },
 timestamp: (/* @__PURE__ */ new Date()).toISOString()
 });
+const resolvedCompression = this.resolveBackupCompression(compression);
+const normalizedExcludes = this.normalizeBackupExcludes(excludes);
 backupSession = await this.ensureBackupSession();
 backupId = crypto.randomUUID();
 const archivePath = `${BACKUP_CONTAINER_DIR}/${backupId}.sqsh`;
 const createResult = await this.client.backup.createArchive(dir, archivePath, backupSession, {
 gitignore,
-excludes
+excludes: normalizedExcludes,
+compression: resolvedCompression
 });
 if (!createResult.success) throw new BackupCreateError({
 message: "Container failed to create backup archive",
@@ -6884,7 +7321,8 @@ var Sandbox = class Sandbox extends Container {
 timestamp: (/* @__PURE__ */ new Date()).toISOString()
 });
 const r2Key = `${BACKUP_STORAGE_PREFIX}/${id}/${BACKUP_ARCHIVE_OBJECT_NAME}`;
-
+const archiveHead = await bucket.head(r2Key);
+if (!archiveHead) throw new BackupNotFoundError({
 message: `Backup archive not found in R2: ${id}. The archive may have been deleted by R2 lifecycle rules.`,
 code: ErrorCode.BACKUP_NOT_FOUND,
 httpStatus: 404,
@@ -6892,19 +7330,12 @@ var Sandbox = class Sandbox extends Container {
 timestamp: (/* @__PURE__ */ new Date()).toISOString()
 });
 backupSession = await this.ensureBackupSession();
-const
-const
-const mountGlob = `/var/backups/mounts/r2mount/${id}/data`;
+const archivePath = `${BACKUP_CONTAINER_DIR}/${id}.sqsh`;
+const mountGlob = `${BACKUP_CONTAINER_DIR}/mounts/${id}`;
 await this.execWithSession(`/usr/bin/fusermount3 -uz ${shellEscape(dir)} 2>/dev/null || true`, backupSession, { origin: "internal" }).catch(() => {});
 await this.execWithSession(`for d in ${shellEscape(mountGlob)}_*/lower ${shellEscape(mountGlob)}/lower; do [ -d "$d" ] && /usr/bin/fusermount3 -uz "$d" 2>/dev/null; done; true`, backupSession, { origin: "internal" }).catch(() => {});
-await this.execWithSession(
-
-if (previousBackupMount?.mountType === "fuse") {
-previousBackupMount.mounted = false;
-this.activeMounts.delete(r2MountPath);
-await this.deletePasswordFile(previousBackupMount.passwordFilePath);
-}
-await this.mountBackupR2(r2MountPath, `backups/${id}/`, backupSession);
+const sizeCheck = await this.execWithSession(`stat -c %s ${shellEscape(archivePath)} 2>/dev/null || echo 0`, backupSession, { origin: "internal" }).catch(() => ({ stdout: "0" }));
+if (Number.parseInt((sizeCheck.stdout ?? "0").trim(), 10) !== archiveHead.size) await this.downloadBackupParallel(archivePath, r2Key, archiveHead.size, id, dir, backupSession);
 if (!(await this.client.backup.restoreArchive(dir, archivePath, backupSession)).success) throw new BackupRestoreError({
 message: "Container failed to restore backup archive",
 code: ErrorCode.BACKUP_RESTORE_FAILED,
@@ -6923,6 +7354,10 @@ var Sandbox = class Sandbox extends Container {
 };
 } catch (error) {
 caughtError = error instanceof Error ? error : new Error(String(error));
+if (id && backupSession) {
+const cleanupPath = `${BACKUP_CONTAINER_DIR}/${id}.sqsh`;
+await this.execWithSession(`rm -f ${shellEscape(cleanupPath)}`, backupSession, { origin: "internal" }).catch(() => {});
+}
 throw error;
 } finally {
 if (backupSession) await this.client.utils.deleteSession(backupSession).catch(() => {});
@@ -7070,5 +7505,5 @@ var Sandbox = class Sandbox extends Container {
 };
 
 //#endregion
-export { DesktopInvalidOptionsError as A, CommandClient as C, BackupNotFoundError as D, BackupExpiredError as E, InvalidBackupConfigError as F, ProcessExitedBeforeReadyError as I, ProcessReadyTimeoutError as L, DesktopProcessCrashedError as M, DesktopStartFailedError as N, BackupRestoreError as O, DesktopUnavailableError as P,
-//# sourceMappingURL=sandbox-
+export { DesktopInvalidOptionsError as A, CommandClient as C, BackupNotFoundError as D, BackupExpiredError as E, InvalidBackupConfigError as F, ProcessExitedBeforeReadyError as I, ProcessReadyTimeoutError as L, DesktopProcessCrashedError as M, DesktopStartFailedError as N, BackupRestoreError as O, DesktopUnavailableError as P, RPCTransportError as R, DesktopClient as S, BackupCreateError as T, UtilityClient as _, BucketMountError as a, GitClient as b, MissingCredentialsError as c, parseSSEStream as d, responseToAsyncIterable as f, SandboxClient as g, streamFile as h, proxyTerminal as i, DesktopNotStartedError as j, DesktopInvalidCoordinatesError as k, S3FSMountError as l, collectFile as m, getSandbox as n, BucketUnmountError as o, CodeInterpreter as p, proxyToSandbox as r, InvalidMountConfigError as s, Sandbox as t, asyncIterableToSSEStream as u, ProcessClient as v, BackupClient as w, FileClient as x, PortClient as y, SessionTerminatedError as z };
+//# sourceMappingURL=sandbox-BAuU-2a0.js.map