@cloudflare/sandbox 0.9.2 → 0.9.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bridge/index.js +1 -1
- package/dist/index.d.ts +2 -2
- package/dist/index.js +1 -1
- package/dist/openai/index.d.ts +1 -1
- package/dist/opencode/index.d.ts +1 -1
- package/dist/{sandbox-CReFGUtF.js → sandbox-BAuU-2a0.js} +444 -107
- package/dist/sandbox-BAuU-2a0.js.map +1 -0
- package/dist/{sandbox-YMrVC62F.d.ts → sandbox-CW4QeITP.d.ts} +220 -117
- package/dist/sandbox-CW4QeITP.d.ts.map +1 -0
- package/package.json +1 -1
- package/dist/sandbox-CReFGUtF.js.map +0 -1
- package/dist/sandbox-YMrVC62F.d.ts.map +0 -1
@@ -1495,10 +1495,10 @@ var WebSocketTransport = class extends BaseTransport {
 //#endregion
 //#region src/clients/transport/factory.ts
 /**
- * Create a transport instance based on mode
+ * Create a route-based compatibility transport instance based on mode.
  *
- *
- *
+ * Selects the HTTP or custom WebSocket transport for the route-based client
+ * layer.
  *
  * @example
  * ```typescript
@@ -1517,23 +1517,23 @@ var WebSocketTransport = class extends BaseTransport {
  */
 function createTransport(options) {
 	switch (options.mode) {
+		case "http": return new HttpTransport(options);
 		case "websocket": return new WebSocketTransport(options);
-		default: return new HttpTransport(options);
 	}
 }
 
 //#endregion
 //#region src/clients/base-client.ts
 /**
- * Abstract base class
+ * Abstract base class for route-based HTTP/WebSocket compatibility clients.
  *
- *
- * - HTTP and WebSocket modes transparently
- * - Automatic retry for 503 errors
- * - Streaming responses
+ * Requests go through the Transport abstraction layer, which handles:
+ * - HTTP and WebSocket route-based modes transparently
+ * - Automatic retry for 503 errors while the container is starting
+ * - Streaming responses for the existing route API
  *
- *
- *
+ * DO-to-container control-channel capabilities live in `container-control/`.
+ * This layer supports the route-based compatibility API.
  */
 var BaseHttpClient = class {
 	options;
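The `createTransport` change above replaces the 0.9.2 catch-all `default:` branch (which silently fell back to HTTP) with an explicit `"http"` case. A minimal sketch of the resulting selection semantics; the factory is internal to the bundle, so the names below are illustrative stand-ins rather than package exports:

```typescript
// Illustrative only: mirrors the switch above without the real transport classes.
type RouteTransportMode = "http" | "websocket";

function pickTransport(mode: RouteTransportMode | string): string | undefined {
  switch (mode) {
    case "http": return "HttpTransport";
    case "websocket": return "WebSocketTransport";
  }
  // No default branch any more: an unrecognized mode now yields undefined
  // instead of silently constructing an HttpTransport as 0.9.2 did.
  return undefined;
}

pickTransport("http"); // "HttpTransport"
pickTransport("rpc");  // undefined ("rpc" is handled by createClientForTransport, not this factory)
```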
@@ -1654,7 +1654,7 @@ var BaseHttpClient = class {
 	/**
 	 * Stream request handler
 	 *
-	 *
+	 * HTTP mode uses doFetch + handleStreamResponse for typed error handling.
 	 * For WebSocket mode, uses Transport's streaming support.
 	 *
 	 * @param path - The API path to call
@@ -1698,6 +1698,7 @@ var BackupClient = class extends BaseHttpClient {
 			archivePath,
 			gitignore: options?.gitignore ?? false,
 			excludes: options?.excludes ?? [],
+			compression: options?.compression,
 			sessionId
 		};
 		return await this.post("/api/backup/create", data);
@@ -1716,6 +1717,12 @@ var BackupClient = class extends BaseHttpClient {
 		};
 		return await this.post("/api/backup/restore", data);
 	}
+	async uploadParts(request, sessionId) {
+		return this.post("/api/backup/upload-parts", {
+			...request,
+			sessionId: sessionId ?? request.sessionId
+		});
+	}
 };
 
 //#endregion
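The new `BackupClient.uploadParts` posts to `/api/backup/upload-parts`. Its request shape is visible at the call site in `uploadBackupMultipart` further down; a typed sketch of that shape, with interface names that are assumptions rather than package exports:

```typescript
// Shape inferred from the uploadBackupMultipart call site later in this diff.
interface UploadPart {
  partNumber: number; // 1-based S3 part number
  url: string;        // presigned PUT URL for this part
  offset: number;     // byte offset into the archive
  size: number;       // bytes to read starting at offset
}

interface UploadPartsRequest {
  archivePath: string; // archive path inside the container
  parts: UploadPart[];
  sessionId?: string;  // uploadParts(request, sessionId) lets the caller override this
}
```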
@@ -2618,13 +2625,13 @@ var WatchClient = class extends BaseHttpClient {
 //#endregion
 //#region src/clients/sandbox-client.ts
 /**
- *
- *
+ * Route-based compatibility sandbox client that composes all domain-specific
+ * HTTP API clients.
  *
- *
- *
- * -
- *
+ * This client supports the route-based HTTP and custom WebSocket transports.
+ * The primary DO-to-container control path is ContainerControlClient under
+ * `container-control/`. This client supports route-based compatibility,
+ * debugging, local development, and fallback behavior.
  */
 var SandboxClient = class {
 	backup;
@@ -2701,9 +2708,9 @@ var SandboxClient = class {
 	/**
 	 * Stream a file directly to the container over a binary RPC channel.
 	 *
-	 * Requires the
-	 * method with the HTTP or WebSocket transports throws an error because
-	 * transports do not support binary streaming.
+	 * Requires the container-control path (`transport: 'rpc'`). Calling this
+	 * method with the HTTP or WebSocket route transports throws an error because
+	 * those transports do not support binary streaming.
 	 */
 	writeFileStream(_path, _content, _sessionId) {
 		throw new Error("writeFileStream requires the RPC transport. Enable it with transport: \"rpc\" in sandbox options.");
@@ -2747,7 +2754,7 @@ function normalizeBackupExcludePattern(pattern) {
 }
 
 //#endregion
-//#region src/container-connection.ts
+//#region src/container-control/connection.ts
 const DEFAULT_CONNECT_TIMEOUT_MS = 3e4;
 /**
  * Manages a capnweb WebSocket RPC session to the container.
@@ -2756,7 +2763,7 @@ const DEFAULT_CONNECT_TIMEOUT_MS = 3e4;
  * transport. Calls made before `connect()` completes are queued in the
  * transport and flushed once the WebSocket is established.
  */
-var ContainerConnection = class {
+var ContainerControlConnection = class {
 	stub;
 	session;
 	transport;
@@ -2840,7 +2847,7 @@ var ContainerConnection = class {
 		ws.addEventListener("close", () => {
 			this.connected = false;
 			this.ws = null;
-			this.logger.debug("
+			this.logger.debug("ContainerControlConnection WebSocket closed");
 		});
 		ws.addEventListener("error", () => {
 			this.connected = false;
@@ -2849,12 +2856,12 @@ var ContainerConnection = class {
 			this.ws = ws;
 			this.transport.activate(ws);
 			this.connected = true;
-			this.logger.debug("
+			this.logger.debug("ContainerControlConnection established", { port: this.port });
 		} catch (error) {
 			clearTimeout(timeout);
 			this.connected = false;
 			this.transport.abort(error);
-			this.logger.error("
+			this.logger.error("ContainerControlConnection failed", error instanceof Error ? error : new Error(String(error)));
 			throw error;
 		}
 	}
@@ -2920,7 +2927,7 @@ var DeferredTransport = class {
 };
 
 //#endregion
-//#region src/
+//#region src/container-control/client.ts
 /** Close the idle capnweb WebSocket promptly so the DO can sleep. */
 const DEFAULT_IDLE_DISCONNECT_MS = 1e3;
 /**
@@ -2978,7 +2985,7 @@ function translateRPCError(error) {
  * Inspect a transport-level Error's message and produce the ErrorResponse
  * that becomes an RPCTransportError. Pattern strings are pinned to the exact
  * messages emitted by capnweb's WebSocketTransport (see capnweb's
- * src/websocket.ts) and our DeferredTransport in container-connection.ts —
+ * src/websocket.ts) and our DeferredTransport in container-control/connection.ts —
 * notably the trailing period in `WebSocket connection failed.` matches
 * capnweb verbatim. The DeferredTransport tests in
 * tests/container-connection.test.ts pin the literal strings.
@@ -3024,7 +3031,7 @@ function buildTransportErrorResponse(error) {
 * activity at call start.
 *
 * `onCallStarted` fires synchronously when an RPC method is invoked. The
-*
+* ContainerControlClient uses this to renew the DO's activity timeout
 * immediately, so even a call that completes entirely between two
 * busy-poll ticks still pushes the sleepAfter deadline forward.
 *
@@ -3050,20 +3057,19 @@ function wrapStub(stub, onCallStarted) {
 	} });
 }
 /**
- * SandboxClient backed by direct capnweb RPC.
+ * SandboxClient-compatible facade backed by direct capnweb RPC.
  *
- *
- *
- * bypassing the HTTP handler/router layer entirely.
+ * All operations call the container's SandboxAPI control interface directly
+ * over capnweb, bypassing the HTTP handler/router layer entirely.
  *
- * Manages its own WebSocket lifecycle: a fresh `
+ * Manages its own WebSocket lifecycle: a fresh `ContainerControlConnection` is
 * created on demand and torn down after `idleDisconnectMs` of inactivity.
 * Busy/idle detection relies on `RpcSession.getStats()` which tracks all
 * in-flight RPC calls and stream exports — including long-lived streaming
 * RPCs that would be invisible to a simple per-call request counter (see
 * the file-level comment for the full rationale).
 */
-var RPCSandboxClient = class {
+var ContainerControlClient = class {
 	connOptions;
 	idleDisconnectMs;
 	busyPollIntervalMs;
@@ -3097,13 +3103,12 @@ var RPCSandboxClient = class {
 		this.onSessionIdle = options.onSessionIdle;
 	}
 	/**
-	 * Return the current connection, creating
-	 *
-	 * timer the first time a connection is materialized.
+	 * Return the current connection, creating one when the client is disconnected.
+	 * Starts the busy-poll timer the first time a connection is materialized.
 	 */
 	getConnection() {
 		if (!this.conn) {
-			this.conn = new
+			this.conn = new ContainerControlConnection(this.connOptions);
 			this.startBusyPoll();
 		}
 		return this.conn;
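The accessor above materializes a `ContainerControlConnection` lazily and, per the surrounding comments, the client tears the connection down after `idleDisconnectMs` (default 1 second) of inactivity so the Durable Object can sleep. A generic sketch of that lazy-create/idle-teardown pattern; this is a simplified timer-based variant using none of the package's internals, whereas the package itself busy-polls `RpcSession.getStats()` to decide when the connection is idle:

```typescript
// Illustrative pattern only: create on first use, destroy after an idle window.
class IdleManaged<T> {
  private conn: T | null = null;
  private idleTimer: ReturnType<typeof setTimeout> | null = null;

  constructor(
    private create: () => T,
    private destroy: (conn: T) => void,
    private idleDisconnectMs = 1000, // mirrors DEFAULT_IDLE_DISCONNECT_MS
  ) {}

  get(): T {
    if (!this.conn) this.conn = this.create();
    // Each access pushes the teardown deadline forward.
    if (this.idleTimer) clearTimeout(this.idleTimer);
    this.idleTimer = setTimeout(() => {
      if (this.conn) { this.destroy(this.conn); this.conn = null; }
    }, this.idleDisconnectMs);
    return this.conn;
  }
}
```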
@@ -4191,7 +4196,7 @@ function isLocalhostPattern(hostname) {
 * This file is auto-updated by .github/changeset-version.ts during releases
 * DO NOT EDIT MANUALLY - Changes will be overwritten on the next version bump
 */
-const SDK_VERSION = "0.9.2";
+const SDK_VERSION = "0.9.3";
 
 //#endregion
 //#region src/sandbox.ts
@@ -4202,6 +4207,63 @@ const BACKUP_CONTAINER_DIR = "/var/backups";
 const BACKUP_STORAGE_PREFIX = "backups";
 const BACKUP_ARCHIVE_OBJECT_NAME = "data.sqsh";
 const BACKUP_METADATA_OBJECT_NAME = "meta.json";
+const BACKUP_DEFAULT_COMPRESSION = "lz4";
+const BACKUP_DEFAULT_COMPRESS_THREADS = 8;
+const BACKUP_MULTIPART_MIN_SIZE = 10 * 1024 * 1024;
+const BACKUP_MULTIPART_TARGET_PARTS = 16;
+const BACKUP_MULTIPART_MIN_PART_SIZE = 5 * 1024 * 1024;
+const BACKUP_MULTIPART_MAX_PARTS = 64;
+const BACKUP_DOWNLOAD_PARALLEL_PARTS = 8;
+const BACKUP_DOWNLOAD_PARALLEL_MIN_SIZE = 10 * 1024 * 1024;
+const BACKUP_DOWNLOAD_MAX_PARTS = 64;
+/**
+ * Calculate the optimal number of parts for multipart upload/download
+ * based on archive size. Larger archives benefit from more parallelism.
+ */
+function calculatePartCount(sizeBytes, defaultParts, maxParts) {
+	if (sizeBytes < 100 * 1024 * 1024) return defaultParts;
+	if (sizeBytes < 1024 * 1024 * 1024) return Math.min(32, defaultParts * 2);
+	return maxParts;
+}
+/**
+ * Tagged template literal that shell-escapes every interpolated value.
+ * Use for composing in-container scripts where the template body is
+ * trusted shell and the interpolations are untrusted strings.
+ */
+function sh(strings, ...values) {
+	let out = strings[0];
+	for (let i = 0; i < values.length; i++) out += shellEscape(String(values[i])) + strings[i + 1];
+	return out;
+}
+/**
+ * Hex string of `bytes` random bytes (length = bytes * 2). Used for short
+ * non-cryptographic identifiers — e.g. tempfile suffixes.
+ */
+function randomHex(bytes) {
+	const buf = new Uint8Array(bytes);
+	crypto.getRandomValues(buf);
+	return Array.from(buf, (b) => b.toString(16).padStart(2, "0")).join("");
+}
+/**
+ * Parse an array of `key=value` / bare-flag s3fs options into a Record.
+ * Bare flags become `{ flag: true }`. Later entries overwrite earlier ones.
+ */
+function parseS3fsOptions(entries) {
+	const result = {};
+	for (const entry of entries) {
+		const eq = entry.indexOf("=");
+		if (eq === -1) result[entry] = true;
+		else result[entry.slice(0, eq)] = entry.slice(eq + 1);
+	}
+	return result;
+}
+/**
+ * Serialise an s3fs options Record into the comma-separated `-o` argument.
+ * Boolean true emits the bare flag; false drops it.
+ */
+function serializeS3fsOptions(options) {
+	return Object.entries(options).filter(([, v]) => v !== false).map(([k, v]) => v === true ? k : `${k}=${v}`).join(",");
+}
 function getNamespaceConfigurationCache(namespace) {
	const existing = sandboxConfigurationCache.get(namespace);
	if (existing) return existing;
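Expected behavior of the new helpers, restated as example calls. The values follow directly from the definitions in the hunk above; `shellEscape` is the package's pre-existing helper, so the exact quoting in the last example is an assumption:

```typescript
// Given the definitions in the hunk above:
calculatePartCount(50 * 1024 * 1024, 16, 64);   // 16  (< 100 MiB: defaultParts)
calculatePartCount(500 * 1024 * 1024, 16, 64);  // 32  (< 1 GiB: min(32, 2 * defaultParts))
calculatePartCount(2 * 1024 ** 3, 16, 64);      // 64  (>= 1 GiB: maxParts)

// parseS3fsOptions / serializeS3fsOptions round-trip; later entries win.
const opts = parseS3fsOptions(["use_path_request_style", "url=https://old", "url=https://new"]);
// opts is { use_path_request_style: true, url: "https://new" }
serializeS3fsOptions(opts); // "use_path_request_style,url=https://new"

// sh: the template body is trusted shell; interpolations are escaped, so
// shell metacharacters in values cannot break out of the command.
sh`ls -la ${"dir; rm -rf /"}`; // "ls -la 'dir; rm -rf /'" (quoting per shellEscape)
```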
@@ -4437,7 +4499,8 @@ var Sandbox = class Sandbox extends Container {
 		return Math.max(12e4, startupBudgetMs + 3e4);
 	}
 	/**
-	 * Create
+	 * Create the route-based compatibility client with current HTTP/WebSocket
+	 * transport settings.
 	 */
 	createSandboxClient() {
 		return new SandboxClient({
@@ -4453,12 +4516,15 @@ var Sandbox = class Sandbox extends Container {
 		});
 	}
 	/**
-	 * Create the appropriate client for
+	 * Create the appropriate client for the configured control path.
+	 *
+	 * `rpc` currently selects the primary container-control client. `http` and
+	 * `websocket` select the route-based compatibility client.
 	 */
 	createClientForTransport(transport) {
 		if (transport === "rpc") {
 			const self = this;
-			return new
+			return new ContainerControlClient({
 				stub: this,
 				port: 3e3,
 				logger: this.logger,
@@ -4743,6 +4809,7 @@ var Sandbox = class Sandbox extends Container {
 		let mountError;
 		let passwordFilePath;
 		let provider = null;
+		let dirExisted = true;
 		try {
 			this.validateMountOptions(bucket, mountPath, {
 				...options,
@@ -4774,6 +4841,7 @@ var Sandbox = class Sandbox extends Container {
 			};
 			this.activeMounts.set(mountPath, mountInfo);
 			await this.createPasswordFile(passwordFilePath, bucket, credentials);
+			dirExisted = (await this.execInternal(`test -d ${shellEscape(mountPath)}`)).exitCode === 0;
 			await this.execInternal(`mkdir -p ${shellEscape(mountPath)}`);
 			await this.executeS3FSMount(s3fsSource, mountPath, options, provider, passwordFilePath);
 			mountInfo.mounted = true;
@@ -4781,6 +4849,12 @@ var Sandbox = class Sandbox extends Container {
 		} catch (error) {
 			mountError = error instanceof Error ? error : new Error(String(error));
 			if (passwordFilePath) await this.deletePasswordFile(passwordFilePath);
+			try {
+				await this.execInternal(`mountpoint -q ${shellEscape(mountPath)} && fusermount -u ${shellEscape(mountPath)}`);
+			} catch {}
+			if (!dirExisted) try {
+				await this.execInternal(`rmdir ${shellEscape(mountPath)} 2>/dev/null`);
+			} catch {}
 			this.activeMounts.delete(mountPath);
 			throw error;
 		} finally {
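The failure path above now unmounts a half-mounted FUSE filesystem and removes the mount directory, but only when this call created it; `dirExisted` is captured just before the `mkdir -p`. The invariant in isolation, where `run` is a hypothetical exec helper standing in for `execInternal` and `mount` stands in for `executeS3FSMount` (assumed to throw on failure):

```typescript
// Sketch of the cleanup invariant, assuming a throwing mount helper.
async function mountWithCleanup(
  run: (cmd: string) => Promise<{ exitCode: number }>,
  mount: (path: string) => Promise<void>,
  mountPath: string,
): Promise<void> {
  // Record whether the directory pre-existed before mkdir -p creates it.
  const dirExisted = (await run(`test -d ${mountPath}`)).exitCode === 0;
  await run(`mkdir -p ${mountPath}`);
  try {
    await mount(mountPath);
  } catch (error) {
    // Best-effort teardown: unmount if half-mounted, and remove the
    // directory only if this call created it.
    await run(`mountpoint -q ${mountPath} && fusermount -u ${mountPath}`).catch(() => {});
    if (!dirExisted) await run(`rmdir ${mountPath}`).catch(() => {});
    throw error;
  }
}
```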
@@ -4897,16 +4971,31 @@ var Sandbox = class Sandbox extends Container {
 	 * Execute S3FS mount command
 	 */
 	async executeS3FSMount(bucket, mountPath, options, provider, passwordFilePath, sessionId) {
-		const
-
-
-
-
-
-
-
-		const
-
+		const s3fsOptions = {
+			logfile: `/tmp/.s3fs-log-${randomHex(4)}`,
+			...parseS3fsOptions(resolveS3fsOptions(provider)),
+			...parseS3fsOptions(options.s3fsOptions ?? []),
+			passwd_file: passwordFilePath,
+			url: options.endpoint,
+			...options.readOnly ? { ro: true } : {}
+		};
+		const logFile = s3fsOptions.logfile;
+		const script = sh`(
+	s3fs ${bucket} ${mountPath} -o ${serializeS3fsOptions(s3fsOptions)} >${logFile} 2>&1
+	rc=$?
+	if [ "$rc" -ne 0 ]; then tail -n 20 ${logFile} 2>/dev/null || true; exit 2; fi
+	for _ in $(seq 1 60); do
+		if mountpoint -q ${mountPath}; then exit 0; fi
+		sleep 0.1
+	done
+	tail -n 20 ${logFile} 2>/dev/null || true
+	exit 3
+)`;
+		const result = await (sessionId ? (cmd) => this.execWithSession(cmd, sessionId, { origin: "internal" }) : (cmd) => this.execInternal(cmd))(script);
+		if (result.exitCode === 0) return;
+		const detail = result.stdout?.trim() || result.stderr?.trim() || "";
+		if (result.exitCode === 2) throw new S3FSMountError(`S3FS mount failed: ${detail || "Unknown error"}`);
+		throw new S3FSMountError(`S3FS mount failed: FUSE filesystem never appeared at ${mountPath}. ${detail ? `s3fs log: ${detail}` : "No s3fs log output captured. The s3fs daemon may have exited before writing logs."}`);
 	}
 	/**
 	 * In-flight `destroy()` promise. While set, concurrent callers coalesce
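The rewritten mount builds its options declaratively (user-supplied `s3fsOptions` override provider defaults, with `passwd_file` and `url` pinned last), runs s3fs inside a `sh`-escaped script, and reports outcomes through exit codes. How the result is interpreted, as a stand-alone sketch:

```typescript
// Exit-code protocol of the mount script above (illustrative restatement):
//   0: s3fs started and the mountpoint appeared within ~6 s (60 polls x 0.1 s)
//   2: s3fs itself exited non-zero; its last 20 log lines are echoed
//   3: s3fs exited 0 but the FUSE mount never appeared
function interpretMountExit(exitCode: number, detail: string): string {
  if (exitCode === 0) return "mounted";
  if (exitCode === 2) return `S3FS mount failed: ${detail || "Unknown error"}`;
  return `S3FS mount failed: FUSE filesystem never appeared${detail ? ` (s3fs log: ${detail})` : ""}`;
}
```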
@@ -6458,6 +6547,42 @@ var Sandbox = class Sandbox extends Container {
 		}
 		return normalizedExcludes;
 	}
+	resolveBackupCompression(compression) {
+		if (compression !== void 0) {
+			if (typeof compression !== "object" || compression === null) throw new InvalidBackupConfigError({
+				message: "BackupOptions.compression must be an object",
+				code: ErrorCode.INVALID_BACKUP_CONFIG,
+				httpStatus: 400,
+				context: { reason: "compression must be an object" },
+				timestamp: (/* @__PURE__ */ new Date()).toISOString()
+			});
+		}
+		const compressionOptions = compression;
+		const format = compressionOptions?.format ?? BACKUP_DEFAULT_COMPRESSION;
+		const threads = compressionOptions?.threads ?? BACKUP_DEFAULT_COMPRESS_THREADS;
+		if (typeof format !== "string" || ![
+			"gzip",
+			"lz4",
+			"zstd"
+		].includes(format)) throw new InvalidBackupConfigError({
+			message: "BackupOptions.compression.format must be one of: gzip, lz4, zstd",
+			code: ErrorCode.INVALID_BACKUP_CONFIG,
+			httpStatus: 400,
+			context: { reason: "compression.format must be one of: gzip, lz4, zstd" },
+			timestamp: (/* @__PURE__ */ new Date()).toISOString()
+		});
+		if (typeof threads !== "number" || !Number.isInteger(threads) || threads < 1) throw new InvalidBackupConfigError({
+			message: "BackupOptions.compression.threads must be a positive integer",
+			code: ErrorCode.INVALID_BACKUP_CONFIG,
+			httpStatus: 400,
+			context: { reason: "compression.threads must be a positive integer" },
+			timestamp: (/* @__PURE__ */ new Date()).toISOString()
+		});
+		return {
+			format,
+			threads
+		};
+	}
 	static PRESIGNED_URL_EXPIRY_SECONDS = 3600;
 	/**
 	 * Create a unique, dedicated session for a single backup operation.
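`resolveBackupCompression` normalizes and validates the new `BackupOptions.compression` field. Accepted and rejected shapes, per the checks above (the method itself is internal to `Sandbox`, so inputs and outputs are shown as plain data):

```typescript
// Normalization results per the validation above (defaults: lz4, 8 threads).
const accepted: Array<[unknown, { format: string; threads: number }]> = [
  [undefined, { format: "lz4", threads: 8 }],
  [{ format: "zstd" }, { format: "zstd", threads: 8 }],
  [{ format: "gzip", threads: 4 }, { format: "gzip", threads: 4 }],
];
// Rejected with InvalidBackupConfigError (HTTP 400):
//   "lz4"             (compression must be an object)
//   { format: "xz" }  (format must be one of gzip, lz4, zstd)
//   { threads: 0 }    (threads must be a positive integer)
//   { threads: 1.5 }  (threads must be a positive integer)
```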
@@ -6499,6 +6624,18 @@ var Sandbox = class Sandbox extends Container {
 		};
 	}
 	/**
+	 * Generate a presigned GET URL for downloading an object from R2.
+	 * The container can curl this URL directly without credentials.
+	 */
+	async generatePresignedGetUrl(r2Key) {
+		const { client, accountId, bucketName } = this.requirePresignedUrlSupport();
+		const encodedBucket = encodeURIComponent(bucketName);
+		const encodedKey = r2Key.split("/").map((seg) => encodeURIComponent(seg)).join("/");
+		const url = new URL(`https://${accountId}.r2.cloudflarestorage.com/${encodedBucket}/${encodedKey}`);
+		url.searchParams.set("X-Amz-Expires", String(Sandbox.PRESIGNED_URL_EXPIRY_SECONDS));
+		return (await client.sign(new Request(url), { aws: { signQuery: true } })).url;
+	}
+	/**
 	 * Generate a presigned PUT URL for uploading an object to R2.
 	 * The container can curl PUT to this URL directly without credentials.
 	 */
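`generatePresignedGetUrl` signs the object URL with the query-string variant of SigV4 so the container can fetch it with a bare `curl`. The `client.sign(request, { aws: { signQuery: true } })` call matches aws4fetch's `AwsClient` API; on that assumption, a stand-alone equivalent (placeholder account, bucket, and credentials throughout):

```typescript
import { AwsClient } from "aws4fetch";

// Placeholder R2 credentials; in the SDK these come from the configured
// backup bucket support.
const client = new AwsClient({
  accessKeyId: "R2_ACCESS_KEY_ID",
  secretAccessKey: "R2_SECRET_ACCESS_KEY",
});

const url = new URL(
  "https://ACCOUNT_ID.r2.cloudflarestorage.com/my-bucket/backups/BACKUP_ID/data.sqsh"
);
url.searchParams.set("X-Amz-Expires", "3600"); // PRESIGNED_URL_EXPIRY_SECONDS

// signQuery: true moves the SigV4 signature into the query string, so the
// resulting URL needs no credentials or headers to fetch.
const signed = await client.sign(new Request(url), { aws: { signQuery: true } });
console.log(signed.url); // run in an ESM/module context for top-level await
```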
@@ -6558,51 +6695,248 @@ var Sandbox = class Sandbox extends Container {
 		}
 	}
 	/**
-	 *
+	 * Generate a presigned PUT URL for a single part in a multipart upload.
 	 */
-	async
-	const { accountId, bucketName } = this.requirePresignedUrlSupport();
-	const
-	const
-	const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+	async generatePresignedPartUrl(r2Key, uploadId, partNumber) {
+		const { client, accountId, bucketName } = this.requirePresignedUrlSupport();
+		const encodedBucket = encodeURIComponent(bucketName);
+		const encodedKey = r2Key.split("/").map((seg) => encodeURIComponent(seg)).join("/");
+		const url = new URL(`https://${accountId}.r2.cloudflarestorage.com/${encodedBucket}/${encodedKey}`);
+		url.searchParams.set("X-Amz-Expires", String(Sandbox.PRESIGNED_URL_EXPIRY_SECONDS));
+		url.searchParams.set("partNumber", String(partNumber));
+		url.searchParams.set("uploadId", uploadId);
+		return (await client.sign(new Request(url, { method: "PUT" }), { aws: { signQuery: true } })).url;
+	}
+	/**
+	 * Upload a backup archive to R2 using parallel multipart upload.
+	 * Uses the S3-compatible API exclusively for create/complete/abort so that
+	 * the uploadId is in the same namespace as the presigned part PUT URLs.
+	 */
+	async uploadBackupMultipart(archivePath, r2Key, sizeBytes, backupId, dir, backupSession) {
+		const targetParts = calculatePartCount(sizeBytes, BACKUP_MULTIPART_TARGET_PARTS, BACKUP_MULTIPART_MAX_PARTS);
+		const numParts = Math.min(targetParts, Math.floor(sizeBytes / BACKUP_MULTIPART_MIN_PART_SIZE));
+		if (numParts <= 1) return this.uploadBackupPresigned(archivePath, r2Key, sizeBytes, backupId, dir, backupSession);
+		const { client, accountId, bucketName } = this.requirePresignedUrlSupport();
+		const objectUrl = `https://${accountId}.r2.cloudflarestorage.com/${encodeURIComponent(bucketName)}/${r2Key.split("/").map((seg) => encodeURIComponent(seg)).join("/")}`;
+		const createResp = await client.fetch(`${objectUrl}?uploads`, { method: "POST" });
+		if (!createResp.ok) throw new BackupCreateError({
+			message: `Failed to initiate multipart upload: HTTP ${createResp.status}`,
+			code: ErrorCode.BACKUP_CREATE_FAILED,
+			httpStatus: 500,
+			context: {
+				dir,
+				backupId
+			},
+			timestamp: (/* @__PURE__ */ new Date()).toISOString()
 		});
-	const
-
-
-
-
-
-
-
+		const uploadId = (await createResp.text()).match(/<UploadId>([^<]+)<\/UploadId>/)?.[1];
+		if (!uploadId) throw new BackupCreateError({
+			message: "Multipart upload response did not contain an UploadId",
+			code: ErrorCode.BACKUP_CREATE_FAILED,
+			httpStatus: 500,
+			context: {
+				dir,
+				backupId
+			},
+			timestamp: (/* @__PURE__ */ new Date()).toISOString()
+		});
+		const abortMultipart = async () => {
+			await client.fetch(`${objectUrl}?uploadId=${encodeURIComponent(uploadId)}`, { method: "DELETE" }).catch(() => {});
 		};
-		this.activeMounts.set(mountPath, mountInfo);
 		try {
-
-			await
-
-
+			const partSize = Math.ceil(sizeBytes / numParts);
+			const parts = await Promise.all(Array.from({ length: numParts }, (_, i) => ({
+				partNumber: i + 1,
+				url: "",
+				offset: i * partSize,
+				size: i === numParts - 1 ? sizeBytes - i * partSize : partSize
+			})).map(async (part) => ({
+				...part,
+				url: await this.generatePresignedPartUrl(r2Key, uploadId, part.partNumber)
+			})));
+			let uploadResult;
+			try {
+				uploadResult = await this.client.backup.uploadParts({
+					archivePath,
+					parts,
+					sessionId: backupSession
+				});
+			} catch (err) {
+				if (err instanceof SandboxError && err.errorResponse.httpStatus === 404) {
+					await abortMultipart();
+					return this.uploadBackupPresigned(archivePath, r2Key, sizeBytes, backupId, dir, backupSession);
+				}
+				throw err;
+			}
+			if (!uploadResult.success || uploadResult.parts.length !== numParts) throw new BackupCreateError({
+				message: `Multipart upload returned ${uploadResult.parts.length} of ${numParts} parts`,
+				code: ErrorCode.BACKUP_CREATE_FAILED,
+				httpStatus: 500,
+				context: {
+					dir,
+					backupId
+				},
+				timestamp: (/* @__PURE__ */ new Date()).toISOString()
+			});
+			const completeXml = [
+				"<CompleteMultipartUpload>",
+				...uploadResult.parts.map((p) => `<Part><PartNumber>${p.partNumber}</PartNumber><ETag>${p.etag}</ETag></Part>`),
+				"</CompleteMultipartUpload>"
+			].join("");
+			const completeResp = await client.fetch(`${objectUrl}?uploadId=${encodeURIComponent(uploadId)}`, {
+				method: "POST",
+				headers: { "Content-Type": "application/xml" },
+				body: completeXml
+			});
+			if (!completeResp.ok) {
+				const body = await completeResp.text().catch(() => "");
+				throw new BackupCreateError({
+					message: `Multipart upload completion failed: HTTP ${completeResp.status} ${body}`,
+					code: ErrorCode.BACKUP_CREATE_FAILED,
+					httpStatus: 500,
+					context: {
+						dir,
+						backupId
+					},
+					timestamp: (/* @__PURE__ */ new Date()).toISOString()
+				});
+			}
+			const head = await this.requireBackupBucket().head(r2Key);
+			if (!head || head.size !== sizeBytes) throw new BackupCreateError({
+				message: `Multipart upload verification failed: expected ${sizeBytes} bytes, got ${head?.size ?? 0}`,
+				code: ErrorCode.BACKUP_CREATE_FAILED,
+				httpStatus: 500,
+				context: {
+					dir,
+					backupId
+				},
+				timestamp: (/* @__PURE__ */ new Date()).toISOString()
+			});
 		} catch (error) {
-			await
-			this.activeMounts.delete(mountPath);
+			await abortMultipart();
 			throw error;
 		}
 	}
 	/**
+	 * Download a backup archive from R2 via presigned GET URL.
+	 * For archives >= BACKUP_DOWNLOAD_PARALLEL_MIN_SIZE, uses BACKUP_DOWNLOAD_PARALLEL_PARTS
+	 * concurrent curl processes (each downloading a byte-range) to maximise both
+	 * network and disk-write throughput. Parts are written into a pre-sized file
+	 * with dd using byte offsets, then atomically moved to the final path.
+	 */
+	async downloadBackupParallel(archivePath, r2Key, expectedSize, backupId, dir, backupSession) {
+		const presignedUrl = await this.generatePresignedGetUrl(r2Key);
+		await this.execWithSession(`mkdir -p ${BACKUP_CONTAINER_DIR}`, backupSession, { origin: "internal" });
+		const tmpPath = `${archivePath}.tmp`;
+		if (expectedSize < BACKUP_DOWNLOAD_PARALLEL_MIN_SIZE) {
+			const curlCmd = [
+				"curl -sSf",
+				"--connect-timeout 10",
+				"--max-time 1800",
+				"--retry 2",
+				"--retry-max-time 60",
+				`-o ${shellEscape(tmpPath)}`,
+				shellEscape(presignedUrl)
+			].join(" ");
+			const result = await this.execWithSession(curlCmd, backupSession, {
+				timeout: 181e4,
+				origin: "internal"
+			});
+			if (result.exitCode !== 0) {
+				await this.execWithSession(`rm -f ${shellEscape(tmpPath)}`, backupSession, { origin: "internal" }).catch(() => {});
+				throw new BackupRestoreError({
+					message: `Presigned URL download failed (exit code ${result.exitCode}): ${result.stderr}`,
+					code: ErrorCode.BACKUP_RESTORE_FAILED,
+					httpStatus: 500,
+					context: {
+						dir,
+						backupId
+					},
+					timestamp: (/* @__PURE__ */ new Date()).toISOString()
+				});
+			}
+		} else {
+			const numParts = calculatePartCount(expectedSize, BACKUP_DOWNLOAD_PARALLEL_PARTS, BACKUP_DOWNLOAD_MAX_PARTS);
+			const partSize = Math.floor(expectedSize / numParts);
+			const startLines = Array.from({ length: numParts }, (_, i) => {
+				const start = i * partSize;
+				return {
+					start,
+					range: `${start}-${i < numParts - 1 ? start + partSize - 1 : expectedSize - 1}`
+				};
+			}).map(({ start, range }) => [
+				"curl -sSf",
+				"--connect-timeout 10",
+				"--max-time 1800",
+				`-H ${shellEscape(`Range: bytes=${range}`)}`,
+				shellEscape(presignedUrl),
+				"|",
+				"dd",
+				`of=${shellEscape(tmpPath)}`,
+				"oflag=seek_bytes",
+				`seek=${start}`,
+				"conv=notrunc",
+				"2>/dev/null"
+			].join(" ")).map((cmd, i) => `(set -o pipefail; ${cmd}) & J${i}=$!`);
+			const waitLines = Array.from({ length: numParts }, (_, i) => `wait $J${i}; E${i}=$?`);
+			const exitVars = Array.from({ length: numParts }, (_, i) => `$E${i}`);
+			const script = [
+				`rm -f ${shellEscape(tmpPath)}`,
+				`truncate -s ${expectedSize} ${shellEscape(tmpPath)}`,
+				...startLines,
+				...waitLines,
+				`FAILED=$(( ${exitVars.join(" + ")} ))`,
+				`if [ "$FAILED" -ne 0 ]; then rm -f ${shellEscape(tmpPath)}; exit 1; fi`
+			].join("; ");
+			const result = await this.execWithSession(script, backupSession, {
+				timeout: 181e4,
+				origin: "internal"
+			});
+			if (result.exitCode !== 0) {
+				await this.execWithSession(`rm -f ${shellEscape(tmpPath)}`, backupSession, { origin: "internal" }).catch(() => {});
+				throw new BackupRestoreError({
+					message: `Parallel download failed (exit code ${result.exitCode}): ${result.stderr}`,
+					code: ErrorCode.BACKUP_RESTORE_FAILED,
+					httpStatus: 500,
+					context: {
+						dir,
+						backupId
+					},
+					timestamp: (/* @__PURE__ */ new Date()).toISOString()
+				});
+			}
+		}
+		const sizeCheck = await this.execWithSession(`stat -c %s ${shellEscape(tmpPath)}`, backupSession, { origin: "internal" });
+		const actualSize = parseInt(sizeCheck.stdout.trim(), 10);
+		if (actualSize !== expectedSize) {
+			await this.execWithSession(`rm -f ${shellEscape(tmpPath)}`, backupSession, { origin: "internal" }).catch(() => {});
+			throw new BackupRestoreError({
+				message: `Downloaded archive size mismatch: expected ${expectedSize}, got ${actualSize}`,
+				code: ErrorCode.BACKUP_RESTORE_FAILED,
+				httpStatus: 500,
+				context: {
+					dir,
+					backupId
+				},
+				timestamp: (/* @__PURE__ */ new Date()).toISOString()
+			});
+		}
+		const mvResult = await this.execWithSession(`mv ${shellEscape(tmpPath)} ${shellEscape(archivePath)}`, backupSession, { origin: "internal" });
+		if (mvResult.exitCode !== 0) {
+			await this.execWithSession(`rm -f ${shellEscape(tmpPath)}`, backupSession, { origin: "internal" }).catch(() => {});
+			throw new BackupRestoreError({
+				message: `Failed to finalize downloaded archive: ${mvResult.stderr}`,
+				code: ErrorCode.BACKUP_RESTORE_FAILED,
+				httpStatus: 500,
+				context: {
+					dir,
+					backupId
+				},
+				timestamp: (/* @__PURE__ */ new Date()).toISOString()
+			});
+		}
+	}
+	/**
 	 * Serialize backup operations on this sandbox instance.
 	 * Concurrent backup/restore calls are queued so the multi-step
 	 * create-archive → read → upload (or mount → extract) flow
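In `uploadBackupMultipart`, parts are ceil-sized with the final part absorbing the remainder, and the part count is capped so no part falls below the 5 MiB S3 minimum. The layout arithmetic in isolation:

```typescript
// Mirrors the part arithmetic above: offsets and sizes always tile sizeBytes.
function partLayout(sizeBytes: number, numParts: number) {
  const partSize = Math.ceil(sizeBytes / numParts);
  return Array.from({ length: numParts }, (_, i) => ({
    partNumber: i + 1, // S3 part numbers are 1-based
    offset: i * partSize,
    size: i === numParts - 1 ? sizeBytes - i * partSize : partSize,
  }));
}

const parts = partLayout(1_000_003, 4);
// partSize = ceil(1_000_003 / 4) = 250_001, so sizes are [250_001, 250_001, 250_001, 250_000]
parts.reduce((sum, p) => sum + p.size, 0) === 1_000_003; // true
```

The parallel restore path composes a single shell script from the same layout idea: `truncate -s` pre-sizes the temp file, one backgrounded `curl | dd seek=<offset> conv=notrunc` pipeline per byte range writes into it in place, and the summed `wait` exit codes gate the size check and final rename.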
@@ -6642,7 +6976,7 @@ var Sandbox = class Sandbox extends Container {
 	async doCreateBackup(options) {
 		const bucket = this.requireBackupBucket();
 		this.requirePresignedUrlSupport();
-		const { dir, name, ttl = BACKUP_DEFAULT_TTL_SECONDS, gitignore = false, excludes = [] } = options;
+		const { dir, name, ttl = BACKUP_DEFAULT_TTL_SECONDS, gitignore = false, excludes = [], compression, multipart = true } = options;
 		const backupStartTime = Date.now();
 		let backupId;
 		let sizeBytes;
@@ -6688,13 +7022,15 @@ var Sandbox = class Sandbox extends Container {
 			context: { reason: "excludes must be an array of strings" },
 			timestamp: (/* @__PURE__ */ new Date()).toISOString()
 		});
+		const resolvedCompression = this.resolveBackupCompression(compression);
 		const normalizedExcludes = this.normalizeBackupExcludes(excludes);
 		backupSession = await this.ensureBackupSession();
 		backupId = crypto.randomUUID();
 		const archivePath = `${BACKUP_CONTAINER_DIR}/${backupId}.sqsh`;
 		const createResult = await this.client.backup.createArchive(dir, archivePath, backupSession, {
 			gitignore,
-			excludes: normalizedExcludes
+			excludes: normalizedExcludes,
+			compression: resolvedCompression
 		});
 		if (!createResult.success) throw new BackupCreateError({
 			message: "Container failed to create backup archive",
@@ -6709,7 +7045,8 @@ var Sandbox = class Sandbox extends Container {
 		sizeBytes = createResult.sizeBytes;
 		const r2Key = `${BACKUP_STORAGE_PREFIX}/${backupId}/${BACKUP_ARCHIVE_OBJECT_NAME}`;
 		const metaKey = `${BACKUP_STORAGE_PREFIX}/${backupId}/${BACKUP_METADATA_OBJECT_NAME}`;
-		await this.
+		if (multipart && createResult.sizeBytes >= BACKUP_MULTIPART_MIN_SIZE) await this.uploadBackupMultipart(archivePath, r2Key, createResult.sizeBytes, backupId, dir, backupSession);
+		else await this.uploadBackupPresigned(archivePath, r2Key, createResult.sizeBytes, backupId, dir, backupSession);
 		const metadata = {
 			id: backupId,
 			dir,
|
|
|
6756
7093
|
* Archive format is identical to production (squashfs + meta.json).
|
|
6757
7094
|
*/
|
|
6758
7095
|
async doCreateBackupLocal(options) {
|
|
6759
|
-
const { dir, name, ttl = BACKUP_DEFAULT_TTL_SECONDS, gitignore = false, excludes = [] } = options;
|
|
7096
|
+
const { dir, name, ttl = BACKUP_DEFAULT_TTL_SECONDS, gitignore = false, excludes = [], compression } = options;
|
|
6760
7097
|
const backupStartTime = Date.now();
|
|
6761
7098
|
let backupId;
|
|
6762
7099
|
let sizeBytes;
|
|
@@ -6810,13 +7147,15 @@ var Sandbox = class Sandbox extends Container {
 			context: { reason: "excludes must be an array of strings" },
 			timestamp: (/* @__PURE__ */ new Date()).toISOString()
 		});
+		const resolvedCompression = this.resolveBackupCompression(compression);
 		const normalizedExcludes = this.normalizeBackupExcludes(excludes);
 		backupSession = await this.ensureBackupSession();
 		backupId = crypto.randomUUID();
 		const archivePath = `${BACKUP_CONTAINER_DIR}/${backupId}.sqsh`;
 		const createResult = await this.client.backup.createArchive(dir, archivePath, backupSession, {
 			gitignore,
-			excludes: normalizedExcludes
+			excludes: normalizedExcludes,
+			compression: resolvedCompression
 		});
 		if (!createResult.success) throw new BackupCreateError({
 			message: "Container failed to create backup archive",
@@ -6982,7 +7321,8 @@ var Sandbox = class Sandbox extends Container {
 			timestamp: (/* @__PURE__ */ new Date()).toISOString()
 		});
 		const r2Key = `${BACKUP_STORAGE_PREFIX}/${id}/${BACKUP_ARCHIVE_OBJECT_NAME}`;
-
+		const archiveHead = await bucket.head(r2Key);
+		if (!archiveHead) throw new BackupNotFoundError({
 			message: `Backup archive not found in R2: ${id}. The archive may have been deleted by R2 lifecycle rules.`,
 			code: ErrorCode.BACKUP_NOT_FOUND,
 			httpStatus: 404,
@@ -6990,19 +7330,12 @@ var Sandbox = class Sandbox extends Container {
 			timestamp: (/* @__PURE__ */ new Date()).toISOString()
 		});
 		backupSession = await this.ensureBackupSession();
-		const
-		const
-		const mountGlob = `/var/backups/mounts/r2mount/${id}/data`;
+		const archivePath = `${BACKUP_CONTAINER_DIR}/${id}.sqsh`;
+		const mountGlob = `${BACKUP_CONTAINER_DIR}/mounts/${id}`;
 		await this.execWithSession(`/usr/bin/fusermount3 -uz ${shellEscape(dir)} 2>/dev/null || true`, backupSession, { origin: "internal" }).catch(() => {});
 		await this.execWithSession(`for d in ${shellEscape(mountGlob)}_*/lower ${shellEscape(mountGlob)}/lower; do [ -d "$d" ] && /usr/bin/fusermount3 -uz "$d" 2>/dev/null; done; true`, backupSession, { origin: "internal" }).catch(() => {});
-		await this.execWithSession(
-
-		if (previousBackupMount?.mountType === "fuse") {
-			previousBackupMount.mounted = false;
-			this.activeMounts.delete(r2MountPath);
-			await this.deletePasswordFile(previousBackupMount.passwordFilePath);
-		}
-		await this.mountBackupR2(r2MountPath, `backups/${id}/`, backupSession);
+		const sizeCheck = await this.execWithSession(`stat -c %s ${shellEscape(archivePath)} 2>/dev/null || echo 0`, backupSession, { origin: "internal" }).catch(() => ({ stdout: "0" }));
+		if (Number.parseInt((sizeCheck.stdout ?? "0").trim(), 10) !== archiveHead.size) await this.downloadBackupParallel(archivePath, r2Key, archiveHead.size, id, dir, backupSession);
 		if (!(await this.client.backup.restoreArchive(dir, archivePath, backupSession)).success) throw new BackupRestoreError({
 			message: "Container failed to restore backup archive",
 			code: ErrorCode.BACKUP_RESTORE_FAILED,
@@ -7021,6 +7354,10 @@ var Sandbox = class Sandbox extends Container {
 			};
 		} catch (error) {
 			caughtError = error instanceof Error ? error : new Error(String(error));
+			if (id && backupSession) {
+				const cleanupPath = `${BACKUP_CONTAINER_DIR}/${id}.sqsh`;
+				await this.execWithSession(`rm -f ${shellEscape(cleanupPath)}`, backupSession, { origin: "internal" }).catch(() => {});
+			}
 			throw error;
 		} finally {
 			if (backupSession) await this.client.utils.deleteSession(backupSession).catch(() => {});
@@ -7169,4 +7506,4 @@ var Sandbox = class Sandbox extends Container {
 
 //#endregion
 export { DesktopInvalidOptionsError as A, CommandClient as C, BackupNotFoundError as D, BackupExpiredError as E, InvalidBackupConfigError as F, ProcessExitedBeforeReadyError as I, ProcessReadyTimeoutError as L, DesktopProcessCrashedError as M, DesktopStartFailedError as N, BackupRestoreError as O, DesktopUnavailableError as P, RPCTransportError as R, DesktopClient as S, BackupCreateError as T, UtilityClient as _, BucketMountError as a, GitClient as b, MissingCredentialsError as c, parseSSEStream as d, responseToAsyncIterable as f, SandboxClient as g, streamFile as h, proxyTerminal as i, DesktopNotStartedError as j, DesktopInvalidCoordinatesError as k, S3FSMountError as l, collectFile as m, getSandbox as n, BucketUnmountError as o, CodeInterpreter as p, proxyToSandbox as r, InvalidMountConfigError as s, Sandbox as t, asyncIterableToSSEStream as u, ProcessClient as v, BackupClient as w, FileClient as x, PortClient as y, SessionTerminatedError as z };
-//# sourceMappingURL=sandbox-CReFGUtF.js.map
+//# sourceMappingURL=sandbox-BAuU-2a0.js.map