@cloudflare/sandbox 0.7.4 → 0.7.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/Dockerfile +8 -2
- package/dist/{contexts-uY_burk0.d.ts → contexts-BHx40XTT.d.ts} +35 -2
- package/dist/contexts-BHx40XTT.d.ts.map +1 -0
- package/dist/{errors-Bzl0ZNia.js → errors-CYUY62c6.js} +11 -1
- package/dist/errors-CYUY62c6.js.map +1 -0
- package/dist/index.d.ts +42 -42
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +638 -5
- package/dist/index.js.map +1 -1
- package/dist/openai/index.d.ts +1 -1
- package/dist/opencode/index.d.ts +11 -3
- package/dist/opencode/index.d.ts.map +1 -1
- package/dist/opencode/index.js +17 -6
- package/dist/opencode/index.js.map +1 -1
- package/dist/{sandbox-CgjQQZGw.d.ts → sandbox-8qsR1OnB.d.ts} +241 -2
- package/dist/sandbox-8qsR1OnB.d.ts.map +1 -0
- package/package.json +3 -2
- package/dist/contexts-uY_burk0.d.ts.map +0 -1
- package/dist/errors-Bzl0ZNia.js.map +0 -1
- package/dist/sandbox-CgjQQZGw.d.ts.map +0 -1
package/dist/index.js
CHANGED
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
import { _ as getEnvString, a as isExecResult, c as shellEscape, d as TraceContext, f as Execution, g as filterEnvVars, h as extractRepoName, i as isWSStreamChunk, l as createLogger, m as GitLogger, n as isWSError, o as isProcess, p as ResultImpl, r as isWSResponse, s as isProcessStatus, t as generateRequestId, u as createNoOpLogger, v as partitionEnvVars } from "./dist-D9B_6gn_.js";
|
|
2
|
-
import { t as ErrorCode } from "./errors-
|
|
2
|
+
import { t as ErrorCode } from "./errors-CYUY62c6.js";
|
|
3
3
|
import { Container, getContainer, switchPort } from "@cloudflare/containers";
|
|
4
|
+
import { AwsClient } from "aws4fetch";
|
|
4
5
|
|
|
5
6
|
//#region src/errors/classes.ts
|
|
6
7
|
/**
|
|
@@ -511,6 +512,75 @@ var ProcessExitedBeforeReadyError = class extends SandboxError {
|
|
|
511
512
|
return this.context.exitCode;
|
|
512
513
|
}
|
|
513
514
|
};
|
|
515
|
+
/**
|
|
516
|
+
* Error thrown when a backup is not found in R2
|
|
517
|
+
*/
|
|
518
|
+
var BackupNotFoundError = class extends SandboxError {
|
|
519
|
+
constructor(errorResponse) {
|
|
520
|
+
super(errorResponse);
|
|
521
|
+
this.name = "BackupNotFoundError";
|
|
522
|
+
}
|
|
523
|
+
get backupId() {
|
|
524
|
+
return this.context.backupId;
|
|
525
|
+
}
|
|
526
|
+
};
|
|
527
|
+
/**
|
|
528
|
+
* Error thrown when a backup has expired (past its TTL)
|
|
529
|
+
*/
|
|
530
|
+
var BackupExpiredError = class extends SandboxError {
|
|
531
|
+
constructor(errorResponse) {
|
|
532
|
+
super(errorResponse);
|
|
533
|
+
this.name = "BackupExpiredError";
|
|
534
|
+
}
|
|
535
|
+
get backupId() {
|
|
536
|
+
return this.context.backupId;
|
|
537
|
+
}
|
|
538
|
+
get expiredAt() {
|
|
539
|
+
return this.context.expiredAt;
|
|
540
|
+
}
|
|
541
|
+
};
|
|
542
|
+
/**
|
|
543
|
+
* Error thrown when backup configuration or inputs are invalid
|
|
544
|
+
*/
|
|
545
|
+
var InvalidBackupConfigError = class extends SandboxError {
|
|
546
|
+
constructor(errorResponse) {
|
|
547
|
+
super(errorResponse);
|
|
548
|
+
this.name = "InvalidBackupConfigError";
|
|
549
|
+
}
|
|
550
|
+
get reason() {
|
|
551
|
+
return this.context.reason;
|
|
552
|
+
}
|
|
553
|
+
};
|
|
554
|
+
/**
|
|
555
|
+
* Error thrown when backup creation fails
|
|
556
|
+
*/
|
|
557
|
+
var BackupCreateError = class extends SandboxError {
|
|
558
|
+
constructor(errorResponse) {
|
|
559
|
+
super(errorResponse);
|
|
560
|
+
this.name = "BackupCreateError";
|
|
561
|
+
}
|
|
562
|
+
get dir() {
|
|
563
|
+
return this.context.dir;
|
|
564
|
+
}
|
|
565
|
+
get backupId() {
|
|
566
|
+
return this.context.backupId;
|
|
567
|
+
}
|
|
568
|
+
};
|
|
569
|
+
/**
|
|
570
|
+
* Error thrown when backup restoration fails
|
|
571
|
+
*/
|
|
572
|
+
var BackupRestoreError = class extends SandboxError {
|
|
573
|
+
constructor(errorResponse) {
|
|
574
|
+
super(errorResponse);
|
|
575
|
+
this.name = "BackupRestoreError";
|
|
576
|
+
}
|
|
577
|
+
get dir() {
|
|
578
|
+
return this.context.dir;
|
|
579
|
+
}
|
|
580
|
+
get backupId() {
|
|
581
|
+
return this.context.backupId;
|
|
582
|
+
}
|
|
583
|
+
};
|
|
514
584
|
|
|
515
585
|
//#endregion
|
|
516
586
|
//#region src/errors/adapter.ts
|
|
@@ -557,6 +627,11 @@ function createErrorFromResponse(errorResponse) {
|
|
|
557
627
|
case ErrorCode.GIT_CHECKOUT_FAILED: return new GitCheckoutError(errorResponse);
|
|
558
628
|
case ErrorCode.INVALID_GIT_URL: return new InvalidGitUrlError(errorResponse);
|
|
559
629
|
case ErrorCode.GIT_OPERATION_FAILED: return new GitError(errorResponse);
|
|
630
|
+
case ErrorCode.BACKUP_NOT_FOUND: return new BackupNotFoundError(errorResponse);
|
|
631
|
+
case ErrorCode.BACKUP_EXPIRED: return new BackupExpiredError(errorResponse);
|
|
632
|
+
case ErrorCode.INVALID_BACKUP_CONFIG: return new InvalidBackupConfigError(errorResponse);
|
|
633
|
+
case ErrorCode.BACKUP_CREATE_FAILED: return new BackupCreateError(errorResponse);
|
|
634
|
+
case ErrorCode.BACKUP_RESTORE_FAILED: return new BackupRestoreError(errorResponse);
|
|
560
635
|
case ErrorCode.INTERPRETER_NOT_READY: return new InterpreterNotReadyError(errorResponse);
|
|
561
636
|
case ErrorCode.CONTEXT_NOT_FOUND: return new ContextNotFoundError(errorResponse);
|
|
562
637
|
case ErrorCode.CODE_EXECUTION_ERROR: return new CodeExecutionError(errorResponse);
|
|
@@ -1254,6 +1329,60 @@ var BaseHttpClient = class {
|
|
|
1254
1329
|
}
|
|
1255
1330
|
};
|
|
1256
1331
|
|
|
1332
|
+
//#endregion
|
|
1333
|
+
//#region src/clients/backup-client.ts
|
|
1334
|
+
/**
|
|
1335
|
+
* Client for backup operations.
|
|
1336
|
+
*
|
|
1337
|
+
* Handles communication with the container's backup endpoints.
|
|
1338
|
+
* The container creates/extracts squashfs archives locally.
|
|
1339
|
+
* R2 upload/download is handled by the Sandbox DO, not by this client.
|
|
1340
|
+
*/
|
|
1341
|
+
var BackupClient = class extends BaseHttpClient {
|
|
1342
|
+
/**
|
|
1343
|
+
* Tell the container to create a squashfs archive from a directory.
|
|
1344
|
+
* @param dir - Directory to back up
|
|
1345
|
+
* @param archivePath - Where the container should write the archive
|
|
1346
|
+
* @param sessionId - Session context
|
|
1347
|
+
*/
|
|
1348
|
+
async createArchive(dir, archivePath, sessionId) {
|
|
1349
|
+
try {
|
|
1350
|
+
const data = {
|
|
1351
|
+
dir,
|
|
1352
|
+
archivePath,
|
|
1353
|
+
sessionId
|
|
1354
|
+
};
|
|
1355
|
+
const response = await this.post("/api/backup/create", data);
|
|
1356
|
+
this.logSuccess("Backup archive created", `${dir} -> ${archivePath}`);
|
|
1357
|
+
return response;
|
|
1358
|
+
} catch (error) {
|
|
1359
|
+
this.logError("createArchive", error);
|
|
1360
|
+
throw error;
|
|
1361
|
+
}
|
|
1362
|
+
}
|
|
1363
|
+
/**
|
|
1364
|
+
* Tell the container to restore a squashfs archive into a directory.
|
|
1365
|
+
* @param dir - Target directory
|
|
1366
|
+
* @param archivePath - Path to the archive file in the container
|
|
1367
|
+
* @param sessionId - Session context
|
|
1368
|
+
*/
|
|
1369
|
+
async restoreArchive(dir, archivePath, sessionId) {
|
|
1370
|
+
try {
|
|
1371
|
+
const data = {
|
|
1372
|
+
dir,
|
|
1373
|
+
archivePath,
|
|
1374
|
+
sessionId
|
|
1375
|
+
};
|
|
1376
|
+
const response = await this.post("/api/backup/restore", data);
|
|
1377
|
+
this.logSuccess("Backup archive restored", `${archivePath} -> ${dir}`);
|
|
1378
|
+
return response;
|
|
1379
|
+
} catch (error) {
|
|
1380
|
+
this.logError("restoreArchive", error);
|
|
1381
|
+
throw error;
|
|
1382
|
+
}
|
|
1383
|
+
}
|
|
1384
|
+
};
|
|
1385
|
+
|
|
1257
1386
|
//#endregion
|
|
1258
1387
|
//#region src/clients/command-client.ts
|
|
1259
1388
|
/**
|
|
@@ -2010,6 +2139,7 @@ var UtilityClient = class extends BaseHttpClient {
|
|
|
2010
2139
|
* WebSocket mode reduces sub-request count when running inside Workers/Durable Objects.
|
|
2011
2140
|
*/
|
|
2012
2141
|
var SandboxClient = class {
|
|
2142
|
+
backup;
|
|
2013
2143
|
commands;
|
|
2014
2144
|
files;
|
|
2015
2145
|
processes;
|
|
@@ -2032,6 +2162,7 @@ var SandboxClient = class {
|
|
|
2032
2162
|
...options,
|
|
2033
2163
|
transport: this.transport ?? options.transport
|
|
2034
2164
|
};
|
|
2165
|
+
this.backup = new BackupClient(clientOptions);
|
|
2035
2166
|
this.commands = new CommandClient(clientOptions);
|
|
2036
2167
|
this.files = new FileClient(clientOptions);
|
|
2037
2168
|
this.processes = new ProcessClient(clientOptions);
|
|
@@ -2567,7 +2698,7 @@ function buildS3fsSource(bucket, prefix) {
|
|
|
2567
2698
|
* This file is auto-updated by .github/changeset-version.ts during releases
|
|
2568
2699
|
* DO NOT EDIT MANUALLY - Changes will be overwritten on the next version bump
|
|
2569
2700
|
*/
|
|
2570
|
-
const SDK_VERSION = "0.7.
|
|
2701
|
+
const SDK_VERSION = "0.7.5";
|
|
2571
2702
|
|
|
2572
2703
|
//#endregion
|
|
2573
2704
|
//#region src/sandbox.ts
|
|
@@ -2612,7 +2743,14 @@ function connect(stub) {
|
|
|
2612
2743
|
return await stub.fetch(portSwitchedRequest);
|
|
2613
2744
|
};
|
|
2614
2745
|
}
|
|
2615
|
-
|
|
2746
|
+
/**
|
|
2747
|
+
* Type guard for R2Bucket binding.
|
|
2748
|
+
* Checks for the minimal R2Bucket interface methods we use.
|
|
2749
|
+
*/
|
|
2750
|
+
function isR2Bucket(value) {
|
|
2751
|
+
return typeof value === "object" && value !== null && "put" in value && typeof value.put === "function" && "get" in value && typeof value.get === "function" && "head" in value && typeof value.head === "function" && "delete" in value && typeof value.delete === "function";
|
|
2752
|
+
}
|
|
2753
|
+
var Sandbox = class Sandbox extends Container {
|
|
2616
2754
|
defaultPort = 3e3;
|
|
2617
2755
|
sleepAfter = "10m";
|
|
2618
2756
|
client;
|
|
@@ -2626,6 +2764,24 @@ var Sandbox = class extends Container {
|
|
|
2626
2764
|
keepAliveEnabled = false;
|
|
2627
2765
|
activeMounts = /* @__PURE__ */ new Map();
|
|
2628
2766
|
transport = "http";
|
|
2767
|
+
backupBucket = null;
|
|
2768
|
+
/**
|
|
2769
|
+
* Serializes backup operations to prevent concurrent create/restore on the same sandbox.
|
|
2770
|
+
*
|
|
2771
|
+
* This is in-memory state — it resets if the Durable Object is evicted and
|
|
2772
|
+
* re-instantiated (e.g. after sleep). This is acceptable because the container
|
|
2773
|
+
* filesystem is also lost on eviction, so there is no archive to race on.
|
|
2774
|
+
*/
|
|
2775
|
+
backupInProgress = Promise.resolve();
|
|
2776
|
+
/**
|
|
2777
|
+
* R2 presigned URL credentials for direct container-to-R2 transfers.
|
|
2778
|
+
* All four fields plus the R2 binding must be configured for backup to work.
|
|
2779
|
+
*/
|
|
2780
|
+
r2AccessKeyId = null;
|
|
2781
|
+
r2SecretAccessKey = null;
|
|
2782
|
+
r2AccountId = null;
|
|
2783
|
+
backupBucketName = null;
|
|
2784
|
+
r2Client = null;
|
|
2629
2785
|
/**
|
|
2630
2786
|
* Default container startup timeouts (conservative for production)
|
|
2631
2787
|
* Based on Cloudflare docs: "Containers take several minutes to provision"
|
|
@@ -2668,6 +2824,16 @@ var Sandbox = class extends Container {
|
|
|
2668
2824
|
const transportEnv = envObj?.SANDBOX_TRANSPORT;
|
|
2669
2825
|
if (transportEnv === "websocket") this.transport = "websocket";
|
|
2670
2826
|
else if (transportEnv != null && transportEnv !== "http") this.logger.warn(`Invalid SANDBOX_TRANSPORT value: "${transportEnv}". Must be "http" or "websocket". Defaulting to "http".`);
|
|
2827
|
+
const backupBucket = envObj?.BACKUP_BUCKET;
|
|
2828
|
+
if (isR2Bucket(backupBucket)) this.backupBucket = backupBucket;
|
|
2829
|
+
this.r2AccountId = getEnvString(envObj, "CLOUDFLARE_ACCOUNT_ID") ?? null;
|
|
2830
|
+
this.r2AccessKeyId = getEnvString(envObj, "R2_ACCESS_KEY_ID") ?? null;
|
|
2831
|
+
this.r2SecretAccessKey = getEnvString(envObj, "R2_SECRET_ACCESS_KEY") ?? null;
|
|
2832
|
+
this.backupBucketName = getEnvString(envObj, "BACKUP_BUCKET_NAME") ?? null;
|
|
2833
|
+
if (this.r2AccessKeyId && this.r2SecretAccessKey) this.r2Client = new AwsClient({
|
|
2834
|
+
accessKeyId: this.r2AccessKeyId,
|
|
2835
|
+
secretAccessKey: this.r2SecretAccessKey
|
|
2836
|
+
});
|
|
2671
2837
|
this.client = this.createSandboxClient();
|
|
2672
2838
|
this.codeInterpreter = new CodeInterpreter(this);
|
|
2673
2839
|
this.ctx.blockConcurrencyWhile(async () => {
|
|
@@ -3896,7 +4062,9 @@ var Sandbox = class extends Container {
|
|
|
3896
4062
|
listCodeContexts: () => this.codeInterpreter.listCodeContexts(),
|
|
3897
4063
|
deleteCodeContext: (contextId) => this.codeInterpreter.deleteCodeContext(contextId),
|
|
3898
4064
|
mountBucket: (bucket, mountPath, options) => this.mountBucket(bucket, mountPath, options),
|
|
3899
|
-
unmountBucket: (mountPath) => this.unmountBucket(mountPath)
|
|
4065
|
+
unmountBucket: (mountPath) => this.unmountBucket(mountPath),
|
|
4066
|
+
createBackup: (options) => this.createBackup(options),
|
|
4067
|
+
restoreBackup: (backup) => this.restoreBackup(backup)
|
|
3900
4068
|
};
|
|
3901
4069
|
}
|
|
3902
4070
|
async createCodeContext(options) {
|
|
@@ -3914,6 +4082,471 @@ var Sandbox = class extends Container {
|
|
|
3914
4082
|
async deleteCodeContext(contextId) {
|
|
3915
4083
|
return this.codeInterpreter.deleteCodeContext(contextId);
|
|
3916
4084
|
}
|
|
4085
|
+
/** UUID v4 format validator for backup IDs */
|
|
4086
|
+
static UUID_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;
|
|
4087
|
+
/**
|
|
4088
|
+
* Validate that a directory path is safe for backup operations.
|
|
4089
|
+
* Rejects empty, relative, traversal, and null-byte paths.
|
|
4090
|
+
*/
|
|
4091
|
+
static validateBackupDir(dir, label) {
|
|
4092
|
+
if (!dir || !dir.startsWith("/")) throw new InvalidBackupConfigError({
|
|
4093
|
+
message: `${label} must be an absolute path`,
|
|
4094
|
+
code: ErrorCode.INVALID_BACKUP_CONFIG,
|
|
4095
|
+
httpStatus: 400,
|
|
4096
|
+
context: { reason: `${label} must be an absolute path` },
|
|
4097
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
4098
|
+
});
|
|
4099
|
+
if (dir.includes("\0")) throw new InvalidBackupConfigError({
|
|
4100
|
+
message: `${label} must not contain null bytes`,
|
|
4101
|
+
code: ErrorCode.INVALID_BACKUP_CONFIG,
|
|
4102
|
+
httpStatus: 400,
|
|
4103
|
+
context: { reason: `${label} must not contain null bytes` },
|
|
4104
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
4105
|
+
});
|
|
4106
|
+
if (dir.split("/").includes("..")) throw new InvalidBackupConfigError({
|
|
4107
|
+
message: `${label} must not contain ".." path segments`,
|
|
4108
|
+
code: ErrorCode.INVALID_BACKUP_CONFIG,
|
|
4109
|
+
httpStatus: 400,
|
|
4110
|
+
context: { reason: `${label} must not contain ".." path segments` },
|
|
4111
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
4112
|
+
});
|
|
4113
|
+
}
|
|
4114
|
+
/**
|
|
4115
|
+
* Returns the R2 bucket or throws if backup is not configured.
|
|
4116
|
+
*/
|
|
4117
|
+
requireBackupBucket() {
|
|
4118
|
+
if (!this.backupBucket) throw new InvalidBackupConfigError({
|
|
4119
|
+
message: "Backup not configured. Add a BACKUP_BUCKET R2 binding to your wrangler.jsonc.",
|
|
4120
|
+
code: ErrorCode.INVALID_BACKUP_CONFIG,
|
|
4121
|
+
httpStatus: 400,
|
|
4122
|
+
context: { reason: "Missing BACKUP_BUCKET R2 binding" },
|
|
4123
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
4124
|
+
});
|
|
4125
|
+
return this.backupBucket;
|
|
4126
|
+
}
|
|
4127
|
+
static PRESIGNED_URL_EXPIRY_SECONDS = 3600;
|
|
4128
|
+
/**
|
|
4129
|
+
* Ensure a dedicated session for backup operations exists.
|
|
4130
|
+
* Isolates backup shell commands (curl, stat, rm, mkdir) from user exec()
|
|
4131
|
+
* calls to prevent session state interference and interleaving.
|
|
4132
|
+
*/
|
|
4133
|
+
async ensureBackupSession() {
|
|
4134
|
+
const sessionId = "__sandbox_backup__";
|
|
4135
|
+
try {
|
|
4136
|
+
await this.client.utils.createSession({
|
|
4137
|
+
id: sessionId,
|
|
4138
|
+
cwd: "/"
|
|
4139
|
+
});
|
|
4140
|
+
} catch (error) {
|
|
4141
|
+
if (!(error instanceof SessionAlreadyExistsError)) throw error;
|
|
4142
|
+
}
|
|
4143
|
+
return sessionId;
|
|
4144
|
+
}
|
|
4145
|
+
/**
|
|
4146
|
+
* Returns validated presigned URL configuration or throws if not configured.
|
|
4147
|
+
* All credential fields plus the R2 binding are required for backup to work.
|
|
4148
|
+
*/
|
|
4149
|
+
requirePresignedUrlSupport() {
|
|
4150
|
+
if (!this.r2Client || !this.r2AccountId || !this.backupBucketName) {
|
|
4151
|
+
const missing = [];
|
|
4152
|
+
if (!this.r2AccountId) missing.push("CLOUDFLARE_ACCOUNT_ID");
|
|
4153
|
+
if (!this.r2AccessKeyId) missing.push("R2_ACCESS_KEY_ID");
|
|
4154
|
+
if (!this.r2SecretAccessKey) missing.push("R2_SECRET_ACCESS_KEY");
|
|
4155
|
+
if (!this.backupBucketName) missing.push("BACKUP_BUCKET_NAME");
|
|
4156
|
+
throw new InvalidBackupConfigError({
|
|
4157
|
+
message: `Backup requires R2 presigned URL credentials. Missing: ${missing.join(", ")}. Set these as environment variables or secrets in your wrangler.jsonc.`,
|
|
4158
|
+
code: ErrorCode.INVALID_BACKUP_CONFIG,
|
|
4159
|
+
httpStatus: 400,
|
|
4160
|
+
context: { reason: `Missing env vars: ${missing.join(", ")}` },
|
|
4161
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
4162
|
+
});
|
|
4163
|
+
}
|
|
4164
|
+
return {
|
|
4165
|
+
client: this.r2Client,
|
|
4166
|
+
accountId: this.r2AccountId,
|
|
4167
|
+
bucketName: this.backupBucketName
|
|
4168
|
+
};
|
|
4169
|
+
}
|
|
4170
|
+
/**
|
|
4171
|
+
* Generate a presigned GET URL for downloading an object from R2.
|
|
4172
|
+
* The container can curl this URL directly without credentials.
|
|
4173
|
+
*/
|
|
4174
|
+
async generatePresignedGetUrl(r2Key) {
|
|
4175
|
+
const { client, accountId, bucketName } = this.requirePresignedUrlSupport();
|
|
4176
|
+
const encodedBucket = encodeURIComponent(bucketName);
|
|
4177
|
+
const encodedKey = r2Key.split("/").map((seg) => encodeURIComponent(seg)).join("/");
|
|
4178
|
+
const url = new URL(`https://${accountId}.r2.cloudflarestorage.com/${encodedBucket}/${encodedKey}`);
|
|
4179
|
+
url.searchParams.set("X-Amz-Expires", String(Sandbox.PRESIGNED_URL_EXPIRY_SECONDS));
|
|
4180
|
+
return (await client.sign(new Request(url), { aws: { signQuery: true } })).url;
|
|
4181
|
+
}
|
|
4182
|
+
/**
|
|
4183
|
+
* Generate a presigned PUT URL for uploading an object to R2.
|
|
4184
|
+
* The container can curl PUT to this URL directly without credentials.
|
|
4185
|
+
*/
|
|
4186
|
+
async generatePresignedPutUrl(r2Key) {
|
|
4187
|
+
const { client, accountId, bucketName } = this.requirePresignedUrlSupport();
|
|
4188
|
+
const encodedBucket = encodeURIComponent(bucketName);
|
|
4189
|
+
const encodedKey = r2Key.split("/").map((seg) => encodeURIComponent(seg)).join("/");
|
|
4190
|
+
const url = new URL(`https://${accountId}.r2.cloudflarestorage.com/${encodedBucket}/${encodedKey}`);
|
|
4191
|
+
url.searchParams.set("X-Amz-Expires", String(Sandbox.PRESIGNED_URL_EXPIRY_SECONDS));
|
|
4192
|
+
return (await client.sign(new Request(url, { method: "PUT" }), { aws: { signQuery: true } })).url;
|
|
4193
|
+
}
|
|
4194
|
+
/**
|
|
4195
|
+
* Upload a backup archive via presigned PUT URL.
|
|
4196
|
+
* The container curls the archive directly to R2, bypassing the DO.
|
|
4197
|
+
* ~24 MB/s throughput vs ~0.6 MB/s for base64 readFile.
|
|
4198
|
+
*/
|
|
4199
|
+
async uploadBackupPresigned(archivePath, r2Key, archiveSize, backupId, dir, backupSession) {
|
|
4200
|
+
const presignedUrl = await this.generatePresignedPutUrl(r2Key);
|
|
4201
|
+
this.logger.info("Uploading backup via presigned PUT", {
|
|
4202
|
+
r2Key,
|
|
4203
|
+
archiveSize,
|
|
4204
|
+
backupId
|
|
4205
|
+
});
|
|
4206
|
+
const curlCmd = [
|
|
4207
|
+
"curl -sSf",
|
|
4208
|
+
"-X PUT",
|
|
4209
|
+
"-H 'Content-Type: application/octet-stream'",
|
|
4210
|
+
"--connect-timeout 10",
|
|
4211
|
+
"--max-time 300",
|
|
4212
|
+
"--retry 2",
|
|
4213
|
+
"--retry-max-time 60",
|
|
4214
|
+
`--data-binary @${shellEscape(archivePath)}`,
|
|
4215
|
+
shellEscape(presignedUrl)
|
|
4216
|
+
].join(" ");
|
|
4217
|
+
const result = await this.execWithSession(curlCmd, backupSession, { timeout: 31e4 });
|
|
4218
|
+
if (result.exitCode !== 0) throw new BackupCreateError({
|
|
4219
|
+
message: `Presigned URL upload failed (exit code ${result.exitCode}): ${result.stderr}`,
|
|
4220
|
+
code: ErrorCode.BACKUP_CREATE_FAILED,
|
|
4221
|
+
httpStatus: 500,
|
|
4222
|
+
context: {
|
|
4223
|
+
dir,
|
|
4224
|
+
backupId
|
|
4225
|
+
},
|
|
4226
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
4227
|
+
});
|
|
4228
|
+
const head = await this.requireBackupBucket().head(r2Key);
|
|
4229
|
+
if (!head || head.size !== archiveSize) throw new BackupCreateError({
|
|
4230
|
+
message: `Upload verification failed: expected ${archiveSize} bytes, got ${head?.size ?? 0}`,
|
|
4231
|
+
code: ErrorCode.BACKUP_CREATE_FAILED,
|
|
4232
|
+
httpStatus: 500,
|
|
4233
|
+
context: {
|
|
4234
|
+
dir,
|
|
4235
|
+
backupId
|
|
4236
|
+
},
|
|
4237
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
4238
|
+
});
|
|
4239
|
+
}
|
|
4240
|
+
/**
|
|
4241
|
+
* Download a backup archive via presigned GET URL.
|
|
4242
|
+
* The container curls the archive directly from R2, bypassing the DO.
|
|
4243
|
+
* ~93 MB/s throughput vs ~0.6 MB/s for base64 writeFile.
|
|
4244
|
+
*/
|
|
4245
|
+
async downloadBackupPresigned(archivePath, r2Key, expectedSize, backupId, dir, backupSession) {
|
|
4246
|
+
const presignedUrl = await this.generatePresignedGetUrl(r2Key);
|
|
4247
|
+
this.logger.info("Downloading backup via presigned GET", {
|
|
4248
|
+
r2Key,
|
|
4249
|
+
expectedSize,
|
|
4250
|
+
backupId
|
|
4251
|
+
});
|
|
4252
|
+
await this.execWithSession("mkdir -p /var/backups", backupSession);
|
|
4253
|
+
const tmpPath = `${archivePath}.tmp`;
|
|
4254
|
+
const curlCmd = [
|
|
4255
|
+
"curl -sSf",
|
|
4256
|
+
"--connect-timeout 10",
|
|
4257
|
+
"--max-time 300",
|
|
4258
|
+
"--retry 2",
|
|
4259
|
+
"--retry-max-time 60",
|
|
4260
|
+
`-o ${shellEscape(tmpPath)}`,
|
|
4261
|
+
shellEscape(presignedUrl)
|
|
4262
|
+
].join(" ");
|
|
4263
|
+
const result = await this.execWithSession(curlCmd, backupSession, { timeout: 31e4 });
|
|
4264
|
+
if (result.exitCode !== 0) {
|
|
4265
|
+
await this.execWithSession(`rm -f ${shellEscape(tmpPath)}`, backupSession).catch(() => {});
|
|
4266
|
+
throw new BackupRestoreError({
|
|
4267
|
+
message: `Presigned URL download failed (exit code ${result.exitCode}): ${result.stderr}`,
|
|
4268
|
+
code: ErrorCode.BACKUP_RESTORE_FAILED,
|
|
4269
|
+
httpStatus: 500,
|
|
4270
|
+
context: {
|
|
4271
|
+
dir,
|
|
4272
|
+
backupId
|
|
4273
|
+
},
|
|
4274
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
4275
|
+
});
|
|
4276
|
+
}
|
|
4277
|
+
const sizeCheck = await this.execWithSession(`stat -c %s ${shellEscape(tmpPath)}`, backupSession);
|
|
4278
|
+
const actualSize = parseInt(sizeCheck.stdout.trim(), 10);
|
|
4279
|
+
if (actualSize !== expectedSize) {
|
|
4280
|
+
await this.execWithSession(`rm -f ${shellEscape(tmpPath)}`, backupSession).catch(() => {});
|
|
4281
|
+
throw new BackupRestoreError({
|
|
4282
|
+
message: `Downloaded archive size mismatch: expected ${expectedSize}, got ${actualSize}`,
|
|
4283
|
+
code: ErrorCode.BACKUP_RESTORE_FAILED,
|
|
4284
|
+
httpStatus: 500,
|
|
4285
|
+
context: {
|
|
4286
|
+
dir,
|
|
4287
|
+
backupId
|
|
4288
|
+
},
|
|
4289
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
4290
|
+
});
|
|
4291
|
+
}
|
|
4292
|
+
const mvResult = await this.execWithSession(`mv ${shellEscape(tmpPath)} ${shellEscape(archivePath)}`, backupSession);
|
|
4293
|
+
if (mvResult.exitCode !== 0) {
|
|
4294
|
+
await this.execWithSession(`rm -f ${shellEscape(tmpPath)}`, backupSession).catch(() => {});
|
|
4295
|
+
throw new BackupRestoreError({
|
|
4296
|
+
message: `Failed to finalize downloaded archive: ${mvResult.stderr}`,
|
|
4297
|
+
code: ErrorCode.BACKUP_RESTORE_FAILED,
|
|
4298
|
+
httpStatus: 500,
|
|
4299
|
+
context: {
|
|
4300
|
+
dir,
|
|
4301
|
+
backupId
|
|
4302
|
+
},
|
|
4303
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
4304
|
+
});
|
|
4305
|
+
}
|
|
4306
|
+
}
|
|
4307
|
+
/**
|
|
4308
|
+
* Serialize backup operations on this sandbox instance.
|
|
4309
|
+
* Concurrent backup/restore calls are queued so the multi-step
|
|
4310
|
+
* create-archive → read → upload (or download → write → extract) flow
|
|
4311
|
+
* is not interleaved with another backup operation on the same directory.
|
|
4312
|
+
*/
|
|
4313
|
+
enqueueBackupOp(fn) {
|
|
4314
|
+
const next = this.backupInProgress.then(fn, () => fn());
|
|
4315
|
+
this.backupInProgress = next.catch(() => {});
|
|
4316
|
+
return next;
|
|
4317
|
+
}
|
|
4318
|
+
/**
|
|
4319
|
+
* Create a backup of a directory and upload it to R2.
|
|
4320
|
+
*
|
|
4321
|
+
* Flow:
|
|
4322
|
+
* 1. Container creates squashfs archive from the directory
|
|
4323
|
+
* 2. Container uploads the archive directly to R2 via presigned URL
|
|
4324
|
+
* 3. DO writes metadata to R2
|
|
4325
|
+
* 4. Container cleans up the local archive
|
|
4326
|
+
*
|
|
4327
|
+
* The returned DirectoryBackup handle is serializable. Store it anywhere
|
|
4328
|
+
* (KV, D1, DO storage) and pass it to restoreBackup() later.
|
|
4329
|
+
*
|
|
4330
|
+
* Concurrent backup/restore calls on the same sandbox are serialized.
|
|
4331
|
+
*
|
|
4332
|
+
* Partially-written files in the target directory may not be captured
|
|
4333
|
+
* consistently. Completed writes are captured.
|
|
4334
|
+
*
|
|
4335
|
+
* NOTE: Expired backups are not automatically deleted from R2. Configure
|
|
4336
|
+
* R2 lifecycle rules on the BACKUP_BUCKET to garbage-collect objects
|
|
4337
|
+
* under the `backups/` prefix after the desired retention period.
|
|
4338
|
+
*/
|
|
4339
|
+
async createBackup(options) {
|
|
4340
|
+
this.requireBackupBucket();
|
|
4341
|
+
return this.enqueueBackupOp(() => this.doCreateBackup(options));
|
|
4342
|
+
}
|
|
4343
|
+
async doCreateBackup(options) {
|
|
4344
|
+
const bucket = this.requireBackupBucket();
|
|
4345
|
+
this.requirePresignedUrlSupport();
|
|
4346
|
+
const DEFAULT_TTL_SECONDS = 259200;
|
|
4347
|
+
const MAX_NAME_LENGTH = 256;
|
|
4348
|
+
const { dir, name, ttl = DEFAULT_TTL_SECONDS } = options;
|
|
4349
|
+
Sandbox.validateBackupDir(dir, "BackupOptions.dir");
|
|
4350
|
+
if (name !== void 0) {
|
|
4351
|
+
if (typeof name !== "string" || name.length > MAX_NAME_LENGTH) throw new InvalidBackupConfigError({
|
|
4352
|
+
message: `BackupOptions.name must be a string of at most ${MAX_NAME_LENGTH} characters`,
|
|
4353
|
+
code: ErrorCode.INVALID_BACKUP_CONFIG,
|
|
4354
|
+
httpStatus: 400,
|
|
4355
|
+
context: { reason: `name must be a string of at most ${MAX_NAME_LENGTH} characters` },
|
|
4356
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
4357
|
+
});
|
|
4358
|
+
if (/[\u0000-\u001f\u007f]/.test(name)) throw new InvalidBackupConfigError({
|
|
4359
|
+
message: "BackupOptions.name must not contain control characters",
|
|
4360
|
+
code: ErrorCode.INVALID_BACKUP_CONFIG,
|
|
4361
|
+
httpStatus: 400,
|
|
4362
|
+
context: { reason: "name must not contain control characters" },
|
|
4363
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
4364
|
+
});
|
|
4365
|
+
}
|
|
4366
|
+
if (ttl <= 0) throw new InvalidBackupConfigError({
|
|
4367
|
+
message: "BackupOptions.ttl must be a positive number of seconds",
|
|
4368
|
+
code: ErrorCode.INVALID_BACKUP_CONFIG,
|
|
4369
|
+
httpStatus: 400,
|
|
4370
|
+
context: { reason: "ttl must be a positive number of seconds" },
|
|
4371
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
4372
|
+
});
|
|
4373
|
+
const backupSession = await this.ensureBackupSession();
|
|
4374
|
+
const backupId = crypto.randomUUID();
|
|
4375
|
+
const archivePath = `/var/backups/${backupId}.sqsh`;
|
|
4376
|
+
this.logger.info("Creating backup", {
|
|
4377
|
+
backupId,
|
|
4378
|
+
dir,
|
|
4379
|
+
name
|
|
4380
|
+
});
|
|
4381
|
+
const createResult = await this.client.backup.createArchive(dir, archivePath, backupSession);
|
|
4382
|
+
if (!createResult.success) throw new BackupCreateError({
|
|
4383
|
+
message: "Container failed to create backup archive",
|
|
4384
|
+
code: ErrorCode.BACKUP_CREATE_FAILED,
|
|
4385
|
+
httpStatus: 500,
|
|
4386
|
+
context: {
|
|
4387
|
+
dir,
|
|
4388
|
+
backupId
|
|
4389
|
+
},
|
|
4390
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
4391
|
+
});
|
|
4392
|
+
const r2Key = `backups/${backupId}/data.sqsh`;
|
|
4393
|
+
const metaKey = `backups/${backupId}/meta.json`;
|
|
4394
|
+
try {
|
|
4395
|
+
await this.uploadBackupPresigned(archivePath, r2Key, createResult.sizeBytes, backupId, dir, backupSession);
|
|
4396
|
+
const metadata = {
|
|
4397
|
+
id: backupId,
|
|
4398
|
+
dir,
|
|
4399
|
+
name: name || null,
|
|
4400
|
+
sizeBytes: createResult.sizeBytes,
|
|
4401
|
+
ttl,
|
|
4402
|
+
createdAt: (/* @__PURE__ */ new Date()).toISOString()
|
|
4403
|
+
};
|
|
4404
|
+
await bucket.put(metaKey, JSON.stringify(metadata));
|
|
4405
|
+
this.logger.info("Backup uploaded to R2", {
|
|
4406
|
+
backupId,
|
|
4407
|
+
r2Key,
|
|
4408
|
+
sizeBytes: createResult.sizeBytes
|
|
4409
|
+
});
|
|
4410
|
+
await this.execWithSession(`rm -f ${shellEscape(archivePath)}`, backupSession).catch(() => {});
|
|
4411
|
+
return {
|
|
4412
|
+
id: backupId,
|
|
4413
|
+
dir
|
|
4414
|
+
};
|
|
4415
|
+
} catch (error) {
|
|
4416
|
+
await this.execWithSession(`rm -f ${shellEscape(archivePath)}`, backupSession).catch(() => {});
|
|
4417
|
+
await bucket.delete(r2Key).catch(() => {});
|
|
4418
|
+
await bucket.delete(metaKey).catch(() => {});
|
|
4419
|
+
throw error;
|
|
4420
|
+
}
|
|
4421
|
+
}
|
|
4422
|
+
/**
|
|
4423
|
+
* Restore a backup from R2 into a directory.
|
|
4424
|
+
*
|
|
4425
|
+
* Flow:
|
|
4426
|
+
* 1. DO reads metadata from R2 and checks TTL
|
|
4427
|
+
* 2. Container downloads the archive directly from R2 via presigned URL
|
|
4428
|
+
* 3. Container mounts the squashfs archive with FUSE overlayfs
|
|
4429
|
+
*
|
|
4430
|
+
* The target directory becomes an overlay mount with the backup as a
|
|
4431
|
+
* read-only lower layer and a writable upper layer for copy-on-write.
|
|
4432
|
+
* Any processes writing to the directory should be stopped first.
|
|
4433
|
+
*
|
|
4434
|
+
* **Mount Lifecycle**: The FUSE overlay mount persists only while the
|
|
4435
|
+
* container is running. When the sandbox sleeps or the container restarts,
|
|
4436
|
+
* the mount is lost and the directory becomes empty. Re-restore from the
|
|
4437
|
+
* backup handle to recover. This is an ephemeral restore, not a persistent
|
|
4438
|
+
* extraction.
|
|
4439
|
+
*
|
|
4440
|
+
* The backup is restored into `backup.dir`. This may differ from the
|
|
4441
|
+
* directory that was originally backed up, allowing cross-directory restore.
|
|
4442
|
+
*
|
|
4443
|
+
* Overlapping backups are independent: restoring a parent directory
|
|
4444
|
+
* overwrites everything inside it, including subdirectories that were
|
|
4445
|
+
* backed up separately. When restoring both, restore the parent first.
|
|
4446
|
+
*
|
|
4447
|
+
* Concurrent backup/restore calls on the same sandbox are serialized.
|
|
4448
|
+
*/
|
|
4449
|
+
async restoreBackup(backup) {
|
|
4450
|
+
this.requireBackupBucket();
|
|
4451
|
+
return this.enqueueBackupOp(() => this.doRestoreBackup(backup));
|
|
4452
|
+
}
|
|
4453
|
+
/**
 * Internal restore implementation; callers go through restoreBackup(),
 * which serializes invocations via enqueueBackupOp.
 *
 * Validates the backup handle, checks metadata/TTL in R2, ensures the
 * archive is present in the container, then asks the container to mount
 * it over `dir`. Throws InvalidBackupConfigError, BackupNotFoundError,
 * BackupExpiredError, or BackupRestoreError on failure.
 */
async doRestoreBackup(backup) {
	const bucket = this.requireBackupBucket();
	// Presigned-URL support is required because the container downloads
	// the archive directly from R2 (see downloadBackupPresigned below).
	this.requirePresignedUrlSupport();
	const { id: backupId, dir } = backup;
	// --- Input validation: id must be a non-empty string UUID ---
	if (!backupId || typeof backupId !== "string") throw new InvalidBackupConfigError({
		message: "Invalid backup: missing or invalid id",
		code: ErrorCode.INVALID_BACKUP_CONFIG,
		httpStatus: 400,
		context: { reason: "missing or invalid id" },
		timestamp: (/* @__PURE__ */ new Date()).toISOString()
	});
	// The id is interpolated into R2 keys and shell paths below, so it
	// must match the strict UUID shape produced by createBackup.
	if (!Sandbox.UUID_REGEX.test(backupId)) throw new InvalidBackupConfigError({
		message: "Invalid backup: id must be a valid UUID (e.g. from createBackup)",
		code: ErrorCode.INVALID_BACKUP_CONFIG,
		httpStatus: 400,
		context: { reason: "id must be a valid UUID" },
		timestamp: (/* @__PURE__ */ new Date()).toISOString()
	});
	Sandbox.validateBackupDir(dir, "Invalid backup: dir");
	this.logger.info("Restoring backup", {
		backupId,
		dir
	});
	// --- Metadata lookup and TTL enforcement ---
	const metaKey = `backups/${backupId}/meta.json`;
	const metaObject = await bucket.get(metaKey);
	if (!metaObject) throw new BackupNotFoundError({
		message: `Backup not found: ${backupId}. Verify the backup ID is correct and the backup has not been deleted.`,
		code: ErrorCode.BACKUP_NOT_FOUND,
		httpStatus: 404,
		context: { backupId },
		timestamp: (/* @__PURE__ */ new Date()).toISOString()
	});
	const metadata = await metaObject.json();
	// Refuse restores within 60s of expiry so the mount doesn't outlive
	// the backup mid-operation.
	const TTL_BUFFER_MS = 60 * 1e3;
	const createdAt = new Date(metadata.createdAt).getTime();
	if (Number.isNaN(createdAt)) throw new BackupRestoreError({
		message: `Backup metadata has invalid createdAt timestamp: ${metadata.createdAt}`,
		code: ErrorCode.BACKUP_RESTORE_FAILED,
		httpStatus: 500,
		context: {
			dir,
			backupId
		},
		timestamp: (/* @__PURE__ */ new Date()).toISOString()
	});
	// NOTE(review): metadata.ttl is assumed to be a number of seconds; if
	// it were missing, expiresAt would be NaN and this check would be
	// silently skipped — presumably createBackup always writes it.
	const expiresAt = createdAt + metadata.ttl * 1e3;
	if (Date.now() + TTL_BUFFER_MS > expiresAt) throw new BackupExpiredError({
		message: `Backup ${backupId} has expired (created: ${metadata.createdAt}, TTL: ${metadata.ttl}s). Create a new backup.`,
		code: ErrorCode.BACKUP_EXPIRED,
		httpStatus: 400,
		context: {
			backupId,
			expiredAt: new Date(expiresAt).toISOString()
		},
		timestamp: (/* @__PURE__ */ new Date()).toISOString()
	});
	// --- Archive existence check (metadata can outlive the data object
	// if R2 lifecycle rules deleted it) ---
	const r2Key = `backups/${backupId}/data.sqsh`;
	const archiveHead = await bucket.head(r2Key);
	if (!archiveHead) throw new BackupNotFoundError({
		message: `Backup archive not found in R2: ${backupId}. The archive may have been deleted by R2 lifecycle rules.`,
		code: ErrorCode.BACKUP_NOT_FOUND,
		httpStatus: 404,
		context: { backupId },
		timestamp: (/* @__PURE__ */ new Date()).toISOString()
	});
	const backupSession = await this.ensureBackupSession();
	const archivePath = `/var/backups/${backupId}.sqsh`;
	try {
		const mountGlob = `/var/backups/mounts/${backupId}`;
		// Best-effort lazy unmount of any stale mounts from a previous
		// restore of this dir/backup; failures are ignored (-uz + || true).
		await this.execWithSession(`/usr/bin/fusermount3 -uz ${shellEscape(dir)} 2>/dev/null || true`, backupSession).catch(() => {});
		await this.execWithSession(`for d in ${shellEscape(mountGlob)}_*/lower ${shellEscape(mountGlob)}/lower; do [ -d "$d" ] && /usr/bin/fusermount3 -uz "$d" 2>/dev/null; done; true`, backupSession).catch(() => {});
		// Re-download only if the cached archive's byte size doesn't match
		// the R2 object; a missing file stats as 0 and always mismatches.
		const sizeCheck = await this.execWithSession(`stat -c %s ${shellEscape(archivePath)} 2>/dev/null || echo 0`, backupSession).catch(() => ({ stdout: "0" }));
		if (Number.parseInt((sizeCheck.stdout ?? "0").trim(), 10) !== archiveHead.size) await this.downloadBackupPresigned(archivePath, r2Key, archiveHead.size, backupId, dir, backupSession);
		// Ask the container to mount the archive over `dir`.
		if (!(await this.client.backup.restoreArchive(dir, archivePath, backupSession)).success) throw new BackupRestoreError({
			message: "Container failed to restore backup archive",
			code: ErrorCode.BACKUP_RESTORE_FAILED,
			httpStatus: 500,
			context: {
				dir,
				backupId
			},
			timestamp: (/* @__PURE__ */ new Date()).toISOString()
		});
		this.logger.info("Backup restored", {
			backupId,
			dir
		});
		return {
			success: true,
			dir,
			id: backupId
		};
	} catch (error) {
		// Best-effort removal of the (possibly partial) local archive so a
		// retry starts clean; the original error is always rethrown.
		await this.execWithSession(`rm -f ${shellEscape(archivePath)}`, backupSession).catch(() => {});
		throw error;
	}
}
|
|
3917
4550
|
};
|
|
3918
4551
|
|
|
3919
4552
|
//#endregion
|
|
@@ -4036,5 +4669,5 @@ async function collectFile(stream) {
|
|
|
4036
4669
|
}
|
|
4037
4670
|
|
|
4038
4671
|
//#endregion
|
|
4039
|
-
export { BucketMountError, CodeInterpreter, CommandClient, FileClient, GitClient, InvalidMountConfigError, MissingCredentialsError, PortClient, ProcessClient, ProcessExitedBeforeReadyError, ProcessReadyTimeoutError, S3FSMountError, Sandbox, SandboxClient, UtilityClient, asyncIterableToSSEStream, collectFile, getSandbox, isExecResult, isProcess, isProcessStatus, parseSSEStream, proxyTerminal, proxyToSandbox, responseToAsyncIterable, streamFile };
|
|
4672
|
+
export { BackupClient, BackupCreateError, BackupExpiredError, BackupNotFoundError, BackupRestoreError, BucketMountError, CodeInterpreter, CommandClient, FileClient, GitClient, InvalidBackupConfigError, InvalidMountConfigError, MissingCredentialsError, PortClient, ProcessClient, ProcessExitedBeforeReadyError, ProcessReadyTimeoutError, S3FSMountError, Sandbox, SandboxClient, UtilityClient, asyncIterableToSSEStream, collectFile, getSandbox, isExecResult, isProcess, isProcessStatus, parseSSEStream, proxyTerminal, proxyToSandbox, responseToAsyncIterable, streamFile };
|
|
4040
4673
|
//# sourceMappingURL=index.js.map
|