@townco/agent 0.1.141 → 0.1.143
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/acp-server/adapter.d.ts +1 -1
- package/dist/acp-server/adapter.js +9 -1
- package/dist/acp-server/session-storage.d.ts +1 -1
- package/dist/acp-server/session-storage.js +1 -1
- package/dist/runner/e2b-sandbox-manager.d.ts +10 -2
- package/dist/runner/e2b-sandbox-manager.js +35 -24
- package/dist/runner/hooks/predefined/compaction-tool.js +3 -5
- package/dist/runner/langchain/index.js +75 -61
- package/dist/runner/langchain/tools/e2b.js +59 -17
- package/dist/tsconfig.tsbuildinfo +1 -1
- package/package.json +7 -7
- package/dist/runner/langchain/tools/conversation_search.d.ts +0 -22
- package/dist/runner/langchain/tools/conversation_search.js +0 -137
package/dist/acp-server/adapter.js
@@ -465,7 +465,15 @@ export class AgentAcpAdapter {
             return citationSource;
         };
         // Check if this is a search results array (library__search_keyword)
-
+        // Results may be at top level or nested in structuredContent (MCP tool wrapper)
+        const structuredContent = typeof outputContent.structuredContent === "object" &&
+            outputContent.structuredContent !== null
+            ? outputContent.structuredContent
+            : null;
+        const results = outputContent.results ??
+            outputContent.documents ??
+            structuredContent?.results ??
+            structuredContent?.documents;
         if (Array.isArray(results)) {
             // Handle array of search results
             logger.debug("Processing library search results array", {
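The new fallback chain above tolerates library search output whether the MCP layer returns results at the top level or wraps them in structuredContent. A minimal standalone sketch of the same lookup (the sample payloads and the pickResults helper name are illustrative, not part of the package):

// Hypothetical MCP-wrapped output: results nested under structuredContent
const wrapped = { structuredContent: { results: [{ url: "https://example.com/doc", title: "Doc" }] } };
// Plain output: results at the top level
const plain = { results: [{ url: "https://example.com/doc", title: "Doc" }] };

function pickResults(outputContent: Record<string, unknown>): unknown[] | undefined {
    const structuredContent =
        typeof outputContent.structuredContent === "object" && outputContent.structuredContent !== null
            ? (outputContent.structuredContent as Record<string, unknown>)
            : null;
    return ((outputContent.results ??
        outputContent.documents ??
        structuredContent?.results ??
        structuredContent?.documents) as unknown[] | undefined);
}

pickResults(wrapped); // -> [{ url: "https://example.com/doc", title: "Doc" }]
pickResults(plain);   // -> same array, taken from the top level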
package/dist/acp-server/session-storage.js
@@ -168,7 +168,7 @@ const sessionMetadataSchema = z.object({
 // Citation schemas - matches SourceSchema from packages/ui/src/core/schemas/source.ts
 const persistedCitationSourceSchema = z.object({
     id: z.string(),
-    url: z.string(),
+    url: z.string().optional(), // Optional for backward compatibility with sessions that have missing URLs
     title: z.string(),
     snippet: z.string().optional(),
     favicon: z.string().optional(),
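Making url optional matters when older persisted sessions are loaded: sources saved before URLs were reliably recorded no longer fail validation. A small sketch of the effect, trimmed to the fields involved (the real schema has more fields):

import { z } from "zod";

const persistedCitationSourceSchema = z.object({
    id: z.string(),
    url: z.string().optional(), // older sessions may lack a URL
    title: z.string(),
});

// A source persisted by an earlier version without a URL
persistedCitationSourceSchema.parse({ id: "1", title: "Untitled" }); // now passes instead of throwing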
package/dist/runner/e2b-sandbox-manager.d.ts
@@ -9,14 +9,22 @@ import type { Sandbox } from "@e2b/code-interpreter";
  */
 export declare function getSessionSandbox(apiKey: string): Promise<Sandbox>;
 /**
- *
- *
+ * Pause a session's sandbox (called on session end or cleanup).
+ * The sandbox can be resumed later using the persisted sandboxId.
+ * State is preserved for up to 30 days.
  */
 export declare function destroySessionSandbox(sessionId: string): Promise<void>;
 /**
  * Check if a session has an active sandbox.
  */
 export declare function hasSessionSandbox(sessionId: string): boolean;
+/**
+ * Clear the in-memory sandbox reference for a session.
+ * Call this when a sandbox operation fails due to the sandbox being
+ * paused/expired (e.g., "sandbox is probably not running anymore").
+ * The next call to getSessionSandbox() will attempt to reconnect.
+ */
+export declare function clearStaleSandbox(sessionId: string): void;
 /**
  * Get an existing sandbox by sessionId without creating a new one.
  * Returns undefined if no sandbox exists for this session.
package/dist/runner/e2b-sandbox-manager.js
@@ -10,8 +10,8 @@ const sandboxActivity = new Map();
 const cleanupTimeouts = new Map();
 // Map sessionId -> Promise<Sandbox> for in-flight creations (prevents race condition)
 const sandboxCreationPromises = new Map();
-// Sandbox timeout in milliseconds (default:
-const SANDBOX_TIMEOUT_MS =
+// Sandbox timeout in milliseconds (default: 24 hours)
+const SANDBOX_TIMEOUT_MS = 24 * 60 * 60 * 1000;
 /**
  * Collect environment variables that should be passed to E2B sandbox
  * for tool usage (image generation, etc.)
@@ -76,6 +76,7 @@ async function createSandboxForSession(sessionId, apiKey) {
         const { Sandbox: SandboxClass } = await import("@e2b/code-interpreter");
         const sandbox = await SandboxClass.connect(persistedSandboxId, {
             apiKey,
+            timeoutMs: SANDBOX_TIMEOUT_MS,
         });
         logger.info("Successfully reconnected to sandbox", {
             sessionId,
@@ -121,7 +122,11 @@ async function createSandboxForSession(sessionId, apiKey) {
         config.template = templateId;
         logger.info("Using custom E2B template", { templateId });
     }
-    const sandbox = await SandboxClass.
+    const sandbox = await SandboxClass.betaCreate({
+        ...config,
+        autoPause: true,
+        timeoutMs: SANDBOX_TIMEOUT_MS,
+    });
     logger.info("Created new sandbox", {
         sessionId,
         sandboxId: sandbox.sandboxId,
@@ -197,18 +202,23 @@ export async function getSessionSandbox(apiKey) {
     }
 }
 /**
- *
- *
+ * Pause a session's sandbox (called on session end or cleanup).
+ * The sandbox can be resumed later using the persisted sandboxId.
+ * State is preserved for up to 30 days.
  */
 export async function destroySessionSandbox(sessionId) {
     const sandbox = sessionSandboxes.get(sessionId);
     if (sandbox) {
-        logger.info("
+        logger.info("Pausing sandbox", { sessionId, sandboxId: sandbox.sandboxId });
         try {
-            await sandbox.
+            await sandbox.betaPause();
+            logger.info("Sandbox paused successfully", {
+                sessionId,
+                sandboxId: sandbox.sandboxId,
+            });
         }
         catch (error) {
-            logger.error("Error
+            logger.error("Error pausing sandbox", { sessionId, error });
         }
         sessionSandboxes.delete(sessionId);
         sandboxActivity.delete(sessionId);
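Together with the betaCreate change above, the sandbox lifecycle shifts from create/kill to create/pause/resume. A rough sketch of that lifecycle using the E2B calls visible in this diff (how apiKey reaches betaCreate is an assumption here; in the package it travels inside config):

import { Sandbox } from "@e2b/code-interpreter";

const SANDBOX_TIMEOUT_MS = 24 * 60 * 60 * 1000; // 24 hours, matching this release

async function sandboxLifecycle(apiKey: string) {
    // Create a sandbox that E2B may auto-pause when idle (beta API, as used above)
    const sandbox = await Sandbox.betaCreate({ apiKey, autoPause: true, timeoutMs: SANDBOX_TIMEOUT_MS });
    const sandboxId = sandbox.sandboxId;
    // ... run code, commands, and file operations ...
    // On session end the manager now pauses instead of killing, preserving state
    await sandbox.betaPause();
    // A later session resumes from the persisted sandboxId
    return Sandbox.connect(sandboxId, { apiKey, timeoutMs: SANDBOX_TIMEOUT_MS });
}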
@@ -218,22 +228,7 @@ export async function destroySessionSandbox(sessionId) {
         clearTimeout(timeout);
         cleanupTimeouts.delete(sessionId);
     }
-    //
-    // We do this without requiring session context since this can be called
-    // from HTTP endpoints that don't have session context
-    try {
-        // Try to get storage if we have context, but don't fail if we don't
-        if (hasSessionContext()) {
-            const storage = getSessionStorage();
-            if (storage) {
-                await storage.updateSandboxId(sessionId, undefined);
-                logger.debug("Cleared persisted sandboxId", { sessionId });
-            }
-        }
-    }
-    catch (error) {
-        logger.warn("Failed to clear persisted sandboxId", { sessionId, error });
-    }
+    // Note: We intentionally keep the sandboxId in storage so we can resume later
 }
 }
 /**
@@ -267,6 +262,22 @@ function rescheduleCleanup(sessionId) {
 export function hasSessionSandbox(sessionId) {
     return sessionSandboxes.has(sessionId);
 }
+/**
+ * Clear the in-memory sandbox reference for a session.
+ * Call this when a sandbox operation fails due to the sandbox being
+ * paused/expired (e.g., "sandbox is probably not running anymore").
+ * The next call to getSessionSandbox() will attempt to reconnect.
+ */
+export function clearStaleSandbox(sessionId) {
+    logger.info("Clearing stale sandbox reference", { sessionId });
+    sessionSandboxes.delete(sessionId);
+    sandboxActivity.delete(sessionId);
+    const timeout = cleanupTimeouts.get(sessionId);
+    if (timeout) {
+        clearTimeout(timeout);
+        cleanupTimeouts.delete(sessionId);
+    }
+}
 /**
  * Get an existing sandbox by sessionId without creating a new one.
  * Returns undefined if no sandbox exists for this session.
package/dist/runner/hooks/predefined/compaction-tool.js
@@ -1,6 +1,6 @@
-import { ChatAnthropic } from "@langchain/anthropic";
 import { HumanMessage, SystemMessage } from "@langchain/core/messages";
 import { createLogger } from "../../../logger.js";
+import { createModelFromString } from "../../langchain/model-factory.js";
 import { createContextEntry, createFullMessageEntry, } from "../types";
 import { applyTokenPadding } from "./token-utils.js";
 const logger = createLogger("compaction-tool");
@@ -67,10 +67,8 @@ export const compactionTool = async (ctx) => {
     const hasLibraryMcp = ctx.agent.mcps?.some((mcp) => typeof mcp === "string" ? mcp === "library" : mcp.name === "library");
     try {
         // Create the LLM client using the same model as the agent
-
-
-            temperature: 0,
-        });
+        // Use model factory to properly handle town- prefixed models (routes through shed proxy)
+        const model = await createModelFromString(ctx.model);
         // Build the conversation history to compact
         const messagesToCompact = ctx.session.messages;
         // Convert session messages to text for context, including tool calls and results
package/dist/runner/langchain/index.js
@@ -185,7 +185,15 @@ function extractSourcesBeforeCompaction(toolName, rawOutput) {
         };
     };
     // Check for results array (library__search_keyword, library__semantic_search)
-
+    // Results may be at top level or nested in structuredContent (MCP tool wrapper)
+    const structuredContent = typeof actualOutput.structuredContent === "object" &&
+        actualOutput.structuredContent !== null
+        ? actualOutput.structuredContent
+        : null;
+    const results = actualOutput.results ??
+        actualOutput.documents ??
+        structuredContent?.results ??
+        structuredContent?.documents;
     if (Array.isArray(results)) {
         for (const result of results) {
             if (result && typeof result === "object") {
@@ -202,6 +210,32 @@ function extractSourcesBeforeCompaction(toolName, rawOutput) {
             sources.push(source);
         }
     }
+    // Handle subagent tool outputs (SubagentResult format: { text, sources })
+    const isSubagentTool = toolName === "subagent" || toolName === "Task";
+    if (isSubagentTool && Array.isArray(actualOutput.sources)) {
+        for (const source of actualOutput.sources) {
+            if (source &&
+                typeof source === "object" &&
+                typeof source.url === "string" &&
+                source.url) {
+                sourceCounter++;
+                sources.push({
+                    id: typeof source.id === "string" ? source.id : String(sourceCounter),
+                    url: source.url,
+                    title: typeof source.title === "string" ? source.title : "Untitled",
+                    snippet: typeof source.snippet === "string"
+                        ? source.snippet.slice(0, 200)
+                        : undefined,
+                    favicon: typeof source.favicon === "string"
+                        ? source.favicon
+                        : getFaviconFromUrl(source.url),
+                    sourceName: typeof source.sourceName === "string"
+                        ? source.sourceName
+                        : getSourceNameFromUrl(source.url),
+                });
+            }
+        }
+    }
     return sources;
 }
 function stableStringify(value) {
@@ -742,7 +776,41 @@ export class LangchainAgent {
                 const toolCallId = hasInflightToolCompaction
                     ? await consumeToolCallId(originalTool.name, input)
                     : `unknown_${Date.now()}_${Math.random().toString(36).slice(2, 8)}`;
-
+                let result = await originalTool.invoke(input);
+                // Apply subagent source renumbering BEFORE extraction
+                // This ensures pre-extracted sources have the same IDs as the text references
+                const isSubagentToolResult = originalTool.name === SUBAGENT_TOOL_NAME &&
+                    result &&
+                    typeof result === "object" &&
+                    "sources" in result &&
+                    Array.isArray(result.sources) &&
+                    result.sources.length > 0;
+                if (isSubagentToolResult) {
+                    const subagentResult = result;
+                    subagentCallCounter++;
+                    const baseOffset = subagentCallCounter * 1000;
+                    let sourceIndex = 0;
+                    // Create ID mapping and re-number sources with offset
+                    const idMapping = new Map();
+                    const renumberedSources = subagentResult.sources.map((source) => {
+                        sourceIndex++;
+                        const newId = String(baseOffset + sourceIndex);
+                        idMapping.set(source.id, newId);
+                        return { ...source, id: newId };
+                    });
+                    // Update citation references in the text [[oldId]] -> [[newId]]
+                    let updatedText = subagentResult.text;
+                    for (const [oldId, newId] of idMapping) {
+                        const pattern = new RegExp(`\\[\\[${oldId}\\]\\]`, "g");
+                        updatedText = updatedText.replace(pattern, `[[${newId}]]`);
+                    }
+                    _logger.info("Re-numbered subagent citation sources (in-flight)", {
+                        subagentCall: subagentCallCounter,
+                        originalCount: subagentResult.sources.length,
+                        idRange: `${baseOffset + 1}-${baseOffset + sourceIndex}`,
+                    });
+                    result = { text: updatedText, sources: renumberedSources };
+                }
                 if (!inflightHookExecutor || !hasInflightToolCompaction) {
                     return result;
                 }
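The renumbering gives each subagent call its own block of IDs (1001+, 2001+, ...) so sources from different calls cannot collide, and rewrites the [[id]] citation markers in the returned text to match. The same scheme as a standalone sketch (function name and sample data are illustrative):

function renumberSubagentSources(call: number, text: string, sources: { id: string; url: string }[]) {
    const baseOffset = call * 1000; // call 1 -> 1001+, call 2 -> 2001+, ...
    const idMapping = new Map<string, string>();
    const renumbered = sources.map((source, index) => {
        const newId = String(baseOffset + index + 1);
        idMapping.set(source.id, newId);
        return { ...source, id: newId };
    });
    let updatedText = text;
    for (const [oldId, newId] of idMapping) {
        updatedText = updatedText.replace(new RegExp(`\\[\\[${oldId}\\]\\]`, "g"), `[[${newId}]]`);
    }
    return { text: updatedText, sources: renumbered };
}

// First subagent call: [[1]] and [[2]] become [[1001]] and [[1002]]
renumberSubagentSources(1, "See [[1]] and [[2]].", [
    { id: "1", url: "https://a.example" },
    { id: "2", url: "https://b.example" },
]);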
@@ -756,6 +824,7 @@ export class LangchainAgent {
                 const outputTokens = await countToolResultTokens(rawOutput);
                 // Extract citation sources BEFORE compaction to preserve URLs
                 // Compaction LLM may remove URLs as "unnecessary" during summarization
+                // NOTE: For subagent tools, sources are already renumbered above
                 const preExtractedSources = extractSourcesBeforeCompaction(originalTool.name, rawOutput);
                 // Include current prompt as the last user message for better context.
                 const nowIso = new Date().toISOString();
@@ -899,65 +968,10 @@ export class LangchainAgent {
                 allowedToolNames.has(t.name));
             });
         }
-        //
-        //
-        //
-        //
-        finalTools = finalTools.map((t) => {
-            if (t.name !== SUBAGENT_TOOL_NAME) {
-                return t;
-            }
-            const wrappedFunc = async (input) => {
-                const result = (await t.invoke(input));
-                // Check if result has sources to re-number
-                if (!result ||
-                    typeof result !== "object" ||
-                    !Array.isArray(result.sources) ||
-                    result.sources.length === 0) {
-                    return result;
-                }
-                // Increment subagent call counter and calculate base offset
-                subagentCallCounter++;
-                const baseOffset = subagentCallCounter * 1000;
-                let sourceIndex = 0;
-                // Create ID mapping and re-number sources with offset
-                const idMapping = new Map();
-                const renumberedSources = result.sources.map((source) => {
-                    sourceIndex++;
-                    const newId = String(baseOffset + sourceIndex);
-                    idMapping.set(source.id, newId);
-                    return { ...source, id: newId };
-                });
-                // Update citation references in the text [[oldId]] -> [[newId]]
-                let updatedText = result.text;
-                for (const [oldId, newId] of idMapping) {
-                    const pattern = new RegExp(`\\[\\[${oldId}\\]\\]`, "g");
-                    updatedText = updatedText.replace(pattern, `[[${newId}]]`);
-                }
-                _logger.info("Re-numbered subagent citation sources", {
-                    subagentCall: subagentCallCounter,
-                    originalCount: result.sources.length,
-                    idRange: `${baseOffset + 1}-${baseOffset + sourceIndex}`,
-                });
-                return { text: updatedText, sources: renumberedSources };
-            };
-            // Create new tool with wrapped function
-            // biome-ignore lint/suspicious/noExplicitAny: Need to pass function with dynamic signature
-            const wrappedTool = tool(wrappedFunc, {
-                name: t.name,
-                description: t.description,
-                // biome-ignore lint/suspicious/noExplicitAny: Accessing internal schema property
-                schema: t.schema,
-            });
-            // Preserve metadata
-            // biome-ignore lint/suspicious/noExplicitAny: Need to add custom properties to LangChain tool
-            wrappedTool.prettyName = t.prettyName;
-            // biome-ignore lint/suspicious/noExplicitAny: Need to add custom properties to LangChain tool
-            wrappedTool.icon = t.icon;
-            // biome-ignore lint/suspicious/noExplicitAny: Need to preserve subagentConfigs for metadata
-            wrappedTool.subagentConfigs = t.subagentConfigs;
-            return wrappedTool;
-        });
+        // NOTE: Subagent source renumbering now happens earlier in the wrappedTools
+        // wrapper (around line 1050) to ensure pre-extracted sources have matching IDs.
+        // This ensures that when sources are extracted before compaction, they already
+        // have the renumbered IDs (1001+, 2001+, etc.) that match the text references.
         // Create the model instance using the factory
         // This detects the provider from the model string:
         // - "gemini-2.0-flash" → Google Generative AI
package/dist/runner/langchain/tools/e2b.js
@@ -4,7 +4,7 @@ import { getShedAuth } from "@townco/core/auth";
 import { tool } from "langchain";
 import { z } from "zod";
 import { createLogger } from "../../../logger.js";
-import { getSessionSandbox } from "../../e2b-sandbox-manager";
+import { clearStaleSandbox, getSessionSandbox, } from "../../e2b-sandbox-manager";
 import { getEmitUpdate, getSessionContext, getToolOutputDir, hasSessionContext, } from "../../session-context";
 const logger = createLogger("e2b-tools");
 // Cached API key from Town proxy
@@ -48,6 +48,45 @@ export async function getTownE2BApiKey() {
         _apiKeyFetchPromise = null;
     }
 }
+/**
+ * Check if an error indicates the sandbox is stale (paused/expired).
+ * E2B throws this when the sandbox was auto-paused or timed out.
+ */
+function isStaleSandboxError(error) {
+    if (error instanceof Error) {
+        const msg = error.message.toLowerCase();
+        return (msg.includes("sandbox is probably not running") ||
+            msg.includes("sandbox not found") ||
+            msg.includes("not running anymore"));
+    }
+    return false;
+}
+/**
+ * Execute a sandbox operation with automatic retry on stale sandbox errors.
+ * If the sandbox was auto-paused by E2B, this clears the stale reference
+ * and reconnects before retrying.
+ */
+async function withSandboxRetry(getSandbox, operation) {
+    let sandbox = await getSandbox();
+    try {
+        return await operation(sandbox);
+    }
+    catch (error) {
+        if (isStaleSandboxError(error)) {
+            logger.info("Sandbox appears stale (auto-paused), clearing and reconnecting...");
+            // Clear the stale sandbox reference
+            if (hasSessionContext()) {
+                const { sessionId } = getSessionContext();
+                clearStaleSandbox(sessionId);
+            }
+            // Get a fresh sandbox (will reconnect/resume the paused sandbox)
+            sandbox = await getSandbox();
+            // Retry the operation
+            return await operation(sandbox);
+        }
+        throw error;
+    }
+}
 /**
  * Helper to save image artifacts from code execution results.
  */
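Every E2B tool below now routes its sandbox calls through withSandboxRetry, so a sandbox that E2B auto-paused is transparently resumed on the next use. A minimal sketch of the calling pattern (assumes withSandboxRetry from this file is in scope; runBashWithRecovery is an illustrative name, not part of the package):

import type { Sandbox } from "@e2b/code-interpreter";

async function runBashWithRecovery(getSandbox: () => Promise<Sandbox>, command: string) {
    // If the first attempt fails with a stale-sandbox message, the helper clears the
    // cached reference, re-resolves the sandbox, and retries the command once.
    const result = await withSandboxRetry(getSandbox, (sandbox) => sandbox.commands.run(command));
    return { stdout: result.stdout, stderr: result.stderr, exitCode: result.exitCode };
}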
@@ -73,9 +112,8 @@ async function saveImageArtifact(base64Data, format) {
 function makeE2BToolsInternal(getSandbox) {
     // Tool 1: Run Code (Python or JavaScript)
     const runCode = tool(async ({ code, language = "python" }) => {
-        const sandbox = await getSandbox();
         try {
-            const result = await sandbox.runCode(code, { language });
+            const result = await withSandboxRetry(getSandbox, (sandbox) => sandbox.runCode(code, { language }));
             // Format output
             let output = "";
             if (result.logs?.stdout && result.logs.stdout.length > 0) {
@@ -174,9 +212,8 @@ function makeE2BToolsInternal(getSandbox) {
     };
     // Tool 2: Run Bash Command
     const runBash = tool(async ({ command }) => {
-        const sandbox = await getSandbox();
         try {
-            const result = await sandbox.commands.run(command);
+            const result = await withSandboxRetry(getSandbox, (sandbox) => sandbox.commands.run(command));
             let output = "";
             if (result.stdout) {
                 output += result.stdout;
@@ -244,9 +281,8 @@ function makeE2BToolsInternal(getSandbox) {
     };
     // Tool 3: Read File from Sandbox
    const readSandboxFile = tool(async ({ path: filePath }) => {
-        const sandbox = await getSandbox();
         try {
-            const content = await sandbox.files.read(filePath);
+            const content = await withSandboxRetry(getSandbox, (sandbox) => sandbox.files.read(filePath));
             return content;
         }
         catch (error) {
@@ -267,9 +303,8 @@ function makeE2BToolsInternal(getSandbox) {
     readSandboxFile.icon = "FileText";
     // Tool 4: Write File to Sandbox
     const writeSandboxFile = tool(async ({ path: filePath, content }) => {
-        const sandbox = await getSandbox();
         try {
-            await sandbox.files.write(filePath, content);
+            await withSandboxRetry(getSandbox, (sandbox) => sandbox.files.write(filePath, content));
             // Emit file change notification
             const emitUpdate = getEmitUpdate();
             if (emitUpdate) {
@@ -312,13 +347,12 @@ function makeE2BToolsInternal(getSandbox) {
         if (!hasSessionContext()) {
             throw new Error("Sandbox_Share requires session context");
         }
-        const sandbox = await getSandbox();
         const { sessionId } = getSessionContext();
         const toolOutputDir = getToolOutputDir("E2B");
         try {
             // Step 1: Download from sandbox to local artifacts
             // Use base64 encoding to safely transfer binary data
-            const result = await sandbox.commands.run(`base64 ${sandboxPath}`);
+            const result = await withSandboxRetry(getSandbox, (sandbox) => sandbox.commands.run(`base64 ${sandboxPath}`));
             if (result.exitCode !== 0) {
                 throw new Error(`Failed to read file: ${result.stderr}`);
             }
@@ -405,13 +439,19 @@ function makeE2BToolsInternal(getSandbox) {
     shareSandboxFile.icon = "Share";
     // Tool 6: Load Library Documents to Sandbox
     const loadLibraryDocuments = tool(async ({ document_ids }) => {
-        const sandbox = await getSandbox();
         try {
             const libraryApiUrl = process.env.LIBRARY_API_URL;
             const libraryApiKey = process.env.LIBRARY_API_KEY;
             if (!libraryApiUrl || !libraryApiKey) {
                 throw new Error("LIBRARY_API_URL and LIBRARY_API_KEY environment variables are required");
             }
+            // Get sandbox (with retry in case it was auto-paused) to ensure it's active
+            // The library API needs the sandbox to be running to upload files
+            const sandbox = await withSandboxRetry(getSandbox, async (s) => {
+                // Run a simple command to verify the sandbox is actually running
+                await s.commands.run("true");
+                return s;
+            });
             const response = await fetch(`${libraryApiUrl}/sandbox/upload_documents_to_sandbox`, {
                 method: "POST",
                 headers: {
@@ -475,7 +515,6 @@ function makeE2BToolsInternal(getSandbox) {
     };
     // Tool 7: Generate Image in Sandbox
     const generateImage = tool(async ({ prompt }) => {
-        const sandbox = await getSandbox();
         try {
             // JavaScript script to call Gemini API using @google/genai
             const escapedPrompt = prompt
@@ -566,10 +605,13 @@ async function generateImage() {
 
 generateImage();
 `;
-
-
-
-
+            // Run all sandbox operations with retry support
+            const result = await withSandboxRetry(getSandbox, async (sandbox) => {
+                await sandbox.files.write("/home/user/gen_img.js", script);
+                // Install @google/genai if not already installed (should be pre-installed in template)
+                await sandbox.commands.run("cd /home/user && npm install @google/genai");
+                return sandbox.commands.run("cd /home/user && node gen_img.js");
+            });
             logger.info("Image generation command result", {
                 exitCode: result.exitCode,
                 stdout: result.stdout,