deepagents 1.7.6 → 1.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +10 -1
- package/dist/index.cjs +943 -17
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +304 -150
- package/dist/index.d.ts +303 -149
- package/dist/index.js +937 -15
- package/dist/index.js.map +1 -1
- package/package.json +6 -5
package/dist/index.cjs
CHANGED
|
@@ -37,8 +37,9 @@ let _langchain_core_messages = require("@langchain/core/messages");
|
|
|
37
37
|
let zod = require("zod");
|
|
38
38
|
let yaml = require("yaml");
|
|
39
39
|
yaml = __toESM(yaml);
|
|
40
|
-
require("uuid");
|
|
41
|
-
require("langchain/
|
|
40
|
+
let uuid = require("uuid");
|
|
41
|
+
let _langchain_core_errors = require("@langchain/core/errors");
|
|
42
|
+
let langchain_chat_models_universal = require("langchain/chat_models/universal");
|
|
42
43
|
let node_fs_promises = require("node:fs/promises");
|
|
43
44
|
node_fs_promises = __toESM(node_fs_promises);
|
|
44
45
|
let node_fs = require("node:fs");
|
|
@@ -46,6 +47,7 @@ node_fs = __toESM(node_fs);
|
|
|
46
47
|
let node_path = require("node:path");
|
|
47
48
|
node_path = __toESM(node_path);
|
|
48
49
|
let node_child_process = require("node:child_process");
|
|
50
|
+
node_child_process = __toESM(node_child_process);
|
|
49
51
|
let fast_glob = require("fast-glob");
|
|
50
52
|
fast_glob = __toESM(fast_glob);
|
|
51
53
|
let node_os = require("node:os");
|
|
@@ -254,7 +256,7 @@ function performStringReplacement(content, oldString, newString, replaceAll) {
|
|
|
254
256
|
if (oldString === "") return "Error: oldString cannot be empty when file has content";
|
|
255
257
|
const occurrences = content.split(oldString).length - 1;
|
|
256
258
|
if (occurrences === 0) return `Error: String not found in file: '${oldString}'`;
|
|
257
|
-
if (occurrences > 1 && !replaceAll) return `Error: String '${oldString}' appears ${occurrences} times in file. Use replace_all=True to replace all instances, or provide a more specific string with surrounding context.`;
|
|
259
|
+
if (occurrences > 1 && !replaceAll) return `Error: String '${oldString}' has multiple occurrences (appears ${occurrences} times) in file. Use replace_all=True to replace all instances, or provide a more specific string with surrounding context.`;
|
|
258
260
|
return [content.split(oldString).join(newString), occurrences];
|
|
259
261
|
}
|
|
260
262
|
/**
|
|
@@ -280,8 +282,8 @@ function performStringReplacement(content, oldString, newString, replaceAll) {
|
|
|
280
282
|
* validatePath("C:\\Users\\file") // Throws: Windows absolute paths not supported
|
|
281
283
|
* ```
|
|
282
284
|
*/
|
|
283
|
-
function validatePath(path$
|
|
284
|
-
const pathStr = path$
|
|
285
|
+
function validatePath(path$5) {
|
|
286
|
+
const pathStr = path$5 || "/";
|
|
285
287
|
if (!pathStr || pathStr.trim() === "") throw new Error("Path cannot be empty");
|
|
286
288
|
let normalized = pathStr.startsWith("/") ? pathStr : "/" + pathStr;
|
|
287
289
|
if (!normalized.endsWith("/")) normalized += "/";
|
|
@@ -303,10 +305,10 @@ function validatePath(path$4) {
|
|
|
303
305
|
* // Returns: "/test.py\n/src/main.py" (sorted by modified_at)
|
|
304
306
|
* ```
|
|
305
307
|
*/
|
|
306
|
-
function globSearchFiles(files, pattern, path$
|
|
308
|
+
function globSearchFiles(files, pattern, path$7 = "/") {
|
|
307
309
|
let normalizedPath;
|
|
308
310
|
try {
|
|
309
|
-
normalizedPath = validatePath(path$
|
|
311
|
+
normalizedPath = validatePath(path$7);
|
|
310
312
|
} catch {
|
|
311
313
|
return "No files found";
|
|
312
314
|
}
|
|
@@ -338,10 +340,10 @@ function globSearchFiles(files, pattern, path$6 = "/") {
|
|
|
338
340
|
* We deliberately do not raise here to keep backends non-throwing in tool
|
|
339
341
|
* contexts and preserve user-facing error messages.
|
|
340
342
|
*/
|
|
341
|
-
function grepMatchesFromFiles(files, pattern, path$
|
|
343
|
+
function grepMatchesFromFiles(files, pattern, path$9 = null, glob = null) {
|
|
342
344
|
let normalizedPath;
|
|
343
345
|
try {
|
|
344
|
-
normalizedPath = validatePath(path$
|
|
346
|
+
normalizedPath = validatePath(path$9);
|
|
345
347
|
} catch {
|
|
346
348
|
return [];
|
|
347
349
|
}
|
|
@@ -903,7 +905,7 @@ function createWriteFileTool(backend, options) {
|
|
|
903
905
|
description: customDescription || WRITE_FILE_TOOL_DESCRIPTION,
|
|
904
906
|
schema: zod_v4.z.object({
|
|
905
907
|
file_path: zod_v4.z.string().describe("Absolute path to the file to write"),
|
|
906
|
-
content: zod_v4.z.string().describe("Content to write to the file")
|
|
908
|
+
content: zod_v4.z.string().default("").describe("Content to write to the file")
|
|
907
909
|
})
|
|
908
910
|
});
|
|
909
911
|
}
|
|
@@ -2279,6 +2281,76 @@ function createSkillsMiddleware(options) {
|
|
|
2279
2281
|
* For simple use cases without backend offloading, use `summarizationMiddleware`
|
|
2280
2282
|
* from `langchain` directly.
|
|
2281
2283
|
*/
|
|
2284
|
+
const DEFAULT_MESSAGES_TO_KEEP = 20;
|
|
2285
|
+
const DEFAULT_TRIM_TOKEN_LIMIT = 4e3;
|
|
2286
|
+
const FALLBACK_TRIGGER = {
|
|
2287
|
+
type: "tokens",
|
|
2288
|
+
value: 17e4
|
|
2289
|
+
};
|
|
2290
|
+
const FALLBACK_KEEP = {
|
|
2291
|
+
type: "messages",
|
|
2292
|
+
value: 6
|
|
2293
|
+
};
|
|
2294
|
+
const FALLBACK_TRUNCATE_ARGS = {
|
|
2295
|
+
trigger: {
|
|
2296
|
+
type: "messages",
|
|
2297
|
+
value: 20
|
|
2298
|
+
},
|
|
2299
|
+
keep: {
|
|
2300
|
+
type: "messages",
|
|
2301
|
+
value: 20
|
|
2302
|
+
}
|
|
2303
|
+
};
|
|
2304
|
+
const PROFILE_TRIGGER = {
|
|
2305
|
+
type: "fraction",
|
|
2306
|
+
value: .85
|
|
2307
|
+
};
|
|
2308
|
+
const PROFILE_KEEP = {
|
|
2309
|
+
type: "fraction",
|
|
2310
|
+
value: .1
|
|
2311
|
+
};
|
|
2312
|
+
const PROFILE_TRUNCATE_ARGS = {
|
|
2313
|
+
trigger: {
|
|
2314
|
+
type: "fraction",
|
|
2315
|
+
value: .85
|
|
2316
|
+
},
|
|
2317
|
+
keep: {
|
|
2318
|
+
type: "fraction",
|
|
2319
|
+
value: .1
|
|
2320
|
+
}
|
|
2321
|
+
};
|
|
2322
|
+
/**
|
|
2323
|
+
* Compute summarization defaults based on model profile.
|
|
2324
|
+
* Mirrors Python's `_compute_summarization_defaults`.
|
|
2325
|
+
*
|
|
2326
|
+
* If the model has a profile with `maxInputTokens`, uses fraction-based
|
|
2327
|
+
* settings. Otherwise, uses fixed token/message counts.
|
|
2328
|
+
*
|
|
2329
|
+
* @param resolvedModel - The resolved chat model instance.
|
|
2330
|
+
*/
|
|
2331
|
+
function computeSummarizationDefaults(resolvedModel) {
|
|
2332
|
+
if (resolvedModel.profile && typeof resolvedModel.profile === "object" && "maxInputTokens" in resolvedModel.profile && typeof resolvedModel.profile.maxInputTokens === "number") return {
|
|
2333
|
+
trigger: PROFILE_TRIGGER,
|
|
2334
|
+
keep: PROFILE_KEEP,
|
|
2335
|
+
truncateArgsSettings: PROFILE_TRUNCATE_ARGS
|
|
2336
|
+
};
|
|
2337
|
+
return {
|
|
2338
|
+
trigger: FALLBACK_TRIGGER,
|
|
2339
|
+
keep: FALLBACK_KEEP,
|
|
2340
|
+
truncateArgsSettings: FALLBACK_TRUNCATE_ARGS
|
|
2341
|
+
};
|
|
2342
|
+
}
|
|
2343
|
+
const DEFAULT_SUMMARY_PROMPT = `You are a conversation summarizer. Your task is to create a concise summary of the conversation that captures:
|
|
2344
|
+
1. The main topics discussed
|
|
2345
|
+
2. Key decisions or conclusions reached
|
|
2346
|
+
3. Any important context that would be needed for continuing the conversation
|
|
2347
|
+
|
|
2348
|
+
Keep the summary focused and informative. Do not include unnecessary details.
|
|
2349
|
+
|
|
2350
|
+
Conversation to summarize:
|
|
2351
|
+
{conversation}
|
|
2352
|
+
|
|
2353
|
+
Summary:`;
|
|
2282
2354
|
/**
|
|
2283
2355
|
* Zod schema for a summarization event that tracks what was summarized and
|
|
2284
2356
|
* where the cutoff is.
|
|
@@ -2299,6 +2371,548 @@ const SummarizationStateSchema = zod.z.object({
|
|
|
2299
2371
|
_summarizationSessionId: zod.z.string().optional(),
|
|
2300
2372
|
_summarizationEvent: SummarizationEventSchema.optional()
|
|
2301
2373
|
});
|
|
2374
|
+
/**
|
|
2375
|
+
* Check if a message is a previous summarization message.
|
|
2376
|
+
* Summary messages are HumanMessage objects with lc_source='summarization' in additional_kwargs.
|
|
2377
|
+
*/
|
|
2378
|
+
function isSummaryMessage(msg) {
|
|
2379
|
+
if (!langchain.HumanMessage.isInstance(msg)) return false;
|
|
2380
|
+
return msg.additional_kwargs?.lc_source === "summarization";
|
|
2381
|
+
}
|
|
2382
|
+
/**
|
|
2383
|
+
* Create summarization middleware with backend support for conversation history offloading.
|
|
2384
|
+
*
|
|
2385
|
+
* This middleware:
|
|
2386
|
+
* 1. Monitors conversation length against configured thresholds
|
|
2387
|
+
* 2. When triggered, offloads old messages to backend storage
|
|
2388
|
+
* 3. Generates a summary of offloaded messages
|
|
2389
|
+
* 4. Replaces old messages with the summary, preserving recent context
|
|
2390
|
+
*
|
|
2391
|
+
* @param options - Configuration options
|
|
2392
|
+
* @returns AgentMiddleware for summarization and history offloading
|
|
2393
|
+
*/
|
|
2394
|
+
function createSummarizationMiddleware(options) {
|
|
2395
|
+
const { model, backend, summaryPrompt = DEFAULT_SUMMARY_PROMPT, trimTokensToSummarize = DEFAULT_TRIM_TOKEN_LIMIT, historyPathPrefix = "/conversation_history" } = options;
|
|
2396
|
+
let trigger = options.trigger;
|
|
2397
|
+
let keep = options.keep ?? {
|
|
2398
|
+
type: "messages",
|
|
2399
|
+
value: DEFAULT_MESSAGES_TO_KEEP
|
|
2400
|
+
};
|
|
2401
|
+
let truncateArgsSettings = options.truncateArgsSettings;
|
|
2402
|
+
let defaultsComputed = trigger != null;
|
|
2403
|
+
let truncateTrigger = truncateArgsSettings?.trigger;
|
|
2404
|
+
let truncateKeep = truncateArgsSettings?.keep ?? {
|
|
2405
|
+
type: "messages",
|
|
2406
|
+
value: 20
|
|
2407
|
+
};
|
|
2408
|
+
let maxArgLength = truncateArgsSettings?.maxLength ?? 2e3;
|
|
2409
|
+
let truncationText = truncateArgsSettings?.truncationText ?? "...(argument truncated)";
|
|
2410
|
+
/**
|
|
2411
|
+
* Lazily compute defaults from model profile when trigger was not provided.
|
|
2412
|
+
* Called once when the model is first resolved.
|
|
2413
|
+
*/
|
|
2414
|
+
function applyModelDefaults(resolvedModel) {
|
|
2415
|
+
if (defaultsComputed) return;
|
|
2416
|
+
defaultsComputed = true;
|
|
2417
|
+
const defaults = computeSummarizationDefaults(resolvedModel);
|
|
2418
|
+
trigger = defaults.trigger;
|
|
2419
|
+
keep = options.keep ?? defaults.keep;
|
|
2420
|
+
if (!options.truncateArgsSettings) {
|
|
2421
|
+
truncateArgsSettings = defaults.truncateArgsSettings;
|
|
2422
|
+
truncateTrigger = defaults.truncateArgsSettings.trigger;
|
|
2423
|
+
truncateKeep = defaults.truncateArgsSettings.keep ?? {
|
|
2424
|
+
type: "messages",
|
|
2425
|
+
value: 20
|
|
2426
|
+
};
|
|
2427
|
+
maxArgLength = defaults.truncateArgsSettings.maxLength ?? 2e3;
|
|
2428
|
+
truncationText = defaults.truncateArgsSettings.truncationText ?? "...(argument truncated)";
|
|
2429
|
+
}
|
|
2430
|
+
}
|
|
2431
|
+
let sessionId = null;
|
|
2432
|
+
let tokenEstimationMultiplier = 1;
|
|
2433
|
+
/**
|
|
2434
|
+
* Resolve backend from instance or factory.
|
|
2435
|
+
*/
|
|
2436
|
+
function getBackend(state) {
|
|
2437
|
+
if (typeof backend === "function") return backend({ state });
|
|
2438
|
+
return backend;
|
|
2439
|
+
}
|
|
2440
|
+
/**
|
|
2441
|
+
* Get or create session ID for history file naming.
|
|
2442
|
+
*/
|
|
2443
|
+
function getSessionId(state) {
|
|
2444
|
+
if (state._summarizationSessionId) return state._summarizationSessionId;
|
|
2445
|
+
if (!sessionId) sessionId = `session_${(0, uuid.v4)().substring(0, 8)}`;
|
|
2446
|
+
return sessionId;
|
|
2447
|
+
}
|
|
2448
|
+
/**
|
|
2449
|
+
* Get the history file path.
|
|
2450
|
+
*/
|
|
2451
|
+
function getHistoryPath(state) {
|
|
2452
|
+
return `${historyPathPrefix}/${getSessionId(state)}.md`;
|
|
2453
|
+
}
|
|
2454
|
+
/**
|
|
2455
|
+
* Cached resolved model to avoid repeated initChatModel calls
|
|
2456
|
+
*/
|
|
2457
|
+
let cachedModel = void 0;
|
|
2458
|
+
/**
|
|
2459
|
+
* Resolve the chat model.
|
|
2460
|
+
* Uses initChatModel to support any model provider from a string name.
|
|
2461
|
+
* The resolved model is cached for subsequent calls.
|
|
2462
|
+
*/
|
|
2463
|
+
async function getChatModel() {
|
|
2464
|
+
if (cachedModel) return cachedModel;
|
|
2465
|
+
if (typeof model === "string") cachedModel = await (0, langchain_chat_models_universal.initChatModel)(model);
|
|
2466
|
+
else cachedModel = model;
|
|
2467
|
+
return cachedModel;
|
|
2468
|
+
}
|
|
2469
|
+
/**
|
|
2470
|
+
* Get the max input tokens from the model's profile.
|
|
2471
|
+
* Similar to Python's _get_profile_limits.
|
|
2472
|
+
*
|
|
2473
|
+
* When the profile is unavailable, returns undefined. In that case the
|
|
2474
|
+
* middleware uses fixed token/message-count fallback defaults for
|
|
2475
|
+
* trigger/keep, and relies on the ContextOverflowError catch as a
|
|
2476
|
+
* safety net if the prompt still exceeds the model's actual limit.
|
|
2477
|
+
*/
|
|
2478
|
+
function getMaxInputTokens(resolvedModel) {
|
|
2479
|
+
const profile = resolvedModel.profile;
|
|
2480
|
+
if (profile && typeof profile === "object" && "maxInputTokens" in profile && typeof profile.maxInputTokens === "number") return profile.maxInputTokens;
|
|
2481
|
+
}
|
|
2482
|
+
/**
|
|
2483
|
+
* Check if summarization should be triggered.
|
|
2484
|
+
*/
|
|
2485
|
+
function shouldSummarize(messages, totalTokens, maxInputTokens) {
|
|
2486
|
+
if (!trigger) return false;
|
|
2487
|
+
const adjustedTokens = totalTokens * tokenEstimationMultiplier;
|
|
2488
|
+
const triggers = Array.isArray(trigger) ? trigger : [trigger];
|
|
2489
|
+
for (const t of triggers) {
|
|
2490
|
+
if (t.type === "messages" && messages.length >= t.value) return true;
|
|
2491
|
+
if (t.type === "tokens" && adjustedTokens >= t.value) return true;
|
|
2492
|
+
if (t.type === "fraction" && maxInputTokens) {
|
|
2493
|
+
if (adjustedTokens >= Math.floor(maxInputTokens * t.value)) return true;
|
|
2494
|
+
}
|
|
2495
|
+
}
|
|
2496
|
+
return false;
|
|
2497
|
+
}
|
|
2498
|
+
/**
|
|
2499
|
+
* Find a safe cutoff point that doesn't split AI/Tool message pairs.
|
|
2500
|
+
*
|
|
2501
|
+
* If the message at `cutoffIndex` is a ToolMessage, this adjusts the boundary
|
|
2502
|
+
* so that related AI and Tool messages stay together. Two strategies are used:
|
|
2503
|
+
*
|
|
2504
|
+
* 1. **Move backward** to include the AIMessage that produced the tool calls,
|
|
2505
|
+
* keeping the pair in the preserved set. Preferred when it doesn't move
|
|
2506
|
+
* the cutoff too far back.
|
|
2507
|
+
*
|
|
2508
|
+
* 2. **Advance forward** past all consecutive ToolMessages, putting the entire
|
|
2509
|
+
* pair into the summarized set. Used when moving backward would preserve
|
|
2510
|
+
* too many messages (e.g., a single AIMessage made 20+ tool calls).
|
|
2511
|
+
*/
|
|
2512
|
+
function findSafeCutoffPoint(messages, cutoffIndex) {
|
|
2513
|
+
if (cutoffIndex >= messages.length || !langchain.ToolMessage.isInstance(messages[cutoffIndex])) return cutoffIndex;
|
|
2514
|
+
let forwardIdx = cutoffIndex;
|
|
2515
|
+
while (forwardIdx < messages.length && langchain.ToolMessage.isInstance(messages[forwardIdx])) forwardIdx++;
|
|
2516
|
+
const toolCallIds = /* @__PURE__ */ new Set();
|
|
2517
|
+
for (let i = cutoffIndex; i < forwardIdx; i++) {
|
|
2518
|
+
const toolMsg = messages[i];
|
|
2519
|
+
if (toolMsg.tool_call_id) toolCallIds.add(toolMsg.tool_call_id);
|
|
2520
|
+
}
|
|
2521
|
+
let backwardIdx = null;
|
|
2522
|
+
for (let i = cutoffIndex - 1; i >= 0; i--) {
|
|
2523
|
+
const msg = messages[i];
|
|
2524
|
+
if (langchain.AIMessage.isInstance(msg) && msg.tool_calls) {
|
|
2525
|
+
const aiToolCallIds = new Set(msg.tool_calls.map((tc) => tc.id).filter((id) => id != null));
|
|
2526
|
+
for (const id of toolCallIds) if (aiToolCallIds.has(id)) {
|
|
2527
|
+
backwardIdx = i;
|
|
2528
|
+
break;
|
|
2529
|
+
}
|
|
2530
|
+
if (backwardIdx !== null) break;
|
|
2531
|
+
}
|
|
2532
|
+
}
|
|
2533
|
+
if (backwardIdx === null) return forwardIdx;
|
|
2534
|
+
if (cutoffIndex - backwardIdx > cutoffIndex / 2 && cutoffIndex > 2) return forwardIdx;
|
|
2535
|
+
return backwardIdx;
|
|
2536
|
+
}
|
|
2537
|
+
/**
|
|
2538
|
+
* Determine cutoff index for messages to summarize.
|
|
2539
|
+
* Messages at index < cutoff will be summarized.
|
|
2540
|
+
* Messages at index >= cutoff will be preserved.
|
|
2541
|
+
*
|
|
2542
|
+
* Uses findSafeCutoffPoint to ensure tool call/result pairs stay together.
|
|
2543
|
+
*/
|
|
2544
|
+
function determineCutoffIndex(messages, maxInputTokens) {
|
|
2545
|
+
let rawCutoff;
|
|
2546
|
+
if (keep.type === "messages") {
|
|
2547
|
+
if (messages.length <= keep.value) return 0;
|
|
2548
|
+
rawCutoff = messages.length - keep.value;
|
|
2549
|
+
} else if (keep.type === "tokens" || keep.type === "fraction") {
|
|
2550
|
+
const targetTokenCount = keep.type === "fraction" && maxInputTokens ? Math.floor(maxInputTokens * keep.value) : keep.value;
|
|
2551
|
+
let tokensKept = 0;
|
|
2552
|
+
rawCutoff = 0;
|
|
2553
|
+
for (let i = messages.length - 1; i >= 0; i--) {
|
|
2554
|
+
const msgTokens = (0, langchain.countTokensApproximately)([messages[i]]);
|
|
2555
|
+
if (tokensKept + msgTokens > targetTokenCount) {
|
|
2556
|
+
rawCutoff = i + 1;
|
|
2557
|
+
break;
|
|
2558
|
+
}
|
|
2559
|
+
tokensKept += msgTokens;
|
|
2560
|
+
}
|
|
2561
|
+
} else return 0;
|
|
2562
|
+
return findSafeCutoffPoint(messages, rawCutoff);
|
|
2563
|
+
}
|
|
2564
|
+
/**
|
|
2565
|
+
* Check if argument truncation should be triggered.
|
|
2566
|
+
*/
|
|
2567
|
+
function shouldTruncateArgs(messages, totalTokens, maxInputTokens) {
|
|
2568
|
+
if (!truncateTrigger) return false;
|
|
2569
|
+
const adjustedTokens = totalTokens * tokenEstimationMultiplier;
|
|
2570
|
+
if (truncateTrigger.type === "messages") return messages.length >= truncateTrigger.value;
|
|
2571
|
+
if (truncateTrigger.type === "tokens") return adjustedTokens >= truncateTrigger.value;
|
|
2572
|
+
if (truncateTrigger.type === "fraction" && maxInputTokens) return adjustedTokens >= Math.floor(maxInputTokens * truncateTrigger.value);
|
|
2573
|
+
return false;
|
|
2574
|
+
}
|
|
2575
|
+
/**
|
|
2576
|
+
* Determine cutoff index for argument truncation.
|
|
2577
|
+
* Uses findSafeCutoffPoint to ensure tool call/result pairs stay together.
|
|
2578
|
+
*/
|
|
2579
|
+
function determineTruncateCutoffIndex(messages, maxInputTokens) {
|
|
2580
|
+
let rawCutoff;
|
|
2581
|
+
if (truncateKeep.type === "messages") {
|
|
2582
|
+
if (messages.length <= truncateKeep.value) return messages.length;
|
|
2583
|
+
rawCutoff = messages.length - truncateKeep.value;
|
|
2584
|
+
} else if (truncateKeep.type === "tokens" || truncateKeep.type === "fraction") {
|
|
2585
|
+
const targetTokenCount = truncateKeep.type === "fraction" && maxInputTokens ? Math.floor(maxInputTokens * truncateKeep.value) : truncateKeep.value;
|
|
2586
|
+
let tokensKept = 0;
|
|
2587
|
+
rawCutoff = 0;
|
|
2588
|
+
for (let i = messages.length - 1; i >= 0; i--) {
|
|
2589
|
+
const msgTokens = (0, langchain.countTokensApproximately)([messages[i]]);
|
|
2590
|
+
if (tokensKept + msgTokens > targetTokenCount) {
|
|
2591
|
+
rawCutoff = i + 1;
|
|
2592
|
+
break;
|
|
2593
|
+
}
|
|
2594
|
+
tokensKept += msgTokens;
|
|
2595
|
+
}
|
|
2596
|
+
} else return messages.length;
|
|
2597
|
+
return findSafeCutoffPoint(messages, rawCutoff);
|
|
2598
|
+
}
|
|
2599
|
+
/**
|
|
2600
|
+
* Count tokens including system message and tools, matching Python's approach.
|
|
2601
|
+
* This gives a more accurate picture of what actually gets sent to the model.
|
|
2602
|
+
*/
|
|
2603
|
+
function countTotalTokens(messages, systemMessage, tools) {
|
|
2604
|
+
return (0, langchain.countTokensApproximately)(systemMessage && langchain.SystemMessage.isInstance(systemMessage) ? [systemMessage, ...messages] : [...messages], tools && Array.isArray(tools) && tools.length > 0 ? tools : null);
|
|
2605
|
+
}
|
|
2606
|
+
/**
|
|
2607
|
+
* Truncate ToolMessage content so that the total payload fits within the
|
|
2608
|
+
* model's context window. Each ToolMessage gets an equal share of the
|
|
2609
|
+
* remaining token budget after accounting for non-tool messages, system
|
|
2610
|
+
* message, and tool schemas.
|
|
2611
|
+
*
|
|
2612
|
+
* This is critical for conversations where a single AIMessage triggers
|
|
2613
|
+
* many tool calls whose results collectively exceed the context window.
|
|
2614
|
+
* Without this, findSafeCutoffPoint cannot split the AI/Tool group and
|
|
2615
|
+
* summarization would discard everything, causing the model to re-call
|
|
2616
|
+
* the same tools in an infinite loop.
|
|
2617
|
+
*/
|
|
2618
|
+
function compactToolResults(messages, maxInputTokens, systemMessage, tools) {
|
|
2619
|
+
const toolMessageIndices = [];
|
|
2620
|
+
for (let i = 0; i < messages.length; i++) if (langchain.ToolMessage.isInstance(messages[i])) toolMessageIndices.push(i);
|
|
2621
|
+
if (toolMessageIndices.length === 0) return {
|
|
2622
|
+
messages,
|
|
2623
|
+
modified: false
|
|
2624
|
+
};
|
|
2625
|
+
const overheadTokens = countTotalTokens(messages.filter((m) => !langchain.ToolMessage.isInstance(m)), systemMessage, tools);
|
|
2626
|
+
const adjustedMax = maxInputTokens / tokenEstimationMultiplier;
|
|
2627
|
+
const budgetForTools = Math.max(adjustedMax * .7 - overheadTokens, 1e3);
|
|
2628
|
+
const perToolBudgetChars = Math.floor(budgetForTools / toolMessageIndices.length) * 4;
|
|
2629
|
+
let modified = false;
|
|
2630
|
+
const result = [...messages];
|
|
2631
|
+
for (const idx of toolMessageIndices) {
|
|
2632
|
+
const msg = messages[idx];
|
|
2633
|
+
const content = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
|
|
2634
|
+
if (content.length > perToolBudgetChars) {
|
|
2635
|
+
result[idx] = new langchain.ToolMessage({
|
|
2636
|
+
content: content.substring(0, perToolBudgetChars) + "\n...(result truncated)",
|
|
2637
|
+
tool_call_id: msg.tool_call_id,
|
|
2638
|
+
name: msg.name
|
|
2639
|
+
});
|
|
2640
|
+
modified = true;
|
|
2641
|
+
}
|
|
2642
|
+
}
|
|
2643
|
+
return {
|
|
2644
|
+
messages: result,
|
|
2645
|
+
modified
|
|
2646
|
+
};
|
|
2647
|
+
}
|
|
2648
|
+
/**
|
|
2649
|
+
* Truncate large tool arguments in old messages.
|
|
2650
|
+
*/
|
|
2651
|
+
function truncateArgs(messages, maxInputTokens, systemMessage, tools) {
|
|
2652
|
+
if (!shouldTruncateArgs(messages, countTotalTokens(messages, systemMessage, tools), maxInputTokens)) return {
|
|
2653
|
+
messages,
|
|
2654
|
+
modified: false
|
|
2655
|
+
};
|
|
2656
|
+
const cutoffIndex = determineTruncateCutoffIndex(messages, maxInputTokens);
|
|
2657
|
+
if (cutoffIndex >= messages.length) return {
|
|
2658
|
+
messages,
|
|
2659
|
+
modified: false
|
|
2660
|
+
};
|
|
2661
|
+
const truncatedMessages = [];
|
|
2662
|
+
let modified = false;
|
|
2663
|
+
for (let i = 0; i < messages.length; i++) {
|
|
2664
|
+
const msg = messages[i];
|
|
2665
|
+
if (i < cutoffIndex && langchain.AIMessage.isInstance(msg) && msg.tool_calls) {
|
|
2666
|
+
const truncatedToolCalls = msg.tool_calls.map((toolCall) => {
|
|
2667
|
+
const args = toolCall.args || {};
|
|
2668
|
+
const truncatedArgs = {};
|
|
2669
|
+
let toolModified = false;
|
|
2670
|
+
for (const [key, value] of Object.entries(args)) if (typeof value === "string" && value.length > maxArgLength && (toolCall.name === "write_file" || toolCall.name === "edit_file")) {
|
|
2671
|
+
truncatedArgs[key] = value.substring(0, 20) + truncationText;
|
|
2672
|
+
toolModified = true;
|
|
2673
|
+
} else truncatedArgs[key] = value;
|
|
2674
|
+
if (toolModified) {
|
|
2675
|
+
modified = true;
|
|
2676
|
+
return {
|
|
2677
|
+
...toolCall,
|
|
2678
|
+
args: truncatedArgs
|
|
2679
|
+
};
|
|
2680
|
+
}
|
|
2681
|
+
return toolCall;
|
|
2682
|
+
});
|
|
2683
|
+
if (modified) {
|
|
2684
|
+
const truncatedMsg = new langchain.AIMessage({
|
|
2685
|
+
content: msg.content,
|
|
2686
|
+
tool_calls: truncatedToolCalls,
|
|
2687
|
+
additional_kwargs: msg.additional_kwargs
|
|
2688
|
+
});
|
|
2689
|
+
truncatedMessages.push(truncatedMsg);
|
|
2690
|
+
} else truncatedMessages.push(msg);
|
|
2691
|
+
} else truncatedMessages.push(msg);
|
|
2692
|
+
}
|
|
2693
|
+
return {
|
|
2694
|
+
messages: truncatedMessages,
|
|
2695
|
+
modified
|
|
2696
|
+
};
|
|
2697
|
+
}
|
|
2698
|
+
/**
|
|
2699
|
+
* Filter out previous summary messages.
|
|
2700
|
+
*/
|
|
2701
|
+
function filterSummaryMessages(messages) {
|
|
2702
|
+
return messages.filter((msg) => !isSummaryMessage(msg));
|
|
2703
|
+
}
|
|
2704
|
+
/**
|
|
2705
|
+
* Offload messages to backend.
|
|
2706
|
+
*/
|
|
2707
|
+
async function offloadToBackend(resolvedBackend, messages, state) {
|
|
2708
|
+
const path = getHistoryPath(state);
|
|
2709
|
+
const filteredMessages = filterSummaryMessages(messages);
|
|
2710
|
+
const newSection = `## Summarized at ${(/* @__PURE__ */ new Date()).toISOString()}\n\n${(0, _langchain_core_messages.getBufferString)(filteredMessages)}\n\n`;
|
|
2711
|
+
let existingContent = "";
|
|
2712
|
+
try {
|
|
2713
|
+
if (resolvedBackend.downloadFiles) {
|
|
2714
|
+
const responses = await resolvedBackend.downloadFiles([path]);
|
|
2715
|
+
if (responses.length > 0 && responses[0].content && !responses[0].error) existingContent = new TextDecoder().decode(responses[0].content);
|
|
2716
|
+
}
|
|
2717
|
+
} catch {}
|
|
2718
|
+
const combinedContent = existingContent + newSection;
|
|
2719
|
+
try {
|
|
2720
|
+
let result;
|
|
2721
|
+
if (existingContent) result = await resolvedBackend.edit(path, existingContent, combinedContent);
|
|
2722
|
+
else result = await resolvedBackend.write(path, combinedContent);
|
|
2723
|
+
if (result.error) {
|
|
2724
|
+
console.warn(`Failed to offload conversation history to ${path}: ${result.error}`);
|
|
2725
|
+
return null;
|
|
2726
|
+
}
|
|
2727
|
+
return path;
|
|
2728
|
+
} catch (e) {
|
|
2729
|
+
console.warn(`Exception offloading conversation history to ${path}:`, e);
|
|
2730
|
+
return null;
|
|
2731
|
+
}
|
|
2732
|
+
}
|
|
2733
|
+
/**
|
|
2734
|
+
* Create summary of messages.
|
|
2735
|
+
*/
|
|
2736
|
+
async function createSummary(messages, chatModel) {
|
|
2737
|
+
let messagesToSummarize = messages;
|
|
2738
|
+
if ((0, langchain.countTokensApproximately)(messages) > trimTokensToSummarize) {
|
|
2739
|
+
let kept = 0;
|
|
2740
|
+
const trimmedMessages = [];
|
|
2741
|
+
for (let i = messages.length - 1; i >= 0; i--) {
|
|
2742
|
+
const msgTokens = (0, langchain.countTokensApproximately)([messages[i]]);
|
|
2743
|
+
if (kept + msgTokens > trimTokensToSummarize) break;
|
|
2744
|
+
trimmedMessages.unshift(messages[i]);
|
|
2745
|
+
kept += msgTokens;
|
|
2746
|
+
}
|
|
2747
|
+
messagesToSummarize = trimmedMessages;
|
|
2748
|
+
}
|
|
2749
|
+
const conversation = (0, _langchain_core_messages.getBufferString)(messagesToSummarize);
|
|
2750
|
+
const prompt = summaryPrompt.replace("{conversation}", conversation);
|
|
2751
|
+
const response = await chatModel.invoke([new langchain.HumanMessage({ content: prompt })]);
|
|
2752
|
+
return typeof response.content === "string" ? response.content : JSON.stringify(response.content);
|
|
2753
|
+
}
|
|
2754
|
+
/**
|
|
2755
|
+
* Build the summary message with file path reference.
|
|
2756
|
+
*/
|
|
2757
|
+
function buildSummaryMessage(summary, filePath) {
|
|
2758
|
+
let content;
|
|
2759
|
+
if (filePath) content = `You are in the middle of a conversation that has been summarized.
|
|
2760
|
+
|
|
2761
|
+
The full conversation history has been saved to ${filePath} should you need to refer back to it for details.
|
|
2762
|
+
|
|
2763
|
+
A condensed summary follows:
|
|
2764
|
+
|
|
2765
|
+
<summary>
|
|
2766
|
+
${summary}
|
|
2767
|
+
</summary>`;
|
|
2768
|
+
else content = `Here is a summary of the conversation to date:\n\n${summary}`;
|
|
2769
|
+
return new langchain.HumanMessage({
|
|
2770
|
+
content,
|
|
2771
|
+
additional_kwargs: { lc_source: "summarization" }
|
|
2772
|
+
});
|
|
2773
|
+
}
|
|
2774
|
+
/**
|
|
2775
|
+
* Reconstruct the effective message list based on any previous summarization event.
|
|
2776
|
+
*
|
|
2777
|
+
* After summarization, instead of using all messages from state, we use the summary
|
|
2778
|
+
* message plus messages after the cutoff index. This avoids full state rewrites.
|
|
2779
|
+
*/
|
|
2780
|
+
function getEffectiveMessages(messages, state) {
|
|
2781
|
+
const event = state._summarizationEvent;
|
|
2782
|
+
if (!event) return messages;
|
|
2783
|
+
const result = [event.summaryMessage];
|
|
2784
|
+
result.push(...messages.slice(event.cutoffIndex));
|
|
2785
|
+
return result;
|
|
2786
|
+
}
|
|
2787
|
+
/**
|
|
2788
|
+
* Summarize a set of messages using the given model and build the
|
|
2789
|
+
* summary message + backend offload. Returns the summary message,
|
|
2790
|
+
* the file path, and the state cutoff index.
|
|
2791
|
+
*/
|
|
2792
|
+
async function summarizeMessages(messagesToSummarize, resolvedModel, state, previousCutoffIndex, cutoffIndex) {
|
|
2793
|
+
const filePath = await offloadToBackend(getBackend(state), messagesToSummarize, state);
|
|
2794
|
+
if (filePath === null) console.warn(`[SummarizationMiddleware] Backend offload failed during summarization. Proceeding with summary generation.`);
|
|
2795
|
+
return {
|
|
2796
|
+
summaryMessage: buildSummaryMessage(await createSummary(messagesToSummarize, resolvedModel), filePath),
|
|
2797
|
+
filePath,
|
|
2798
|
+
stateCutoffIndex: previousCutoffIndex != null ? previousCutoffIndex + cutoffIndex - 1 : cutoffIndex
|
|
2799
|
+
};
|
|
2800
|
+
}
|
|
2801
|
+
/**
|
|
2802
|
+
* Check if an error (possibly wrapped in MiddlewareError layers) is a
|
|
2803
|
+
* ContextOverflowError by walking the `cause` chain.
|
|
2804
|
+
*/
|
|
2805
|
+
function isContextOverflow(err) {
|
|
2806
|
+
let cause = err;
|
|
2807
|
+
while (cause != null) {
|
|
2808
|
+
if (_langchain_core_errors.ContextOverflowError.isInstance(cause)) return true;
|
|
2809
|
+
cause = typeof cause === "object" && cause !== null && "cause" in cause ? cause.cause : void 0;
|
|
2810
|
+
}
|
|
2811
|
+
return false;
|
|
2812
|
+
}
|
|
2813
|
+
/**
 * Summarize the oldest portion of the conversation and invoke the wrapped
 * model handler with the compacted message list.
 *
 * Flow:
 *   1. Compute a cutoff index; if nothing can be cut, pass messages through.
 *   2. If the cutoff would preserve nothing, first try compacting tool
 *      results instead of summarizing.
 *   3. Summarize messages before the cutoff, call the handler with
 *      [summary, ...preserved]; on a context overflow, widen the token
 *      estimation multiplier, re-summarize EVERYTHING, and retry with only
 *      the summary message.
 *   4. Return a Command recording the summarization event in graph state.
 *
 * NOTE(review): the successful `handler(...)` results inside the try blocks
 * are discarded and a Command is returned instead — presumably the
 * middleware framework applies the state update and the model response is
 * surfaced elsewhere; confirm against createMiddleware's wrapModelCall
 * contract.
 *
 * @param request - The wrapped model-call request (messages, state, tools, systemMessage).
 * @param handler - The downstream handler that actually calls the model.
 * @param truncatedMessages - Messages after any arg truncation has been applied.
 * @param resolvedModel - The chat model used to generate the summary.
 * @param maxInputTokens - The model's input-token budget, if known.
 */
async function performSummarization(request, handler, truncatedMessages, resolvedModel, maxInputTokens) {
	const cutoffIndex = determineCutoffIndex(truncatedMessages, maxInputTokens);
	// Nothing to summarize: delegate unchanged.
	if (cutoffIndex <= 0) return handler({
		...request,
		messages: truncatedMessages
	});
	const messagesToSummarize = truncatedMessages.slice(0, cutoffIndex);
	const preservedMessages = truncatedMessages.slice(cutoffIndex);
	// The cutoff would keep nothing: try shrinking tool results first, and
	// only fall through to full summarization if that still overflows.
	if (preservedMessages.length === 0 && maxInputTokens) {
		const compact = compactToolResults(truncatedMessages, maxInputTokens, request.systemMessage, request.tools);
		if (compact.modified) try {
			return await handler({
				...request,
				messages: compact.messages
			});
		} catch (err) {
			// Only swallow context overflows; anything else propagates.
			if (!isContextOverflow(err)) throw err;
		}
	}
	// Carry forward the cutoff from any previous summarization event so
	// offsets stay relative to the full (pre-summarization) history.
	const previousEvent = request.state._summarizationEvent;
	const previousCutoffIndex = previousEvent != null ? previousEvent.cutoffIndex : void 0;
	const { summaryMessage, filePath, stateCutoffIndex } = await summarizeMessages(messagesToSummarize, resolvedModel, request.state, previousCutoffIndex, cutoffIndex);
	let modifiedMessages = [summaryMessage, ...preservedMessages];
	const modifiedTokens = countTotalTokens(modifiedMessages, request.systemMessage, request.tools);
	let finalStateCutoffIndex = stateCutoffIndex;
	let finalSummaryMessage = summaryMessage;
	let finalFilePath = filePath;
	try {
		await handler({
			...request,
			messages: modifiedMessages
		});
	} catch (err) {
		if (!isContextOverflow(err)) throw err;
		// Our token estimate was too low for this model/tokenizer: learn the
		// observed ratio (plus 10% headroom) for future estimates. Mutates
		// the module-level `tokenEstimationMultiplier`.
		if (maxInputTokens && modifiedTokens > 0) {
			const observedRatio = maxInputTokens / modifiedTokens;
			if (observedRatio > tokenEstimationMultiplier) tokenEstimationMultiplier = observedRatio * 1.1;
		}
		// Emergency path: summarize the ENTIRE history (including previously
		// preserved messages) and retry with only the summary message.
		const reSumResult = await summarizeMessages([...messagesToSummarize, ...preservedMessages], resolvedModel, request.state, previousCutoffIndex, truncatedMessages.length);
		finalSummaryMessage = reSumResult.summaryMessage;
		finalFilePath = reSumResult.filePath;
		finalStateCutoffIndex = reSumResult.stateCutoffIndex;
		modifiedMessages = [reSumResult.summaryMessage];
		await handler({
			...request,
			messages: modifiedMessages
		});
	}
	// Record what was summarized so subsequent calls can resume from the
	// right offset, keyed to the current session.
	return new _langchain_langgraph.Command({ update: {
		_summarizationEvent: {
			cutoffIndex: finalStateCutoffIndex,
			summaryMessage: finalSummaryMessage,
			filePath: finalFilePath
		},
		_summarizationSessionId: getSessionId(request.state)
	} });
}
|
|
2870
|
+
return (0, langchain.createMiddleware)({
|
|
2871
|
+
name: "SummarizationMiddleware",
|
|
2872
|
+
stateSchema: SummarizationStateSchema,
|
|
2873
|
+
/**
 * Middleware hook wrapping every model call: truncates, optionally
 * summarizes, and falls back to emergency summarization when the model
 * reports a context overflow.
 *
 * @param request - The model-call request (messages, state, tools, systemMessage).
 * @param handler - The downstream handler that performs the actual model call.
 */
async wrapModelCall(request, handler) {
	// Apply any previously recorded summarization event to the raw messages.
	const effectiveMessages = getEffectiveMessages(request.messages ?? [], request.state);
	if (effectiveMessages.length === 0) return handler(request);
	/**
	 * Resolve the chat model and get max input tokens from its profile.
	 */
	const resolvedModel = await getChatModel();
	const maxInputTokens = getMaxInputTokens(resolvedModel);
	applyModelDefaults(resolvedModel);
	/**
	 * Step 1: Truncate args if configured
	 */
	const { messages: truncatedMessages } = truncateArgs(effectiveMessages, maxInputTokens, request.systemMessage, request.tools);
	/**
	 * Step 2: Check if summarization should happen.
	 * Count tokens including system message and tools to match what's
	 * actually sent to the model (matching Python implementation).
	 */
	const totalTokens = countTotalTokens(truncatedMessages, request.systemMessage, request.tools);
	/**
	 * If no summarization needed, try passing through.
	 * If the handler throws a ContextOverflowError, fall back to
	 * emergency summarization (matching Python's behavior).
	 */
	if (!shouldSummarize(truncatedMessages, totalTokens, maxInputTokens)) try {
		return await handler({
			...request,
			messages: truncatedMessages
		});
	} catch (err) {
		// Non-overflow errors propagate untouched.
		if (!isContextOverflow(err)) throw err;
		// Token estimate was too low for this model: learn the observed
		// ratio (plus 10% headroom). Mutates the module-level
		// `tokenEstimationMultiplier`, then falls through to Step 3.
		if (maxInputTokens && totalTokens > 0) {
			const observedRatio = maxInputTokens / totalTokens;
			if (observedRatio > tokenEstimationMultiplier) tokenEstimationMultiplier = observedRatio * 1.1;
		}
	}
	/**
	 * Step 3: Perform summarization
	 */
	return performSummarization(request, handler, truncatedMessages, resolvedModel, maxInputTokens);
}
|
|
2914
|
+
});
|
|
2915
|
+
}
|
|
2302
2916
|
|
|
2303
2917
|
//#endregion
|
|
2304
2918
|
//#region src/backends/store.ts
|
|
@@ -3381,6 +3995,311 @@ var CompositeBackend = class {
|
|
|
3381
3995
|
}
|
|
3382
3996
|
};
|
|
3383
3997
|
|
|
3998
|
+
//#endregion
|
|
3999
|
+
//#region src/backends/local-shell.ts
|
|
4000
|
+
/**
 * LocalShellBackend: Node.js implementation of the filesystem backend with unrestricted local shell execution.
 *
 * This backend extends FilesystemBackend to add shell command execution on the local
 * host system. It provides NO sandboxing or isolation - all operations run directly
 * on the host machine with full system access.
 *
 * @module
 */
/**
 * Filesystem backend with unrestricted local shell command execution.
 *
 * This backend extends FilesystemBackend to add shell command execution
 * capabilities. Commands are executed directly on the host system without any
 * sandboxing, process isolation, or security restrictions.
 *
 * **Security Warning:**
 * This backend grants agents BOTH direct filesystem access AND unrestricted
 * shell execution on your local machine. Use with extreme caution and only in
 * appropriate environments.
 *
 * **Appropriate use cases:**
 * - Local development CLIs (coding assistants, development tools)
 * - Personal development environments where you trust the agent's code
 * - CI/CD pipelines with proper secret management
 *
 * **Inappropriate use cases:**
 * - Production environments (e.g., web servers, APIs, multi-tenant systems)
 * - Processing untrusted user input or executing untrusted code
 *
 * Use StateBackend, StoreBackend, or extend BaseSandbox for production.
 *
 * @example
 * ```typescript
 * import { LocalShellBackend } from "@langchain/deepagents";
 *
 * // Create backend with explicit environment
 * const backend = new LocalShellBackend({
 *   rootDir: "/home/user/project",
 *   env: { PATH: "/usr/bin:/bin" },
 * });
 *
 * // Execute shell commands (runs directly on host)
 * const result = await backend.execute("ls -la");
 * console.log(result.output);
 * console.log(result.exitCode);
 *
 * // Use filesystem operations (inherited from FilesystemBackend)
 * const content = await backend.read("/README.md");
 * await backend.write("/output.txt", "Hello world");
 *
 * // Inherit all environment variables
 * const backend2 = new LocalShellBackend({
 *   rootDir: "/home/user/project",
 *   inheritEnv: true,
 * });
 * ```
 */
var LocalShellBackend = class LocalShellBackend extends FilesystemBackend {
	// Per-command timeout in seconds.
	#timeout;
	// Maximum combined output size before truncation. NOTE(review): measured
	// via `output.length` (UTF-16 code units), so "bytes" is approximate for
	// non-ASCII output — confirm whether byte-accurate truncation is needed.
	#maxOutputBytes;
	// Environment passed to spawned shells (explicit, inherited, or merged).
	#env;
	// Unique instance id, format "local-{8 hex chars}".
	#sandboxId;
	// Guards double-initialization; flipped by initialize()/close().
	#initialized = false;
	/**
	 * @param options.rootDir - Working directory for file ops and commands.
	 * @param options.virtualMode - Present virtual "/" paths (default false).
	 * @param options.timeout - Command timeout in seconds (default 120).
	 * @param options.maxOutputBytes - Output cap before truncation (default 1e5).
	 * @param options.env - Explicit environment variables for commands.
	 * @param options.inheritEnv - Merge process.env under `env` (default false).
	 */
	constructor(options = {}) {
		const { rootDir, virtualMode = false, timeout = 120, maxOutputBytes = 1e5, env, inheritEnv = false } = options;
		super({
			rootDir,
			virtualMode,
			maxFileSizeMb: 10
		});
		this.#timeout = timeout;
		this.#maxOutputBytes = maxOutputBytes;
		// 4 random bytes -> 8 hex chars for the instance id (global Web
		// Crypto, available in Node 19+).
		const bytes = new Uint8Array(4);
		crypto.getRandomValues(bytes);
		this.#sandboxId = `local-${[...bytes].map((b) => b.toString(16).padStart(2, "0")).join("")}`;
		if (inheritEnv) {
			// Explicit `env` entries override inherited process.env values.
			this.#env = { ...process.env };
			if (env) Object.assign(this.#env, env);
		} else this.#env = env ?? {};
	}
	/** Unique identifier for this backend instance (format: "local-{random_hex}"). */
	get id() {
		return this.#sandboxId;
	}
	/** Whether the backend has been initialized and is ready to use. */
	get isInitialized() {
		return this.#initialized;
	}
	/** Alias for `isInitialized`, matching the standard sandbox interface. */
	get isRunning() {
		return this.#initialized;
	}
	/**
	 * Initialize the backend by ensuring the rootDir exists.
	 *
	 * Creates the rootDir (and any parent directories) if it does not already
	 * exist. Safe to call on an existing directory. Must be called before
	 * `execute()`, or use the static `LocalShellBackend.create()` factory.
	 *
	 * @throws {SandboxError} If already initialized (`ALREADY_INITIALIZED`)
	 */
	async initialize() {
		if (this.#initialized) throw new SandboxError("Backend is already initialized. Each LocalShellBackend instance can only be initialized once.", "ALREADY_INITIALIZED");
		await node_fs_promises.default.mkdir(this.cwd, { recursive: true });
		this.#initialized = true;
	}
	/**
	 * Mark the backend as no longer running.
	 *
	 * For local shell backends there is no remote resource to tear down,
	 * so this simply flips the `isRunning` / `isInitialized` flag.
	 */
	async close() {
		this.#initialized = false;
	}
	/**
	 * Read a file, adapting error messages to the standard sandbox format.
	 *
	 * @param filePath - Path of the file to read.
	 * @param offset - Line offset to start reading from (default 0).
	 * @param limit - Maximum number of lines to return (default 500).
	 */
	async read(filePath, offset = 0, limit = 500) {
		const result = await super.read(filePath, offset, limit);
		// Rewrite the parent's raw ENOENT message into the standard
		// "File '<path>' not found" sandbox error string.
		if (typeof result === "string" && result.startsWith("Error reading file") && result.includes("ENOENT")) return `Error: File '${filePath}' not found`;
		return result;
	}
	/**
	 * Edit a file, adapting error messages to the standard sandbox format.
	 *
	 * @param filePath - Path of the file to edit.
	 * @param oldString - Text to replace.
	 * @param newString - Replacement text.
	 * @param replaceAll - Replace every occurrence (default false).
	 */
	async edit(filePath, oldString, newString, replaceAll = false) {
		const result = await super.edit(filePath, oldString, newString, replaceAll);
		if (result.error?.includes("ENOENT")) return {
			...result,
			error: `Error: File '${filePath}' not found`
		};
		return result;
	}
	/**
	 * List directory contents, returning paths relative to rootDir.
	 *
	 * In virtual mode the parent already returns virtual paths; otherwise
	 * the rootDir prefix is stripped from each absolute path.
	 */
	async lsInfo(dirPath) {
		const results = await super.lsInfo(dirPath);
		if (this.virtualMode) return results;
		const cwdPrefix = this.cwd.endsWith(node_path.default.sep) ? this.cwd : this.cwd + node_path.default.sep;
		return results.map((info) => ({
			...info,
			path: info.path.startsWith(cwdPrefix) ? info.path.slice(cwdPrefix.length) : info.path
		}));
	}
	/**
	 * Glob matching that returns relative paths and includes directories.
	 *
	 * Runs the pattern twice via fast-glob (files and directories), stats
	 * each match, and returns a merged list sorted by path. Matches whose
	 * stat fails (e.g. deleted between glob and stat) are silently dropped.
	 *
	 * @param pattern - Glob pattern (a leading "/" is stripped).
	 * @param searchPath - Directory to search within (default "/").
	 */
	async globInfo(pattern, searchPath = "/") {
		if (pattern.startsWith("/")) pattern = pattern.substring(1);
		const resolvedSearchPath = searchPath === "/" || searchPath === "" ? this.cwd : this.virtualMode ? node_path.default.resolve(this.cwd, searchPath.replace(/^\//, "")) : node_path.default.resolve(this.cwd, searchPath);
		// A missing or non-directory search path yields no matches.
		try {
			if (!(await node_fs_promises.default.stat(resolvedSearchPath)).isDirectory()) return [];
		} catch {
			return [];
		}
		const formatPath = (rel) => this.virtualMode ? `/${rel}` : rel;
		const globOpts = {
			cwd: resolvedSearchPath,
			absolute: false,
			dot: true
		};
		const [fileMatches, dirMatches] = await Promise.all([(0, fast_glob.default)(pattern, {
			...globOpts,
			onlyFiles: true
		}), (0, fast_glob.default)(pattern, {
			...globOpts,
			onlyDirectories: true
		})]);
		const statFile = async (match) => {
			try {
				const entryStat = await node_fs_promises.default.stat(node_path.default.join(resolvedSearchPath, match));
				if (entryStat.isFile()) return {
					path: formatPath(match),
					is_dir: false,
					size: entryStat.size,
					modified_at: entryStat.mtime.toISOString()
				};
			} catch {}
			return null;
		};
		const statDir = async (match) => {
			try {
				const entryStat = await node_fs_promises.default.stat(node_path.default.join(resolvedSearchPath, match));
				if (entryStat.isDirectory()) return {
					path: formatPath(match),
					is_dir: true,
					size: 0,
					modified_at: entryStat.mtime.toISOString()
				};
			} catch {}
			return null;
		};
		const [fileInfos, dirInfos] = await Promise.all([Promise.all(fileMatches.map(statFile)), Promise.all(dirMatches.map(statDir))]);
		const results = [...fileInfos, ...dirInfos].filter((info) => info !== null);
		results.sort((a, b) => a.path.localeCompare(b.path));
		return results;
	}
	/**
	 * Execute a shell command directly on the host system.
	 *
	 * Commands are executed directly on your host system using `spawn()`
	 * with `shell: true`. There is NO sandboxing, isolation, or security
	 * restrictions. The command runs with your user's full permissions.
	 *
	 * The command is executed using the system shell with the working directory
	 * set to the backend's rootDir. Stdout and stderr are combined into a single
	 * output stream, with stderr lines prefixed with `[stderr]`.
	 *
	 * NOTE(review): on timeout, SIGTERM is sent to the spawned shell only;
	 * grandchild processes may survive (no process group kill) — confirm
	 * whether that is acceptable for long-running commands.
	 *
	 * @param command - Shell command string to execute
	 * @returns ExecuteResponse containing output, exit code, and truncation flag
	 */
	async execute(command) {
		if (!command || typeof command !== "string") return {
			output: "Error: Command must be a non-empty string.",
			exitCode: 1,
			truncated: false
		};
		// Promise adapter over the child_process event API; `resolve` is the
		// single exit path (this method never rejects).
		return new Promise((resolve) => {
			let stdout = "";
			let stderr = "";
			let timedOut = false;
			const child = node_child_process.default.spawn(command, {
				shell: true,
				env: this.#env,
				cwd: this.cwd
			});
			const timer = setTimeout(() => {
				timedOut = true;
				child.kill("SIGTERM");
			}, this.#timeout * 1e3);
			child.stdout.on("data", (data) => {
				stdout += data.toString();
			});
			child.stderr.on("data", (data) => {
				stderr += data.toString();
			});
			// Spawn failures (e.g. shell not found) resolve with exit code 1.
			child.on("error", (err) => {
				clearTimeout(timer);
				resolve({
					output: `Error executing command: ${err.message}`,
					exitCode: 1,
					truncated: false
				});
			});
			child.on("close", (code, signal) => {
				clearTimeout(timer);
				// Treat our own SIGTERM (or any external SIGTERM) as a
				// timeout; 124 matches the GNU `timeout` convention.
				if (timedOut || signal === "SIGTERM") {
					resolve({
						output: `Error: Command timed out after ${this.#timeout.toFixed(1)} seconds.`,
						exitCode: 124,
						truncated: false
					});
					return;
				}
				const outputParts = [];
				if (stdout) outputParts.push(stdout);
				if (stderr) {
					const stderrLines = stderr.trim().split("\n");
					outputParts.push(...stderrLines.map((line) => `[stderr] ${line}`));
				}
				let output = outputParts.length > 0 ? outputParts.join("\n") : "<no output>";
				let truncated = false;
				if (output.length > this.#maxOutputBytes) {
					output = output.slice(0, this.#maxOutputBytes);
					output += `\n\n... Output truncated at ${this.#maxOutputBytes} bytes.`;
					truncated = true;
				}
				// `code` is null when the child was killed by a signal.
				const exitCode = code ?? 1;
				if (exitCode !== 0) output = `${output.trimEnd()}\n\nExit code: ${exitCode}`;
				resolve({
					output,
					exitCode,
					truncated
				});
			});
		});
	}
	/**
	 * Create and initialize a new LocalShellBackend in one step.
	 *
	 * This is the recommended way to create a backend when the rootDir may
	 * not exist yet. It combines construction and initialization (ensuring
	 * rootDir exists) into a single async operation.
	 *
	 * @param options - Configuration options for the backend; may include
	 *   `initialFiles`, a map of path -> string content written (UTF-8
	 *   encoded) into the backend after initialization.
	 * @returns An initialized and ready-to-use backend
	 */
	static async create(options = {}) {
		const { initialFiles, ...backendOptions } = options;
		const backend = new LocalShellBackend(backendOptions);
		await backend.initialize();
		if (initialFiles) {
			const encoder = new TextEncoder();
			const files = Object.entries(initialFiles).map(([filePath, content]) => [filePath, encoder.encode(content)]);
			await backend.uploadFiles(files);
		}
		return backend;
	}
};
|
|
4302
|
+
|
|
3384
4303
|
//#endregion
|
|
3385
4304
|
//#region src/backends/sandbox.ts
|
|
3386
4305
|
/**
|
|
@@ -3722,7 +4641,7 @@ const BASE_PROMPT = `In order to complete the objective that the user asks of yo
|
|
|
3722
4641
|
* - Todo management (todoListMiddleware)
|
|
3723
4642
|
* - Filesystem tools (createFilesystemMiddleware)
|
|
3724
4643
|
* - Subagent delegation (createSubAgentMiddleware)
|
|
3725
|
-
* - Conversation summarization (
|
|
4644
|
+
* - Conversation summarization (createSummarizationMiddleware) with backend offloading
|
|
3726
4645
|
* - Prompt caching (anthropicPromptCachingMiddleware)
|
|
3727
4646
|
* - Tool call patching (createPatchToolCallsMiddleware)
|
|
3728
4647
|
* - Human-in-the-loop (humanInTheLoopMiddleware) - optional
|
|
@@ -3810,14 +4729,19 @@ function createDeepAgent(params = {}) {
|
|
|
3810
4729
|
/**
|
|
3811
4730
|
* Middleware for custom subagents (does NOT include skills from main agent).
|
|
3812
4731
|
* Custom subagents must define their own `skills` property to get skills.
|
|
4732
|
+
*
|
|
4733
|
+
* Uses createSummarizationMiddleware (deepagents version) with backend support
|
|
4734
|
+
* and auto-computed defaults from model profile, matching Python's create_deep_agent.
|
|
4735
|
+
* When trigger is not provided, defaults are lazily computed:
|
|
4736
|
+
* - With model profile: fraction-based (trigger=0.85, keep=0.10)
|
|
4737
|
+
* - Without profile: fixed (trigger=170k tokens, keep=6 messages)
|
|
3813
4738
|
*/
|
|
3814
4739
|
const subagentMiddleware = [
|
|
3815
4740
|
(0, langchain.todoListMiddleware)(),
|
|
3816
4741
|
createFilesystemMiddleware({ backend: filesystemBackend }),
|
|
3817
|
-
(
|
|
4742
|
+
createSummarizationMiddleware({
|
|
3818
4743
|
model,
|
|
3819
|
-
|
|
3820
|
-
keep: { messages: 6 }
|
|
4744
|
+
backend: filesystemBackend
|
|
3821
4745
|
}),
|
|
3822
4746
|
(0, langchain.anthropicPromptCachingMiddleware)({ unsupportedModelBehavior: "ignore" }),
|
|
3823
4747
|
createPatchToolCallsMiddleware()
|
|
@@ -3848,10 +4772,9 @@ function createDeepAgent(params = {}) {
|
|
|
3848
4772
|
subagents: processedSubagents,
|
|
3849
4773
|
generalPurposeAgent: true
|
|
3850
4774
|
}),
|
|
3851
|
-
(
|
|
4775
|
+
createSummarizationMiddleware({
|
|
3852
4776
|
model,
|
|
3853
|
-
|
|
3854
|
-
keep: { messages: 6 }
|
|
4777
|
+
backend: filesystemBackend
|
|
3855
4778
|
}),
|
|
3856
4779
|
(0, langchain.anthropicPromptCachingMiddleware)({ unsupportedModelBehavior: "ignore" }),
|
|
3857
4780
|
createPatchToolCallsMiddleware()
|
|
@@ -4420,6 +5343,7 @@ exports.DEFAULT_GENERAL_PURPOSE_DESCRIPTION = DEFAULT_GENERAL_PURPOSE_DESCRIPTIO
|
|
|
4420
5343
|
exports.DEFAULT_SUBAGENT_PROMPT = DEFAULT_SUBAGENT_PROMPT;
|
|
4421
5344
|
exports.FilesystemBackend = FilesystemBackend;
|
|
4422
5345
|
exports.GENERAL_PURPOSE_SUBAGENT = GENERAL_PURPOSE_SUBAGENT;
|
|
5346
|
+
exports.LocalShellBackend = LocalShellBackend;
|
|
4423
5347
|
exports.MAX_SKILL_DESCRIPTION_LENGTH = MAX_SKILL_DESCRIPTION_LENGTH;
|
|
4424
5348
|
exports.MAX_SKILL_FILE_SIZE = MAX_SKILL_FILE_SIZE;
|
|
4425
5349
|
exports.MAX_SKILL_NAME_LENGTH = MAX_SKILL_NAME_LENGTH;
|
|
@@ -4427,6 +5351,7 @@ exports.SandboxError = SandboxError;
|
|
|
4427
5351
|
exports.StateBackend = StateBackend;
|
|
4428
5352
|
exports.StoreBackend = StoreBackend;
|
|
4429
5353
|
exports.TASK_SYSTEM_PROMPT = TASK_SYSTEM_PROMPT;
|
|
5354
|
+
exports.computeSummarizationDefaults = computeSummarizationDefaults;
|
|
4430
5355
|
exports.createAgentMemoryMiddleware = createAgentMemoryMiddleware;
|
|
4431
5356
|
exports.createDeepAgent = createDeepAgent;
|
|
4432
5357
|
exports.createFilesystemMiddleware = createFilesystemMiddleware;
|
|
@@ -4435,6 +5360,7 @@ exports.createPatchToolCallsMiddleware = createPatchToolCallsMiddleware;
|
|
|
4435
5360
|
exports.createSettings = createSettings;
|
|
4436
5361
|
exports.createSkillsMiddleware = createSkillsMiddleware;
|
|
4437
5362
|
exports.createSubAgentMiddleware = createSubAgentMiddleware;
|
|
5363
|
+
exports.createSummarizationMiddleware = createSummarizationMiddleware;
|
|
4438
5364
|
exports.filesValue = filesValue;
|
|
4439
5365
|
exports.findProjectRoot = findProjectRoot;
|
|
4440
5366
|
exports.isSandboxBackend = isSandboxBackend;
|