@librechat/agents 3.1.73 → 3.1.75

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/README.md +66 -0
  2. package/dist/cjs/agents/AgentContext.cjs +146 -57
  3. package/dist/cjs/agents/AgentContext.cjs.map +1 -1
  4. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +4 -1
  5. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
  6. package/dist/cjs/main.cjs +1 -0
  7. package/dist/cjs/main.cjs.map +1 -1
  8. package/dist/cjs/messages/cache.cjs +37 -3
  9. package/dist/cjs/messages/cache.cjs.map +1 -1
  10. package/dist/cjs/tools/BashExecutor.cjs +21 -11
  11. package/dist/cjs/tools/BashExecutor.cjs.map +1 -1
  12. package/dist/cjs/tools/CodeExecutor.cjs +37 -10
  13. package/dist/cjs/tools/CodeExecutor.cjs.map +1 -1
  14. package/dist/cjs/tools/ProgrammaticToolCalling.cjs +16 -11
  15. package/dist/cjs/tools/ProgrammaticToolCalling.cjs.map +1 -1
  16. package/dist/esm/agents/AgentContext.mjs +147 -58
  17. package/dist/esm/agents/AgentContext.mjs.map +1 -1
  18. package/dist/esm/llm/anthropic/utils/message_inputs.mjs +4 -1
  19. package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
  20. package/dist/esm/main.mjs +1 -1
  21. package/dist/esm/messages/cache.mjs +37 -3
  22. package/dist/esm/messages/cache.mjs.map +1 -1
  23. package/dist/esm/tools/BashExecutor.mjs +22 -12
  24. package/dist/esm/tools/BashExecutor.mjs.map +1 -1
  25. package/dist/esm/tools/CodeExecutor.mjs +37 -11
  26. package/dist/esm/tools/CodeExecutor.mjs.map +1 -1
  27. package/dist/esm/tools/ProgrammaticToolCalling.mjs +17 -12
  28. package/dist/esm/tools/ProgrammaticToolCalling.mjs.map +1 -1
  29. package/dist/types/agents/AgentContext.d.ts +29 -4
  30. package/dist/types/agents/__tests__/promptCacheLiveHelpers.d.ts +46 -0
  31. package/dist/types/tools/CodeExecutor.d.ts +6 -0
  32. package/dist/types/types/graph.d.ts +3 -1
  33. package/dist/types/types/run.d.ts +2 -0
  34. package/dist/types/types/tools.d.ts +9 -0
  35. package/package.json +1 -1
  36. package/src/agents/AgentContext.ts +189 -71
  37. package/src/agents/__tests__/AgentContext.anthropic.live.test.ts +116 -0
  38. package/src/agents/__tests__/AgentContext.bedrock.live.test.ts +149 -0
  39. package/src/agents/__tests__/AgentContext.test.ts +333 -2
  40. package/src/agents/__tests__/promptCacheLiveHelpers.ts +165 -0
  41. package/src/llm/anthropic/utils/message_inputs.ts +6 -1
  42. package/src/llm/anthropic/utils/server-tool-inputs.test.ts +77 -0
  43. package/src/messages/cache.test.ts +104 -3
  44. package/src/messages/cache.ts +54 -3
  45. package/src/specs/anthropic.simple.test.ts +61 -0
  46. package/src/specs/summarization.test.ts +7 -3
  47. package/src/tools/BashExecutor.ts +37 -13
  48. package/src/tools/CodeExecutor.ts +55 -11
  49. package/src/tools/ProgrammaticToolCalling.ts +29 -14
  50. package/src/tools/__tests__/ProgrammaticToolCalling.test.ts +60 -0
  51. package/src/types/graph.ts +3 -1
  52. package/src/types/run.ts +2 -0
  53. package/src/types/tools.ts +9 -0
@@ -1,8 +1,9 @@
1
1
  import {
2
2
  AIMessage,
3
3
  BaseMessage,
4
- ToolMessage,
5
4
  HumanMessage,
5
+ SystemMessage,
6
+ ToolMessage,
6
7
  MessageContentComplex,
7
8
  } from '@langchain/core/messages';
8
9
  import type Anthropic from '@anthropic-ai/sdk';
@@ -404,7 +405,107 @@ describe('addBedrockCacheControl (Bedrock cache checkpoints)', () => {
404
405
  expect(first[1]).toEqual({ cachePoint: { type: 'default' } });
405
406
  });
406
407
 
407
- it('works with the example from the langchain pr (with multi-turn behavior)', () => {
408
+ it('preserves LangChain system message content unchanged', () => {
409
+ const systemContent = [
410
+ { type: ContentTypes.TEXT, text: 'Stable system text' },
411
+ { cachePoint: { type: 'default' } },
412
+ { type: ContentTypes.TEXT, text: 'Dynamic system text' },
413
+ ] as MessageContentComplex[];
414
+ const messages: BaseMessage[] = [
415
+ new SystemMessage({ content: systemContent }),
416
+ new HumanMessage('Hello'),
417
+ new AIMessage('Hi'),
418
+ ];
419
+
420
+ const result = addBedrockCacheControl(messages);
421
+
422
+ expect(result[0]).toBe(messages[0]);
423
+ expect(result[0].content).toEqual(systemContent);
424
+ });
425
+
426
+ it('preserves serialized system message content unchanged', () => {
427
+ const systemContent = [
428
+ { type: ContentTypes.TEXT, text: 'Stable system text' },
429
+ { cachePoint: { type: 'default' } },
430
+ { type: ContentTypes.TEXT, text: 'Dynamic system text' },
431
+ ] as MessageContentComplex[];
432
+ const messages: TestMsg[] = [
433
+ { role: 'system', content: systemContent },
434
+ { role: 'user', content: 'Hello' },
435
+ { role: 'assistant', content: 'Hi' },
436
+ ];
437
+
438
+ const result = addBedrockCacheControl(messages);
439
+
440
+ expect(result[0]).toBe(messages[0]);
441
+ expect(result[0].content).toEqual(systemContent);
442
+ });
443
+
444
+ it('strips Anthropic cache_control from LangChain system messages without moving cache points', () => {
445
+ const systemContent = [
446
+ {
447
+ type: ContentTypes.TEXT,
448
+ text: 'Stable system text',
449
+ cache_control: { type: 'ephemeral' },
450
+ } as MessageContentComplex,
451
+ { cachePoint: { type: 'default' } },
452
+ {
453
+ type: ContentTypes.TEXT,
454
+ text: 'Dynamic system text',
455
+ cache_control: { type: 'ephemeral' },
456
+ } as MessageContentComplex,
457
+ ] as MessageContentComplex[];
458
+ const messages: BaseMessage[] = [
459
+ new SystemMessage({ content: systemContent }),
460
+ new HumanMessage('Hello'),
461
+ new AIMessage('Hi'),
462
+ ];
463
+
464
+ const result = addBedrockCacheControl(messages);
465
+
466
+ expect(result[0]).not.toBe(messages[0]);
467
+ expect(result[0].content).toEqual([
468
+ { type: ContentTypes.TEXT, text: 'Stable system text' },
469
+ { cachePoint: { type: 'default' } },
470
+ { type: ContentTypes.TEXT, text: 'Dynamic system text' },
471
+ ]);
472
+ expect(systemContent[0]).toHaveProperty('cache_control');
473
+ expect(systemContent[2]).toHaveProperty('cache_control');
474
+ });
475
+
476
+ it('strips Anthropic cache_control from serialized system messages without moving cache points', () => {
477
+ const systemContent = [
478
+ {
479
+ type: ContentTypes.TEXT,
480
+ text: 'Stable system text',
481
+ cache_control: { type: 'ephemeral' },
482
+ } as MessageContentComplex,
483
+ { cachePoint: { type: 'default' } },
484
+ {
485
+ type: ContentTypes.TEXT,
486
+ text: 'Dynamic system text',
487
+ cache_control: { type: 'ephemeral' },
488
+ } as MessageContentComplex,
489
+ ] as MessageContentComplex[];
490
+ const messages: TestMsg[] = [
491
+ { role: 'system', content: systemContent },
492
+ { role: 'user', content: 'Hello' },
493
+ { role: 'assistant', content: 'Hi' },
494
+ ];
495
+
496
+ const result = addBedrockCacheControl(messages);
497
+
498
+ expect(result[0]).not.toBe(messages[0]);
499
+ expect(result[0].content).toEqual([
500
+ { type: ContentTypes.TEXT, text: 'Stable system text' },
501
+ { cachePoint: { type: 'default' } },
502
+ { type: ContentTypes.TEXT, text: 'Dynamic system text' },
503
+ ]);
504
+ expect(systemContent[0]).toHaveProperty('cache_control');
505
+ expect(systemContent[2]).toHaveProperty('cache_control');
506
+ });
507
+
508
+ it('skips serialized system messages while adding cache points to non-system turns', () => {
408
509
  const messages: TestMsg[] = [
409
510
  {
410
511
  role: 'system',
@@ -429,7 +530,7 @@ describe('addBedrockCacheControl (Bedrock cache checkpoints)', () => {
429
530
  type: ContentTypes.TEXT,
430
531
  text: 'You\'re an advanced AI assistant.',
431
532
  });
432
- expect(system[1]).toEqual({ cachePoint: { type: 'default' } });
533
+ expect(system).toHaveLength(1);
433
534
  expect(user[0]).toEqual({
434
535
  type: ContentTypes.TEXT,
435
536
  text: 'What is the capital of France?',
@@ -14,6 +14,10 @@ type MessageWithContent = {
14
14
  content?: string | MessageContentComplex[];
15
15
  };
16
16
 
17
+ type MessageContentWithCacheControl = MessageContentComplex & {
18
+ cache_control?: unknown;
19
+ };
20
+
17
21
  /**
18
22
  * Deep clones a message's content to prevent mutation of the original.
19
23
  */
@@ -101,6 +105,40 @@ function cloneMessage<T extends MessageWithContent>(
101
105
  return cloned;
102
106
  }
103
107
 
108
+ function stripAnthropicCacheControlFromBlocks(
109
+ content: MessageContentComplex[]
110
+ ): { content: MessageContentComplex[]; modified: boolean } {
111
+ let modified = false;
112
+ const strippedContent = content.map((block) => {
113
+ if (!('cache_control' in block)) {
114
+ return block;
115
+ }
116
+
117
+ const cloned: MessageContentWithCacheControl = { ...block };
118
+ delete cloned.cache_control;
119
+ modified = true;
120
+ return cloned;
121
+ });
122
+
123
+ return { content: strippedContent, modified };
124
+ }
125
+
126
+ function sanitizeBedrockSystemMessage<T extends MessageWithContent>(
127
+ message: T
128
+ ): T {
129
+ const content = message.content;
130
+ if (!Array.isArray(content)) {
131
+ return message;
132
+ }
133
+
134
+ const stripped = stripAnthropicCacheControlFromBlocks(content);
135
+ if (!stripped.modified) {
136
+ return message;
137
+ }
138
+
139
+ return cloneMessage(message, stripped.content);
140
+ }
141
+
104
142
  /**
105
143
  * Anthropic API: Adds cache control to the appropriate user messages in the payload.
106
144
  * Strips ALL existing cache control (both Anthropic and Bedrock formats) from all messages,
@@ -310,11 +348,24 @@ export function addBedrockCacheControl<
310
348
 
311
349
  for (let i = updatedMessages.length - 1; i >= 0; i--) {
312
350
  const originalMessage = updatedMessages[i];
313
- const isToolMessage =
351
+ const messageType =
314
352
  'getType' in originalMessage &&
315
- typeof originalMessage.getType === 'function' &&
316
- originalMessage.getType() === 'tool';
353
+ typeof originalMessage.getType === 'function'
354
+ ? originalMessage.getType()
355
+ : undefined;
356
+ const messageRole =
357
+ 'role' in originalMessage && typeof originalMessage.role === 'string'
358
+ ? originalMessage.role
359
+ : undefined;
360
+
361
+ const isSystemMessage =
362
+ messageType === 'system' || messageRole === 'system';
363
+ if (isSystemMessage) {
364
+ updatedMessages[i] = sanitizeBedrockSystemMessage(originalMessage);
365
+ continue;
366
+ }
317
367
 
368
+ const isToolMessage = messageType === 'tool' || messageRole === 'tool';
318
369
  const content = originalMessage.content;
319
370
  const hasArrayContent = Array.isArray(content);
320
371
  const isEmptyString = typeof content === 'string' && content === '';
@@ -376,6 +376,67 @@ describe(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
376
376
  );
377
377
  });
378
378
 
379
+ test(`${capitalizeFirstLetter(provider)}: follow-up after assistant message with only whitespace text content`, async () => {
380
+ /**
381
+ * Regression for LibreChat discussion #12806.
382
+ *
383
+ * The Anthropic API has two distinct rejection rules (verified against
384
+ * the live API):
385
+ * 1. Strict empty `text: ''` → rejected anywhere
386
+ * "messages: text content blocks must be non-empty"
387
+ * 2. Whitespace-only `text: ' '` / '\n' / '\t' → rejected when the
388
+ * assistant message has no other accepted blocks (no tool blocks,
389
+ * no non-whitespace text)
390
+ * "messages: text content blocks must contain non-whitespace text"
391
+ *
392
+ * Anthropic responses for some prompts include a whitespace-only text
393
+ * block as the sole text content. Re-sending that history on a
394
+ * follow-up turn triggers rule 2.
395
+ *
396
+ * The wire-send filter in `_formatContent` must drop any text block
397
+ * whose trimmed content is empty. The previous filter used strict
398
+ * `text === ''` only, which caught rule 1 but not rule 2.
399
+ */
400
+ const llmConfig = getLLMConfig(provider);
401
+ const customHandlers1 = setupCustomHandlers();
402
+
403
+ const followUpRun = await Run.create<t.IState>({
404
+ runId: 'repro-12806-followup',
405
+ graphConfig: {
406
+ type: 'standard',
407
+ llmConfig,
408
+ instructions: 'You are a friendly AI assistant.',
409
+ },
410
+ returnContent: true,
411
+ skipCleanup: true,
412
+ customHandlers: customHandlers1,
413
+ });
414
+
415
+ // Build history with an assistant message whose entire content array
416
+ // is a single whitespace-only text block. This is the precise shape
417
+ // the API rejects under rule 2 above.
418
+ conversationHistory = [
419
+ new HumanMessage('hi'),
420
+ new (require('@langchain/core/messages').AIMessage)({
421
+ content: [{ type: 'text', text: ' ' }],
422
+ }),
423
+ new HumanMessage('please respond with a short greeting'),
424
+ ];
425
+
426
+ // With the fix: `_formatContent` drops the whitespace text block,
427
+ // the assistant content becomes an empty array, and the API accepts.
428
+ // Without the fix: the whitespace block is forwarded and the API
429
+ // rejects with "messages: text content blocks must contain non-whitespace text".
430
+ const finalContentParts = await followUpRun.processStream(
431
+ { messages: conversationHistory },
432
+ config
433
+ );
434
+ expect(finalContentParts).toBeDefined();
435
+ const finalMessages = followUpRun.getRunMessages();
436
+ expect(finalMessages).toBeDefined();
437
+ expect(finalMessages?.length).toBeGreaterThan(0);
438
+ });
439
+
379
440
  test('should handle errors appropriately', async () => {
380
441
  // Test error scenarios
381
442
  await expect(async () => {
@@ -22,6 +22,8 @@ import { formatAgentMessages } from '@/messages/format';
22
22
  import { FakeListChatModel } from '@langchain/core/utils/testing';
23
23
  import * as providers from '@/llm/providers';
24
24
 
25
+ const SUMMARY_WRAPPER_OVERHEAD_TOKENS = 33;
26
+
25
27
  /** Extract plain text from a SummaryContentBlock's content array (test helper). */
26
28
  function getSummaryText(summary: t.SummaryContentBlock | undefined): string {
27
29
  if (!summary) return '';
@@ -1443,7 +1445,8 @@ describe('Cross-run summary lifecycle (no API keys)', () => {
1443
1445
  expect(completePayload.summary!.tokenCount ?? 0).toBeGreaterThan(0);
1444
1446
 
1445
1447
  const expectedTokenCount =
1446
- tokenCounter(new SystemMessage(KNOWN_SUMMARY)) + 33;
1448
+ tokenCounter(new SystemMessage(KNOWN_SUMMARY)) +
1449
+ SUMMARY_WRAPPER_OVERHEAD_TOKENS;
1447
1450
  expect(completePayload.summary!.tokenCount).toBe(expectedTokenCount);
1448
1451
 
1449
1452
  const summaryBlock = completePayload.summary!;
@@ -2605,8 +2608,9 @@ const hasAnyApiKey =
2605
2608
  const summaryText = getSummaryText(completePayload.summary);
2606
2609
  const reportedTokenCount = completePayload.summary!.tokenCount ?? 0;
2607
2610
 
2608
- // Count tokens locally using the same tokenizer
2609
- const localTokenCount = tokenCounter(new SystemMessage(summaryText));
2611
+ const localTokenCount =
2612
+ tokenCounter(new SystemMessage(summaryText)) +
2613
+ SUMMARY_WRAPPER_OVERHEAD_TOKENS;
2610
2614
 
2611
2615
  console.log(
2612
2616
  ` Token match: reported=${reportedTokenCount}, local=${localTokenCount}`
@@ -3,17 +3,23 @@ import fetch, { RequestInit } from 'node-fetch';
3
3
  import { HttpsProxyAgent } from 'https-proxy-agent';
4
4
  import { tool, DynamicStructuredTool } from '@langchain/core/tools';
5
5
  import type * as t from '@/types';
6
- import { imageExtRegex, getCodeBaseURL } from './CodeExecutor';
6
+ import { getCodeBaseURL, renderFileSection } from './CodeExecutor';
7
7
  import { Constants } from '@/common';
8
8
 
9
9
  config();
10
10
 
11
- const imageMessage = 'Image is already displayed to the user';
12
11
  const otherMessage = 'File is already downloaded by the user';
12
+ const inheritedFileMessage =
13
+ 'Available as an input — already known to the user';
13
14
  const accessMessage =
14
15
  'Note: Files from previous executions are automatically available and can be modified.';
15
16
  const emptyOutputMessage =
16
17
  'stdout: Empty. Ensure you\'re writing output explicitly.\n';
18
+ const inheritedFilesHeader =
19
+ 'Available files (inputs, not generated by this execution):';
20
+ const generatedFilesHeader = 'Generated files:';
21
+ const inheritedNote =
22
+ 'Note: Files in "Available files" are inputs the user (or a skill) already provided to the sandbox. They were not produced by this execution and you should not present them as new outputs in your response.';
17
23
 
18
24
  const baseEndpoint = getCodeBaseURL();
19
25
  const EXEC_ENDPOINT = `${baseEndpoint}/exec`;
@@ -198,20 +204,38 @@ function createBashExecutionTool(
198
204
  }
199
205
  if (result.stderr) formattedOutput += `stderr:\n${result.stderr}\n`;
200
206
  if (result.files && result.files.length > 0) {
201
- formattedOutput += 'Generated files:\n';
207
+ /* Split inherited (read-only / unchanged-input passthroughs from
208
+ * codeapi) from genuine generated outputs. The LLM was previously
209
+ * shown skill files under "Generated files:" with the message
210
+ * "File is already downloaded by the user", which led it to
211
+ * (a) believe it had just produced files it merely referenced
212
+ * and (b) sometimes invent paths like /mnt/user-data/uploads/
213
+ * trying to find the "originals". Labeling them as inputs makes
214
+ * the mental model accurate. */
215
+ const inheritedFiles = result.files.filter(
216
+ (f) => f.inherited === true
217
+ );
218
+ const generatedFiles = result.files.filter(
219
+ (f) => f.inherited !== true
220
+ );
202
221
 
203
- const fileCount = result.files.length;
204
- for (let i = 0; i < fileCount; i++) {
205
- const file = result.files[i];
206
- const isImage = imageExtRegex.test(file.name);
207
- formattedOutput += `- /mnt/data/${file.name} | ${isImage ? imageMessage : otherMessage}`;
222
+ formattedOutput += renderFileSection(
223
+ generatedFilesHeader,
224
+ generatedFiles,
225
+ otherMessage
226
+ );
227
+ formattedOutput += renderFileSection(
228
+ inheritedFilesHeader,
229
+ inheritedFiles,
230
+ inheritedFileMessage
231
+ );
208
232
 
209
- if (i < fileCount - 1) {
210
- formattedOutput += fileCount <= 3 ? ', ' : ',\n';
211
- }
233
+ if (generatedFiles.length > 0) {
234
+ formattedOutput += `\n\n${accessMessage}`;
235
+ }
236
+ if (inheritedFiles.length > 0) {
237
+ formattedOutput += `\n\n${inheritedNote}`;
212
238
  }
213
-
214
- formattedOutput += `\n\n${accessMessage}`;
215
239
  return [
216
240
  formattedOutput.trim(),
217
241
  {
@@ -15,10 +15,41 @@ export const getCodeBaseURL = (): string =>
15
15
 
16
16
  const imageMessage = 'Image is already displayed to the user';
17
17
  const otherMessage = 'File is already downloaded by the user';
18
+ const inheritedFileMessage =
19
+ 'Available as an input — already known to the user';
18
20
  const accessMessage =
19
21
  'Note: Files from previous executions are automatically available and can be modified.';
20
22
  const emptyOutputMessage =
21
23
  'stdout: Empty. Ensure you\'re writing output explicitly.\n';
24
+ const inheritedFilesHeader =
25
+ 'Available files (inputs, not generated by this execution):';
26
+ const generatedFilesHeader = 'Generated files:';
27
+ const inheritedNote =
28
+ 'Note: Files in "Available files" are inputs the user (or a skill) already provided to the sandbox. They were not produced by this execution and you should not present them as new outputs in your response.';
29
+
30
+ /**
31
+ * Renders one section of the post-execution file listing. Used by the
32
+ * code/bash tool formatters to keep generated outputs and inherited
33
+ * inputs visually separated. See BashExecutor for full docs.
34
+ */
35
+ export function renderFileSection(
36
+ header: string,
37
+ files: t.FileRefs,
38
+ defaultMessage: string
39
+ ): string {
40
+ if (files.length === 0) return '';
41
+ let out = `${header}\n`;
42
+ for (let i = 0; i < files.length; i++) {
43
+ const file = files[i];
44
+ const isImage = imageExtRegex.test(file.name);
45
+ out += `- /mnt/data/${file.name} | ${isImage ? imageMessage : defaultMessage}`;
46
+ if (i < files.length - 1) {
47
+ out += files.length <= 3 ? ', ' : ',\n';
48
+ }
49
+ }
50
+ out += '\n';
51
+ return out;
52
+ }
22
53
 
23
54
  const SUPPORTED_LANGUAGES = [
24
55
  'py',
@@ -196,20 +227,33 @@ function createCodeExecutionTool(
196
227
  }
197
228
  if (result.stderr) formattedOutput += `stderr:\n${result.stderr}\n`;
198
229
  if (result.files && result.files.length > 0) {
199
- formattedOutput += 'Generated files:\n';
230
+ /* See BashExecutor for the rationale: split inherited (read-only
231
+ * passthrough) inputs from real generated outputs so the LLM
232
+ * doesn't conflate skill files with newly-produced artifacts. */
233
+ const inheritedFiles = result.files.filter(
234
+ (f) => f.inherited === true
235
+ );
236
+ const generatedFiles = result.files.filter(
237
+ (f) => f.inherited !== true
238
+ );
200
239
 
201
- const fileCount = result.files.length;
202
- for (let i = 0; i < fileCount; i++) {
203
- const file = result.files[i];
204
- const isImage = imageExtRegex.test(file.name);
205
- formattedOutput += `- /mnt/data/${file.name} | ${isImage ? imageMessage : otherMessage}`;
240
+ formattedOutput += renderFileSection(
241
+ generatedFilesHeader,
242
+ generatedFiles,
243
+ otherMessage
244
+ );
245
+ formattedOutput += renderFileSection(
246
+ inheritedFilesHeader,
247
+ inheritedFiles,
248
+ inheritedFileMessage
249
+ );
206
250
 
207
- if (i < fileCount - 1) {
208
- formattedOutput += fileCount <= 3 ? ', ' : ',\n';
209
- }
251
+ if (generatedFiles.length > 0) {
252
+ formattedOutput += `\n\n${accessMessage}`;
253
+ }
254
+ if (inheritedFiles.length > 0) {
255
+ formattedOutput += `\n\n${inheritedNote}`;
210
256
  }
211
-
212
- formattedOutput += `\n\n${accessMessage}`;
213
257
  return [
214
258
  formattedOutput.trim(),
215
259
  {
@@ -5,7 +5,7 @@ import { HttpsProxyAgent } from 'https-proxy-agent';
5
5
  import { tool, DynamicStructuredTool } from '@langchain/core/tools';
6
6
  import type { ToolCall } from '@langchain/core/messages/tool';
7
7
  import type * as t from '@/types';
8
- import { imageExtRegex, getCodeBaseURL } from './CodeExecutor';
8
+ import { getCodeBaseURL, renderFileSection } from './CodeExecutor';
9
9
  import { Constants } from '@/common';
10
10
 
11
11
  config();
@@ -14,8 +14,14 @@ config();
14
14
  // Constants
15
15
  // ============================================================================
16
16
 
17
- const imageMessage = 'Image is already displayed to the user';
18
17
  const otherMessage = 'File is already downloaded by the user';
18
+ const inheritedFileMessage =
19
+ 'Available as an input — already known to the user';
20
+ const inheritedFilesHeader =
21
+ 'Available files (inputs, not generated by this execution):';
22
+ const generatedFilesHeader = 'Generated files:';
23
+ const inheritedNote =
24
+ 'Note: Files in "Available files" are inputs the user (or a skill) already provided to the sandbox. They were not produced by this execution and you should not present them as new outputs in your response.';
19
25
  const accessMessage =
20
26
  'Note: Files from previous executions are automatically available and can be modified.';
21
27
  const emptyOutputMessage =
@@ -552,20 +558,29 @@ export function formatCompletedResponse(
552
558
  }
553
559
 
554
560
  if (response.files && response.files.length > 0) {
555
- formatted += 'Generated files:\n';
556
-
557
- const fileCount = response.files.length;
558
- for (let i = 0; i < fileCount; i++) {
559
- const file = response.files[i];
560
- const isImage = imageExtRegex.test(file.name);
561
- formatted += `- /mnt/data/${file.name} | ${isImage ? imageMessage : otherMessage}`;
561
+ /* See BashExecutor for the rationale: split inherited (read-only
562
+ * passthrough) inputs from real generated outputs so the LLM doesn't
563
+ * conflate skill files with newly-produced artifacts. */
564
+ const inheritedFiles = response.files.filter((f) => f.inherited === true);
565
+ const generatedFiles = response.files.filter((f) => f.inherited !== true);
566
+
567
+ formatted += renderFileSection(
568
+ generatedFilesHeader,
569
+ generatedFiles,
570
+ otherMessage
571
+ );
572
+ formatted += renderFileSection(
573
+ inheritedFilesHeader,
574
+ inheritedFiles,
575
+ inheritedFileMessage
576
+ );
562
577
 
563
- if (i < fileCount - 1) {
564
- formatted += fileCount <= 3 ? ', ' : ',\n';
565
- }
578
+ if (generatedFiles.length > 0) {
579
+ formatted += `\n\n${accessMessage}`;
580
+ }
581
+ if (inheritedFiles.length > 0) {
582
+ formatted += `\n\n${inheritedNote}`;
566
583
  }
567
-
568
- formatted += `\n\n${accessMessage}`;
569
584
  }
570
585
 
571
586
  return [
@@ -664,6 +664,66 @@ for member in team:
664
664
  expect(output).toContain('chart.png');
665
665
  expect(output).toContain('Image is already displayed to the user');
666
666
  });
667
+
668
+ it('splits inherited inputs from generated outputs into distinct sections', () => {
669
+ const response: t.ProgrammaticExecutionResponse = {
670
+ status: 'completed',
671
+ stdout: 'analysis done\n',
672
+ stderr: '',
673
+ files: [
674
+ { id: 'g1', name: 'report.pdf' },
675
+ { id: 'i1', name: 'pptx/SKILL.md', inherited: true },
676
+ { id: 'i2', name: 'pptx/scripts/clean.py', inherited: true },
677
+ { id: 'g2', name: 'chart.png' },
678
+ ],
679
+ session_id: 'sess_abc123',
680
+ };
681
+
682
+ const [output, artifact] = formatCompletedResponse(response);
683
+
684
+ /* Generated section lists only outputs the run produced. */
685
+ const generatedIdx = output.indexOf('Generated files:');
686
+ const inheritedIdx = output.indexOf('Available files (inputs');
687
+ expect(generatedIdx).toBeGreaterThan(-1);
688
+ expect(inheritedIdx).toBeGreaterThan(generatedIdx);
689
+
690
+ /* Slice each section so we can assert membership without
691
+ * cross-talk between the two listings. */
692
+ const generatedSection = output.slice(generatedIdx, inheritedIdx);
693
+ const inheritedSection = output.slice(inheritedIdx);
694
+
695
+ expect(generatedSection).toContain('report.pdf');
696
+ expect(generatedSection).toContain('chart.png');
697
+ expect(generatedSection).not.toContain('SKILL.md');
698
+
699
+ expect(inheritedSection).toContain('pptx/SKILL.md');
700
+ expect(inheritedSection).toContain('pptx/scripts/clean.py');
701
+ expect(inheritedSection).toContain('Available as an input');
702
+
703
+ /* The artifact still carries every file so the host can still
704
+ * thread per-file ids through to subsequent calls. */
705
+ expect(artifact.files).toHaveLength(4);
706
+ });
707
+
708
+ it('omits the Generated files header when every entry is inherited', () => {
709
+ const response: t.ProgrammaticExecutionResponse = {
710
+ status: 'completed',
711
+ stdout: 'cat: ok\n',
712
+ stderr: '',
713
+ files: [
714
+ { id: 'i1', name: 'pptx/SKILL.md', inherited: true },
715
+ { id: 'i2', name: 'pptx/editing.md', inherited: true },
716
+ ],
717
+ session_id: 'sess_abc123',
718
+ };
719
+
720
+ const [output] = formatCompletedResponse(response);
721
+
722
+ expect(output).not.toContain('Generated files:');
723
+ expect(output).toContain('Available files (inputs');
724
+ expect(output).toContain('pptx/SKILL.md');
725
+ expect(output).toContain('pptx/editing.md');
726
+ });
667
727
  });
668
728
 
669
729
  describe('createProgrammaticToolCallingTool - Manual Invocation', () => {
@@ -471,10 +471,12 @@ export interface AgentInputs {
471
471
  toolMap?: ToolMap;
472
472
  tools?: GraphTools;
473
473
  provider: Providers;
474
+ /** Stable/cacheable system instructions. */
474
475
  instructions?: string;
475
476
  streamBuffer?: number;
476
477
  maxContextTokens?: number;
477
478
  clientOptions?: ClientOptions;
479
+ /** Dynamic system tail appended after stable instructions without provider cache markers. */
478
480
  additional_instructions?: string;
479
481
  reasoningKey?: 'reasoning_content' | 'reasoning';
480
482
  /** Format content blocks as strings (for legacy compatibility i.e. Ollama/Azure Serverless) */
@@ -500,7 +502,7 @@ export interface AgentInputs {
500
502
  summarizationEnabled?: boolean;
501
503
  summarizationConfig?: SummarizationConfig;
502
504
  /** Cross-run summary from a previous run, forwarded from formatAgentMessages.
503
- * Injected into the system message via AgentContext.buildInstructionsString(). */
505
+ * Injected into the dynamic system tail via AgentContext. */
504
506
  initialSummary?: { text: string; tokenCount: number };
505
507
  contextPruningConfig?: ContextPruningConfig;
506
508
  maxToolResultChars?: number;
package/src/types/run.ts CHANGED
@@ -75,7 +75,9 @@ export interface AgentStateChannels {
75
75
  messages: BaseMessage[];
76
76
  next: string;
77
77
  [key: string]: unknown;
78
+ /** Stable/cacheable system instructions. */
78
79
  instructions?: string;
80
+ /** Dynamic system tail appended after stable instructions. */
79
81
  additional_instructions?: string;
80
82
  }
81
83
 
@@ -113,6 +113,15 @@ export type FileRef = {
113
113
  path?: string;
114
114
  /** Session ID this file belongs to (for multi-session file tracking) */
115
115
  session_id?: string;
116
+ /**
117
+ * `true` when the codeapi sandbox echoed this entry as an unchanged
118
+ * passthrough of an input the caller already owns (skill files,
119
+ * downloaded inputs whose hash matched the baseline, inherited
120
+ * `.dirkeep` markers). The tool-result formatter renders these as
121
+ * "Available files" rather than "Generated files" so the LLM doesn't
122
+ * conflate infrastructure inputs with newly-produced outputs.
123
+ */
124
+ inherited?: true;
116
125
  };
117
126
 
118
127
  export type FileRefs = FileRef[];