@librechat/agents 3.0.75 → 3.0.77

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/dist/cjs/agents/AgentContext.cjs +1 -4
  2. package/dist/cjs/agents/AgentContext.cjs.map +1 -1
  3. package/dist/cjs/graphs/Graph.cjs +36 -8
  4. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  5. package/dist/cjs/tools/CodeExecutor.cjs +22 -21
  6. package/dist/cjs/tools/CodeExecutor.cjs.map +1 -1
  7. package/dist/cjs/tools/ProgrammaticToolCalling.cjs +14 -11
  8. package/dist/cjs/tools/ProgrammaticToolCalling.cjs.map +1 -1
  9. package/dist/cjs/tools/ToolNode.cjs +28 -1
  10. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  11. package/dist/esm/agents/AgentContext.mjs +1 -4
  12. package/dist/esm/agents/AgentContext.mjs.map +1 -1
  13. package/dist/esm/graphs/Graph.mjs +37 -9
  14. package/dist/esm/graphs/Graph.mjs.map +1 -1
  15. package/dist/esm/tools/CodeExecutor.mjs +22 -21
  16. package/dist/esm/tools/CodeExecutor.mjs.map +1 -1
  17. package/dist/esm/tools/ProgrammaticToolCalling.mjs +14 -11
  18. package/dist/esm/tools/ProgrammaticToolCalling.mjs.map +1 -1
  19. package/dist/esm/tools/ToolNode.mjs +28 -1
  20. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  21. package/dist/types/graphs/Graph.d.ts +6 -0
  22. package/dist/types/tools/CodeExecutor.d.ts +0 -3
  23. package/dist/types/tools/ProgrammaticToolCalling.d.ts +0 -3
  24. package/dist/types/tools/ToolNode.d.ts +3 -1
  25. package/dist/types/types/llm.d.ts +3 -1
  26. package/dist/types/types/tools.d.ts +32 -0
  27. package/package.json +3 -2
  28. package/src/agents/AgentContext.ts +1 -8
  29. package/src/graphs/Graph.ts +46 -13
  30. package/src/scripts/caching.ts +27 -19
  31. package/src/scripts/code_exec_files.ts +58 -15
  32. package/src/scripts/code_exec_session.ts +282 -0
  33. package/src/scripts/test_code_api.ts +361 -0
  34. package/src/tools/CodeExecutor.ts +26 -23
  35. package/src/tools/ProgrammaticToolCalling.ts +18 -14
  36. package/src/tools/ToolNode.ts +33 -0
  37. package/src/tools/__tests__/ProgrammaticToolCalling.test.ts +0 -2
  38. package/src/types/llm.ts +3 -1
  39. package/src/types/tools.ts +40 -0
@@ -6,18 +6,15 @@ export declare const getCodeBaseURL: () => string;
6
6
  declare const CodeExecutionToolSchema: z.ZodObject<{
7
7
  lang: z.ZodEnum<["py", "js", "ts", "c", "cpp", "java", "php", "rs", "go", "d", "f90", "r"]>;
8
8
  code: z.ZodString;
9
- session_id: z.ZodOptional<z.ZodString>;
10
9
  args: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
11
10
  }, "strip", z.ZodTypeAny, {
12
11
  code: string;
13
12
  lang: "r" | "d" | "py" | "js" | "ts" | "c" | "cpp" | "java" | "php" | "rs" | "go" | "f90";
14
13
  args?: string[] | undefined;
15
- session_id?: string | undefined;
16
14
  }, {
17
15
  code: string;
18
16
  lang: "r" | "d" | "py" | "js" | "ts" | "c" | "cpp" | "java" | "php" | "rs" | "go" | "f90";
19
17
  args?: string[] | undefined;
20
- session_id?: string | undefined;
21
18
  }>;
22
19
  declare function createCodeExecutionTool(params?: t.CodeExecutionToolParams): DynamicStructuredTool<typeof CodeExecutionToolSchema>;
23
20
  export { createCodeExecutionTool };
@@ -3,16 +3,13 @@ import { DynamicStructuredTool } from '@langchain/core/tools';
3
3
  import type * as t from '@/types';
4
4
  declare const ProgrammaticToolCallingSchema: z.ZodObject<{
5
5
  code: z.ZodString;
6
- session_id: z.ZodOptional<z.ZodString>;
7
6
  timeout: z.ZodDefault<z.ZodOptional<z.ZodNumber>>;
8
7
  }, "strip", z.ZodTypeAny, {
9
8
  code: string;
10
9
  timeout: number;
11
- session_id?: string | undefined;
12
10
  }, {
13
11
  code: string;
14
12
  timeout?: number | undefined;
15
- session_id?: string | undefined;
16
13
  }>;
17
14
  /**
18
15
  * Normalizes a tool name to Python identifier format.
@@ -16,7 +16,9 @@ export declare class ToolNode<T = any> extends RunnableCallable<T, T> {
16
16
  private toolRegistry?;
17
17
  /** Cached programmatic tools (computed once on first PTC call) */
18
18
  private programmaticCache?;
19
- constructor({ tools, toolMap, name, tags, errorHandler, toolCallStepIds, handleToolErrors, loadRuntimeTools, toolRegistry, }: t.ToolNodeConstructorParams);
19
+ /** Reference to Graph's sessions map for automatic session injection */
20
+ private sessions?;
21
+ constructor({ tools, toolMap, name, tags, errorHandler, toolCallStepIds, handleToolErrors, loadRuntimeTools, toolRegistry, sessions, }: t.ToolNodeConstructorParams);
20
22
  /**
21
23
  * Returns cached programmatic tools, computing once on first access.
22
24
  * Single iteration builds both toolMap and toolDefs simultaneously.
@@ -37,7 +37,9 @@ export type AnthropicReasoning = {
37
37
  thinkingBudget?: number;
38
38
  };
39
39
  export type OpenAIClientOptions = ChatOpenAIFields;
40
- export type AnthropicClientOptions = AnthropicInput;
40
+ export type AnthropicClientOptions = AnthropicInput & {
41
+ promptCache?: boolean;
42
+ };
41
43
  export type MistralAIClientOptions = ChatMistralAIInput;
42
44
  export type VertexAIClientOptions = ChatVertexAIInput & {
43
45
  includeThoughts?: boolean;
@@ -29,6 +29,8 @@ export type ToolNodeOptions = {
29
29
  errorHandler?: (data: ToolErrorData, metadata?: Record<string, unknown>) => Promise<void>;
30
30
  /** Tool registry for lazy computation of programmatic tools and tool search */
31
31
  toolRegistry?: LCToolRegistry;
32
+ /** Reference to Graph's sessions map for automatic session injection */
33
+ sessions?: ToolSessionMap;
32
34
  };
33
35
  export type ToolNodeConstructorParams = ToolRefs & ToolNodeOptions;
34
36
  export type ToolEndEvent = {
@@ -205,3 +207,33 @@ export type ProgrammaticToolCallingParams = {
205
207
  /** Environment variable key for API key */
206
208
  [key: string]: unknown;
207
209
  };
210
+ /**
211
+ * Tracks code execution session state for automatic file persistence.
212
+ * Stored in Graph.sessions and injected into subsequent tool invocations.
213
+ */
214
+ export type CodeSessionContext = {
215
+ /** Session ID from the code execution environment */
216
+ session_id: string;
217
+ /** Files generated in this session (for context/tracking) */
218
+ files: FileRefs;
219
+ /** Timestamp of last update */
220
+ lastUpdated: number;
221
+ };
222
+ /**
223
+ * Artifact structure returned by code execution tools (CodeExecutor, PTC).
224
+ * Used to extract session context after tool completion.
225
+ */
226
+ export type CodeExecutionArtifact = {
227
+ session_id?: string;
228
+ files?: FileRefs;
229
+ };
230
+ /**
231
+ * Generic session context union type for different tool types.
232
+ * Extend this as new tool session types are added.
233
+ */
234
+ export type ToolSessionContext = CodeSessionContext;
235
+ /**
236
+ * Map of tool names to their session contexts.
237
+ * Keys are tool constants (e.g., Constants.EXECUTE_CODE, Constants.PROGRAMMATIC_TOOL_CALLING).
238
+ */
239
+ export type ToolSessionMap = Map<string, ToolSessionContext>;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@librechat/agents",
3
- "version": "3.0.75",
3
+ "version": "3.0.77",
4
4
  "main": "./dist/cjs/main.cjs",
5
5
  "module": "./dist/esm/main.mjs",
6
6
  "types": "./dist/types/index.d.ts",
@@ -50,7 +50,8 @@
50
50
  "code_exec": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/code_exec.ts --provider 'openAI' --name 'Jo' --location 'New York, NY'",
51
51
  "image": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/image.ts --provider 'google' --name 'Jo' --location 'New York, NY'",
52
52
  "code_exec_files": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/code_exec_files.ts --provider 'openAI' --name 'Jo' --location 'New York, NY'",
53
- "code_exec_simple": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/code_exec_simple.ts --provider 'google' --name 'Jo' --location 'New York, NY'",
53
+ "code_exec_session": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/code_exec_session.ts --provider 'openAI' --name 'Jo' --location 'New York, NY'",
54
+ "code_exec_simple": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/code_exec_simple.ts --provider 'openAI' --name 'Jo' --location 'New York, NY'",
54
55
  "simple": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/simple.ts --provider 'openrouter' --name 'Jo' --location 'New York, NY'",
55
56
  "caching": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/caching.ts --name 'Jo' --location 'New York, NY'",
56
57
  "thinking": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/thinking.ts --name 'Jo' --location 'New York, NY'",
@@ -398,14 +398,7 @@ export class AgentContext {
398
398
  const anthropicOptions = this.clientOptions as
399
399
  | t.AnthropicClientOptions
400
400
  | undefined;
401
- const defaultHeaders = anthropicOptions?.clientOptions?.defaultHeaders as
402
- | Record<string, string>
403
- | undefined;
404
- const anthropicBeta = defaultHeaders?.['anthropic-beta'];
405
- if (
406
- typeof anthropicBeta === 'string' &&
407
- anthropicBeta.includes('prompt-caching')
408
- ) {
401
+ if (anthropicOptions?.promptCache === true) {
409
402
  finalInstructions = {
410
403
  content: [
411
404
  {
@@ -35,6 +35,7 @@ import {
35
35
  GraphEvents,
36
36
  Providers,
37
37
  StepTypes,
38
+ Constants,
38
39
  } from '@/common';
39
40
  import {
40
41
  formatAnthropicArtifactContent,
@@ -135,6 +136,12 @@ export abstract class Graph<
135
136
  /** Set of invoked tool call IDs from non-message run steps completed mid-run, if any */
136
137
  invokedToolIds?: Set<string>;
137
138
  handlerRegistry: HandlerRegistry | undefined;
139
+ /**
140
+ * Tool session contexts for automatic state persistence across tool invocations.
141
+ * Keyed by tool name (e.g., Constants.EXECUTE_CODE).
142
+ * Currently supports code execution session tracking (session_id, files).
143
+ */
144
+ sessions: t.ToolSessionMap = new Map();
138
145
  }
139
146
 
140
147
  export class StandardGraph extends Graph<t.BaseGraphState, t.GraphNode> {
@@ -416,11 +423,7 @@ export class StandardGraph extends Graph<t.BaseGraphState, t.GraphNode> {
416
423
  finalInstructions != null &&
417
424
  finalInstructions &&
418
425
  provider === Providers.ANTHROPIC &&
419
- ((
420
- (clientOptions as t.AnthropicClientOptions).clientOptions
421
- ?.defaultHeaders as Record<string, string> | undefined
422
- )?.['anthropic-beta']?.includes('prompt-caching') ??
423
- false)
426
+ (clientOptions as t.AnthropicClientOptions).promptCache === true
424
427
  ) {
425
428
  finalInstructions = {
426
429
  content: [
@@ -457,6 +460,7 @@ export class StandardGraph extends Graph<t.BaseGraphState, t.GraphNode> {
457
460
  errorHandler: (data, metadata) =>
458
461
  StandardGraph.handleToolCallErrorStatic(this, data, metadata),
459
462
  toolRegistry: agentContext?.toolRegistry,
463
+ sessions: this.sessions,
460
464
  });
461
465
  }
462
466
 
@@ -735,14 +739,7 @@ export class StandardGraph extends Graph<t.BaseGraphState, t.GraphNode> {
735
739
  const anthropicOptions = agentContext.clientOptions as
736
740
  | t.AnthropicClientOptions
737
741
  | undefined;
738
- const defaultHeaders = anthropicOptions?.clientOptions
739
- ?.defaultHeaders as Record<string, string> | undefined;
740
- const anthropicBeta = defaultHeaders?.['anthropic-beta'];
741
-
742
- if (
743
- typeof anthropicBeta === 'string' &&
744
- anthropicBeta.includes('prompt-caching')
745
- ) {
742
+ if (anthropicOptions?.promptCache === true) {
746
743
  finalMessages = addCacheControl<BaseMessage>(finalMessages);
747
744
  }
748
745
  } else if (agentContext.provider === Providers.BEDROCK) {
@@ -1043,6 +1040,42 @@ export class StandardGraph extends Graph<t.BaseGraphState, t.GraphNode> {
1043
1040
  throw new Error(`No run step found for stepId ${stepId}`);
1044
1041
  }
1045
1042
 
1043
+ /**
1044
+ * Extract and store code execution session context from artifacts.
1045
+ * Only update session_id when files are generated - this ensures we don't
1046
+ * lose the original session that contains the files.
1047
+ */
1048
+ const toolName = output.name;
1049
+ if (
1050
+ toolName === Constants.EXECUTE_CODE ||
1051
+ toolName === Constants.PROGRAMMATIC_TOOL_CALLING
1052
+ ) {
1053
+ const artifact = output.artifact as t.CodeExecutionArtifact | undefined;
1054
+ const newFiles = artifact?.files ?? [];
1055
+ const hasNewFiles = newFiles.length > 0;
1056
+
1057
+ if (
1058
+ hasNewFiles &&
1059
+ artifact?.session_id != null &&
1060
+ artifact.session_id !== ''
1061
+ ) {
1062
+ /**
1063
+ * Files were generated - update session with the new session_id.
1064
+ * The new session_id is the one that contains these files.
1065
+ */
1066
+ const existingSession = this.sessions.get(Constants.EXECUTE_CODE) as
1067
+ | t.CodeSessionContext
1068
+ | undefined;
1069
+ const existingFiles = existingSession?.files ?? [];
1070
+
1071
+ this.sessions.set(Constants.EXECUTE_CODE, {
1072
+ session_id: artifact.session_id,
1073
+ files: [...existingFiles, ...newFiles],
1074
+ lastUpdated: Date.now(),
1075
+ });
1076
+ }
1077
+ }
1078
+
1046
1079
  const dispatchedOutput =
1047
1080
  typeof output.content === 'string'
1048
1081
  ? output.content
@@ -1,7 +1,11 @@
1
1
  // src/scripts/test-prompt-caching.ts
2
2
  import { config } from 'dotenv';
3
3
  config();
4
- import { HumanMessage, SystemMessage, BaseMessage } from '@langchain/core/messages';
4
+ import {
5
+ HumanMessage,
6
+ SystemMessage,
7
+ BaseMessage,
8
+ } from '@langchain/core/messages';
5
9
  import type { UsageMetadata } from '@langchain/core/messages';
6
10
  import type * as t from '@/types';
7
11
  import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
@@ -11,7 +15,7 @@ import { getLLMConfig } from '@/utils/llmConfig';
11
15
  import { getArgs } from '@/scripts/args';
12
16
  import { Run } from '@/run';
13
17
 
14
- const CACHED_TEXT = `Ahoy there, me hearties! This be a grand tale o' the mighty prompt cachin' treasure map, a secret technique used by the wise Anthropic seafarers to stash away vast hordes o' text booty on their mystical servers! Arrr, 'tis a pirate's dream indeed - no need to haul the same heavy chest o' gold doubloons across the vast digital ocean with every message! When ye mark yer precious cargo with the secret flag 'cache_control: { type: \"ephemeral\" }', the text be safely buried on their distant shores, ready for plunderin' again without the weight slowin' down yer ship! The wise pirates at Anthropic introduced this magical scroll in the summer o' 2024, markin' it with the mysterious insignia 'anthropic-beta: prompt-caching-2024-07-31' that must be flown high on yer vessel's headers. This crafty script be testin' the waters of this new treasure map system, sendin' out three separate voyages across the AI seas: first to bury the treasure, second to dig it up again without payin' the full toll, and third to see if the map still leads to gold after the sands o' time have shifted (about thirty seconds o' waitin', which be an eternity for an impatient buccaneer!). The great advantage for a scurvy pirate captain is clear as Caribbean waters - ye can load up yer vessel with all manner o' reference scrolls, ancient tomes, and navigational charts without weighin' down each and every message ye send to port! This be savin' ye countless tokens, which as any seafarin' AI wrangler knows, be as precious as Spanish gold. The cached text could contain the full history o' the Seven Seas, detailed maps o' every port from Tortuga to Singapore, or the complete collection o' pirate shanties ever sung by drunken sailors under the light o' the silvery moon. When properly implemented, this mighty cachin' system keeps all that knowledge ready at hand without the Claude kraken needin' to process it anew with each passin' breeze. 
By Blackbeard's beard, 'tis a revolution in how we manage our conversational ships! The script be employin' the finest LangChain riggin' and custom-carved event handlers to properly track the treasure as it flows back and forth. If ye be successful in yer implementation, ye should witness the miracle o' significantly reduced token counts in yer usage metrics, faster responses from the AI oracle, and the ability to maintain vast knowledge without payin' the full price each time! So hoist the Jolly Roger, load yer pistols with API keys, and set sail on the grand adventure o' prompt cachin'! May the winds o' efficient token usage fill yer sails, and may ye never have to pay full price for passin' the same mammoth context to Claude again! Remember, a clever pirate only pays for their tokens once, then lets the cache do the heavy liftin'! YARRR! This file also contains the secrets of the legendary Pirate Code, passed down through generations of seafarers since the Golden Age of Piracy. It includes detailed accounts of famous pirate captains like Blackbeard, Calico Jack, Anne Bonny, and Mary Read, along with their most profitable plundering routes and techniques for capturing merchant vessels. The text chronicles the exact locations of at least seventeen buried treasures across the Caribbean, complete with riddles and map coordinates that only a true pirate could decipher. There are sections dedicated to ship maintenance, including how to properly seal a leaking hull during battle and the best methods for keeping your cannons in prime firing condition even in humid tropical conditions. The document contains an extensive glossary of pirate terminology, from 'avast' to 'Yellow Jack,' ensuring any landlubber can speak like a seasoned salt with enough study. There's a comprehensive guide to navigating by the stars without modern instruments, perfect for when your GPS fails in the middle of a daring escape. 
The cache also includes detailed recipes for grog, hardtack that won't break your teeth, and how to keep citrus fruits fresh to prevent scurvy during long voyages. The legendary Black Spot ritual is described in terrifying detail, along with other pirate superstitions and their origins in maritime folklore. A section on pirate governance explains the democratic nature of most pirate ships, how booty was divided fairly, and how captains were elected and deposed when necessary. The file even contains sheet music for dozens of sea shanties, with notes on when each should be sung for maximum crew morale during different sailing conditions. All of this knowledge is wrapped in colorful pirate dialect that would make any AI assistant respond with appropriate 'arghs' and 'avasts' when properly prompted!`
18
+ const CACHED_TEXT = `Ahoy there, me hearties! This be a grand tale o' the mighty prompt cachin' treasure map, a secret technique used by the wise Anthropic seafarers to stash away vast hordes o' text booty on their mystical servers! Arrr, 'tis a pirate's dream indeed - no need to haul the same heavy chest o' gold doubloons across the vast digital ocean with every message! When ye mark yer precious cargo with the secret flag 'cache_control: { type: \"ephemeral\" }', the text be safely buried on their distant shores, ready for plunderin' again without the weight slowin' down yer ship! The wise pirates at Anthropic introduced this magical scroll in the summer o' 2024, markin' it with the mysterious insignia 'anthropic-beta: prompt-caching-2024-07-31' that must be flown high on yer vessel's headers. This crafty script be testin' the waters of this new treasure map system, sendin' out three separate voyages across the AI seas: first to bury the treasure, second to dig it up again without payin' the full toll, and third to see if the map still leads to gold after the sands o' time have shifted (about thirty seconds o' waitin', which be an eternity for an impatient buccaneer!). The great advantage for a scurvy pirate captain is clear as Caribbean waters - ye can load up yer vessel with all manner o' reference scrolls, ancient tomes, and navigational charts without weighin' down each and every message ye send to port! This be savin' ye countless tokens, which as any seafarin' AI wrangler knows, be as precious as Spanish gold. The cached text could contain the full history o' the Seven Seas, detailed maps o' every port from Tortuga to Singapore, or the complete collection o' pirate shanties ever sung by drunken sailors under the light o' the silvery moon. When properly implemented, this mighty cachin' system keeps all that knowledge ready at hand without the Claude kraken needin' to process it anew with each passin' breeze. 
By Blackbeard's beard, 'tis a revolution in how we manage our conversational ships! The script be employin' the finest LangChain riggin' and custom-carved event handlers to properly track the treasure as it flows back and forth. If ye be successful in yer implementation, ye should witness the miracle o' significantly reduced token counts in yer usage metrics, faster responses from the AI oracle, and the ability to maintain vast knowledge without payin' the full price each time! So hoist the Jolly Roger, load yer pistols with API keys, and set sail on the grand adventure o' prompt cachin'! May the winds o' efficient token usage fill yer sails, and may ye never have to pay full price for passin' the same mammoth context to Claude again! Remember, a clever pirate only pays for their tokens once, then lets the cache do the heavy liftin'! YARRR! This file also contains the secrets of the legendary Pirate Code, passed down through generations of seafarers since the Golden Age of Piracy. It includes detailed accounts of famous pirate captains like Blackbeard, Calico Jack, Anne Bonny, and Mary Read, along with their most profitable plundering routes and techniques for capturing merchant vessels. The text chronicles the exact locations of at least seventeen buried treasures across the Caribbean, complete with riddles and map coordinates that only a true pirate could decipher. There are sections dedicated to ship maintenance, including how to properly seal a leaking hull during battle and the best methods for keeping your cannons in prime firing condition even in humid tropical conditions. The document contains an extensive glossary of pirate terminology, from 'avast' to 'Yellow Jack,' ensuring any landlubber can speak like a seasoned salt with enough study. There's a comprehensive guide to navigating by the stars without modern instruments, perfect for when your GPS fails in the middle of a daring escape. 
The cache also includes detailed recipes for grog, hardtack that won't break your teeth, and how to keep citrus fruits fresh to prevent scurvy during long voyages. The legendary Black Spot ritual is described in terrifying detail, along with other pirate superstitions and their origins in maritime folklore. A section on pirate governance explains the democratic nature of most pirate ships, how booty was divided fairly, and how captains were elected and deposed when necessary. The file even contains sheet music for dozens of sea shanties, with notes on when each should be sung for maximum crew morale during different sailing conditions. All of this knowledge is wrapped in colorful pirate dialect that would make any AI assistant respond with appropriate 'arghs' and 'avasts' when properly prompted!`;
15
19
 
16
20
  const conversationHistory: BaseMessage[] = [];
17
21
  let _contentParts: t.MessageContentComplex[] = [];
@@ -23,7 +27,7 @@ async function testPromptCaching(): Promise<void> {
23
27
  ${CACHED_TEXT}`;
24
28
  const { contentParts, aggregateContent } = createContentAggregator();
25
29
  _contentParts = contentParts as t.MessageContentComplex[];
26
-
30
+
27
31
  // Set up event handlers
28
32
  const customHandlers = {
29
33
  [GraphEvents.TOOL_END]: new ToolEndHandler(),
@@ -33,29 +37,33 @@ ${CACHED_TEXT}`;
33
37
  [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
34
38
  // Additional handlers for tracking usage metrics
35
39
  [GraphEvents.ON_RUN_STEP_COMPLETED]: {
36
- handle: (event: GraphEvents.ON_RUN_STEP_COMPLETED, data: t.StreamEventData): void => {
40
+ handle: (
41
+ event: GraphEvents.ON_RUN_STEP_COMPLETED,
42
+ data: t.StreamEventData
43
+ ): void => {
37
44
  console.log('====== ON_RUN_STEP_COMPLETED ======');
38
- aggregateContent({ event, data: data as unknown as { result: t.ToolEndEvent } });
39
- }
45
+ aggregateContent({
46
+ event,
47
+ data: data as unknown as { result: t.ToolEndEvent },
48
+ });
49
+ },
40
50
  },
41
51
  };
42
52
 
43
- const baseLlmConfig: t.LLMConfig & t.AnthropicClientOptions = getLLMConfig(Providers.ANTHROPIC);
44
-
53
+ const baseLlmConfig: t.LLMConfig & t.AnthropicClientOptions = getLLMConfig(
54
+ Providers.ANTHROPIC
55
+ );
56
+
45
57
  if (baseLlmConfig.provider !== 'anthropic') {
46
- console.error('This test requires Anthropic as the LLM provider. Please specify provider=anthropic');
58
+ console.error(
59
+ 'This test requires Anthropic as the LLM provider. Please specify provider=anthropic'
60
+ );
47
61
  process.exit(1);
48
62
  }
49
-
63
+
50
64
  const llmConfig = {
51
65
  ...baseLlmConfig,
52
- clientOptions: {
53
- ...baseLlmConfig.clientOptions,
54
- defaultHeaders: {
55
- ...baseLlmConfig.clientOptions?.defaultHeaders,
56
- "anthropic-beta": "prompt-caching-2024-07-31",
57
- }
58
- }
66
+ promptCache: true,
59
67
  };
60
68
 
61
69
  const run = await Run.create<t.IState>({
@@ -94,7 +102,7 @@ ${CACHED_TEXT}`;
94
102
  console.log('\n\nTest 2: Second request (should use cache)');
95
103
  const userMessage2 = `Summarize the key concepts from the context information.`;
96
104
  conversationHistory.push(new HumanMessage(userMessage2));
97
-
105
+
98
106
  console.log('Running second query to use cache...');
99
107
  const secondInputs = { messages: [...conversationHistory] };
100
108
  await run.processStream(secondInputs, config);
@@ -121,4 +129,4 @@ testPromptCaching().catch((err) => {
121
129
  console.log('Content parts:');
122
130
  console.dir(_contentParts, { depth: null });
123
131
  process.exit(1);
124
- });
132
+ });
@@ -1,4 +1,11 @@
1
- // src/scripts/cli.ts
1
+ // src/scripts/code_exec_files.ts
2
+ /**
3
+ * Tests automatic session tracking for code execution file persistence.
4
+ * Files created in one execution are automatically available in subsequent executions
5
+ * without the LLM needing to track or pass session_id.
6
+ *
7
+ * Run with: npm run code_exec_files
8
+ */
2
9
  import { config } from 'dotenv';
3
10
  config();
4
11
  import { HumanMessage, BaseMessage } from '@langchain/core/messages';
@@ -12,12 +19,39 @@ import {
12
19
  } from '@/events';
13
20
  import { getLLMConfig } from '@/utils/llmConfig';
14
21
  import { getArgs } from '@/scripts/args';
15
- import { GraphEvents } from '@/common';
22
+ import { Constants, GraphEvents } from '@/common';
16
23
  import { Run } from '@/run';
17
24
  import { createCodeExecutionTool } from '@/tools/CodeExecutor';
18
25
 
19
26
  const conversationHistory: BaseMessage[] = [];
20
27
 
28
+ /**
29
+ * Prints session context from the graph for debugging
30
+ */
31
+ function printSessionContext(run: Run<t.IState>): void {
32
+ const graph = run.Graph;
33
+ if (!graph) {
34
+ console.log('[Session] No graph available');
35
+ return;
36
+ }
37
+
38
+ const session = graph.sessions.get(Constants.EXECUTE_CODE) as
39
+ | t.CodeSessionContext
40
+ | undefined;
41
+
42
+ if (!session) {
43
+ console.log('[Session] No session context stored yet');
44
+ return;
45
+ }
46
+
47
+ console.log('[Session] Current session context:');
48
+ console.log(` - session_id: ${session.session_id}`);
49
+ console.log(` - files: ${JSON.stringify(session.files, null, 2)}`);
50
+ console.log(
51
+ ` - lastUpdated: ${new Date(session.lastUpdated).toISOString()}`
52
+ );
53
+ }
54
+
21
55
  async function testCodeExecution(): Promise<void> {
22
56
  const { userName, location, provider, currentDate } = await getArgs();
23
57
  const { contentParts, aggregateContent } = createContentAggregator();
@@ -72,7 +106,7 @@ async function testCodeExecution(): Promise<void> {
72
106
  handle: (
73
107
  _event: string,
74
108
  data: t.StreamEventData,
75
- metadata?: Record<string, unknown>
109
+ _metadata?: Record<string, unknown>
76
110
  ): void => {
77
111
  console.log('====== TOOL_START ======');
78
112
  console.dir(data, { depth: null });
@@ -96,7 +130,7 @@ async function testCodeExecution(): Promise<void> {
96
130
  customHandlers,
97
131
  });
98
132
 
99
- const config: Partial<RunnableConfig> & {
133
+ const streamConfig: Partial<RunnableConfig> & {
100
134
  version: 'v1' | 'v2';
101
135
  run_id?: string;
102
136
  streamMode: string;
@@ -107,10 +141,12 @@ async function testCodeExecution(): Promise<void> {
107
141
  },
108
142
  streamMode: 'values',
109
143
  version: 'v2' as const,
110
- // recursionLimit: 3,
111
144
  };
112
145
 
113
- console.log('Test 1: Create Project Plan');
146
+ console.log('\n========== Test 1: Create Project Plan ==========\n');
147
+ console.log(
148
+ 'Creating initial file - this establishes the session context.\n'
149
+ );
114
150
 
115
151
  const userMessage1 = `
116
152
  Hi ${userName} here. We are testing your file capabilities.
@@ -125,36 +161,43 @@ async function testCodeExecution(): Promise<void> {
125
161
  let inputs = {
126
162
  messages: conversationHistory,
127
163
  };
128
- const finalContentParts1 = await run.processStream(inputs, config);
164
+ await run.processStream(inputs, streamConfig);
129
165
  const finalMessages1 = run.getRunMessages();
130
166
  if (finalMessages1) {
131
167
  conversationHistory.push(...finalMessages1);
132
168
  }
133
- console.log('\n\n====================\n\n');
169
+
170
+ console.log('\n\n========== Session Context After Test 1 ==========\n');
171
+ printSessionContext(run);
134
172
  console.dir(contentParts, { depth: null });
135
173
 
136
- console.log('Test 2: Edit Project Plan');
174
+ console.log('\n========== Test 2: Edit Project Plan ==========\n');
175
+ console.log(
176
+ 'Editing the file from Test 1 - session_id is automatically injected.\n'
177
+ );
137
178
 
138
179
  const userMessage2 = `
139
180
  Thanks for creating the project plan. Now I'd like you to edit the same plan to:
140
181
 
141
- 1. Add a new section called "Technology Stack" that contains: "The technology stack for this project includes the following technologies" and nothing more.
142
-
182
+ 1. Read the existing project_plan.txt file
183
+ 2. Add a new section called "Technology Stack" that contains: "The technology stack for this project includes the following technologies" and nothing more.
184
+ 3. Save this as a new file called "project_plan_v2.txt" (remember files are read-only)
185
+ 4. Print the contents of both files to verify
143
186
  `;
144
187
 
145
- // Make sure to pass the file ID of the previous file you created and explicitly duplicate or rename the file in your code so we can then access it. Also print the contents of the new file to ensure we did what we wanted.`;
146
-
147
188
  conversationHistory.push(new HumanMessage(userMessage2));
148
189
 
149
190
  inputs = {
150
191
  messages: conversationHistory,
151
192
  };
152
- const finalContentParts2 = await run.processStream(inputs, config);
193
+ await run.processStream(inputs, streamConfig);
153
194
  const finalMessages2 = run.getRunMessages();
154
195
  if (finalMessages2) {
155
196
  conversationHistory.push(...finalMessages2);
156
197
  }
157
- console.log('\n\n====================\n\n');
198
+
199
+ console.log('\n\n========== Session Context After Test 2 ==========\n');
200
+ printSessionContext(run);
158
201
  console.dir(contentParts, { depth: null });
159
202
 
160
203
  const { handleLLMEnd, collected } = createMetadataAggregator();