@librechat/agents 3.0.776 → 3.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/graphs/Graph.cjs +19 -5
- package/dist/cjs/graphs/Graph.cjs.map +1 -1
- package/dist/cjs/llm/bedrock/index.cjs +98 -25
- package/dist/cjs/llm/bedrock/index.cjs.map +1 -1
- package/dist/cjs/messages/core.cjs +1 -1
- package/dist/cjs/messages/core.cjs.map +1 -1
- package/dist/cjs/stream.cjs +4 -2
- package/dist/cjs/stream.cjs.map +1 -1
- package/dist/cjs/tools/ToolNode.cjs +9 -5
- package/dist/cjs/tools/ToolNode.cjs.map +1 -1
- package/dist/esm/graphs/Graph.mjs +19 -5
- package/dist/esm/graphs/Graph.mjs.map +1 -1
- package/dist/esm/llm/bedrock/index.mjs +97 -24
- package/dist/esm/llm/bedrock/index.mjs.map +1 -1
- package/dist/esm/messages/core.mjs +1 -1
- package/dist/esm/messages/core.mjs.map +1 -1
- package/dist/esm/stream.mjs +4 -2
- package/dist/esm/stream.mjs.map +1 -1
- package/dist/esm/tools/ToolNode.mjs +9 -5
- package/dist/esm/tools/ToolNode.mjs.map +1 -1
- package/dist/types/llm/bedrock/index.d.ts +86 -7
- package/dist/types/llm/bedrock/types.d.ts +27 -0
- package/dist/types/llm/bedrock/utils/index.d.ts +5 -0
- package/dist/types/llm/bedrock/utils/message_inputs.d.ts +31 -0
- package/dist/types/llm/bedrock/utils/message_outputs.d.ts +33 -0
- package/dist/types/types/tools.d.ts +2 -0
- package/package.json +5 -2
- package/src/graphs/Graph.ts +23 -5
- package/src/llm/bedrock/index.ts +180 -43
- package/src/llm/bedrock/llm.spec.ts +616 -0
- package/src/llm/bedrock/types.ts +51 -0
- package/src/llm/bedrock/utils/index.ts +18 -0
- package/src/llm/bedrock/utils/message_inputs.ts +563 -0
- package/src/llm/bedrock/utils/message_outputs.ts +310 -0
- package/src/messages/core.ts +1 -1
- package/src/scripts/code_exec_multi_session.ts +241 -0
- package/src/scripts/thinking-bedrock.ts +159 -0
- package/src/scripts/thinking.ts +39 -18
- package/src/scripts/tools.ts +7 -3
- package/src/stream.ts +4 -2
- package/src/tools/ToolNode.ts +9 -5
- package/src/types/tools.ts +2 -0
@@ -0,0 +1,310 @@
/**
 * Utility functions for converting Bedrock Converse responses to LangChain messages.
 * Ported from @langchain/aws common.js
 */
import { AIMessage, AIMessageChunk } from '@langchain/core/messages';
import { ChatGenerationChunk } from '@langchain/core/outputs';
import type {
  BedrockMessage,
  ConverseResponse,
  ContentBlockDeltaEvent,
  ConverseStreamMetadataEvent,
  ContentBlockStartEvent,
  ReasoningContentBlock,
  ReasoningContentBlockDelta,
  MessageContentReasoningBlock,
  MessageContentReasoningBlockReasoningTextPartial,
  MessageContentReasoningBlockRedacted,
} from '../types';

/**
 * Convert a Bedrock reasoning block delta to a LangChain partial reasoning block.
 */
export function bedrockReasoningDeltaToLangchainPartialReasoningBlock(
  reasoningContent: ReasoningContentBlockDelta
):
  | MessageContentReasoningBlockReasoningTextPartial
  | MessageContentReasoningBlockRedacted {
  const { text, redactedContent, signature } =
    reasoningContent as ReasoningContentBlockDelta & {
      text?: string;
      redactedContent?: Uint8Array;
      signature?: string;
    };

  if (typeof text === 'string') {
    return {
      type: 'reasoning_content',
      reasoningText: { text },
    };
  }
  if (signature != null) {
    return {
      type: 'reasoning_content',
      reasoningText: { signature },
    };
  }
  if (redactedContent != null) {
    return {
      type: 'reasoning_content',
      redactedContent: Buffer.from(redactedContent).toString('base64'),
    };
  }
  throw new Error('Invalid reasoning content');
}

/**
 * Convert a Bedrock reasoning block to a LangChain reasoning block.
 */
export function bedrockReasoningBlockToLangchainReasoningBlock(
  reasoningContent: ReasoningContentBlock
): MessageContentReasoningBlock {
  const { reasoningText, redactedContent } =
    reasoningContent as ReasoningContentBlock & {
      reasoningText?: { text?: string; signature?: string };
      redactedContent?: Uint8Array;
    };

  if (reasoningText != null) {
    return {
      type: 'reasoning_content',
      reasoningText: reasoningText,
    };
  }
  if (redactedContent != null) {
    return {
      type: 'reasoning_content',
      redactedContent: Buffer.from(redactedContent).toString('base64'),
    };
  }
  throw new Error('Invalid reasoning content');
}

/**
 * Convert a Bedrock Converse message to a LangChain message.
 */
export function convertConverseMessageToLangChainMessage(
  message: BedrockMessage,
  responseMetadata: Omit<ConverseResponse, 'output'>
): AIMessage {
  if (message.content == null) {
    throw new Error('No message content found in response.');
  }
  if (message.role !== 'assistant') {
    throw new Error(
      `Unsupported message role received in ChatBedrockConverse response: ${message.role}`
    );
  }

  let requestId: string | undefined;
  if (
    '$metadata' in responseMetadata &&
    responseMetadata.$metadata != null &&
    typeof responseMetadata.$metadata === 'object' &&
    'requestId' in responseMetadata.$metadata
  ) {
    requestId = responseMetadata.$metadata.requestId as string;
  }

  let tokenUsage:
    | { input_tokens: number; output_tokens: number; total_tokens: number }
    | undefined;
  if (responseMetadata.usage != null) {
    const input_tokens = responseMetadata.usage.inputTokens ?? 0;
    const output_tokens = responseMetadata.usage.outputTokens ?? 0;
    tokenUsage = {
      input_tokens,
      output_tokens,
      total_tokens:
        responseMetadata.usage.totalTokens ?? input_tokens + output_tokens,
    };
  }

  if (
    message.content.length === 1 &&
    'text' in message.content[0] &&
    typeof message.content[0].text === 'string'
  ) {
    return new AIMessage({
      content: message.content[0].text,
      response_metadata: responseMetadata,
      usage_metadata: tokenUsage,
      id: requestId,
    });
  } else {
    const toolCalls: Array<{
      id?: string;
      name: string;
      args: Record<string, unknown>;
      type: 'tool_call';
    }> = [];
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const content: any[] = [];

    message.content.forEach((c) => {
      if (
        'toolUse' in c &&
        c.toolUse != null &&
        c.toolUse.name != null &&
        c.toolUse.name !== '' &&
        c.toolUse.input != null &&
        typeof c.toolUse.input === 'object'
      ) {
        toolCalls.push({
          id: c.toolUse.toolUseId,
          name: c.toolUse.name,
          args: c.toolUse.input as Record<string, unknown>,
          type: 'tool_call',
        });
      } else if ('text' in c && typeof c.text === 'string') {
        content.push({ type: 'text', text: c.text });
      } else if ('reasoningContent' in c && c.reasoningContent != null) {
        content.push(
          bedrockReasoningBlockToLangchainReasoningBlock(c.reasoningContent)
        );
      } else {
        content.push(c);
      }
    });

    return new AIMessage({
      content: content.length ? content : '',
      tool_calls: toolCalls.length ? toolCalls : undefined,
      response_metadata: responseMetadata,
      usage_metadata: tokenUsage,
      id: requestId,
    });
  }
}

/**
 * Handle a content block delta event from Bedrock Converse stream.
 */
export function handleConverseStreamContentBlockDelta(
  contentBlockDelta: ContentBlockDeltaEvent
): ChatGenerationChunk {
  if (contentBlockDelta.delta == null) {
    throw new Error('No delta found in content block.');
  }

  if (typeof contentBlockDelta.delta.text === 'string') {
    return new ChatGenerationChunk({
      text: contentBlockDelta.delta.text,
      message: new AIMessageChunk({
        content: contentBlockDelta.delta.text,
        response_metadata: {
          contentBlockIndex: contentBlockDelta.contentBlockIndex,
        },
      }),
    });
  } else if (contentBlockDelta.delta.toolUse != null) {
    const index = contentBlockDelta.contentBlockIndex;
    return new ChatGenerationChunk({
      text: '',
      message: new AIMessageChunk({
        content: '',
        tool_call_chunks: [
          {
            args: contentBlockDelta.delta.toolUse.input as string,
            index,
            type: 'tool_call_chunk',
          },
        ],
        response_metadata: {
          contentBlockIndex: contentBlockDelta.contentBlockIndex,
        },
      }),
    });
  } else if (contentBlockDelta.delta.reasoningContent != null) {
    const reasoningBlock =
      bedrockReasoningDeltaToLangchainPartialReasoningBlock(
        contentBlockDelta.delta.reasoningContent
      );
    // Extract the text for additional_kwargs.reasoning_content (for stream handler compatibility)
    const reasoningText =
      'reasoningText' in reasoningBlock
        ? (reasoningBlock.reasoningText.text ??
          reasoningBlock.reasoningText.signature ??
          ('redactedContent' in reasoningBlock
            ? reasoningBlock.redactedContent
            : ''))
        : '';
    return new ChatGenerationChunk({
      text: '',
      message: new AIMessageChunk({
        content: [reasoningBlock],
        additional_kwargs: {
          // Set reasoning_content for stream handler to detect reasoning mode
          reasoning_content: reasoningText,
        },
        response_metadata: {
          contentBlockIndex: contentBlockDelta.contentBlockIndex,
        },
      }),
    });
  } else {
    throw new Error(
      `Unsupported content block type(s): ${JSON.stringify(contentBlockDelta.delta, null, 2)}`
    );
  }
}

/**
 * Handle a content block start event from Bedrock Converse stream.
 */
export function handleConverseStreamContentBlockStart(
  contentBlockStart: ContentBlockStartEvent
): ChatGenerationChunk | null {
  const index = contentBlockStart.contentBlockIndex;

  if (contentBlockStart.start?.toolUse != null) {
    return new ChatGenerationChunk({
      text: '',
      message: new AIMessageChunk({
        content: '',
        tool_call_chunks: [
          {
            name: contentBlockStart.start.toolUse.name,
            id: contentBlockStart.start.toolUse.toolUseId,
            index,
            type: 'tool_call_chunk',
          },
        ],
        response_metadata: {
          contentBlockIndex: index,
        },
      }),
    });
  }

  // Return null for non-tool content block starts (text blocks don't need special handling)
  return null;
}

/**
 * Handle a metadata event from Bedrock Converse stream.
 */
export function handleConverseStreamMetadata(
  metadata: ConverseStreamMetadataEvent,
  extra: { streamUsage: boolean }
): ChatGenerationChunk {
  const inputTokens = metadata.usage?.inputTokens ?? 0;
  const outputTokens = metadata.usage?.outputTokens ?? 0;
  const usage_metadata = {
    input_tokens: inputTokens,
    output_tokens: outputTokens,
    total_tokens: metadata.usage?.totalTokens ?? inputTokens + outputTokens,
  };

  return new ChatGenerationChunk({
    text: '',
    message: new AIMessageChunk({
      content: '',
      usage_metadata: extra.streamUsage ? usage_metadata : undefined,
      response_metadata: {
        // Use the same key as returned from the Converse API
        metadata,
      },
    }),
  });
}
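A rough usage sketch (mine, not part of the package) of convertConverseMessageToLangChainMessage from the file above: a mocked Converse response is converted into a LangChain AIMessage. The response literal, the import path, and the casts are assumptions for illustration; real responses come from the AWS SDK's ConverseCommand and would already satisfy the package's BedrockMessage / ConverseResponse types.

import { convertConverseMessageToLangChainMessage } from './message_outputs';

// Mocked Converse response (shape assumed for illustration only).
const mockResponse = {
  output: {
    message: {
      role: 'assistant' as const,
      content: [{ text: 'Hello from Bedrock' }],
    },
  },
  usage: { inputTokens: 12, outputTokens: 5, totalTokens: 17 },
  $metadata: { requestId: 'req-123' },
};

const { output, ...responseMetadata } = mockResponse;
const aiMessage = convertConverseMessageToLangChainMessage(
  // Casts are needed only because the mock is a plain literal rather than the SDK's types.
  output.message as Parameters<typeof convertConverseMessageToLangChainMessage>[0],
  responseMetadata as Parameters<typeof convertConverseMessageToLangChainMessage>[1]
);
// aiMessage.content        -> 'Hello from Bedrock'
// aiMessage.usage_metadata -> { input_tokens: 12, output_tokens: 5, total_tokens: 17 }
// aiMessage.id             -> 'req-123'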
package/src/messages/core.ts
CHANGED
@@ -41,7 +41,7 @@ User: ${userMessage[1]}
 const _allowedTypes = ['image_url', 'text', 'tool_use', 'tool_result'];
 const allowedTypesByProvider: Record<string, string[]> = {
   default: _allowedTypes,
-  [Providers.ANTHROPIC]: [..._allowedTypes, 'thinking'],
+  [Providers.ANTHROPIC]: [..._allowedTypes, 'thinking', 'redacted_thinking'],
   [Providers.BEDROCK]: [..._allowedTypes, 'reasoning_content'],
   [Providers.OPENAI]: _allowedTypes,
 };
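The one-line change above adds 'redacted_thinking' to the Anthropic allow-list, which presumably means redacted thinking blocks are no longer stripped from Anthropic message content. A minimal sketch (my own, not the package's implementation) of how a per-provider allow-list like this can be applied to content blocks:

type ContentBlock = { type: string; [key: string]: unknown };

function filterAllowedBlocks(
  blocks: ContentBlock[],
  allowedTypes: string[]
): ContentBlock[] {
  // Keep only the block types the target provider accepts.
  return blocks.filter((block) => allowedTypes.includes(block.type));
}

const anthropicAllowed = [
  'image_url', 'text', 'tool_use', 'tool_result', 'thinking', 'redacted_thinking',
];
filterAllowedBlocks(
  [
    { type: 'text', text: 'hello' },
    { type: 'redacted_thinking', data: '...' },
  ],
  anthropicAllowed
);
// With 'redacted_thinking' in the list, both blocks are kept; without it, the second would be dropped.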
package/src/scripts/code_exec_multi_session.ts
ADDED
@@ -0,0 +1,241 @@
// src/scripts/code_exec_multi_session.ts
/**
 * Tests multi-session file tracking for code execution.
 * Verifies that:
 * 1. Files from multiple executions are accumulated
 * 2. Each file tracks its source session_id
 * 3. Edited/recreated files replace older versions (latest preferred)
 *
 * Run with: npm run code_exec_multi_session
 */
import { config } from 'dotenv';
config();
import { HumanMessage, BaseMessage } from '@langchain/core/messages';
import type { RunnableConfig } from '@langchain/core/runnables';
import type * as t from '@/types';
import { ChatModelStreamHandler } from '@/stream';
import { ToolEndHandler, ModelEndHandler } from '@/events';
import { getLLMConfig } from '@/utils/llmConfig';
import { getArgs } from '@/scripts/args';
import { Constants, GraphEvents } from '@/common';
import { Run } from '@/run';
import { createCodeExecutionTool } from '@/tools/CodeExecutor';

const conversationHistory: BaseMessage[] = [];

/**
 * Prints session context from the graph
 */
function printSessionContext(run: Run<t.IState>, label: string): void {
  const graph = run.Graph;
  if (!graph) {
    console.log(`\n[${label}] No graph available`);
    return;
  }

  const session = graph.sessions.get(Constants.EXECUTE_CODE) as
    | t.CodeSessionContext
    | undefined;

  console.log(`\n========== ${label} ==========`);
  if (!session) {
    console.log(' No session context stored yet');
    return;
  }

  console.log(` Latest session_id: ${session.session_id}`);
  console.log(` Files tracked: ${session.files.length}`);
  for (const file of session.files) {
    console.log(` - ${file.name} (session: ${file.session_id})`);
  }
}

async function testMultiSessionFiles(): Promise<void> {
  const { provider } = await getArgs();

  const customHandlers = {
    [GraphEvents.TOOL_END]: new ToolEndHandler(),
    [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
    [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
  };

  const llmConfig = getLLMConfig(provider);

  const run = await Run.create<t.IState>({
    runId: 'multi-session-test',
    graphConfig: {
      type: 'standard',
      llmConfig,
      tools: [createCodeExecutionTool()],
      instructions: `You are a coding assistant. Execute code exactly as requested.
When asked to create files, use Python and save to /mnt/data/.
When reading files, print their contents.
Be concise in responses.`,
    },
    returnContent: true,
    customHandlers,
  });

  const streamConfig: Partial<RunnableConfig> & {
    version: 'v1' | 'v2';
    streamMode: string;
  } = {
    configurable: {
      provider,
      thread_id: 'multi-session-test',
    },
    streamMode: 'values',
    version: 'v2' as const,
  };

  // ========== TEST 1: Create first file ==========
  console.log('\n\n' + '='.repeat(60));
  console.log('TEST 1: Create first file (file_a.txt)');
  console.log('='.repeat(60));

  conversationHistory.push(
    new HumanMessage(`
Create a file called "file_a.txt" with the content:
"This is file A, version 1"
Print confirmation when done.
`)
  );

  await run.processStream({ messages: conversationHistory }, streamConfig);
  const messages1 = run.getRunMessages();
  if (messages1) conversationHistory.push(...messages1);

  printSessionContext(run, 'After Test 1');

  // ========== TEST 2: Create second file (different session) ==========
  console.log('\n\n' + '='.repeat(60));
  console.log('TEST 2: Create second file (file_b.txt)');
  console.log('Expecting: Both file_a.txt and file_b.txt tracked');
  console.log('='.repeat(60));

  conversationHistory.push(
    new HumanMessage(`
Create a NEW file called "file_b.txt" with the content:
"This is file B"
Print confirmation when done.
`)
  );

  await run.processStream({ messages: conversationHistory }, streamConfig);
  const messages2 = run.getRunMessages();
  if (messages2) conversationHistory.push(...messages2);

  printSessionContext(run, 'After Test 2');

  // ========== TEST 3: Read BOTH files (verifies accumulation) ==========
  console.log('\n\n' + '='.repeat(60));
  console.log('TEST 3: Read BOTH files from previous executions');
  console.log('This verifies multi-session file accumulation works');
  console.log('='.repeat(60));

  conversationHistory.push(
    new HumanMessage(`
Read and print the contents of BOTH files:
1. file_a.txt
2. file_b.txt

Show me what's in each file.
`)
  );

  await run.processStream({ messages: conversationHistory }, streamConfig);
  const messages3 = run.getRunMessages();
  if (messages3) conversationHistory.push(...messages3);

  printSessionContext(run, 'After Test 3');

  // ========== TEST 4: Edit file_a.txt (verifies latest-wins) ==========
  console.log('\n\n' + '='.repeat(60));
  console.log('TEST 4: Edit file_a.txt (create new version)');
  console.log('Expecting: Old file_a.txt replaced with new version');
  console.log('='.repeat(60));

  conversationHistory.push(
    new HumanMessage(`
Create an UPDATED version of "file_a.txt" with the content:
"This is file A, version 2 - UPDATED"
Print confirmation when done.
`)
  );

  await run.processStream({ messages: conversationHistory }, streamConfig);
  const messages4 = run.getRunMessages();
  if (messages4) conversationHistory.push(...messages4);

  printSessionContext(run, 'After Test 4');

  // ========== TEST 5: Read file_a.txt (verifies latest version) ==========
  console.log('\n\n' + '='.repeat(60));
  console.log('TEST 5: Read file_a.txt to verify it has the UPDATED content');
  console.log('Expected: "version 2 - UPDATED" NOT "version 1"');
  console.log('='.repeat(60));

  conversationHistory.push(
    new HumanMessage(`
Read and print the contents of file_a.txt.
Tell me what version it shows.
`)
  );

  await run.processStream({ messages: conversationHistory }, streamConfig);
  const messages5 = run.getRunMessages();
  if (messages5) conversationHistory.push(...messages5);

  printSessionContext(run, 'Final Session State');

  // ========== SUMMARY ==========
  console.log('\n\n' + '='.repeat(60));
  console.log('TEST SUMMARY');
  console.log('='.repeat(60));

  const finalSession = run.Graph?.sessions.get(Constants.EXECUTE_CODE) as
    | t.CodeSessionContext
    | undefined;

  if (finalSession) {
    const uniqueSessionIds = new Set(
      finalSession.files.map((f) => f.session_id)
    );
    console.log(`\nTotal files tracked: ${finalSession.files.length}`);
    console.log(`Unique session_ids: ${uniqueSessionIds.size}`);
    console.log('\nFiles:');
    for (const file of finalSession.files) {
      console.log(
        ` - ${file.name} (session: ${file.session_id?.slice(0, 20)}...)`
      );
    }

    // Verify expectations
    const fileACount = finalSession.files.filter(
      (f) => f.name === 'file_a.txt'
    ).length;
    const fileBCount = finalSession.files.filter(
      (f) => f.name === 'file_b.txt'
    ).length;

    console.log('\n✓ Checks:');
    console.log(` file_a.txt count: ${fileACount} (expected: 1, latest wins)`);
    console.log(` file_b.txt count: ${fileBCount} (expected: 1)`);

    if (fileACount === 1 && fileBCount === 1) {
      console.log('\n✅ All tests passed! Multi-session tracking works.');
    } else {
      console.log('\n❌ Test failed - unexpected file counts');
    }
  }
}

process.on('unhandledRejection', (reason, promise) => {
  console.error('Unhandled Rejection at:', promise, 'reason:', reason);
  process.exit(1);
});

testMultiSessionFiles().catch((err) => {
  console.error('Test failed:', err);
  process.exit(1);
});
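As a rough mental model (an assumption on my part, not the package's actual code), the "latest wins" behavior the script above checks for could be implemented by keying the accumulated files on name, so a re-created file from a newer session replaces the older entry:

type SessionFile = { name: string; session_id?: string };

// Hypothetical accumulator: merge files from a new execution into the tracked set,
// letting a same-named file from the newer session replace the older one.
function accumulateFiles(
  existing: SessionFile[],
  incoming: SessionFile[]
): SessionFile[] {
  const byName = new Map<string, SessionFile>();
  for (const file of [...existing, ...incoming]) {
    byName.set(file.name, file);
  }
  return Array.from(byName.values());
}

// file_a.txt from session-2 replaces the session-1 version; file_b.txt is kept.
accumulateFiles(
  [
    { name: 'file_a.txt', session_id: 'session-1' },
    { name: 'file_b.txt', session_id: 'session-1' },
  ],
  [{ name: 'file_a.txt', session_id: 'session-2' }]
);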