@librechat/agents 1.7.8 → 1.7.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/dist/cjs/llm/anthropic/llm.cjs +117 -0
  2. package/dist/cjs/llm/anthropic/llm.cjs.map +1 -0
  3. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +251 -0
  4. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -0
  5. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +135 -0
  6. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -0
  7. package/dist/cjs/llm/providers.cjs +3 -2
  8. package/dist/cjs/llm/providers.cjs.map +1 -1
  9. package/dist/cjs/llm/text.cjs +73 -0
  10. package/dist/cjs/llm/text.cjs.map +1 -0
  11. package/dist/esm/llm/anthropic/llm.mjs +115 -0
  12. package/dist/esm/llm/anthropic/llm.mjs.map +1 -0
  13. package/dist/esm/llm/anthropic/utils/message_inputs.mjs +248 -0
  14. package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -0
  15. package/dist/esm/llm/anthropic/utils/message_outputs.mjs +133 -0
  16. package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -0
  17. package/dist/esm/llm/providers.mjs +3 -2
  18. package/dist/esm/llm/providers.mjs.map +1 -1
  19. package/dist/esm/llm/text.mjs +71 -0
  20. package/dist/esm/llm/text.mjs.map +1 -0
  21. package/dist/types/llm/anthropic/llm.d.ts +13 -0
  22. package/dist/types/llm/anthropic/types.d.ts +20 -0
  23. package/dist/types/llm/anthropic/utils/message_inputs.d.ts +14 -0
  24. package/dist/types/llm/anthropic/utils/message_outputs.d.ts +16 -0
  25. package/dist/types/llm/text.d.ts +21 -0
  26. package/package.json +3 -3
  27. package/src/llm/anthropic/llm.ts +151 -0
  28. package/src/llm/anthropic/types.ts +32 -0
  29. package/src/llm/anthropic/utils/message_inputs.ts +279 -0
  30. package/src/llm/anthropic/utils/message_outputs.ts +217 -0
  31. package/src/llm/providers.ts +4 -2
  32. package/src/llm/text.ts +90 -0
  33. package/src/scripts/code_exec.ts +1 -1
  34. package/src/scripts/code_exec_simple.ts +1 -1
@@ -0,0 +1,279 @@
1
+ /**
2
+ * This util file contains functions for converting LangChain messages to Anthropic messages.
3
+ */
4
+ import {
5
+ BaseMessage,
6
+ SystemMessage,
7
+ HumanMessage,
8
+ AIMessage,
9
+ ToolMessage,
10
+ MessageContent,
11
+ isAIMessage,
12
+ } from '@langchain/core/messages';
13
+ import { ToolCall } from '@langchain/core/messages/tool';
14
+ import type {
15
+ AnthropicMessageCreateParams,
16
+ AnthropicToolResponse,
17
+ } from '@/llm/anthropic/types';
18
+
19
+ function _formatImage(imageUrl: string): { type: string; media_type: string; data: string } {
20
+ const regex = /^data:(image\/.+);base64,(.+)$/;
21
+ const match = imageUrl.match(regex);
22
+ if (match === null) {
23
+ throw new Error(
24
+ [
25
+ 'Anthropic only supports base64-encoded images currently.',
26
+ 'Example: data:image/png;base64,/9j/4AAQSk...',
27
+ ].join('\n\n')
28
+ );
29
+ }
30
+ return {
31
+ type: 'base64',
32
+ media_type: match[1] ?? '',
33
+ data: match[2] ?? '',
34
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
35
+ } as any;
36
+ }
37
+
38
+ function _mergeMessages(
39
+ messages: BaseMessage[]
40
+ ): (SystemMessage | HumanMessage | AIMessage)[] {
41
+ // Merge runs of human/tool messages into single human messages with content blocks.
42
+ const merged = [];
43
+ for (const message of messages) {
44
+ if (message._getType() === 'tool') {
45
+ if (typeof message.content === 'string') {
46
+ const previousMessage = merged[merged.length - 1] as BaseMessage | undefined;
47
+ if (
48
+ previousMessage &&
49
+ previousMessage._getType() === 'human' &&
50
+ Array.isArray(previousMessage.content) &&
51
+ 'type' in previousMessage.content[0] &&
52
+ previousMessage.content[0].type === 'tool_result'
53
+ ) {
54
+ // If the previous message was a tool result, we merge this tool message into it.
55
+ previousMessage.content.push({
56
+ type: 'tool_result',
57
+ content: message.content,
58
+ tool_use_id: (message as ToolMessage).tool_call_id,
59
+ });
60
+ } else {
61
+ // If not, we create a new human message with the tool result.
62
+ merged.push(
63
+ new HumanMessage({
64
+ content: [
65
+ {
66
+ type: 'tool_result',
67
+ content: message.content,
68
+ tool_use_id: (message as ToolMessage).tool_call_id,
69
+ },
70
+ ],
71
+ })
72
+ );
73
+ }
74
+ } else {
75
+ merged.push(
76
+ new HumanMessage({
77
+ content: [
78
+ {
79
+ type: 'tool_result',
80
+ content: _formatContent(message.content),
81
+ tool_use_id: (message as ToolMessage).tool_call_id,
82
+ },
83
+ ],
84
+ })
85
+ );
86
+ }
87
+ } else {
88
+ const previousMessage = merged[merged.length - 1] as BaseMessage | undefined;
89
+ if (
90
+ previousMessage &&
91
+ previousMessage._getType() === 'human' &&
92
+ message._getType() === 'human'
93
+ ) {
94
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
95
+ let combinedContent: Record<string, any>[];
96
+ if (typeof previousMessage.content === 'string') {
97
+ combinedContent = [{ type: 'text', text: previousMessage.content }];
98
+ } else {
99
+ combinedContent = previousMessage.content;
100
+ }
101
+ if (typeof message.content === 'string') {
102
+ combinedContent.push({ type: 'text', text: message.content });
103
+ } else {
104
+ combinedContent = combinedContent.concat(message.content);
105
+ }
106
+ previousMessage.content = combinedContent;
107
+ } else {
108
+ merged.push(message);
109
+ }
110
+ }
111
+ }
112
+ return merged;
113
+ }
114
+
115
+ export function _convertLangChainToolCallToAnthropic(
116
+ toolCall: ToolCall
117
+ ): AnthropicToolResponse {
118
+ if (toolCall.id === undefined) {
119
+ throw new Error('Anthropic requires all tool calls to have an "id".');
120
+ }
121
+ return {
122
+ type: 'tool_use',
123
+ id: toolCall.id,
124
+ name: toolCall.name,
125
+ input: toolCall.args,
126
+ };
127
+ }
128
+
129
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
130
+ function _formatContent(content: MessageContent): string | Record<string, any>[] {
131
+ const toolTypes = ['tool_use', 'tool_result', 'input_json_delta'];
132
+ const textTypes = ['text', 'text_delta'];
133
+
134
+ if (typeof content === 'string') {
135
+ return content;
136
+ } else {
137
+ const contentBlocks = content.map((contentPart) => {
138
+ const cacheControl =
139
+ 'cache_control' in contentPart ? contentPart.cache_control : undefined;
140
+
141
+ if (contentPart.type === 'image_url') {
142
+ let source;
143
+ if (typeof contentPart.image_url === 'string') {
144
+ source = _formatImage(contentPart.image_url);
145
+ } else {
146
+ source = _formatImage(contentPart.image_url.url);
147
+ }
148
+ return {
149
+ type: 'image' as const, // Explicitly setting the type as "image"
150
+ source,
151
+ ...(cacheControl ? { cache_control: cacheControl } : {}),
152
+ };
153
+ } else if (
154
+ textTypes.find((t) => t === contentPart.type) &&
155
+ 'text' in contentPart
156
+ ) {
157
+ // Assuming contentPart is of type MessageContentText here
158
+ return {
159
+ type: 'text' as const, // Explicitly setting the type as "text"
160
+ text: contentPart.text,
161
+ ...(cacheControl ? { cache_control: cacheControl } : {}),
162
+ };
163
+ } else if (toolTypes.find((t) => t === contentPart.type)) {
164
+ const contentPartCopy = { ...contentPart };
165
+ if ('index' in contentPartCopy) {
166
+ // Anthropic does not support passing the index field here, so we remove it.
167
+ delete contentPartCopy.index;
168
+ }
169
+
170
+ if (contentPartCopy.type === 'input_json_delta') {
171
+ // `input_json_delta` type only represents yielding partial tool inputs
172
+ // and is not a valid type for Anthropic messages.
173
+ contentPartCopy.type = 'tool_use';
174
+ }
175
+
176
+ if ('input' in contentPartCopy) {
177
+ // Anthropic tool use inputs should be valid objects, when applicable.
178
+ try {
179
+ contentPartCopy.input = JSON.parse(contentPartCopy.input);
180
+ } catch {
181
+ // no-op
182
+ }
183
+ }
184
+
185
+ // TODO: Fix when SDK types are fixed
186
+ return {
187
+ ...contentPartCopy,
188
+ ...(cacheControl ? { cache_control: cacheControl } : {}),
189
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
190
+ } as any;
191
+ } else {
192
+ throw new Error('Unsupported message content format');
193
+ }
194
+ });
195
+ return contentBlocks;
196
+ }
197
+ }
198
+
199
+ /**
200
+ * Formats messages as a prompt for the model.
201
+ * Used in LangSmith, export is important here.
202
+ * @param messages The base messages to format as a prompt.
203
+ * @returns The formatted prompt.
204
+ */
205
+ export function _convertMessagesToAnthropicPayload(
206
+ messages: BaseMessage[]
207
+ ): AnthropicMessageCreateParams {
208
+ const mergedMessages = _mergeMessages(messages);
209
+ let system;
210
+ if (mergedMessages.length > 0 && mergedMessages[0]._getType() === 'system') {
211
+ system = messages[0].content;
212
+ }
213
+ const conversationMessages =
214
+ system !== undefined ? mergedMessages.slice(1) : mergedMessages;
215
+ const formattedMessages = conversationMessages.map((message) => {
216
+ let role;
217
+ if (message._getType() === 'human') {
218
+ role = 'user' as const;
219
+ } else if (message._getType() === 'ai') {
220
+ role = 'assistant' as const;
221
+ } else if (message._getType() === 'tool') {
222
+ role = 'user' as const;
223
+ } else if (message._getType() === 'system') {
224
+ throw new Error(
225
+ 'System messages are only permitted as the first passed message.'
226
+ );
227
+ } else {
228
+ throw new Error(`Message type "${message._getType()}" is not supported.`);
229
+ }
230
+ if (isAIMessage(message) && !!message.tool_calls?.length) {
231
+ if (typeof message.content === 'string') {
232
+ if (message.content === '') {
233
+ return {
234
+ role,
235
+ content: message.tool_calls.map(
236
+ _convertLangChainToolCallToAnthropic
237
+ ),
238
+ };
239
+ } else {
240
+ return {
241
+ role,
242
+ content: [
243
+ { type: 'text', text: message.content },
244
+ ...message.tool_calls.map(_convertLangChainToolCallToAnthropic),
245
+ ],
246
+ };
247
+ }
248
+ } else {
249
+ const { content } = message;
250
+ const hasMismatchedToolCalls = !message.tool_calls.every((toolCall) =>
251
+ content.find(
252
+ (contentPart) =>
253
+ (contentPart.type === 'tool_use' ||
254
+ contentPart.type === 'input_json_delta') &&
255
+ contentPart.id === toolCall.id
256
+ )
257
+ );
258
+ if (hasMismatchedToolCalls) {
259
+ console.warn(
260
+ 'The "tool_calls" field on a message is only respected if content is a string.'
261
+ );
262
+ }
263
+ return {
264
+ role,
265
+ content: _formatContent(message.content),
266
+ };
267
+ }
268
+ } else {
269
+ return {
270
+ role,
271
+ content: _formatContent(message.content),
272
+ };
273
+ }
274
+ });
275
+ return {
276
+ messages: formattedMessages,
277
+ system,
278
+ } as AnthropicMessageCreateParams;
279
+ }
@@ -0,0 +1,217 @@
1
+ /**
2
+ * This util file contains functions for converting Anthropic messages to LangChain messages.
3
+ */
4
+ import Anthropic from '@anthropic-ai/sdk';
5
+ import {
6
+ AIMessage,
7
+ UsageMetadata,
8
+ AIMessageChunk,
9
+ } from '@langchain/core/messages';
10
+ import { ToolCall } from '@langchain/core/messages/tool';
11
+ import { ChatGeneration } from '@langchain/core/outputs';
12
+ import { AnthropicMessageResponse } from '../types.js';
13
+
14
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
15
+ export function extractToolCalls(content: Record<string, any>[]): ToolCall[] {
16
+ const toolCalls: ToolCall[] = [];
17
+ for (const block of content) {
18
+ if (block.type === 'tool_use') {
19
+ toolCalls.push({
20
+ name: block.name,
21
+ args: block.input,
22
+ id: block.id,
23
+ type: 'tool_call',
24
+ });
25
+ }
26
+ }
27
+ return toolCalls;
28
+ }
29
+
30
+ export function _makeMessageChunkFromAnthropicEvent(
31
+ data: Anthropic.Messages.RawMessageStreamEvent,
32
+ fields: {
33
+ streamUsage: boolean;
34
+ coerceContentToString: boolean;
35
+ }
36
+ ): {
37
+ chunk: AIMessageChunk;
38
+ } | null {
39
+ if (data.type === 'message_start') {
40
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
41
+ const { content, usage, ...additionalKwargs } = data.message;
42
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
43
+ const filteredAdditionalKwargs: Record<string, any> = {};
44
+ for (const [key, value] of Object.entries(additionalKwargs)) {
45
+ if (value !== undefined && value !== null) {
46
+ filteredAdditionalKwargs[key] = value;
47
+ }
48
+ }
49
+ const usageMetadata: UsageMetadata = {
50
+ input_tokens: usage.input_tokens,
51
+ output_tokens: usage.output_tokens,
52
+ total_tokens: usage.input_tokens + usage.output_tokens,
53
+ };
54
+ return {
55
+ chunk: new AIMessageChunk({
56
+ content: fields.coerceContentToString ? '' : [],
57
+ additional_kwargs: filteredAdditionalKwargs,
58
+ usage_metadata: fields.streamUsage ? usageMetadata : undefined,
59
+ id: data.message.id,
60
+ }),
61
+ };
62
+ } else if (data.type === 'message_delta') {
63
+ const usageMetadata: UsageMetadata = {
64
+ input_tokens: 0,
65
+ output_tokens: data.usage.output_tokens,
66
+ total_tokens: data.usage.output_tokens,
67
+ };
68
+ return {
69
+ chunk: new AIMessageChunk({
70
+ content: fields.coerceContentToString ? '' : [],
71
+ additional_kwargs: { ...data.delta },
72
+ usage_metadata: fields.streamUsage ? usageMetadata : undefined,
73
+ }),
74
+ };
75
+ } else if (
76
+ data.type === 'content_block_start' &&
77
+ data.content_block.type === 'tool_use'
78
+ ) {
79
+ const toolCallContentBlock =
80
+ data.content_block as Anthropic.Messages.ToolUseBlock;
81
+ return {
82
+ chunk: new AIMessageChunk({
83
+ content: fields.coerceContentToString
84
+ ? ''
85
+ : [
86
+ {
87
+ index: data.index,
88
+ ...data.content_block,
89
+ input: '',
90
+ },
91
+ ],
92
+ additional_kwargs: {},
93
+ tool_call_chunks: [
94
+ {
95
+ id: toolCallContentBlock.id,
96
+ index: data.index,
97
+ name: toolCallContentBlock.name,
98
+ args: '',
99
+ },
100
+ ],
101
+ }),
102
+ };
103
+ } else if (
104
+ data.type === 'content_block_delta' &&
105
+ data.delta.type === 'text_delta'
106
+ ) {
107
+ const content = data.delta.text;
108
+ if (content !== undefined) {
109
+ return {
110
+ chunk: new AIMessageChunk({
111
+ content: fields.coerceContentToString
112
+ ? content
113
+ : [
114
+ {
115
+ index: data.index,
116
+ ...data.delta,
117
+ },
118
+ ],
119
+ additional_kwargs: {},
120
+ }),
121
+ };
122
+ }
123
+ } else if (
124
+ data.type === 'content_block_delta' &&
125
+ data.delta.type === 'input_json_delta'
126
+ ) {
127
+ return {
128
+ chunk: new AIMessageChunk({
129
+ content: fields.coerceContentToString
130
+ ? ''
131
+ : [
132
+ {
133
+ index: data.index,
134
+ input: data.delta.partial_json,
135
+ type: data.delta.type,
136
+ },
137
+ ],
138
+ additional_kwargs: {},
139
+ tool_call_chunks: [
140
+ {
141
+ index: data.index,
142
+ args: data.delta.partial_json,
143
+ },
144
+ ],
145
+ }),
146
+ };
147
+ } else if (
148
+ data.type === 'content_block_start' &&
149
+ data.content_block.type === 'text'
150
+ ) {
151
+ const content = data.content_block.text;
152
+ if (content !== undefined) {
153
+ return {
154
+ chunk: new AIMessageChunk({
155
+ content: fields.coerceContentToString
156
+ ? content
157
+ : [
158
+ {
159
+ index: data.index,
160
+ ...data.content_block,
161
+ },
162
+ ],
163
+ additional_kwargs: {},
164
+ }),
165
+ };
166
+ }
167
+ }
168
+
169
+ return null;
170
+ }
171
+
172
+ export function anthropicResponseToChatMessages(
173
+ messages: AnthropicMessageResponse[],
174
+ additionalKwargs: Record<string, unknown>
175
+ ): ChatGeneration[] {
176
+ const usage: Record<string, number> | null | undefined =
177
+ additionalKwargs.usage as Record<string, number> | null | undefined;
178
+ const usageMetadata =
179
+ usage != null
180
+ ? {
181
+ input_tokens: usage.input_tokens ?? 0,
182
+ output_tokens: usage.output_tokens ?? 0,
183
+ total_tokens: (usage.input_tokens ?? 0) + (usage.output_tokens ?? 0),
184
+ }
185
+ : undefined;
186
+ if (messages.length === 1 && messages[0].type === 'text') {
187
+ return [
188
+ {
189
+ text: messages[0].text,
190
+ message: new AIMessage({
191
+ content: messages[0].text,
192
+ additional_kwargs: additionalKwargs,
193
+ usage_metadata: usageMetadata,
194
+ response_metadata: additionalKwargs,
195
+ id: additionalKwargs.id as string,
196
+ }),
197
+ },
198
+ ];
199
+ } else {
200
+ const toolCalls = extractToolCalls(messages);
201
+ const generations: ChatGeneration[] = [
202
+ {
203
+ text: '',
204
+ message: new AIMessage({
205
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
206
+ content: messages as any,
207
+ additional_kwargs: additionalKwargs,
208
+ tool_calls: toolCalls,
209
+ usage_metadata: usageMetadata,
210
+ response_metadata: additionalKwargs,
211
+ id: additionalKwargs.id as string,
212
+ }),
213
+ },
214
+ ];
215
+ return generations;
216
+ }
217
+ }
@@ -2,12 +2,13 @@
2
2
  import { ChatOpenAI } from '@langchain/openai';
3
3
  import { ChatOllama } from '@langchain/ollama';
4
4
  import { ChatBedrockConverse } from '@langchain/aws';
5
- import { ChatAnthropic } from '@langchain/anthropic';
5
+ // import { ChatAnthropic } from '@langchain/anthropic';
6
6
  import { ChatMistralAI } from '@langchain/mistralai';
7
7
  import { ChatVertexAI } from '@langchain/google-vertexai';
8
8
  import { BedrockChat } from '@langchain/community/chat_models/bedrock/web';
9
9
  import type { ChatModelConstructorMap, ProviderOptionsMap, ChatModelMap } from '@/types';
10
10
  import { Providers } from '@/common';
11
+ import { CustomAnthropic } from '@/llm/anthropic/llm';
11
12
 
12
13
  export const llmProviders: Partial<ChatModelConstructorMap> = {
13
14
  [Providers.OPENAI]: ChatOpenAI,
@@ -16,7 +17,8 @@ export const llmProviders: Partial<ChatModelConstructorMap> = {
16
17
  [Providers.BEDROCK_LEGACY]: BedrockChat,
17
18
  [Providers.MISTRALAI]: ChatMistralAI,
18
19
  [Providers.BEDROCK]: ChatBedrockConverse,
19
- [Providers.ANTHROPIC]: ChatAnthropic,
20
+ [Providers.ANTHROPIC]: CustomAnthropic,
21
+ // [Providers.ANTHROPIC]: CustomAnthropic,
20
22
  };
21
23
 
22
24
  export const manualToolStreamProviders = new Set<Providers | string>([Providers.ANTHROPIC, Providers.BEDROCK, Providers.OLLAMA]);
@@ -0,0 +1,90 @@
1
+ /* eslint-disable no-console */
2
+ import { Readable } from 'stream';
3
+ import type { ReadableOptions } from 'stream';
4
+ export interface TextStreamOptions extends ReadableOptions {
5
+ minChunkSize?: number;
6
+ maxChunkSize?: number;
7
+ delay?: number;
8
+ }
9
+
10
+ export type ProgressCallback = (chunk: string) => void;
11
+ export type PostChunkCallback = (chunk: string) => void;
12
+
13
+ export class TextStream extends Readable {
14
+ private text: string;
15
+ private currentIndex: number;
16
+ private minChunkSize: number;
17
+ private maxChunkSize: number;
18
+ private delay: number;
19
+
20
+ constructor(text: string, options: TextStreamOptions = {}) {
21
+ super(options);
22
+ this.text = text;
23
+ this.currentIndex = 0;
24
+ this.minChunkSize = options.minChunkSize ?? 2;
25
+ this.maxChunkSize = options.maxChunkSize ?? 4;
26
+ this.delay = options.delay ?? 20; // Time in milliseconds
27
+ }
28
+
29
+ _read(): void {
30
+ const { delay, minChunkSize, maxChunkSize } = this;
31
+
32
+ if (this.currentIndex < this.text.length) {
33
+ setTimeout(() => {
34
+ const remainingChars = this.text.length - this.currentIndex;
35
+ const chunkSize = Math.min(this.randomInt(minChunkSize, maxChunkSize + 1), remainingChars);
36
+
37
+ const chunk = this.text.slice(this.currentIndex, this.currentIndex + chunkSize);
38
+ this.push(chunk);
39
+ this.currentIndex += chunkSize;
40
+ }, delay);
41
+ } else {
42
+ this.push(null); // signal end of data
43
+ }
44
+ }
45
+
46
+ private randomInt(min: number, max: number): number {
47
+ return Math.floor(Math.random() * (max - min)) + min;
48
+ }
49
+
50
+ async processTextStream(progressCallback: ProgressCallback): Promise<void> {
51
+ const streamPromise = new Promise<void>((resolve, reject) => {
52
+ this.on('data', (chunk) => {
53
+ progressCallback(chunk.toString());
54
+ });
55
+
56
+ this.on('end', () => {
57
+ resolve();
58
+ });
59
+
60
+ this.on('error', (err) => {
61
+ reject(err);
62
+ });
63
+ });
64
+
65
+ try {
66
+ await streamPromise;
67
+ } catch (err) {
68
+ console.error('[processTextStream] Error in text stream:', err);
69
+ // Handle the error appropriately, e.g., return an error message or throw an error
70
+ }
71
+ }
72
+
73
+ async *generateText(progressCallback?: ProgressCallback): AsyncGenerator<string, void, unknown> {
74
+ const { delay, minChunkSize, maxChunkSize } = this;
75
+
76
+ while (this.currentIndex < this.text.length) {
77
+ await new Promise(resolve => setTimeout(resolve, delay));
78
+
79
+ const remainingChars = this.text.length - this.currentIndex;
80
+ const chunkSize = Math.min(this.randomInt(minChunkSize, maxChunkSize + 1), remainingChars);
81
+
82
+ const chunk = this.text.slice(this.currentIndex, this.currentIndex + chunkSize);
83
+
84
+ progressCallback?.(chunk);
85
+
86
+ yield chunk;
87
+ this.currentIndex += chunkSize;
88
+ }
89
+ }
90
+ }
@@ -64,7 +64,7 @@ async function testCodeExecution(): Promise<void> {
64
64
  graphConfig: {
65
65
  type: 'standard',
66
66
  llmConfig,
67
- tools: [new TavilySearchResults(), createCodeExecutionTool({ apiKey: 'test' })],
67
+ tools: [new TavilySearchResults(), createCodeExecutionTool()],
68
68
  instructions: 'You are a friendly AI assistant with coding capabilities. Always address the user by their name.',
69
69
  additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
70
70
  },
@@ -64,7 +64,7 @@ async function testCodeExecution(): Promise<void> {
64
64
  graphConfig: {
65
65
  type: 'standard',
66
66
  llmConfig,
67
- tools: [new TavilySearchResults(), createCodeExecutionTool({ apiKey: 'test' })],
67
+ tools: [new TavilySearchResults(), createCodeExecutionTool()],
68
68
  instructions: 'You are a friendly AI assistant with coding capabilities. Always address the user by their name.',
69
69
  additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
70
70
  },