@copilotkit/runtime 1.56.0 → 1.56.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. package/dist/agent/index.cjs +2 -2
  2. package/dist/agent/index.cjs.map +1 -1
  3. package/dist/agent/index.d.cts.map +1 -1
  4. package/dist/agent/index.d.mts.map +1 -1
  5. package/dist/agent/index.mjs +2 -2
  6. package/dist/agent/index.mjs.map +1 -1
  7. package/dist/lib/integrations/node-http/index.cjs +4 -1
  8. package/dist/lib/integrations/node-http/index.cjs.map +1 -1
  9. package/dist/lib/integrations/node-http/index.d.cts.map +1 -1
  10. package/dist/lib/integrations/node-http/index.d.mts.map +1 -1
  11. package/dist/lib/integrations/node-http/index.mjs +4 -1
  12. package/dist/lib/integrations/node-http/index.mjs.map +1 -1
  13. package/dist/lib/runtime/copilot-runtime.cjs +11 -1
  14. package/dist/lib/runtime/copilot-runtime.cjs.map +1 -1
  15. package/dist/lib/runtime/copilot-runtime.d.cts.map +1 -1
  16. package/dist/lib/runtime/copilot-runtime.d.mts.map +1 -1
  17. package/dist/lib/runtime/copilot-runtime.mjs +11 -1
  18. package/dist/lib/runtime/copilot-runtime.mjs.map +1 -1
  19. package/dist/lib/runtime/mcp-tools-utils.cjs +21 -4
  20. package/dist/lib/runtime/mcp-tools-utils.cjs.map +1 -1
  21. package/dist/lib/runtime/mcp-tools-utils.d.cts.map +1 -1
  22. package/dist/lib/runtime/mcp-tools-utils.d.mts.map +1 -1
  23. package/dist/lib/runtime/mcp-tools-utils.mjs +21 -4
  24. package/dist/lib/runtime/mcp-tools-utils.mjs.map +1 -1
  25. package/dist/package.cjs +1 -1
  26. package/dist/package.mjs +1 -1
  27. package/dist/service-adapters/anthropic/anthropic-adapter.cjs +11 -3
  28. package/dist/service-adapters/anthropic/anthropic-adapter.cjs.map +1 -1
  29. package/dist/service-adapters/anthropic/anthropic-adapter.d.cts +6 -0
  30. package/dist/service-adapters/anthropic/anthropic-adapter.d.cts.map +1 -1
  31. package/dist/service-adapters/anthropic/anthropic-adapter.d.mts +6 -0
  32. package/dist/service-adapters/anthropic/anthropic-adapter.d.mts.map +1 -1
  33. package/dist/service-adapters/anthropic/anthropic-adapter.mjs +11 -3
  34. package/dist/service-adapters/anthropic/anthropic-adapter.mjs.map +1 -1
  35. package/dist/service-adapters/anthropic/utils.cjs +27 -1
  36. package/dist/service-adapters/anthropic/utils.cjs.map +1 -1
  37. package/dist/service-adapters/anthropic/utils.mjs +27 -1
  38. package/dist/service-adapters/anthropic/utils.mjs.map +1 -1
  39. package/dist/service-adapters/langchain/utils.cjs +1 -1
  40. package/dist/service-adapters/langchain/utils.cjs.map +1 -1
  41. package/dist/service-adapters/langchain/utils.mjs +1 -1
  42. package/dist/service-adapters/langchain/utils.mjs.map +1 -1
  43. package/dist/service-adapters/openai/openai-adapter.cjs +2 -1
  44. package/dist/service-adapters/openai/openai-adapter.cjs.map +1 -1
  45. package/dist/service-adapters/openai/openai-adapter.d.cts +6 -0
  46. package/dist/service-adapters/openai/openai-adapter.d.cts.map +1 -1
  47. package/dist/service-adapters/openai/openai-adapter.d.mts +6 -0
  48. package/dist/service-adapters/openai/openai-adapter.d.mts.map +1 -1
  49. package/dist/service-adapters/openai/openai-adapter.mjs +2 -1
  50. package/dist/service-adapters/openai/openai-adapter.mjs.map +1 -1
  51. package/dist/v2/runtime/core/middleware-sse-parser.cjs +5 -2
  52. package/dist/v2/runtime/core/middleware-sse-parser.cjs.map +1 -1
  53. package/dist/v2/runtime/core/middleware-sse-parser.mjs +5 -2
  54. package/dist/v2/runtime/core/middleware-sse-parser.mjs.map +1 -1
  55. package/package.json +2 -2
  56. package/src/agent/__tests__/provider-id-collision.test.ts +195 -0
  57. package/src/agent/index.ts +19 -11
  58. package/src/lib/integrations/node-http/__tests__/request-duck-type.test.ts +66 -0
  59. package/src/lib/integrations/node-http/index.ts +15 -1
  60. package/src/lib/runtime/__tests__/mcp-tools-utils.test.ts +30 -1
  61. package/src/lib/runtime/__tests__/on-after-request.test.ts +122 -0
  62. package/src/lib/runtime/copilot-runtime.ts +16 -3
  63. package/src/lib/runtime/mcp-tools-utils.ts +41 -6
  64. package/src/service-adapters/anthropic/anthropic-adapter.ts +22 -2
  65. package/src/service-adapters/anthropic/utils.ts +60 -1
  66. package/src/service-adapters/langchain/utils.ts +1 -1
  67. package/src/service-adapters/openai/openai-adapter.ts +14 -1
  68. package/src/v2/runtime/__tests__/middleware-sse-parser.test.ts +50 -0
  69. package/src/v2/runtime/core/middleware-sse-parser.ts +12 -2
  70. package/tests/service-adapters/anthropic/anthropic-adapter.test.ts +268 -0
  71. package/tests/service-adapters/anthropic/utils-token-trimming.test.ts +301 -0
@@ -70,12 +70,19 @@ export interface AnthropicAdapterParams {
70
70
  * See: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching
71
71
  */
72
72
  promptCaching?: AnthropicPromptCachingConfig;
73
+
74
+ /**
75
+ * Optional maximum input token limit. Overrides the default limit
76
+ * used when trimming messages to fit the context window.
77
+ */
78
+ maxInputTokens?: number;
73
79
  }
74
80
 
75
81
  export class AnthropicAdapter implements CopilotServiceAdapter {
76
82
  public model: string = DEFAULT_MODEL;
77
83
  public provider = "anthropic";
78
84
  private promptCaching: AnthropicPromptCachingConfig;
85
+ private maxInputTokens?: number;
79
86
 
80
87
  private _anthropic: Anthropic;
81
88
  public get anthropic(): Anthropic {
@@ -94,6 +101,7 @@ export class AnthropicAdapter implements CopilotServiceAdapter {
94
101
  this.model = params.model;
95
102
  }
96
103
  this.promptCaching = params?.promptCaching || { enabled: false };
104
+ this.maxInputTokens = params?.maxInputTokens;
97
105
  }
98
106
 
99
107
  getLanguageModel(): LanguageModel {
@@ -244,6 +252,7 @@ export class AnthropicAdapter implements CopilotServiceAdapter {
244
252
  forwardedParameters,
245
253
  } = request;
246
254
  const tools = actions.map(convertActionInputToAnthropicTool);
255
+ const knownActionNames = new Set(actions.map((a) => a.name));
247
256
 
248
257
  const messages = [...rawMessages];
249
258
 
@@ -322,6 +331,7 @@ export class AnthropicAdapter implements CopilotServiceAdapter {
322
331
  anthropicMessages,
323
332
  tools,
324
333
  model,
334
+ this.maxInputTokens,
325
335
  );
326
336
 
327
337
  // Apply prompt caching if enabled
@@ -350,7 +360,7 @@ export class AnthropicAdapter implements CopilotServiceAdapter {
350
360
  system: cachedSystemPrompt,
351
361
  model: this.model,
352
362
  messages: cachedMessages,
353
- max_tokens: forwardedParameters?.maxTokens || 1024,
363
+ max_tokens: forwardedParameters?.maxTokens || 4096,
354
364
  ...(forwardedParameters?.temperature
355
365
  ? { temperature: forwardedParameters.temperature }
356
366
  : {}),
@@ -375,12 +385,18 @@ export class AnthropicAdapter implements CopilotServiceAdapter {
375
385
  if (chunk.type === "message_start") {
376
386
  currentMessageId = chunk.message.id;
377
387
  } else if (chunk.type === "content_block_start") {
378
- hasReceivedContent = true;
379
388
  if (chunk.content_block.type === "text") {
389
+ hasReceivedContent = true;
380
390
  didOutputText = false;
381
391
  filterThinkingTextBuffer.reset();
382
392
  mode = "message";
383
393
  } else if (chunk.content_block.type === "tool_use") {
394
+ if (!knownActionNames.has(chunk.content_block.name)) {
395
+ // Unknown tool - skip execution to prevent crashes
396
+ mode = null;
397
+ continue;
398
+ }
399
+ hasReceivedContent = true;
384
400
  currentToolCallId = chunk.content_block.id;
385
401
  eventStream$.sendActionExecutionStart({
386
402
  actionExecutionId: currentToolCallId,
@@ -390,6 +406,10 @@ export class AnthropicAdapter implements CopilotServiceAdapter {
390
406
  mode = "function";
391
407
  }
392
408
  } else if (chunk.type === "content_block_delta") {
409
+ if (mode === null) {
410
+ // Skip deltas for unknown/skipped content blocks
411
+ continue;
412
+ }
393
413
  if (chunk.delta.type === "text_delta") {
394
414
  const text = filterThinkingTextBuffer.onTextChunk(
395
415
  chunk.delta.text,
@@ -49,7 +49,66 @@ export function limitMessagesToTokenCount(
49
49
  maxTokens -= numTokens;
50
50
  }
51
51
 
52
- return result;
52
+ // Post-process: remove orphaned tool_result and tool_use blocks.
53
+ // Token trimming may have removed the assistant message containing tool_use
54
+ // while keeping the user message with tool_result (or vice versa),
55
+ // which Anthropic rejects.
56
+
57
+ // Collect all tool_use IDs from assistant messages
58
+ const toolUseIds = new Set<string>();
59
+ for (const msg of result) {
60
+ if (msg.role === "assistant" && Array.isArray(msg.content)) {
61
+ for (const block of msg.content) {
62
+ if (block.type === "tool_use") {
63
+ toolUseIds.add(block.id);
64
+ }
65
+ }
66
+ }
67
+ }
68
+
69
+ // Collect all tool_result IDs from user messages
70
+ const toolResultIds = new Set<string>();
71
+ for (const msg of result) {
72
+ if (msg.role === "user" && Array.isArray(msg.content)) {
73
+ for (const block of msg.content) {
74
+ if (block.type === "tool_result") {
75
+ toolResultIds.add(block.tool_use_id);
76
+ }
77
+ }
78
+ }
79
+ }
80
+
81
+ // Filter orphaned blocks without mutating the original messages
82
+ const filtered: any[] = [];
83
+ for (const msg of result) {
84
+ if (msg.role === "user" && Array.isArray(msg.content)) {
85
+ const remaining = msg.content.filter(
86
+ (block: any) =>
87
+ block.type !== "tool_result" || toolUseIds.has(block.tool_use_id),
88
+ );
89
+ if (remaining.length === 0) continue;
90
+ if (remaining.length !== msg.content.length) {
91
+ filtered.push({ ...msg, content: remaining });
92
+ } else {
93
+ filtered.push(msg);
94
+ }
95
+ } else if (msg.role === "assistant" && Array.isArray(msg.content)) {
96
+ const remaining = msg.content.filter(
97
+ (block: any) =>
98
+ block.type !== "tool_use" || toolResultIds.has(block.id),
99
+ );
100
+ if (remaining.length === 0) continue;
101
+ if (remaining.length !== msg.content.length) {
102
+ filtered.push({ ...msg, content: remaining });
103
+ } else {
104
+ filtered.push(msg);
105
+ }
106
+ } else {
107
+ filtered.push(msg);
108
+ }
109
+ }
110
+
111
+ return filtered;
53
112
  }
54
113
 
55
114
  const MAX_TOKENS = 128000;
@@ -269,7 +269,7 @@ export async function streamLangChainResponse({
269
269
  });
270
270
  } else if (content) {
271
271
  mode = "message";
272
- currentMessageId = value.lc_kwargs?.id || randomId();
272
+ currentMessageId = randomId();
273
273
  eventStream$.sendTextMessageStart({ messageId: currentMessageId });
274
274
  }
275
275
  }
@@ -97,6 +97,12 @@ export interface OpenAIAdapterParams {
97
97
  * @default false
98
98
  */
99
99
  keepSystemRole?: boolean;
100
+
101
+ /**
102
+ * Optional maximum input token limit. Overrides the default model-based limit
103
+ * used when trimming messages to fit the context window.
104
+ */
105
+ maxInputTokens?: number;
100
106
  }
101
107
 
102
108
  export class OpenAIAdapter implements CopilotServiceAdapter {
@@ -106,6 +112,7 @@ export class OpenAIAdapter implements CopilotServiceAdapter {
106
112
  private disableParallelToolCalls: boolean = false;
107
113
  private _openai: OpenAI;
108
114
  private keepSystemRole: boolean = false;
115
+ private maxInputTokens?: number;
109
116
 
110
117
  public get openai(): OpenAI {
111
118
  return this._openai;
@@ -125,6 +132,7 @@ export class OpenAIAdapter implements CopilotServiceAdapter {
125
132
  }
126
133
  this.disableParallelToolCalls = params?.disableParallelToolCalls || false;
127
134
  this.keepSystemRole = params?.keepSystemRole ?? false;
135
+ this.maxInputTokens = params?.maxInputTokens;
128
136
  }
129
137
 
130
138
  getLanguageModel(): LanguageModel {
@@ -192,7 +200,12 @@ export class OpenAIAdapter implements CopilotServiceAdapter {
192
200
  let openaiMessages = filteredMessages.map((m) =>
193
201
  convertMessageToOpenAIMessage(m, { keepSystemRole: this.keepSystemRole }),
194
202
  );
195
- openaiMessages = limitMessagesToTokenCount(openaiMessages, tools, model);
203
+ openaiMessages = limitMessagesToTokenCount(
204
+ openaiMessages,
205
+ tools,
206
+ model,
207
+ this.maxInputTokens,
208
+ );
196
209
 
197
210
  let toolChoice: any = forwardedParameters?.toolChoice;
198
211
  if (forwardedParameters?.toolChoice === "function") {
@@ -83,6 +83,56 @@ describe("parseSSEResponse", () => {
83
83
  });
84
84
  });
85
85
 
86
+ it("normalizes array content in TOOL_CALL_RESULT (MCP adapters)", async () => {
87
+ const response = buildSSEResponse([
88
+ { type: "RUN_STARTED", threadId: "t-1", runId: "r-1" },
89
+ {
90
+ type: "TOOL_CALL_RESULT",
91
+ toolCallId: "tc-1",
92
+ messageId: "m-result",
93
+ role: "tool",
94
+ content: [
95
+ { type: "text", text: '{"metric":"cpu","value":42}' },
96
+ { type: "text", text: " extra info" },
97
+ ],
98
+ },
99
+ { type: "RUN_FINISHED", threadId: "t-1", runId: "r-1" },
100
+ ]);
101
+ const result = await parseSSEResponse(response);
102
+ expect(result.messages).toContainEqual({
103
+ id: "m-result",
104
+ role: "tool",
105
+ content: '{"metric":"cpu","value":42} extra info',
106
+ toolCallId: "tc-1",
107
+ });
108
+ });
109
+
110
+ it("filters non-text parts when normalizing array content in TOOL_CALL_RESULT", async () => {
111
+ const response = buildSSEResponse([
112
+ { type: "RUN_STARTED", threadId: "t-1", runId: "r-1" },
113
+ {
114
+ type: "TOOL_CALL_RESULT",
115
+ toolCallId: "tc-1",
116
+ messageId: "m-result",
117
+ role: "tool",
118
+ content: [
119
+ { type: "text", text: "valid" },
120
+ { type: "image", data: "binary" },
121
+ null,
122
+ { type: "text", text: " part" },
123
+ ],
124
+ },
125
+ { type: "RUN_FINISHED", threadId: "t-1", runId: "r-1" },
126
+ ]);
127
+ const result = await parseSSEResponse(response);
128
+ expect(result.messages).toContainEqual({
129
+ id: "m-result",
130
+ role: "tool",
131
+ content: "valid part",
132
+ toolCallId: "tc-1",
133
+ });
134
+ });
135
+
86
136
  it("uses MESSAGES_SNAPSHOT when present", async () => {
87
137
  const snapshotMessages = [
88
138
  { id: "u-1", role: "user", content: "hi" },
@@ -167,14 +167,24 @@ export async function parseSSEResponse(
167
167
  break;
168
168
  }
169
169
 
170
- case "TOOL_CALL_RESULT":
170
+ case "TOOL_CALL_RESULT": {
171
+ // langchain-mcp-adapters may send content as an array of
172
+ // {type:"text", text:string} objects instead of a plain string.
173
+ let resultContent = event.content;
174
+ if (Array.isArray(resultContent)) {
175
+ resultContent = resultContent
176
+ .filter((part: any) => part && typeof part.text === "string")
177
+ .map((part: any) => part.text)
178
+ .join("");
179
+ }
171
180
  messagesById.set(event.messageId, {
172
181
  id: event.messageId,
173
182
  role: "tool",
174
- content: event.content,
183
+ content: resultContent,
175
184
  toolCallId: event.toolCallId,
176
185
  });
177
186
  break;
187
+ }
178
188
  }
179
189
  }
180
190
 
@@ -374,4 +374,272 @@ describe("AnthropicAdapter", () => {
374
374
  });
375
375
  });
376
376
  });
377
+
378
+ describe("Unknown Tool Use Handling", () => {
379
+ it("should skip unknown tool_use blocks without crashing", async () => {
380
+ const systemMessage = new TextMessage("system", "System message");
381
+ const userMessage = new TextMessage("user", "Do something");
382
+
383
+ // Mock Anthropic to return a stream with an unknown tool_use block
384
+ mockAnthropicCreate.mockResolvedValue({
385
+ [Symbol.asyncIterator]: async function* () {
386
+ yield { type: "message_start", message: { id: "msg-1" } };
387
+ // Unknown tool_use block — tool name not in the actions list
388
+ yield {
389
+ type: "content_block_start",
390
+ content_block: {
391
+ type: "tool_use",
392
+ id: "tool-unknown",
393
+ name: "nonexistent_tool",
394
+ },
395
+ };
396
+ yield {
397
+ type: "content_block_delta",
398
+ delta: { type: "input_json_delta", partial_json: '{"query":' },
399
+ };
400
+ yield {
401
+ type: "content_block_delta",
402
+ delta: { type: "input_json_delta", partial_json: '"test"}' },
403
+ };
404
+ yield { type: "content_block_stop" };
405
+ // Then a normal text block
406
+ yield {
407
+ type: "content_block_start",
408
+ content_block: { type: "text" },
409
+ };
410
+ yield {
411
+ type: "content_block_delta",
412
+ delta: { type: "text_delta", text: "Here is the result." },
413
+ };
414
+ yield { type: "content_block_stop" };
415
+ },
416
+ });
417
+
418
+ const mockStream = {
419
+ sendTextMessageStart: vi.fn(),
420
+ sendTextMessageContent: vi.fn(),
421
+ sendTextMessageEnd: vi.fn(),
422
+ sendActionExecutionStart: vi.fn(),
423
+ sendActionExecutionArgs: vi.fn(),
424
+ sendActionExecutionEnd: vi.fn(),
425
+ complete: vi.fn(),
426
+ };
427
+
428
+ let streamCallbackDone: Promise<void>;
429
+ mockEventSource.stream.mockImplementation((callback: any) => {
430
+ streamCallbackDone = callback(mockStream);
431
+ });
432
+
433
+ await adapter.process({
434
+ threadId: "test-thread",
435
+ messages: [systemMessage, userMessage],
436
+ actions: [
437
+ {
438
+ name: "known_tool",
439
+ description: "A known tool",
440
+ parameters: [],
441
+ jsonSchema: '{"type":"object","properties":{}}',
442
+ },
443
+ ],
444
+ eventSource: mockEventSource,
445
+ forwardedParameters: {},
446
+ });
447
+
448
+ // Wait for async stream processing to complete
449
+ await streamCallbackDone!;
450
+
451
+ // Should NOT have sent action execution events for the unknown tool
452
+ expect(mockStream.sendActionExecutionStart).not.toHaveBeenCalled();
453
+ expect(mockStream.sendActionExecutionArgs).not.toHaveBeenCalled();
454
+ expect(mockStream.sendActionExecutionEnd).not.toHaveBeenCalled();
455
+
456
+ // Should still process the text block normally
457
+ expect(mockStream.sendTextMessageStart).toHaveBeenCalled();
458
+ expect(mockStream.sendTextMessageContent).toHaveBeenCalledWith({
459
+ messageId: "msg-1",
460
+ content: "Here is the result.",
461
+ });
462
+ expect(mockStream.sendTextMessageEnd).toHaveBeenCalled();
463
+ expect(mockStream.complete).toHaveBeenCalled();
464
+ });
465
+
466
+ it("should trigger fallback when only unknown tool_use blocks are returned", async () => {
467
+ const systemMessage = new TextMessage("system", "System message");
468
+ const userMessage = new TextMessage("user", "Do something");
469
+
470
+ const toolExecution = new ActionExecutionMessage({
471
+ id: "tool-prev",
472
+ name: "someAction",
473
+ arguments: "{}",
474
+ });
475
+
476
+ const toolResult = new ResultMessage({
477
+ actionExecutionId: "tool-prev",
478
+ result: "Previous result",
479
+ });
480
+
481
+ // Mock Anthropic to return ONLY an unknown tool_use block
482
+ mockAnthropicCreate.mockResolvedValue({
483
+ [Symbol.asyncIterator]: async function* () {
484
+ yield { type: "message_start", message: { id: "msg-1" } };
485
+ yield {
486
+ type: "content_block_start",
487
+ content_block: {
488
+ type: "tool_use",
489
+ id: "tool-unknown",
490
+ name: "nonexistent_tool",
491
+ },
492
+ };
493
+ yield {
494
+ type: "content_block_delta",
495
+ delta: { type: "input_json_delta", partial_json: "{}" },
496
+ };
497
+ yield { type: "content_block_stop" };
498
+ },
499
+ });
500
+
501
+ const mockStream = {
502
+ sendTextMessageStart: vi.fn(),
503
+ sendTextMessageContent: vi.fn(),
504
+ sendTextMessageEnd: vi.fn(),
505
+ sendActionExecutionStart: vi.fn(),
506
+ sendActionExecutionArgs: vi.fn(),
507
+ sendActionExecutionEnd: vi.fn(),
508
+ complete: vi.fn(),
509
+ };
510
+
511
+ let streamCallbackDone: Promise<void>;
512
+ mockEventSource.stream.mockImplementation((callback: any) => {
513
+ streamCallbackDone = callback(mockStream);
514
+ });
515
+
516
+ await adapter.process({
517
+ threadId: "test-thread",
518
+ messages: [systemMessage, userMessage, toolExecution, toolResult],
519
+ actions: [
520
+ {
521
+ name: "known_tool",
522
+ description: "A known tool",
523
+ parameters: [],
524
+ jsonSchema: '{"type":"object","properties":{}}',
525
+ },
526
+ ],
527
+ eventSource: mockEventSource,
528
+ forwardedParameters: {},
529
+ });
530
+
531
+ // Wait for async stream processing to complete
532
+ await streamCallbackDone!;
533
+
534
+ // Should NOT have sent action execution events
535
+ expect(mockStream.sendActionExecutionStart).not.toHaveBeenCalled();
536
+
537
+ // Should trigger fallback since hasReceivedContent should be false
538
+ expect(mockStream.sendTextMessageStart).toHaveBeenCalled();
539
+ expect(mockStream.sendTextMessageContent).toHaveBeenCalledWith({
540
+ messageId: expect.any(String),
541
+ content: "Previous result",
542
+ });
543
+ expect(mockStream.sendTextMessageEnd).toHaveBeenCalled();
544
+ });
545
+ });
546
+ });
547
+
548
+ describe("AnthropicAdapter max_tokens default", () => {
549
+ let mockAnthropicCreate: any;
550
+ let mockEventSource: any;
551
+
552
+ beforeEach(() => {
553
+ vi.clearAllMocks();
554
+ });
555
+
556
+ it("should default max_tokens to 4096 when not specified", async () => {
557
+ const mockAnthropic = {
558
+ messages: {
559
+ create: vi.fn(),
560
+ },
561
+ };
562
+
563
+ const adapter = new AnthropicAdapter({ anthropic: mockAnthropic as any });
564
+ mockAnthropicCreate = mockAnthropic.messages.create;
565
+
566
+ mockAnthropicCreate.mockResolvedValue({
567
+ [Symbol.asyncIterator]: async function* () {},
568
+ });
569
+
570
+ mockEventSource = {
571
+ stream: vi.fn((callback) => {
572
+ const mockStream = {
573
+ sendTextMessageStart: vi.fn(),
574
+ sendTextMessageContent: vi.fn(),
575
+ sendTextMessageEnd: vi.fn(),
576
+ sendActionExecutionStart: vi.fn(),
577
+ sendActionExecutionArgs: vi.fn(),
578
+ sendActionExecutionEnd: vi.fn(),
579
+ complete: vi.fn(),
580
+ };
581
+ callback(mockStream);
582
+ return Promise.resolve();
583
+ }),
584
+ };
585
+
586
+ const systemMessage = new TextMessage("system", "System message");
587
+ const userMessage = new TextMessage("user", "Hello");
588
+
589
+ await adapter.process({
590
+ threadId: "test-thread",
591
+ messages: [systemMessage, userMessage],
592
+ actions: [],
593
+ eventSource: mockEventSource,
594
+ forwardedParameters: {},
595
+ });
596
+
597
+ const createCallArgs = mockAnthropicCreate.mock.calls[0][0];
598
+ expect(createCallArgs.max_tokens).toBe(4096);
599
+ });
600
+
601
+ it("should use provided maxTokens when specified", async () => {
602
+ const mockAnthropic = {
603
+ messages: {
604
+ create: vi.fn(),
605
+ },
606
+ };
607
+
608
+ const adapter = new AnthropicAdapter({ anthropic: mockAnthropic as any });
609
+ mockAnthropicCreate = mockAnthropic.messages.create;
610
+
611
+ mockAnthropicCreate.mockResolvedValue({
612
+ [Symbol.asyncIterator]: async function* () {},
613
+ });
614
+
615
+ mockEventSource = {
616
+ stream: vi.fn((callback) => {
617
+ const mockStream = {
618
+ sendTextMessageStart: vi.fn(),
619
+ sendTextMessageContent: vi.fn(),
620
+ sendTextMessageEnd: vi.fn(),
621
+ sendActionExecutionStart: vi.fn(),
622
+ sendActionExecutionArgs: vi.fn(),
623
+ sendActionExecutionEnd: vi.fn(),
624
+ complete: vi.fn(),
625
+ };
626
+ callback(mockStream);
627
+ return Promise.resolve();
628
+ }),
629
+ };
630
+
631
+ const systemMessage = new TextMessage("system", "System message");
632
+ const userMessage = new TextMessage("user", "Hello");
633
+
634
+ await adapter.process({
635
+ threadId: "test-thread",
636
+ messages: [systemMessage, userMessage],
637
+ actions: [],
638
+ eventSource: mockEventSource,
639
+ forwardedParameters: { maxTokens: 8192 },
640
+ });
641
+
642
+ const createCallArgs = mockAnthropicCreate.mock.calls[0][0];
643
+ expect(createCallArgs.max_tokens).toBe(8192);
644
+ });
377
645
  });