@copilotkit/runtime 1.8.12-next.2 → 1.8.12-next.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/CHANGELOG.md +13 -0
  2. package/dist/{chunk-OZLQ2A5E.mjs → chunk-FA3E4I4W.mjs} +4 -3
  3. package/dist/chunk-FA3E4I4W.mjs.map +1 -0
  4. package/dist/{chunk-FDGTTGQU.mjs → chunk-KGZF7KSR.mjs} +2 -2
  5. package/dist/{chunk-VQSVMSXZ.mjs → chunk-MG576PIZ.mjs} +2 -2
  6. package/dist/{chunk-Y4H3U52G.mjs → chunk-MVKCCH5U.mjs} +216 -173
  7. package/dist/chunk-MVKCCH5U.mjs.map +1 -0
  8. package/dist/{chunk-V6IQU4D2.mjs → chunk-S5U6J5X2.mjs} +2 -2
  9. package/dist/index.js +217 -173
  10. package/dist/index.js.map +1 -1
  11. package/dist/index.mjs +5 -5
  12. package/dist/lib/index.js +109 -82
  13. package/dist/lib/index.js.map +1 -1
  14. package/dist/lib/index.mjs +5 -5
  15. package/dist/lib/integrations/index.js +2 -1
  16. package/dist/lib/integrations/index.js.map +1 -1
  17. package/dist/lib/integrations/index.mjs +5 -5
  18. package/dist/lib/integrations/nest/index.js +2 -1
  19. package/dist/lib/integrations/nest/index.js.map +1 -1
  20. package/dist/lib/integrations/nest/index.mjs +3 -3
  21. package/dist/lib/integrations/node-express/index.js +2 -1
  22. package/dist/lib/integrations/node-express/index.js.map +1 -1
  23. package/dist/lib/integrations/node-express/index.mjs +3 -3
  24. package/dist/lib/integrations/node-http/index.js +2 -1
  25. package/dist/lib/integrations/node-http/index.js.map +1 -1
  26. package/dist/lib/integrations/node-http/index.mjs +2 -2
  27. package/dist/service-adapters/index.js +215 -172
  28. package/dist/service-adapters/index.js.map +1 -1
  29. package/dist/service-adapters/index.mjs +1 -1
  30. package/jest.config.js +8 -3
  31. package/package.json +3 -2
  32. package/src/service-adapters/anthropic/anthropic-adapter.ts +124 -66
  33. package/src/service-adapters/anthropic/utils.ts +0 -19
  34. package/src/service-adapters/openai/openai-adapter.ts +107 -69
  35. package/tests/global.d.ts +13 -0
  36. package/tests/service-adapters/anthropic/allowlist-approach.test.ts +226 -0
  37. package/tests/service-adapters/anthropic/anthropic-adapter.test.ts +604 -0
  38. package/tests/service-adapters/openai/allowlist-approach.test.ts +238 -0
  39. package/tests/service-adapters/openai/openai-adapter.test.ts +301 -0
  40. package/tests/setup.jest.ts +21 -0
  41. package/tests/tsconfig.json +10 -0
  42. package/tsconfig.json +1 -1
  43. package/dist/chunk-OZLQ2A5E.mjs.map +0 -1
  44. package/dist/chunk-Y4H3U52G.mjs.map +0 -1
  45. package/dist/{chunk-FDGTTGQU.mjs.map → chunk-KGZF7KSR.mjs.map} +0 -0
  46. package/dist/{chunk-VQSVMSXZ.mjs.map → chunk-MG576PIZ.mjs.map} +0 -0
  47. package/dist/{chunk-V6IQU4D2.mjs.map → chunk-S5U6J5X2.mjs.map} +0 -0
package/package.json CHANGED
@@ -9,7 +9,7 @@
9
9
  "publishConfig": {
10
10
  "access": "public"
11
11
  },
12
- "version": "1.8.12-next.2",
12
+ "version": "1.8.12-next.4",
13
13
  "sideEffects": false,
14
14
  "main": "./dist/index.js",
15
15
  "module": "./dist/index.mjs",
@@ -19,6 +19,7 @@
19
19
  "types": "./dist/index.d.ts",
20
20
  "license": "MIT",
21
21
  "devDependencies": {
22
+ "@jest/globals": "^29.7.0",
22
23
  "@swc/core": "1.5.28",
23
24
  "@types/express": "^4.17.21",
24
25
  "@types/jest": "^29.5.12",
@@ -63,7 +64,7 @@
63
64
  "rxjs": "^7.8.1",
64
65
  "type-graphql": "2.0.0-rc.1",
65
66
  "zod": "^3.23.3",
66
- "@copilotkit/shared": "1.8.12-next.2"
67
+ "@copilotkit/shared": "1.8.12-next.4"
67
68
  },
68
69
  "keywords": [
69
70
  "copilotkit",
@@ -25,7 +25,6 @@ import {
25
25
  import {
26
26
  convertActionInputToAnthropicTool,
27
27
  convertMessageToAnthropicMessage,
28
- groupAnthropicMessagesByRole,
29
28
  limitMessagesToTokenCount,
30
29
  } from "./utils";
31
30
 
@@ -80,9 +79,54 @@ export class AnthropicAdapter implements CopilotServiceAdapter {
80
79
  const instructionsMessage = messages.shift();
81
80
  const instructions = instructionsMessage.isTextMessage() ? instructionsMessage.content : "";
82
81
 
83
- let anthropicMessages = messages.map(convertMessageToAnthropicMessage);
84
- anthropicMessages = limitMessagesToTokenCount(anthropicMessages, tools, model);
85
- anthropicMessages = groupAnthropicMessagesByRole(anthropicMessages);
82
+ // ALLOWLIST APPROACH:
83
+ // 1. First, identify all valid tool_use calls (from assistant)
84
+ // 2. Then, only keep tool_result blocks that correspond to these valid tool_use IDs
85
+ // 3. Discard any other tool_result blocks
86
+
87
+ // Step 1: Extract valid tool_use IDs
88
+ const validToolUseIds = new Set<string>();
89
+
90
+ for (const message of messages) {
91
+ if (message.isActionExecutionMessage()) {
92
+ validToolUseIds.add(message.id);
93
+ }
94
+ }
95
+
96
+ // Step 2: Map each message to an Anthropic message, eliminating invalid tool_results
97
+ const anthropicMessages = messages
98
+ .map((message) => {
99
+ // For tool results, only include if they match a valid tool_use ID
100
+ if (message.isResultMessage()) {
101
+ // Skip if there's no corresponding tool_use
102
+ if (!validToolUseIds.has(message.actionExecutionId)) {
103
+ return null; // Will be filtered out later
104
+ }
105
+
106
+ // Remove this ID from valid IDs so we don't process duplicates
107
+ validToolUseIds.delete(message.actionExecutionId);
108
+
109
+ return {
110
+ role: "user",
111
+ content: [
112
+ {
113
+ type: "tool_result",
114
+ content: message.result,
115
+ tool_use_id: message.actionExecutionId,
116
+ },
117
+ ],
118
+ };
119
+ }
120
+
121
+ // For non-tool-result messages, convert normally
122
+ return convertMessageToAnthropicMessage(message);
123
+ })
124
+ .filter(Boolean) as Anthropic.Messages.MessageParam[]; // Explicitly cast after filtering nulls
125
+
126
+ // Apply token limits
127
+ const limitedMessages = limitMessagesToTokenCount(anthropicMessages, tools, model);
128
+
129
+ // We skip grouping by role since we've already ensured uniqueness of tool_results
86
130
 
87
131
  let toolChoice: any = forwardedParameters?.toolChoice;
88
132
  if (forwardedParameters?.toolChoice === "function") {
@@ -92,73 +136,87 @@ export class AnthropicAdapter implements CopilotServiceAdapter {
92
136
  };
93
137
  }
94
138
 
95
- const stream = this.anthropic.messages.create({
96
- system: instructions,
97
- model: this.model,
98
- messages: anthropicMessages,
99
- max_tokens: forwardedParameters?.maxTokens || 1024,
100
- ...(forwardedParameters?.temperature ? { temperature: forwardedParameters.temperature } : {}),
101
- ...(tools.length > 0 && { tools }),
102
- ...(toolChoice && { tool_choice: toolChoice }),
103
- stream: true,
104
- });
105
-
106
- eventSource.stream(async (eventStream$) => {
107
- let mode: "function" | "message" | null = null;
108
- let didOutputText = false;
109
- let currentMessageId = randomId();
110
- let currentToolCallId = randomId();
111
- let filterThinkingTextBuffer = new FilterThinkingTextBuffer();
112
-
113
- for await (const chunk of await stream) {
114
- if (chunk.type === "message_start") {
115
- currentMessageId = chunk.message.id;
116
- } else if (chunk.type === "content_block_start") {
117
- if (chunk.content_block.type === "text") {
118
- didOutputText = false;
119
- filterThinkingTextBuffer.reset();
120
- mode = "message";
121
- } else if (chunk.content_block.type === "tool_use") {
122
- currentToolCallId = chunk.content_block.id;
123
- eventStream$.sendActionExecutionStart({
124
- actionExecutionId: currentToolCallId,
125
- actionName: chunk.content_block.name,
126
- parentMessageId: currentMessageId,
127
- });
128
- mode = "function";
129
- }
130
- } else if (chunk.type === "content_block_delta") {
131
- if (chunk.delta.type === "text_delta") {
132
- const text = filterThinkingTextBuffer.onTextChunk(chunk.delta.text);
133
- if (text.length > 0) {
134
- if (!didOutputText) {
135
- eventStream$.sendTextMessageStart({ messageId: currentMessageId });
136
- didOutputText = true;
139
+ try {
140
+ const createParams = {
141
+ system: instructions,
142
+ model: this.model,
143
+ messages: limitedMessages,
144
+ max_tokens: forwardedParameters?.maxTokens || 1024,
145
+ ...(forwardedParameters?.temperature
146
+ ? { temperature: forwardedParameters.temperature }
147
+ : {}),
148
+ ...(tools.length > 0 && { tools }),
149
+ ...(toolChoice && { tool_choice: toolChoice }),
150
+ stream: true,
151
+ };
152
+
153
+ const stream = await this.anthropic.messages.create(createParams);
154
+
155
+ eventSource.stream(async (eventStream$) => {
156
+ let mode: "function" | "message" | null = null;
157
+ let didOutputText = false;
158
+ let currentMessageId = randomId();
159
+ let currentToolCallId = randomId();
160
+ let filterThinkingTextBuffer = new FilterThinkingTextBuffer();
161
+
162
+ try {
163
+ for await (const chunk of stream as AsyncIterable<any>) {
164
+ if (chunk.type === "message_start") {
165
+ currentMessageId = chunk.message.id;
166
+ } else if (chunk.type === "content_block_start") {
167
+ if (chunk.content_block.type === "text") {
168
+ didOutputText = false;
169
+ filterThinkingTextBuffer.reset();
170
+ mode = "message";
171
+ } else if (chunk.content_block.type === "tool_use") {
172
+ currentToolCallId = chunk.content_block.id;
173
+ eventStream$.sendActionExecutionStart({
174
+ actionExecutionId: currentToolCallId,
175
+ actionName: chunk.content_block.name,
176
+ parentMessageId: currentMessageId,
177
+ });
178
+ mode = "function";
179
+ }
180
+ } else if (chunk.type === "content_block_delta") {
181
+ if (chunk.delta.type === "text_delta") {
182
+ const text = filterThinkingTextBuffer.onTextChunk(chunk.delta.text);
183
+ if (text.length > 0) {
184
+ if (!didOutputText) {
185
+ eventStream$.sendTextMessageStart({ messageId: currentMessageId });
186
+ didOutputText = true;
187
+ }
188
+ eventStream$.sendTextMessageContent({
189
+ messageId: currentMessageId,
190
+ content: text,
191
+ });
192
+ }
193
+ } else if (chunk.delta.type === "input_json_delta") {
194
+ eventStream$.sendActionExecutionArgs({
195
+ actionExecutionId: currentToolCallId,
196
+ args: chunk.delta.partial_json,
197
+ });
198
+ }
199
+ } else if (chunk.type === "content_block_stop") {
200
+ if (mode === "message") {
201
+ if (didOutputText) {
202
+ eventStream$.sendTextMessageEnd({ messageId: currentMessageId });
203
+ }
204
+ } else if (mode === "function") {
205
+ eventStream$.sendActionExecutionEnd({ actionExecutionId: currentToolCallId });
137
206
  }
138
- eventStream$.sendTextMessageContent({
139
- messageId: currentMessageId,
140
- content: text,
141
- });
142
- }
143
- } else if (chunk.delta.type === "input_json_delta") {
144
- eventStream$.sendActionExecutionArgs({
145
- actionExecutionId: currentToolCallId,
146
- args: chunk.delta.partial_json,
147
- });
148
- }
149
- } else if (chunk.type === "content_block_stop") {
150
- if (mode === "message") {
151
- if (didOutputText) {
152
- eventStream$.sendTextMessageEnd({ messageId: currentMessageId });
153
207
  }
154
- } else if (mode === "function") {
155
- eventStream$.sendActionExecutionEnd({ actionExecutionId: currentToolCallId });
156
208
  }
209
+ } catch (error) {
210
+ console.error("[Anthropic] Error processing stream:", error);
211
+ throw error;
157
212
  }
158
- }
159
213
 
160
- eventStream$.complete();
161
- });
214
+ eventStream$.complete();
215
+ });
216
+ } catch (error) {
217
+ console.error("[Anthropic] Error during API call:", error);
218
+ throw error;
219
+ }
162
220
 
163
221
  return {
164
222
  threadId: threadId || randomUUID(),
@@ -155,22 +155,3 @@ export function convertMessageToAnthropicMessage(
155
155
  };
156
156
  }
157
157
  }
158
-
159
- export function groupAnthropicMessagesByRole(
160
- messageParams: Anthropic.Messages.MessageParam[],
161
- ): Anthropic.Messages.MessageParam[] {
162
- return messageParams.reduce((acc, message) => {
163
- const lastGroup = acc[acc.length - 1];
164
-
165
- if (lastGroup && lastGroup.role === message.role) {
166
- lastGroup.content = lastGroup.content.concat(message.content as any);
167
- } else {
168
- acc.push({
169
- role: message.role,
170
- content: [...(message.content as any)],
171
- });
172
- }
173
-
174
- return acc;
175
- }, [] as Anthropic.Messages.MessageParam[]);
176
- }
@@ -128,7 +128,34 @@ export class OpenAIAdapter implements CopilotServiceAdapter {
128
128
  const tools = actions.map(convertActionInputToOpenAITool);
129
129
  const threadId = threadIdFromRequest ?? randomUUID();
130
130
 
131
- let openaiMessages = messages.map((m) =>
131
+ // ALLOWLIST APPROACH: Only include tool_result messages that correspond to valid tool_calls
132
+ // Step 1: Extract valid tool_call IDs
133
+ const validToolUseIds = new Set<string>();
134
+
135
+ for (const message of messages) {
136
+ if (message.isActionExecutionMessage()) {
137
+ validToolUseIds.add(message.id);
138
+ }
139
+ }
140
+
141
+ // Step 2: Filter messages, keeping only those with valid tool_call IDs
142
+ const filteredMessages = messages.filter((message) => {
143
+ if (message.isResultMessage()) {
144
+ // Skip if there's no corresponding tool_call
145
+ if (!validToolUseIds.has(message.actionExecutionId)) {
146
+ return false;
147
+ }
148
+
149
+ // Remove this ID from valid IDs so we don't process duplicates
150
+ validToolUseIds.delete(message.actionExecutionId);
151
+ return true;
152
+ }
153
+
154
+ // Keep all non-tool-result messages
155
+ return true;
156
+ });
157
+
158
+ let openaiMessages = filteredMessages.map((m) =>
132
159
  convertMessageToOpenAIMessage(m, { keepSystemRole: this.keepSystemRole }),
133
160
  );
134
161
  openaiMessages = limitMessagesToTokenCount(openaiMessages, tools, model);
@@ -141,81 +168,92 @@ export class OpenAIAdapter implements CopilotServiceAdapter {
141
168
  };
142
169
  }
143
170
 
144
- const stream = this.openai.beta.chat.completions.stream({
145
- model: model,
146
- stream: true,
147
- messages: openaiMessages,
148
- ...(tools.length > 0 && { tools }),
149
- ...(forwardedParameters?.maxTokens && { max_tokens: forwardedParameters.maxTokens }),
150
- ...(forwardedParameters?.stop && { stop: forwardedParameters.stop }),
151
- ...(toolChoice && { tool_choice: toolChoice }),
152
- ...(this.disableParallelToolCalls && { parallel_tool_calls: false }),
153
- ...(forwardedParameters?.temperature && { temperature: forwardedParameters.temperature }),
154
- });
171
+ try {
172
+ const stream = this.openai.beta.chat.completions.stream({
173
+ model: model,
174
+ stream: true,
175
+ messages: openaiMessages,
176
+ ...(tools.length > 0 && { tools }),
177
+ ...(forwardedParameters?.maxTokens && { max_tokens: forwardedParameters.maxTokens }),
178
+ ...(forwardedParameters?.stop && { stop: forwardedParameters.stop }),
179
+ ...(toolChoice && { tool_choice: toolChoice }),
180
+ ...(this.disableParallelToolCalls && { parallel_tool_calls: false }),
181
+ ...(forwardedParameters?.temperature && { temperature: forwardedParameters.temperature }),
182
+ });
155
183
 
156
- eventSource.stream(async (eventStream$) => {
157
- let mode: "function" | "message" | null = null;
158
- let currentMessageId: string;
159
- let currentToolCallId: string;
160
- for await (const chunk of stream) {
161
- if (chunk.choices.length === 0) {
162
- continue;
163
- }
184
+ eventSource.stream(async (eventStream$) => {
185
+ let mode: "function" | "message" | null = null;
186
+ let currentMessageId: string;
187
+ let currentToolCallId: string;
164
188
 
165
- const toolCall = chunk.choices[0].delta.tool_calls?.[0];
166
- const content = chunk.choices[0].delta.content;
167
-
168
- // When switching from message to function or vice versa,
169
- // send the respective end event.
170
- // If toolCall?.id is defined, it means a new tool call starts.
171
- if (mode === "message" && toolCall?.id) {
172
- mode = null;
173
- eventStream$.sendTextMessageEnd({ messageId: currentMessageId });
174
- } else if (mode === "function" && (toolCall === undefined || toolCall?.id)) {
175
- mode = null;
176
- eventStream$.sendActionExecutionEnd({ actionExecutionId: currentToolCallId });
177
- }
189
+ try {
190
+ for await (const chunk of stream) {
191
+ if (chunk.choices.length === 0) {
192
+ continue;
193
+ }
194
+
195
+ const toolCall = chunk.choices[0].delta.tool_calls?.[0];
196
+ const content = chunk.choices[0].delta.content;
197
+
198
+ // When switching from message to function or vice versa,
199
+ // send the respective end event.
200
+ // If toolCall?.id is defined, it means a new tool call starts.
201
+ if (mode === "message" && toolCall?.id) {
202
+ mode = null;
203
+ eventStream$.sendTextMessageEnd({ messageId: currentMessageId });
204
+ } else if (mode === "function" && (toolCall === undefined || toolCall?.id)) {
205
+ mode = null;
206
+ eventStream$.sendActionExecutionEnd({ actionExecutionId: currentToolCallId });
207
+ }
178
208
 
179
- // If we send a new message type, send the appropriate start event.
180
- if (mode === null) {
181
- if (toolCall?.id) {
182
- mode = "function";
183
- currentToolCallId = toolCall!.id;
184
- eventStream$.sendActionExecutionStart({
185
- actionExecutionId: currentToolCallId,
186
- parentMessageId: chunk.id,
187
- actionName: toolCall!.function!.name,
188
- });
189
- } else if (content) {
190
- mode = "message";
191
- currentMessageId = chunk.id;
192
- eventStream$.sendTextMessageStart({ messageId: currentMessageId });
209
+ // If we send a new message type, send the appropriate start event.
210
+ if (mode === null) {
211
+ if (toolCall?.id) {
212
+ mode = "function";
213
+ currentToolCallId = toolCall!.id;
214
+ eventStream$.sendActionExecutionStart({
215
+ actionExecutionId: currentToolCallId,
216
+ parentMessageId: chunk.id,
217
+ actionName: toolCall!.function!.name,
218
+ });
219
+ } else if (content) {
220
+ mode = "message";
221
+ currentMessageId = chunk.id;
222
+ eventStream$.sendTextMessageStart({ messageId: currentMessageId });
223
+ }
224
+ }
225
+
226
+ // send the content events
227
+ if (mode === "message" && content) {
228
+ eventStream$.sendTextMessageContent({
229
+ messageId: currentMessageId,
230
+ content: content,
231
+ });
232
+ } else if (mode === "function" && toolCall?.function?.arguments) {
233
+ eventStream$.sendActionExecutionArgs({
234
+ actionExecutionId: currentToolCallId,
235
+ args: toolCall.function.arguments,
236
+ });
237
+ }
193
238
  }
194
- }
195
239
 
196
- // send the content events
197
- if (mode === "message" && content) {
198
- eventStream$.sendTextMessageContent({
199
- messageId: currentMessageId,
200
- content: content,
201
- });
202
- } else if (mode === "function" && toolCall?.function?.arguments) {
203
- eventStream$.sendActionExecutionArgs({
204
- actionExecutionId: currentToolCallId,
205
- args: toolCall.function.arguments,
206
- });
240
+ // send the end events
241
+ if (mode === "message") {
242
+ eventStream$.sendTextMessageEnd({ messageId: currentMessageId });
243
+ } else if (mode === "function") {
244
+ eventStream$.sendActionExecutionEnd({ actionExecutionId: currentToolCallId });
245
+ }
246
+ } catch (error) {
247
+ console.error("[OpenAI] Error processing stream:", error);
248
+ throw error;
207
249
  }
208
- }
209
-
210
- // send the end events
211
- if (mode === "message") {
212
- eventStream$.sendTextMessageEnd({ messageId: currentMessageId });
213
- } else if (mode === "function") {
214
- eventStream$.sendActionExecutionEnd({ actionExecutionId: currentToolCallId });
215
- }
216
250
 
217
- eventStream$.complete();
218
- });
251
+ eventStream$.complete();
252
+ });
253
+ } catch (error) {
254
+ console.error("[OpenAI] Error during API call:", error);
255
+ throw error;
256
+ }
219
257
 
220
258
  return {
221
259
  threadId,
@@ -0,0 +1,13 @@
1
+ import "@jest/globals";
2
+
3
+ declare global {
4
+ const jest: (typeof import("@jest/globals"))["jest"];
5
+ const expect: (typeof import("@jest/globals"))["expect"];
6
+ const test: (typeof import("@jest/globals"))["test"];
7
+ const describe: (typeof import("@jest/globals"))["describe"];
8
+ const beforeEach: (typeof import("@jest/globals"))["beforeEach"];
9
+ const afterEach: (typeof import("@jest/globals"))["afterEach"];
10
+ const beforeAll: (typeof import("@jest/globals"))["beforeAll"];
11
+ const afterAll: (typeof import("@jest/globals"))["afterAll"];
12
+ const it: (typeof import("@jest/globals"))["it"];
13
+ }