@copilotkit/runtime 1.8.12-next.2 → 1.8.12-next.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/CHANGELOG.md +13 -0
  2. package/dist/{chunk-OZLQ2A5E.mjs → chunk-FA3E4I4W.mjs} +4 -3
  3. package/dist/chunk-FA3E4I4W.mjs.map +1 -0
  4. package/dist/{chunk-FDGTTGQU.mjs → chunk-KGZF7KSR.mjs} +2 -2
  5. package/dist/{chunk-VQSVMSXZ.mjs → chunk-MG576PIZ.mjs} +2 -2
  6. package/dist/{chunk-Y4H3U52G.mjs → chunk-MVKCCH5U.mjs} +216 -173
  7. package/dist/chunk-MVKCCH5U.mjs.map +1 -0
  8. package/dist/{chunk-V6IQU4D2.mjs → chunk-S5U6J5X2.mjs} +2 -2
  9. package/dist/index.js +217 -173
  10. package/dist/index.js.map +1 -1
  11. package/dist/index.mjs +5 -5
  12. package/dist/lib/index.js +109 -82
  13. package/dist/lib/index.js.map +1 -1
  14. package/dist/lib/index.mjs +5 -5
  15. package/dist/lib/integrations/index.js +2 -1
  16. package/dist/lib/integrations/index.js.map +1 -1
  17. package/dist/lib/integrations/index.mjs +5 -5
  18. package/dist/lib/integrations/nest/index.js +2 -1
  19. package/dist/lib/integrations/nest/index.js.map +1 -1
  20. package/dist/lib/integrations/nest/index.mjs +3 -3
  21. package/dist/lib/integrations/node-express/index.js +2 -1
  22. package/dist/lib/integrations/node-express/index.js.map +1 -1
  23. package/dist/lib/integrations/node-express/index.mjs +3 -3
  24. package/dist/lib/integrations/node-http/index.js +2 -1
  25. package/dist/lib/integrations/node-http/index.js.map +1 -1
  26. package/dist/lib/integrations/node-http/index.mjs +2 -2
  27. package/dist/service-adapters/index.js +215 -172
  28. package/dist/service-adapters/index.js.map +1 -1
  29. package/dist/service-adapters/index.mjs +1 -1
  30. package/jest.config.js +8 -3
  31. package/package.json +3 -2
  32. package/src/service-adapters/anthropic/anthropic-adapter.ts +124 -66
  33. package/src/service-adapters/anthropic/utils.ts +0 -19
  34. package/src/service-adapters/openai/openai-adapter.ts +107 -69
  35. package/tests/global.d.ts +13 -0
  36. package/tests/service-adapters/anthropic/allowlist-approach.test.ts +226 -0
  37. package/tests/service-adapters/anthropic/anthropic-adapter.test.ts +604 -0
  38. package/tests/service-adapters/openai/allowlist-approach.test.ts +238 -0
  39. package/tests/service-adapters/openai/openai-adapter.test.ts +301 -0
  40. package/tests/setup.jest.ts +21 -0
  41. package/tests/tsconfig.json +10 -0
  42. package/tsconfig.json +1 -1
  43. package/dist/chunk-OZLQ2A5E.mjs.map +0 -1
  44. package/dist/chunk-Y4H3U52G.mjs.map +0 -1
  45. package/dist/{chunk-FDGTTGQU.mjs.map → chunk-KGZF7KSR.mjs.map} +0 -0
  46. package/dist/{chunk-VQSVMSXZ.mjs.map → chunk-MG576PIZ.mjs.map} +0 -0
  47. package/dist/{chunk-V6IQU4D2.mjs.map → chunk-S5U6J5X2.mjs.map} +0 -0
@@ -2,7 +2,7 @@ import {
   getCommonConfig,
   getRuntimeInstanceTelemetryInfo,
   telemetry_client_default
-} from "./chunk-OZLQ2A5E.mjs";
+} from "./chunk-FA3E4I4W.mjs";
 import {
   __name
 } from "./chunk-FHD4JECV.mjs";
@@ -77,4 +77,4 @@ export {
   config,
   copilotRuntimeNextJSPagesRouterEndpoint
 };
-//# sourceMappingURL=chunk-FDGTTGQU.mjs.map
+//# sourceMappingURL=chunk-KGZF7KSR.mjs.map
@@ -2,7 +2,7 @@ import {
   copilotRuntimeNodeHttpEndpoint,
   getRuntimeInstanceTelemetryInfo,
   telemetry_client_default
-} from "./chunk-OZLQ2A5E.mjs";
+} from "./chunk-FA3E4I4W.mjs";
 import {
   __name
 } from "./chunk-FHD4JECV.mjs";
@@ -22,4 +22,4 @@ __name(copilotRuntimeNestEndpoint, "copilotRuntimeNestEndpoint");
 export {
   copilotRuntimeNestEndpoint
 };
-//# sourceMappingURL=chunk-VQSVMSXZ.mjs.map
+//# sourceMappingURL=chunk-MG576PIZ.mjs.map
@@ -288,7 +288,23 @@ var OpenAIAdapter = class {
     const { threadId: threadIdFromRequest, model = this.model, messages, actions, eventSource, forwardedParameters } = request;
     const tools = actions.map(convertActionInputToOpenAITool);
     const threadId = threadIdFromRequest ?? randomUUID();
-    let openaiMessages = messages.map((m) => convertMessageToOpenAIMessage(m, {
+    const validToolUseIds = /* @__PURE__ */ new Set();
+    for (const message of messages) {
+      if (message.isActionExecutionMessage()) {
+        validToolUseIds.add(message.id);
+      }
+    }
+    const filteredMessages = messages.filter((message) => {
+      if (message.isResultMessage()) {
+        if (!validToolUseIds.has(message.actionExecutionId)) {
+          return false;
+        }
+        validToolUseIds.delete(message.actionExecutionId);
+        return true;
+      }
+      return true;
+    });
+    let openaiMessages = filteredMessages.map((m) => convertMessageToOpenAIMessage(m, {
      keepSystemRole: this.keepSystemRole
    }));
    openaiMessages = limitMessagesToTokenCount(openaiMessages, tools, model);
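
The added block above is the core of the fix: before converting messages for OpenAI, the adapter collects the ids of all action-execution messages and then drops any tool-result message whose actionExecutionId has no matching tool call, deleting each id as it is matched so a tool call accepts at most one result. A minimal standalone sketch of that filtering, using a hypothetical RuntimeMessage stand-in rather than the runtime's real message classes:

// Hypothetical stand-in for the runtime's message classes (sketch only).
interface RuntimeMessage {
  id: string;
  actionExecutionId?: string;
  isActionExecutionMessage(): boolean;
  isResultMessage(): boolean;
}

function dropOrphanedToolResults(messages: RuntimeMessage[]): RuntimeMessage[] {
  // Every tool call that actually appears in the history is a valid target.
  const validToolUseIds = new Set(
    messages.filter((m) => m.isActionExecutionMessage()).map((m) => m.id),
  );
  return messages.filter((m) => {
    if (!m.isResultMessage()) return true;
    // Drop results that answer a tool call the history never contained.
    if (!m.actionExecutionId || !validToolUseIds.has(m.actionExecutionId)) {
      return false;
    }
    // Deleting the id afterwards keeps at most one result per tool call.
    validToolUseIds.delete(m.actionExecutionId);
    return true;
  });
}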
@@ -301,91 +317,101 @@ var OpenAIAdapter = class {
         }
       };
     }
-    const stream = this.openai.beta.chat.completions.stream({
-      model,
-      stream: true,
-      messages: openaiMessages,
-      ...tools.length > 0 && {
-        tools
-      },
-      ...(forwardedParameters == null ? void 0 : forwardedParameters.maxTokens) && {
-        max_tokens: forwardedParameters.maxTokens
-      },
-      ...(forwardedParameters == null ? void 0 : forwardedParameters.stop) && {
-        stop: forwardedParameters.stop
-      },
-      ...toolChoice && {
-        tool_choice: toolChoice
-      },
-      ...this.disableParallelToolCalls && {
-        parallel_tool_calls: false
-      },
-      ...(forwardedParameters == null ? void 0 : forwardedParameters.temperature) && {
-        temperature: forwardedParameters.temperature
-      }
-    });
-    eventSource.stream(async (eventStream$) => {
-      var _a, _b;
-      let mode = null;
-      let currentMessageId;
-      let currentToolCallId;
-      for await (const chunk of stream) {
-        if (chunk.choices.length === 0) {
-          continue;
-        }
-        const toolCall = (_a = chunk.choices[0].delta.tool_calls) == null ? void 0 : _a[0];
-        const content = chunk.choices[0].delta.content;
-        if (mode === "message" && (toolCall == null ? void 0 : toolCall.id)) {
-          mode = null;
-          eventStream$.sendTextMessageEnd({
-            messageId: currentMessageId
-          });
-        } else if (mode === "function" && (toolCall === void 0 || (toolCall == null ? void 0 : toolCall.id))) {
-          mode = null;
-          eventStream$.sendActionExecutionEnd({
-            actionExecutionId: currentToolCallId
-          });
+    try {
+      const stream = this.openai.beta.chat.completions.stream({
+        model,
+        stream: true,
+        messages: openaiMessages,
+        ...tools.length > 0 && {
+          tools
+        },
+        ...(forwardedParameters == null ? void 0 : forwardedParameters.maxTokens) && {
+          max_tokens: forwardedParameters.maxTokens
+        },
+        ...(forwardedParameters == null ? void 0 : forwardedParameters.stop) && {
+          stop: forwardedParameters.stop
+        },
+        ...toolChoice && {
+          tool_choice: toolChoice
+        },
+        ...this.disableParallelToolCalls && {
+          parallel_tool_calls: false
+        },
+        ...(forwardedParameters == null ? void 0 : forwardedParameters.temperature) && {
+          temperature: forwardedParameters.temperature
         }
-      if (mode === null) {
-        if (toolCall == null ? void 0 : toolCall.id) {
-          mode = "function";
-          currentToolCallId = toolCall.id;
-          eventStream$.sendActionExecutionStart({
-            actionExecutionId: currentToolCallId,
-            parentMessageId: chunk.id,
-            actionName: toolCall.function.name
-          });
-        } else if (content) {
-          mode = "message";
-          currentMessageId = chunk.id;
-          eventStream$.sendTextMessageStart({
+      });
+      eventSource.stream(async (eventStream$) => {
+        var _a, _b;
+        let mode = null;
+        let currentMessageId;
+        let currentToolCallId;
+        try {
+          for await (const chunk of stream) {
+            if (chunk.choices.length === 0) {
+              continue;
+            }
+            const toolCall = (_a = chunk.choices[0].delta.tool_calls) == null ? void 0 : _a[0];
+            const content = chunk.choices[0].delta.content;
+            if (mode === "message" && (toolCall == null ? void 0 : toolCall.id)) {
+              mode = null;
+              eventStream$.sendTextMessageEnd({
+                messageId: currentMessageId
+              });
+            } else if (mode === "function" && (toolCall === void 0 || (toolCall == null ? void 0 : toolCall.id))) {
+              mode = null;
+              eventStream$.sendActionExecutionEnd({
+                actionExecutionId: currentToolCallId
+              });
+            }
+            if (mode === null) {
+              if (toolCall == null ? void 0 : toolCall.id) {
+                mode = "function";
+                currentToolCallId = toolCall.id;
+                eventStream$.sendActionExecutionStart({
+                  actionExecutionId: currentToolCallId,
+                  parentMessageId: chunk.id,
+                  actionName: toolCall.function.name
+                });
+              } else if (content) {
+                mode = "message";
+                currentMessageId = chunk.id;
+                eventStream$.sendTextMessageStart({
+                  messageId: currentMessageId
+                });
+              }
+            }
+            if (mode === "message" && content) {
+              eventStream$.sendTextMessageContent({
+                messageId: currentMessageId,
+                content
+              });
+            } else if (mode === "function" && ((_b = toolCall == null ? void 0 : toolCall.function) == null ? void 0 : _b.arguments)) {
+              eventStream$.sendActionExecutionArgs({
+                actionExecutionId: currentToolCallId,
+                args: toolCall.function.arguments
+              });
+            }
+          }
+          if (mode === "message") {
+            eventStream$.sendTextMessageEnd({
               messageId: currentMessageId
             });
+          } else if (mode === "function") {
+            eventStream$.sendActionExecutionEnd({
+              actionExecutionId: currentToolCallId
+            });
           }
+        } catch (error) {
+          console.error("[OpenAI] Error processing stream:", error);
+          throw error;
         }
-      if (mode === "message" && content) {
-        eventStream$.sendTextMessageContent({
-          messageId: currentMessageId,
-          content
-        });
-      } else if (mode === "function" && ((_b = toolCall == null ? void 0 : toolCall.function) == null ? void 0 : _b.arguments)) {
-        eventStream$.sendActionExecutionArgs({
-          actionExecutionId: currentToolCallId,
-          args: toolCall.function.arguments
-        });
-      }
-    }
-    if (mode === "message") {
-      eventStream$.sendTextMessageEnd({
-        messageId: currentMessageId
-      });
-    } else if (mode === "function") {
-      eventStream$.sendActionExecutionEnd({
-        actionExecutionId: currentToolCallId
-      });
-    }
-    eventStream$.complete();
-    });
+        eventStream$.complete();
+      });
+    } catch (error) {
+      console.error("[OpenAI] Error during API call:", error);
+      throw error;
+    }
     return {
       threadId
     };
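
Beyond the reindent, the substantive change in this hunk is that both the request construction and the per-chunk loop are now wrapped in try/catch blocks that log with an adapter prefix and rethrow, so failures still propagate to the runtime while leaving a trace in the logs. A hedged sketch of that log-then-rethrow pattern; withLoggedErrors is a hypothetical helper, not an export of this package:

// Hypothetical helper illustrating the log-then-rethrow pattern used above.
// Logging is additive, not a recovery path, so callers still see the error.
async function withLoggedErrors<T>(label: string, fn: () => Promise<T>): Promise<T> {
  try {
    return await fn();
  } catch (error) {
    console.error(`[${label}] Error during API call:`, error);
    throw error;
  }
}

// e.g. const result = await withLoggedErrors("OpenAI", () => callModel());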
@@ -1268,23 +1294,6 @@ function convertMessageToAnthropicMessage(message) {
   }
 }
 __name(convertMessageToAnthropicMessage, "convertMessageToAnthropicMessage");
-function groupAnthropicMessagesByRole(messageParams) {
-  return messageParams.reduce((acc, message) => {
-    const lastGroup = acc[acc.length - 1];
-    if (lastGroup && lastGroup.role === message.role) {
-      lastGroup.content = lastGroup.content.concat(message.content);
-    } else {
-      acc.push({
-        role: message.role,
-        content: [
-          ...message.content
-        ]
-      });
-    }
-    return acc;
-  }, []);
-}
-__name(groupAnthropicMessagesByRole, "groupAnthropicMessagesByRole");

 // src/service-adapters/anthropic/anthropic-adapter.ts
 import { randomId as randomId3, randomUUID as randomUUID5 } from "@copilotkit/shared";
@@ -1309,9 +1318,32 @@ var AnthropicAdapter = class {
     ];
     const instructionsMessage = messages.shift();
     const instructions = instructionsMessage.isTextMessage() ? instructionsMessage.content : "";
-    let anthropicMessages = messages.map(convertMessageToAnthropicMessage);
-    anthropicMessages = limitMessagesToTokenCount2(anthropicMessages, tools, model);
-    anthropicMessages = groupAnthropicMessagesByRole(anthropicMessages);
+    const validToolUseIds = /* @__PURE__ */ new Set();
+    for (const message of messages) {
+      if (message.isActionExecutionMessage()) {
+        validToolUseIds.add(message.id);
+      }
+    }
+    const anthropicMessages = messages.map((message) => {
+      if (message.isResultMessage()) {
+        if (!validToolUseIds.has(message.actionExecutionId)) {
+          return null;
+        }
+        validToolUseIds.delete(message.actionExecutionId);
+        return {
+          role: "user",
+          content: [
+            {
+              type: "tool_result",
+              content: message.result,
+              tool_use_id: message.actionExecutionId
+            }
+          ]
+        };
+      }
+      return convertMessageToAnthropicMessage(message);
+    }).filter(Boolean);
+    const limitedMessages = limitMessagesToTokenCount2(anthropicMessages, tools, model);
     let toolChoice = forwardedParameters == null ? void 0 : forwardedParameters.toolChoice;
     if ((forwardedParameters == null ? void 0 : forwardedParameters.toolChoice) === "function") {
       toolChoice = {
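
The Anthropic adapter applies the same allowlist, but converts each surviving tool result inline into a user message carrying a tool_result content block, which is why the groupAnthropicMessagesByRole helper removed earlier is no longer needed; token limiting now runs on the already-filtered list. A sketch of the emitted shape, with a hypothetical resultMessage standing in for the runtime's result message:

// Sketch of the tool_result message the adapter now emits inline. Field names
// follow Anthropic's Messages API; resultMessage is a hypothetical stand-in.
const resultMessage = { result: "42", actionExecutionId: "toolu_123" };
const toolResultMessage = {
  role: "user" as const,
  content: [
    {
      type: "tool_result" as const,
      content: resultMessage.result,
      tool_use_id: resultMessage.actionExecutionId,
    },
  ],
};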
@@ -1319,82 +1351,93 @@ var AnthropicAdapter = class {
         name: forwardedParameters.toolChoiceFunctionName
       };
     }
-    const stream = this.anthropic.messages.create({
-      system: instructions,
-      model: this.model,
-      messages: anthropicMessages,
-      max_tokens: (forwardedParameters == null ? void 0 : forwardedParameters.maxTokens) || 1024,
-      ...(forwardedParameters == null ? void 0 : forwardedParameters.temperature) ? {
-        temperature: forwardedParameters.temperature
-      } : {},
-      ...tools.length > 0 && {
-        tools
-      },
-      ...toolChoice && {
-        tool_choice: toolChoice
-      },
-      stream: true
-    });
-    eventSource.stream(async (eventStream$) => {
-      let mode = null;
-      let didOutputText = false;
-      let currentMessageId = randomId3();
-      let currentToolCallId = randomId3();
-      let filterThinkingTextBuffer = new FilterThinkingTextBuffer();
-      for await (const chunk of await stream) {
-        if (chunk.type === "message_start") {
-          currentMessageId = chunk.message.id;
-        } else if (chunk.type === "content_block_start") {
-          if (chunk.content_block.type === "text") {
-            didOutputText = false;
-            filterThinkingTextBuffer.reset();
-            mode = "message";
-          } else if (chunk.content_block.type === "tool_use") {
-            currentToolCallId = chunk.content_block.id;
-            eventStream$.sendActionExecutionStart({
-              actionExecutionId: currentToolCallId,
-              actionName: chunk.content_block.name,
-              parentMessageId: currentMessageId
-            });
-            mode = "function";
-          }
-        } else if (chunk.type === "content_block_delta") {
-          if (chunk.delta.type === "text_delta") {
-            const text = filterThinkingTextBuffer.onTextChunk(chunk.delta.text);
-            if (text.length > 0) {
-              if (!didOutputText) {
-                eventStream$.sendTextMessageStart({
-                  messageId: currentMessageId
+    try {
+      const createParams = {
+        system: instructions,
+        model: this.model,
+        messages: limitedMessages,
+        max_tokens: (forwardedParameters == null ? void 0 : forwardedParameters.maxTokens) || 1024,
+        ...(forwardedParameters == null ? void 0 : forwardedParameters.temperature) ? {
+          temperature: forwardedParameters.temperature
+        } : {},
+        ...tools.length > 0 && {
+          tools
+        },
+        ...toolChoice && {
+          tool_choice: toolChoice
+        },
+        stream: true
+      };
+      const stream = await this.anthropic.messages.create(createParams);
+      eventSource.stream(async (eventStream$) => {
+        let mode = null;
+        let didOutputText = false;
+        let currentMessageId = randomId3();
+        let currentToolCallId = randomId3();
+        let filterThinkingTextBuffer = new FilterThinkingTextBuffer();
+        try {
+          for await (const chunk of stream) {
+            if (chunk.type === "message_start") {
+              currentMessageId = chunk.message.id;
+            } else if (chunk.type === "content_block_start") {
+              if (chunk.content_block.type === "text") {
+                didOutputText = false;
+                filterThinkingTextBuffer.reset();
+                mode = "message";
+              } else if (chunk.content_block.type === "tool_use") {
+                currentToolCallId = chunk.content_block.id;
+                eventStream$.sendActionExecutionStart({
+                  actionExecutionId: currentToolCallId,
+                  actionName: chunk.content_block.name,
+                  parentMessageId: currentMessageId
+                });
+                mode = "function";
+              }
+            } else if (chunk.type === "content_block_delta") {
+              if (chunk.delta.type === "text_delta") {
+                const text = filterThinkingTextBuffer.onTextChunk(chunk.delta.text);
+                if (text.length > 0) {
+                  if (!didOutputText) {
+                    eventStream$.sendTextMessageStart({
+                      messageId: currentMessageId
+                    });
+                    didOutputText = true;
+                  }
+                  eventStream$.sendTextMessageContent({
+                    messageId: currentMessageId,
+                    content: text
+                  });
+                }
+              } else if (chunk.delta.type === "input_json_delta") {
+                eventStream$.sendActionExecutionArgs({
+                  actionExecutionId: currentToolCallId,
+                  args: chunk.delta.partial_json
+                });
+              }
+            } else if (chunk.type === "content_block_stop") {
+              if (mode === "message") {
+                if (didOutputText) {
+                  eventStream$.sendTextMessageEnd({
+                    messageId: currentMessageId
+                  });
+                }
+              } else if (mode === "function") {
+                eventStream$.sendActionExecutionEnd({
+                  actionExecutionId: currentToolCallId
                 });
-                didOutputText = true;
               }
-              eventStream$.sendTextMessageContent({
-                messageId: currentMessageId,
-                content: text
-              });
-            }
-          } else if (chunk.delta.type === "input_json_delta") {
-            eventStream$.sendActionExecutionArgs({
-              actionExecutionId: currentToolCallId,
-              args: chunk.delta.partial_json
-            });
-          }
-        } else if (chunk.type === "content_block_stop") {
-          if (mode === "message") {
-            if (didOutputText) {
-              eventStream$.sendTextMessageEnd({
-                messageId: currentMessageId
-              });
            }
-          } else if (mode === "function") {
-            eventStream$.sendActionExecutionEnd({
-              actionExecutionId: currentToolCallId
-            });
          }
+        } catch (error) {
+          console.error("[Anthropic] Error processing stream:", error);
+          throw error;
        }
-      }
-      eventStream$.complete();
-    });
+        eventStream$.complete();
+      });
+    } catch (error) {
+      console.error("[Anthropic] Error during API call:", error);
+      throw error;
+    }
     return {
       threadId: threadId || randomUUID5()
     };
@@ -1503,4 +1546,4 @@ export {
   EmptyAdapter,
   ExperimentalEmptyAdapter
 };
-//# sourceMappingURL=chunk-Y4H3U52G.mjs.map
+//# sourceMappingURL=chunk-MVKCCH5U.mjs.map