@copilotkit/runtime 1.5.9 → 1.5.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. package/CHANGELOG.md +22 -0
  2. package/__snapshots__/schema/schema.graphql +273 -0
  3. package/dist/chunk-44O2JGUY.mjs +12 -0
  4. package/dist/chunk-44O2JGUY.mjs.map +1 -0
  5. package/dist/chunk-CLGKEUOA.mjs +1408 -0
  6. package/dist/chunk-CLGKEUOA.mjs.map +1 -0
  7. package/dist/chunk-D2WLFQS6.mjs +43 -0
  8. package/dist/chunk-D2WLFQS6.mjs.map +1 -0
  9. package/dist/chunk-DFOKBSIS.mjs +1 -0
  10. package/dist/chunk-DFOKBSIS.mjs.map +1 -0
  11. package/dist/chunk-EH6BECEX.mjs +25 -0
  12. package/dist/chunk-EH6BECEX.mjs.map +1 -0
  13. package/dist/chunk-HKF5IS6J.mjs +3395 -0
  14. package/dist/chunk-HKF5IS6J.mjs.map +1 -0
  15. package/dist/chunk-O3VDLEP4.mjs +80 -0
  16. package/dist/chunk-O3VDLEP4.mjs.map +1 -0
  17. package/dist/chunk-RFF5IIZJ.mjs +66 -0
  18. package/dist/chunk-RFF5IIZJ.mjs.map +1 -0
  19. package/dist/chunk-U3V2BCGI.mjs +152 -0
  20. package/dist/chunk-U3V2BCGI.mjs.map +1 -0
  21. package/dist/chunk-XFOOQKZE.mjs +25 -0
  22. package/dist/chunk-XFOOQKZE.mjs.map +1 -0
  23. package/dist/copilot-runtime-36700e00.d.ts +196 -0
  24. package/dist/graphql/types/base/index.d.ts +6 -0
  25. package/dist/graphql/types/base/index.js +63 -0
  26. package/dist/graphql/types/base/index.js.map +1 -0
  27. package/dist/graphql/types/base/index.mjs +8 -0
  28. package/dist/graphql/types/base/index.mjs.map +1 -0
  29. package/dist/graphql/types/converted/index.d.ts +2 -0
  30. package/dist/graphql/types/converted/index.js +124 -0
  31. package/dist/graphql/types/converted/index.js.map +1 -0
  32. package/dist/graphql/types/converted/index.mjs +17 -0
  33. package/dist/graphql/types/converted/index.mjs.map +1 -0
  34. package/dist/groq-adapter-696b5d29.d.ts +281 -0
  35. package/dist/index-cc2b17be.d.ts +87 -0
  36. package/dist/index.d.ts +23 -0
  37. package/dist/index.js +5153 -0
  38. package/dist/index.js.map +1 -0
  39. package/dist/index.mjs +76 -0
  40. package/dist/index.mjs.map +1 -0
  41. package/dist/langserve-9125a12e.d.ts +176 -0
  42. package/dist/lib/cloud/index.d.ts +6 -0
  43. package/dist/lib/cloud/index.js +18 -0
  44. package/dist/lib/cloud/index.js.map +1 -0
  45. package/dist/lib/cloud/index.mjs +1 -0
  46. package/dist/lib/cloud/index.mjs.map +1 -0
  47. package/dist/lib/index.d.ts +20 -0
  48. package/dist/lib/index.js +4801 -0
  49. package/dist/lib/index.js.map +1 -0
  50. package/dist/lib/index.mjs +58 -0
  51. package/dist/lib/index.mjs.map +1 -0
  52. package/dist/lib/integrations/index.d.ts +33 -0
  53. package/dist/lib/integrations/index.js +2166 -0
  54. package/dist/lib/integrations/index.js.map +1 -0
  55. package/dist/lib/integrations/index.mjs +34 -0
  56. package/dist/lib/integrations/index.mjs.map +1 -0
  57. package/dist/lib/integrations/nest/index.d.ts +14 -0
  58. package/dist/lib/integrations/nest/index.js +2075 -0
  59. package/dist/lib/integrations/nest/index.js.map +1 -0
  60. package/dist/lib/integrations/nest/index.mjs +13 -0
  61. package/dist/lib/integrations/nest/index.mjs.map +1 -0
  62. package/dist/lib/integrations/node-express/index.d.ts +14 -0
  63. package/dist/lib/integrations/node-express/index.js +2075 -0
  64. package/dist/lib/integrations/node-express/index.js.map +1 -0
  65. package/dist/lib/integrations/node-express/index.mjs +13 -0
  66. package/dist/lib/integrations/node-express/index.mjs.map +1 -0
  67. package/dist/lib/integrations/node-http/index.d.ts +14 -0
  68. package/dist/lib/integrations/node-http/index.js +2061 -0
  69. package/dist/lib/integrations/node-http/index.js.map +1 -0
  70. package/dist/lib/integrations/node-http/index.mjs +12 -0
  71. package/dist/lib/integrations/node-http/index.mjs.map +1 -0
  72. package/dist/service-adapters/index.d.ts +84 -0
  73. package/dist/service-adapters/index.js +1448 -0
  74. package/dist/service-adapters/index.js.map +1 -0
  75. package/dist/service-adapters/index.mjs +26 -0
  76. package/dist/service-adapters/index.mjs.map +1 -0
  77. package/dist/utils/index.d.ts +49 -0
  78. package/dist/utils/index.js +174 -0
  79. package/dist/utils/index.js.map +1 -0
  80. package/dist/utils/index.mjs +12 -0
  81. package/dist/utils/index.mjs.map +1 -0
  82. package/package.json +2 -2
@@ -0,0 +1,1448 @@
1
// Generated esbuild CommonJS/ESM interop helpers, restored to executable form
// (the pasted diff's "+" markers and interleaved line numbers are removed).
var __create = Object.create;
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getProtoOf = Object.getPrototypeOf;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Re-applies a function/class `.name` (esbuild "keep names" shim); a no-op
// for declarations that already carry the same name.
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
// Installs lazy, enumerable getters on `target` for every key in `all`.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copies own properties of `from` onto `to` as getters, skipping `except`
// and keys already present on `to`; preserves enumerability.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Wraps a CommonJS module for ESM consumption. If the importer is in node
// compatibility mode or this is not an ESM file that has been converted to a
// CommonJS file using a Babel-compatible transform (i.e. "__esModule" has not
// been set), then set "default" to the CommonJS "module.exports" for node
// compatibility.
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
  isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
  mod
));
// Marks an exports object as an ES module and copies its properties onto it.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
29
+
30
// src/service-adapters/index.ts
// Public surface of the service-adapters entry point. __export installs lazy
// getters, so the adapter classes (defined later in this bundle) are resolved
// on first property access rather than at module-evaluation time.
var service_adapters_exports = {};
__export(service_adapters_exports, {
  AnthropicAdapter: () => AnthropicAdapter,
  ExperimentalEmptyAdapter: () => ExperimentalEmptyAdapter,
  ExperimentalOllamaAdapter: () => ExperimentalOllamaAdapter,
  GoogleGenerativeAIAdapter: () => GoogleGenerativeAIAdapter,
  GroqAdapter: () => GroqAdapter,
  LangChainAdapter: () => LangChainAdapter,
  OpenAIAdapter: () => OpenAIAdapter,
  OpenAIAssistantAdapter: () => OpenAIAssistantAdapter,
  RemoteChain: () => RemoteChain,
  UnifyAdapter: () => UnifyAdapter
});
module.exports = __toCommonJS(service_adapters_exports);
45
+
46
// src/service-adapters/langchain/langserve.ts
var import_remote = require("langchain/runnables/remote");
/**
 * Wraps a LangServe chain endpoint as a CopilotKit action. When no parameter
 * schema is supplied, it is inferred from the chain's /input_schema endpoint.
 */
var RemoteChain = class {
  name;
  description;
  chainUrl;
  parameters;
  // "single": the chain takes one bare value; "multi": a keyword-args object.
  parameterType;
  constructor(options) {
    this.name = options.name;
    this.description = options.description;
    this.chainUrl = options.chainUrl;
    this.parameters = options.parameters;
    this.parameterType = options.parameterType || "multi";
  }
  // Builds the action descriptor, lazily inferring parameters when absent.
  async toAction() {
    if (!this.parameters) {
      await this.inferLangServeParameters();
    }
    return {
      name: this.name,
      description: this.description,
      parameters: this.parameters,
      handler: async (args) => {
        const runnable = new import_remote.RemoteRunnable({
          url: this.chainUrl
        });
        let input;
        if (this.parameterType === "single") {
          // Single-input chains receive the first argument's value directly.
          input = args[Object.keys(args)[0]];
        } else {
          input = args;
        }
        return await runnable.invoke(input);
      }
    };
  }
  // Fetches <chainUrl>/input_schema and derives the action parameters.
  // Only primitive leaf types (string/number/boolean) are supported.
  async inferLangServeParameters() {
    const supportedTypes = ["string", "number", "boolean"];
    let schemaUrl = this.chainUrl.replace(/\/+$/, "") + "/input_schema";
    let schema = await fetch(schemaUrl).then((res) => res.json()).catch((err) => {
      // BUG FIX: previously the original error was discarded; keep it as the
      // `cause` so network/parse failures remain debuggable.
      throw new Error("Failed to fetch langserve schema at " + schemaUrl, { cause: err });
    });
    if (supportedTypes.includes(schema.type)) {
      this.parameterType = "single";
      this.parameters = [
        {
          name: "input",
          type: schema.type,
          description: "The input to the chain"
        }
      ];
    } else if (schema.type === "object") {
      this.parameterType = "multi";
      this.parameters = Object.keys(schema.properties).map((key) => {
        let property = schema.properties[key];
        if (!supportedTypes.includes(property.type)) {
          throw new Error("Unsupported schema type");
        }
        return {
          name: key,
          type: property.type,
          description: property.description || "",
          required: schema.required?.includes(key) || false
        };
      });
    } else {
      throw new Error("Unsupported schema type");
    }
  }
};
__name(RemoteChain, "RemoteChain");
123
+
124
+ // src/service-adapters/openai/openai-adapter.ts
125
+ var import_openai = __toESM(require("openai"));
126
+
127
+ // src/service-adapters/openai/utils.ts
128
// src/service-adapters/openai/utils.ts
// NOTE: the generated `__name(fn, "fn")` keep-name shims were removed here;
// they are no-ops for named function declarations.

/**
 * Trims `messages` (keeping the newest first) so that their estimated token
 * count, plus the serialized tool definitions, fits within `maxTokens` for
 * `model`. System messages are always retained and budgeted up front.
 * Throws if the tools alone, or the system messages alone, exceed the budget.
 */
function limitMessagesToTokenCount(messages, tools, model, maxTokens) {
  maxTokens || (maxTokens = maxTokensForOpenAIModel(model));
  const result = [];
  const toolsNumTokens = countToolsTokens(model, tools);
  if (toolsNumTokens > maxTokens) {
    throw new Error(`Too many tokens in function definitions: ${toolsNumTokens} > ${maxTokens}`);
  }
  maxTokens -= toolsNumTokens;
  // Reserve budget for every system message before considering the rest.
  for (const message of messages) {
    if (message.role === "system") {
      const numTokens = countMessageTokens(model, message);
      maxTokens -= numTokens;
      if (maxTokens < 0) {
        throw new Error("Not enough tokens for system message.");
      }
    }
  }
  // Walk from the newest message backwards; once one message doesn't fit,
  // all older non-system messages are dropped as well.
  let cutoff = false;
  const reversedMessages = [...messages].reverse();
  for (const message of reversedMessages) {
    if (message.role === "system") {
      result.unshift(message);
      continue;
    } else if (cutoff) {
      continue;
    }
    let numTokens = countMessageTokens(model, message);
    if (maxTokens < numTokens) {
      cutoff = true;
      continue;
    }
    result.unshift(message);
    maxTokens -= numTokens;
  }
  return result;
}

// Context window size for `model`, falling back to a permissive default.
function maxTokensForOpenAIModel(model) {
  return maxTokensByModel[model] || DEFAULT_MAX_TOKENS;
}
var DEFAULT_MAX_TOKENS = 128e3;
var maxTokensByModel = {
  // GPT-4
  "gpt-4o": 128e3,
  "gpt-4o-2024-05-13": 128e3,
  "gpt-4-turbo": 128e3,
  "gpt-4-turbo-2024-04-09": 128e3,
  "gpt-4-0125-preview": 128e3,
  "gpt-4-turbo-preview": 128e3,
  "gpt-4-1106-preview": 128e3,
  "gpt-4-vision-preview": 128e3,
  "gpt-4-1106-vision-preview": 128e3,
  "gpt-4-32k": 32768,
  "gpt-4-32k-0613": 32768,
  "gpt-4-32k-0314": 32768,
  "gpt-4": 8192,
  "gpt-4-0613": 8192,
  "gpt-4-0314": 8192,
  // GPT-3.5
  "gpt-3.5-turbo-0125": 16385,
  "gpt-3.5-turbo": 16385,
  "gpt-3.5-turbo-1106": 16385,
  "gpt-3.5-turbo-instruct": 4096,
  "gpt-3.5-turbo-16k": 16385,
  "gpt-3.5-turbo-0613": 4096,
  "gpt-3.5-turbo-16k-0613": 16385,
  "gpt-3.5-turbo-0301": 4097
};

// Estimated token cost of the serialized tool definitions (0 when empty).
function countToolsTokens(model, tools) {
  if (tools.length === 0) {
    return 0;
  }
  const json = JSON.stringify(tools);
  return countTokens(model, json);
}

// Estimated token cost of a single message's text content.
function countMessageTokens(model, message) {
  return countTokens(model, message.content || "");
}

// Crude token estimate: ~3 characters per token (model argument unused).
function countTokens(model, text) {
  return text.length / 3;
}

/** Converts a CopilotKit action input into an OpenAI tool definition. */
function convertActionInputToOpenAITool(action) {
  return {
    type: "function",
    function: {
      name: action.name,
      description: action.description,
      parameters: JSON.parse(action.jsonSchema)
    }
  };
}

/**
 * Converts a CopilotKit message into the OpenAI chat message shape.
 * BUG FIX: previously returned `undefined` for unrecognized message kinds,
 * which surfaced later as opaque TypeErrors (e.g. reading `.role` of
 * undefined in limitMessagesToTokenCount); now fails fast with a clear error.
 */
function convertMessageToOpenAIMessage(message) {
  if (message.isTextMessage()) {
    return {
      role: message.role,
      content: message.content
    };
  } else if (message.isActionExecutionMessage()) {
    return {
      role: "assistant",
      tool_calls: [
        {
          id: message.id,
          type: "function",
          function: {
            name: message.name,
            arguments: JSON.stringify(message.arguments)
          }
        }
      ]
    };
  } else if (message.isResultMessage()) {
    return {
      role: "tool",
      content: message.result,
      tool_call_id: message.actionExecutionId
    };
  }
  throw new Error("Unknown message type cannot be converted to an OpenAI message");
}

// The Assistants API has no "system" role; forward system content as an
// assistant message carrying an explicit marker prefix.
function convertSystemMessageToAssistantAPI(message) {
  return {
    ...message,
    ...message.role === "system" && {
      role: "assistant",
      content: "THE FOLLOWING MESSAGE IS A SYSTEM MESSAGE: " + message.content
    }
  };
}
265
+
266
// src/service-adapters/openai/openai-adapter.ts
var import_shared = require("@copilotkit/shared");
var DEFAULT_MODEL = "gpt-4o";
/**
 * Service adapter that streams chat completions from the OpenAI API and
 * relays them as CopilotKit text-message / action-execution events.
 */
var OpenAIAdapter = class {
  model = DEFAULT_MODEL;
  disableParallelToolCalls = false;
  _openai;
  get openai() {
    return this._openai;
  }
  constructor(params) {
    this._openai = params?.openai || new import_openai.default({});
    if (params?.model) {
      this.model = params.model;
    }
    this.disableParallelToolCalls = params?.disableParallelToolCalls || false;
  }
  async process(request) {
    const { threadId, model = this.model, messages, actions, eventSource, forwardedParameters } = request;
    const tools = actions.map(convertActionInputToOpenAITool);
    let openaiMessages = messages.map(convertMessageToOpenAIMessage);
    openaiMessages = limitMessagesToTokenCount(openaiMessages, tools, model);
    // The "function" tool choice must be expanded into the object form the
    // OpenAI API expects.
    let toolChoice = forwardedParameters?.toolChoice;
    if (forwardedParameters?.toolChoice === "function") {
      toolChoice = {
        type: "function",
        function: {
          name: forwardedParameters.toolChoiceFunctionName
        }
      };
    }
    const stream = this.openai.beta.chat.completions.stream({
      model,
      stream: true,
      messages: openaiMessages,
      ...(tools.length > 0 && { tools }),
      ...(forwardedParameters?.maxTokens && { max_tokens: forwardedParameters.maxTokens }),
      ...(forwardedParameters?.stop && { stop: forwardedParameters.stop }),
      ...(toolChoice && { tool_choice: toolChoice }),
      ...(this.disableParallelToolCalls && { parallel_tool_calls: false }),
      // BUG FIX: the original truthiness check silently dropped a valid
      // `temperature: 0`; an explicit null check forwards it.
      ...(forwardedParameters?.temperature != null && { temperature: forwardedParameters.temperature })
    });
    eventSource.stream(async (eventStream$) => {
      // State machine: null = between items; "message" = inside a text
      // message; "function" = inside a tool-call argument stream.
      let mode = null;
      let currentMessageId;
      let currentToolCallId;
      for await (const chunk of stream) {
        if (chunk.choices.length === 0) {
          continue;
        }
        const toolCall = chunk.choices[0].delta.tool_calls?.[0];
        const content = chunk.choices[0].delta.content;
        // Close the open message/tool-call when the stream switches kinds.
        if (mode === "message" && toolCall?.id) {
          mode = null;
          eventStream$.sendTextMessageEnd({
            messageId: currentMessageId
          });
        } else if (mode === "function" && (toolCall === undefined || toolCall?.id)) {
          mode = null;
          eventStream$.sendActionExecutionEnd({
            actionExecutionId: currentToolCallId
          });
        }
        if (mode === null) {
          if (toolCall?.id) {
            mode = "function";
            currentToolCallId = toolCall.id;
            eventStream$.sendActionExecutionStart({
              actionExecutionId: currentToolCallId,
              parentMessageId: chunk.id,
              actionName: toolCall.function.name
            });
          } else if (content) {
            mode = "message";
            currentMessageId = chunk.id;
            eventStream$.sendTextMessageStart({
              messageId: currentMessageId
            });
          }
        }
        if (mode === "message" && content) {
          eventStream$.sendTextMessageContent({
            messageId: currentMessageId,
            content
          });
        } else if (mode === "function" && toolCall?.function?.arguments) {
          eventStream$.sendActionExecutionArgs({
            actionExecutionId: currentToolCallId,
            args: toolCall.function.arguments
          });
        }
      }
      // Flush whatever was still open when the stream ended.
      if (mode === "message") {
        eventStream$.sendTextMessageEnd({
          messageId: currentMessageId
        });
      } else if (mode === "function") {
        eventStream$.sendActionExecutionEnd({
          actionExecutionId: currentToolCallId
        });
      }
      eventStream$.complete();
    });
    return {
      threadId: threadId || (0, import_shared.randomId)()
    };
  }
};
__name(OpenAIAdapter, "OpenAIAdapter");
388
+
389
// src/service-adapters/langchain/utils.ts
var import_messages = require("@langchain/core/messages");
var import_tools = require("@langchain/core/tools");
var import_shared2 = require("@copilotkit/shared");
/**
 * Maps a CopilotKit message onto the corresponding LangChain message class.
 * Note: returns undefined for message kinds with no LangChain equivalent.
 */
function convertMessageToLangChainMessage(message) {
  if (message.isTextMessage()) {
    if (message.role == "user") {
      return new import_messages.HumanMessage(message.content);
    } else if (message.role == "assistant") {
      return new import_messages.AIMessage(message.content);
    } else if (message.role === "system") {
      return new import_messages.SystemMessage(message.content);
    }
  } else if (message.isActionExecutionMessage()) {
    return new import_messages.AIMessage({
      content: "",
      tool_calls: [
        {
          id: message.id,
          args: message.arguments,
          name: message.name
        }
      ]
    });
  } else if (message.isResultMessage()) {
    return new import_messages.ToolMessage({
      content: message.result,
      tool_call_id: message.actionExecutionId
    });
  }
}
__name(convertMessageToLangChainMessage, "convertMessageToLangChainMessage");
/**
 * Wraps a CopilotKit action input as a LangChain structured tool. The tool
 * body is a stub (returns "") because execution happens on the client side;
 * the schema is derived from the action's JSON schema.
 */
function convertActionInputToLangChainTool(actionInput) {
  return new import_tools.DynamicStructuredTool({
    name: actionInput.name,
    description: actionInput.description,
    schema: (0, import_shared2.convertJsonSchemaToZodSchema)(JSON.parse(actionInput.jsonSchema), true),
    func: async () => {
      return "";
    }
  });
}
__name(convertActionInputToLangChainTool, "convertActionInputToLangChainTool");
432
// Brand checks relying on the Symbol.toStringTag that LangChain message
// classes expose via Object.prototype.toString. (The generated no-op
// `__name` shims were removed.)
function isAIMessage(message) {
  return Object.prototype.toString.call(message) === "[object AIMessage]";
}
function isAIMessageChunk(message) {
  return Object.prototype.toString.call(message) === "[object AIMessageChunk]";
}
function isBaseMessageChunk(message) {
  return Object.prototype.toString.call(message) === "[object BaseMessageChunk]";
}
// When a LangChain handler answers an action execution with a plain message,
// emit a placeholder result so the action round-trip is still closed.
function maybeSendActionExecutionResultIsMessage(eventStream$, actionExecution) {
  if (actionExecution) {
    eventStream$.sendActionExecutionResult({
      actionExecutionId: actionExecution.id,
      actionName: actionExecution.name,
      result: "Sending a message"
    });
  }
}
454
/**
 * Streams a LangChain chain result into CopilotKit events. `result` may be a
 * plain string, an AIMessage, a BaseMessageChunk, or a readable stream of
 * chunks; anything else is JSON-encoded as an action result (when an
 * actionExecution is pending) or rejected. Transpiled `_a.._l` null-guards
 * were restored to equivalent optional chaining; no-op `__name` shims removed.
 */
async function streamLangChainResponse({ result, eventStream$, actionExecution }) {
  if (typeof result === "string") {
    if (!actionExecution) {
      eventStream$.sendTextMessage((0, import_shared2.randomId)(), result);
    } else {
      eventStream$.sendActionExecutionResult({
        actionExecutionId: actionExecution.id,
        actionName: actionExecution.name,
        result
      });
    }
  } else if (isAIMessage(result)) {
    maybeSendActionExecutionResultIsMessage(eventStream$, actionExecution);
    if (result.content) {
      eventStream$.sendTextMessage((0, import_shared2.randomId)(), result.content);
    }
    for (const toolCall of result.tool_calls) {
      eventStream$.sendActionExecution({
        actionExecutionId: toolCall.id || (0, import_shared2.randomId)(),
        actionName: toolCall.name,
        args: JSON.stringify(toolCall.args)
      });
    }
  } else if (isBaseMessageChunk(result)) {
    maybeSendActionExecutionResultIsMessage(eventStream$, actionExecution);
    if (result.lc_kwargs?.content) {
      eventStream$.sendTextMessage((0, import_shared2.randomId)(), result.content);
    }
    if (result.lc_kwargs?.tool_calls) {
      for (const toolCall of result.lc_kwargs?.tool_calls) {
        eventStream$.sendActionExecution({
          actionExecutionId: toolCall.id || (0, import_shared2.randomId)(),
          actionName: toolCall.name,
          args: JSON.stringify(toolCall.args)
        });
      }
    }
  } else if (result && "getReader" in result) {
    maybeSendActionExecutionResultIsMessage(eventStream$, actionExecution);
    let reader = result.getReader();
    // State machine over streamed chunks: null = between items; "message" =
    // inside a text message; "function" = inside a tool-call argument stream.
    let mode = null;
    let currentMessageId;
    const toolCallDetails = {
      name: null,
      id: null,
      index: null,
      prevIndex: null
    };
    while (true) {
      try {
        const { done, value } = await reader.read();
        let toolCallName = undefined;
        let toolCallId = undefined;
        let toolCallArgs = undefined;
        let hasToolCall = false;
        let content = "";
        if (value && value.content) {
          // Content may be a string or an array of content parts.
          content = Array.isArray(value.content) ? value.content[0]?.text ?? "" : value.content;
        }
        if (isAIMessageChunk(value)) {
          let chunk = value.tool_call_chunks?.[0];
          toolCallArgs = chunk?.args;
          hasToolCall = chunk != undefined;
          if (chunk?.name)
            toolCallDetails.name = chunk.name;
          if (chunk?.index != null) {
            toolCallDetails.index = chunk.index;
            if (toolCallDetails.prevIndex == null)
              toolCallDetails.prevIndex = chunk.index;
          }
          // Suffix the id with the index so parallel tool calls stay distinct.
          if (chunk?.id)
            toolCallDetails.id = chunk.index != null ? `${chunk.id}-idx-${chunk.index}` : chunk.id;
          toolCallName = toolCallDetails.name;
          toolCallId = toolCallDetails.id;
        } else if (isBaseMessageChunk(value)) {
          let chunk = value.additional_kwargs?.tool_calls?.[0];
          toolCallName = chunk?.function?.name;
          toolCallId = chunk?.id;
          toolCallArgs = chunk?.function?.arguments;
          hasToolCall = chunk?.function != undefined;
        }
        // Close the open message/tool-call when the chunk kind switches or
        // the stream finishes.
        if (mode === "message" && (toolCallId || done)) {
          mode = null;
          eventStream$.sendTextMessageEnd({
            messageId: currentMessageId
          });
        } else if (mode === "function" && (!hasToolCall || done)) {
          mode = null;
          eventStream$.sendActionExecutionEnd({
            actionExecutionId: toolCallId
          });
        }
        if (done) {
          break;
        }
        if (mode === null) {
          if (hasToolCall && toolCallId && toolCallName) {
            mode = "function";
            eventStream$.sendActionExecutionStart({
              actionExecutionId: toolCallId,
              actionName: toolCallName,
              parentMessageId: value.lc_kwargs?.id
            });
          } else if (content) {
            mode = "message";
            currentMessageId = value.lc_kwargs?.id || (0, import_shared2.randomId)();
            eventStream$.sendTextMessageStart({
              messageId: currentMessageId
            });
          }
        }
        if (mode === "message" && content) {
          eventStream$.sendTextMessageContent({
            messageId: currentMessageId,
            content
          });
        } else if (mode === "function" && toolCallArgs) {
          // A changed index means a new parallel tool call began: close the
          // previous one and start the next before forwarding arguments.
          if (toolCallDetails.index !== toolCallDetails.prevIndex) {
            eventStream$.sendActionExecutionEnd({
              actionExecutionId: toolCallId
            });
            eventStream$.sendActionExecutionStart({
              actionExecutionId: toolCallId,
              actionName: toolCallName,
              parentMessageId: value.lc_kwargs?.id
            });
            toolCallDetails.prevIndex = toolCallDetails.index;
          }
          eventStream$.sendActionExecutionArgs({
            actionExecutionId: toolCallId,
            args: toolCallArgs
          });
        }
      } catch (error) {
        // Best-effort streaming: log and stop reading rather than crash.
        console.error("Error reading from stream", error);
        break;
      }
    }
  } else if (actionExecution) {
    eventStream$.sendActionExecutionResult({
      actionExecutionId: actionExecution.id,
      actionName: actionExecution.name,
      result: encodeResult(result)
    });
  } else {
    throw new Error("Invalid return type from LangChain function.");
  }
  eventStream$.complete();
}
// Serializes an arbitrary chain result: undefined -> "", string -> itself,
// anything else -> JSON.
function encodeResult(result) {
  if (result === undefined) {
    return "";
  } else if (typeof result === "string") {
    return result;
  } else {
    return JSON.stringify(result);
  }
}
615
+
616
// src/service-adapters/langchain/langchain-adapter.ts
var import_shared3 = require("@copilotkit/shared");
var import_promises = require("@langchain/core/callbacks/promises");
/**
 * Adapter that delegates each request to a user-provided `chainFn` and
 * streams whatever it returns (string, message, or readable stream).
 */
var LangChainAdapter = class {
  options;
  /**
   * To use LangChain as a backend, provide a handler function to the adapter with your custom LangChain logic.
   */
  constructor(options) {
    this.options = options;
  }
  async process(request) {
    try {
      const { eventSource, model, actions, messages, runId } = request;
      const threadId = request.threadId ?? (0, import_shared3.randomId)();
      const result = await this.options.chainFn({
        messages: messages.map(convertMessageToLangChainMessage),
        tools: actions.map(convertActionInputToLangChainTool),
        model,
        threadId,
        runId
      });
      eventSource.stream(async (eventStream$) => {
        await streamLangChainResponse({
          result,
          eventStream$
        });
      });
      return {
        threadId
      };
    } finally {
      // Ensure pending LangChain callback handlers flush even on errors.
      await (0, import_promises.awaitAllCallbacks)();
    }
  }
};
__name(LangChainAdapter, "LangChainAdapter");
653
+
654
// src/service-adapters/google/google-genai-adapter.ts
var import_google_gauth = require("@langchain/google-gauth");
// Thin LangChainAdapter specialization backed by Google's Gemini models
// (defaults to gemini-1.5-pro on the v1beta API).
var GoogleGenerativeAIAdapter = class extends LangChainAdapter {
  constructor(options) {
    super({
      chainFn: async ({ messages, tools, threadId }) => {
        const model = new import_google_gauth.ChatGoogle({
          modelName: options?.model ?? "gemini-1.5-pro",
          apiVersion: "v1beta"
        }).bindTools(tools);
        return model.stream(messages, {
          metadata: {
            conversation_id: threadId
          }
        });
      }
    });
  }
};
__name(GoogleGenerativeAIAdapter, "GoogleGenerativeAIAdapter");
674
+
675
+ // src/service-adapters/openai/openai-assistant-adapter.ts
676
+ var import_openai2 = __toESM(require("openai"));
677
+ var OpenAIAssistantAdapter = class {
678
+ openai;
679
+ codeInterpreterEnabled;
680
+ assistantId;
681
+ fileSearchEnabled;
682
+ disableParallelToolCalls;
683
+ constructor(params) {
684
+ this.openai = params.openai || new import_openai2.default({});
685
+ this.codeInterpreterEnabled = params.codeInterpreterEnabled === false || true;
686
+ this.fileSearchEnabled = params.fileSearchEnabled === false || true;
687
+ this.assistantId = params.assistantId;
688
+ this.disableParallelToolCalls = (params == null ? void 0 : params.disableParallelToolCalls) || false;
689
+ }
690
+ async process(request) {
691
+ const { messages, actions, eventSource, runId, forwardedParameters } = request;
692
+ let threadId = request.threadId || (await this.openai.beta.threads.create()).id;
693
+ const lastMessage = messages.at(-1);
694
+ let nextRunId = void 0;
695
+ if (lastMessage.isResultMessage() && runId) {
696
+ nextRunId = await this.submitToolOutputs(threadId, runId, messages, eventSource);
697
+ } else if (lastMessage.isTextMessage()) {
698
+ nextRunId = await this.submitUserMessage(threadId, messages, actions, eventSource, forwardedParameters);
699
+ } else {
700
+ throw new Error("No actionable message found in the messages");
701
+ }
702
+ return {
703
+ threadId,
704
+ runId: nextRunId
705
+ };
706
+ }
707
+ async submitToolOutputs(threadId, runId, messages, eventSource) {
708
+ let run = await this.openai.beta.threads.runs.retrieve(threadId, runId);
709
+ if (!run.required_action) {
710
+ throw new Error("No tool outputs required");
711
+ }
712
+ const toolCallsIds = run.required_action.submit_tool_outputs.tool_calls.map((toolCall) => toolCall.id);
713
+ const resultMessages = messages.filter((message) => message.isResultMessage() && toolCallsIds.includes(message.actionExecutionId));
714
+ if (toolCallsIds.length != resultMessages.length) {
715
+ throw new Error("Number of function results does not match the number of tool calls");
716
+ }
717
+ const toolOutputs = resultMessages.map((message) => {
718
+ return {
719
+ tool_call_id: message.actionExecutionId,
720
+ output: message.result
721
+ };
722
+ });
723
+ const stream = this.openai.beta.threads.runs.submitToolOutputsStream(threadId, runId, {
724
+ tool_outputs: toolOutputs,
725
+ ...this.disableParallelToolCalls && {
726
+ parallel_tool_calls: false
727
+ }
728
+ });
729
+ await this.streamResponse(stream, eventSource);
730
+ return runId;
731
+ }
732
// Appends the latest user message to the OpenAI Assistants thread and starts
// a new streaming run. Assumes messages[0] carries the run instructions
// (TODO confirm: upstream always prepends the system message) and that the
// last converted message is from the user. Returns the new run's id.
async submitUserMessage(threadId, messages, actions, eventSource, forwardedParameters) {
  // Copy so shift() below does not mutate the caller's array.
  messages = [
    ...messages
  ];
  // First message becomes the run instructions (only if it is a text message).
  const instructionsMessage = messages.shift();
  const instructions = instructionsMessage.isTextMessage() ? instructionsMessage.content : "";
  // The run is driven by the most recent converted message, which must be a user message.
  const userMessage = messages.map(convertMessageToOpenAIMessage).map(convertSystemMessageToAssistantAPI).at(-1);
  if (userMessage.role !== "user") {
    throw new Error("No user message found");
  }
  await this.openai.beta.threads.messages.create(threadId, {
    role: "user",
    content: userMessage.content
  });
  // Expose CopilotKit actions as OpenAI tools, plus optional built-in tools.
  const openaiTools = actions.map(convertActionInputToOpenAITool);
  const tools = [
    ...openaiTools,
    ...this.codeInterpreterEnabled ? [
      {
        type: "code_interpreter"
      }
    ] : [],
    ...this.fileSearchEnabled ? [
      {
        type: "file_search"
      }
    ] : []
  ];
  // Optional parameters are only spread in when provided.
  let stream = this.openai.beta.threads.runs.stream(threadId, {
    assistant_id: this.assistantId,
    instructions,
    tools,
    ...(forwardedParameters == null ? void 0 : forwardedParameters.maxTokens) && {
      max_completion_tokens: forwardedParameters.maxTokens
    },
    ...this.disableParallelToolCalls && {
      parallel_tool_calls: false
    }
  });
  // Relay the run's events to the client, then resolve the run id.
  await this.streamResponse(stream, eventSource);
  return getRunIdFromStream(stream);
}
774
// Translates the OpenAI Assistants event stream into CopilotKit runtime
// events (text message start/content/end, action execution start/args/end),
// completing the client event stream when the OpenAI stream is exhausted.
async streamResponse(stream, eventSource) {
  eventSource.stream(async (eventStream$) => {
    var _a, _b, _c, _d, _e, _f;
    // Whether an action execution span is currently open.
    let inFunctionCall = false;
    let currentMessageId;
    let currentToolCallId;
    for await (const chunk of stream) {
      switch (chunk.event) {
        case "thread.message.created":
          // A new assistant message implicitly closes any open tool call.
          if (inFunctionCall) {
            eventStream$.sendActionExecutionEnd({
              actionExecutionId: currentToolCallId
            });
          }
          currentMessageId = chunk.data.id;
          eventStream$.sendTextMessageStart({
            messageId: currentMessageId
          });
          break;
        case "thread.message.delta":
          // Only the first content block of each delta is inspected.
          if (((_a = chunk.data.delta.content) == null ? void 0 : _a[0].type) === "text") {
            eventStream$.sendTextMessageContent({
              messageId: currentMessageId,
              content: (_b = chunk.data.delta.content) == null ? void 0 : _b[0].text.value
            });
          }
          break;
        case "thread.message.completed":
          eventStream$.sendTextMessageEnd({
            messageId: currentMessageId
          });
          break;
        case "thread.run.step.delta":
          let toolCallId;
          let toolCallName;
          let toolCallArgs;
          if (chunk.data.delta.step_details.type === "tool_calls" && ((_c = chunk.data.delta.step_details.tool_calls) == null ? void 0 : _c[0].type) === "function") {
            toolCallId = (_d = chunk.data.delta.step_details.tool_calls) == null ? void 0 : _d[0].id;
            toolCallName = (_e = chunk.data.delta.step_details.tool_calls) == null ? void 0 : _e[0].function.name;
            toolCallArgs = (_f = chunk.data.delta.step_details.tool_calls) == null ? void 0 : _f[0].function.arguments;
          }
          // An id+name pair marks the start of a (possibly new) tool call;
          // bare argument deltas are streamed into the currently open one.
          if (toolCallName && toolCallId) {
            if (inFunctionCall) {
              eventStream$.sendActionExecutionEnd({
                actionExecutionId: currentToolCallId
              });
            }
            inFunctionCall = true;
            currentToolCallId = toolCallId;
            eventStream$.sendActionExecutionStart({
              actionExecutionId: currentToolCallId,
              parentMessageId: chunk.data.id,
              actionName: toolCallName
            });
          } else if (toolCallArgs) {
            eventStream$.sendActionExecutionArgs({
              actionExecutionId: currentToolCallId,
              args: toolCallArgs
            });
          }
          break;
      }
    }
    // Close a tool call left open when the stream ended.
    if (inFunctionCall) {
      eventStream$.sendActionExecutionEnd({
        actionExecutionId: currentToolCallId
      });
    }
    eventStream$.complete();
  });
}
845
+ };
846
+ __name(OpenAIAssistantAdapter, "OpenAIAssistantAdapter");
847
// Resolves with the run id from the first "thread.run.created" event emitted
// by the assistant stream.
// Fix: the previous implementation never used `reject`, so the returned
// promise hung forever if the stream errored or ended before a run was
// created. It now rejects in both cases and detaches all listeners once
// settled.
function getRunIdFromStream(stream) {
  return new Promise((resolve, reject) => {
    const onEvent = (event) => {
      if (event.event === "thread.run.created") {
        detach();
        resolve(event.data.id);
      }
    };
    const onError = (err) => {
      detach();
      reject(err);
    };
    const onEnd = () => {
      detach();
      reject(new Error("Stream ended before a run was created"));
    };
    // Remove every listener so a settled promise leaves nothing attached.
    const detach = () => {
      stream.off("event", onEvent);
      stream.off("error", onError);
      stream.off("end", onEnd);
    };
    stream.on("event", onEvent);
    stream.on("error", onError);
    stream.on("end", onEnd);
  });
}
859
+ __name(getRunIdFromStream, "getRunIdFromStream");
860
+
861
+ // src/service-adapters/unify/unify-adapter.ts
862
+ var import_openai3 = __toESM(require("openai"));
863
+ var import_shared4 = require("@copilotkit/shared");
864
// Service adapter that routes chat completions through the Unify API
// (OpenAI-compatible endpoint at api.unify.ai). Streams assistant text and
// tool calls as CopilotKit runtime events; the very first chunk processed by
// this adapter instance is preceded by a one-off "Model used: ..." message.
var UnifyAdapter = class {
  apiKey;
  model;
  // True until the first chunk has been seen. NOTE(review): instance state,
  // so the "Model used" banner appears once per adapter instance, not once
  // per request — confirm this is intentional.
  start;
  constructor(options) {
    if (options == null ? void 0 : options.apiKey) {
      this.apiKey = options.apiKey;
    } else {
      // NOTE(review): falls back to the literal string "UNIFY_API_KEY",
      // not process.env.UNIFY_API_KEY — looks suspicious; verify upstream.
      this.apiKey = "UNIFY_API_KEY";
    }
    this.model = options == null ? void 0 : options.model;
    this.start = true;
  }
  // Runs one streaming chat completion; returns the (possibly new) threadId.
  async process(request) {
    const tools = request.actions.map(convertActionInputToOpenAITool);
    // Unify exposes an OpenAI-compatible surface, so the OpenAI client is
    // pointed at Unify's base URL.
    const openai = new import_openai3.default({
      apiKey: this.apiKey,
      baseURL: "https://api.unify.ai/v0/"
    });
    const forwardedParameters = request.forwardedParameters;
    const messages = request.messages.map(convertMessageToOpenAIMessage);
    // Optional parameters are only spread in when present.
    const stream = await openai.chat.completions.create({
      model: this.model,
      messages,
      stream: true,
      ...tools.length > 0 && {
        tools
      },
      ...(forwardedParameters == null ? void 0 : forwardedParameters.temperature) && {
        temperature: forwardedParameters.temperature
      }
    });
    let model = null;
    let currentMessageId;
    let currentToolCallId;
    request.eventSource.stream(async (eventStream$) => {
      var _a, _b;
      // `mode` tracks whether we are inside a text message or a tool call.
      let mode = null;
      for await (const chunk of stream) {
        // One-time banner announcing which concrete model Unify routed to.
        if (this.start) {
          model = chunk.model;
          currentMessageId = (0, import_shared4.randomId)();
          eventStream$.sendTextMessageStart({
            messageId: currentMessageId
          });
          eventStream$.sendTextMessageContent({
            messageId: currentMessageId,
            content: `Model used: ${model}
`
          });
          eventStream$.sendTextMessageEnd({
            messageId: currentMessageId
          });
          this.start = false;
        }
        const toolCall = (_a = chunk.choices[0].delta.tool_calls) == null ? void 0 : _a[0];
        const content = chunk.choices[0].delta.content;
        // A new tool-call id (or the end of tool deltas) closes the open span.
        if (mode === "message" && (toolCall == null ? void 0 : toolCall.id)) {
          mode = null;
          eventStream$.sendTextMessageEnd({
            messageId: currentMessageId
          });
        } else if (mode === "function" && (toolCall === void 0 || (toolCall == null ? void 0 : toolCall.id))) {
          mode = null;
          eventStream$.sendActionExecutionEnd({
            actionExecutionId: currentToolCallId
          });
        }
        // Open a new span when idle.
        if (mode === null) {
          if (toolCall == null ? void 0 : toolCall.id) {
            mode = "function";
            currentToolCallId = toolCall.id;
            eventStream$.sendActionExecutionStart({
              actionExecutionId: currentToolCallId,
              actionName: toolCall.function.name
            });
          } else if (content) {
            mode = "message";
            currentMessageId = chunk.id;
            eventStream$.sendTextMessageStart({
              messageId: currentMessageId
            });
          }
        }
        // Forward the chunk's payload into the open span.
        if (mode === "message" && content) {
          eventStream$.sendTextMessageContent({
            messageId: currentMessageId,
            content
          });
        } else if (mode === "function" && ((_b = toolCall == null ? void 0 : toolCall.function) == null ? void 0 : _b.arguments)) {
          eventStream$.sendActionExecutionArgs({
            actionExecutionId: currentToolCallId,
            args: toolCall.function.arguments
          });
        }
      }
      // Close whatever span is still open, then finish the event stream.
      if (mode === "message") {
        eventStream$.sendTextMessageEnd({
          messageId: currentMessageId
        });
      } else if (mode === "function") {
        eventStream$.sendActionExecutionEnd({
          actionExecutionId: currentToolCallId
        });
      }
      eventStream$.complete();
    });
    return {
      threadId: request.threadId || (0, import_shared4.randomId)()
    };
  }
};
__name(UnifyAdapter, "UnifyAdapter");
977
+
978
+ // src/service-adapters/groq/groq-adapter.ts
979
+ var import_groq_sdk = require("groq-sdk");
980
+ var import_shared5 = require("@copilotkit/shared");
981
// Default Groq model used when none is supplied via params or the request.
var DEFAULT_MODEL2 = "llama3-groq-70b-8192-tool-use-preview";
// Service adapter for Groq's OpenAI-compatible chat completions API.
// Streams assistant text and tool calls as CopilotKit runtime events.
var GroqAdapter = class {
  model = DEFAULT_MODEL2;
  disableParallelToolCalls = false;
  _groq;
  get groq() {
    return this._groq;
  }
  constructor(params) {
    // Use the injected client if provided; otherwise construct one with
    // defaults (the Groq SDK resolves credentials from its environment).
    this._groq = (params == null ? void 0 : params.groq) || new import_groq_sdk.Groq({});
    if (params == null ? void 0 : params.model) {
      this.model = params.model;
    }
    this.disableParallelToolCalls = (params == null ? void 0 : params.disableParallelToolCalls) || false;
  }
  // Runs one streaming chat completion; returns the (possibly new) threadId.
  async process(request) {
    const { threadId, model = this.model, messages, actions, eventSource, forwardedParameters } = request;
    const tools = actions.map(convertActionInputToOpenAITool);
    let openaiMessages = messages.map(convertMessageToOpenAIMessage);
    // Trim oldest messages so the request fits the token budget.
    openaiMessages = limitMessagesToTokenCount(openaiMessages, tools, model);
    // A "function" tool choice is expanded into the structured form the API expects.
    let toolChoice = forwardedParameters == null ? void 0 : forwardedParameters.toolChoice;
    if ((forwardedParameters == null ? void 0 : forwardedParameters.toolChoice) === "function") {
      toolChoice = {
        type: "function",
        function: {
          name: forwardedParameters.toolChoiceFunctionName
        }
      };
    }
    // Optional parameters are only spread in when present.
    const stream = await this.groq.chat.completions.create({
      model,
      stream: true,
      messages: openaiMessages,
      ...tools.length > 0 && {
        tools
      },
      ...(forwardedParameters == null ? void 0 : forwardedParameters.maxTokens) && {
        max_tokens: forwardedParameters.maxTokens
      },
      ...(forwardedParameters == null ? void 0 : forwardedParameters.stop) && {
        stop: forwardedParameters.stop
      },
      ...toolChoice && {
        tool_choice: toolChoice
      },
      ...this.disableParallelToolCalls && {
        parallel_tool_calls: false
      },
      ...(forwardedParameters == null ? void 0 : forwardedParameters.temperature) && {
        temperature: forwardedParameters.temperature
      }
    });
    // State machine over the delta stream: `mode` tracks whether we are
    // currently inside a text message or a tool (function) call.
    eventSource.stream(async (eventStream$) => {
      var _a, _b;
      let mode = null;
      let currentMessageId;
      let currentToolCallId;
      for await (const chunk of stream) {
        const toolCall = (_a = chunk.choices[0].delta.tool_calls) == null ? void 0 : _a[0];
        const content = chunk.choices[0].delta.content;
        // A new tool-call id (or the end of tool deltas) closes the open span.
        if (mode === "message" && (toolCall == null ? void 0 : toolCall.id)) {
          mode = null;
          eventStream$.sendTextMessageEnd({
            messageId: currentMessageId
          });
        } else if (mode === "function" && (toolCall === void 0 || (toolCall == null ? void 0 : toolCall.id))) {
          mode = null;
          eventStream$.sendActionExecutionEnd({
            actionExecutionId: currentToolCallId
          });
        }
        // Open a new span when idle.
        if (mode === null) {
          if (toolCall == null ? void 0 : toolCall.id) {
            mode = "function";
            currentToolCallId = toolCall.id;
            eventStream$.sendActionExecutionStart({
              actionExecutionId: currentToolCallId,
              actionName: toolCall.function.name,
              parentMessageId: chunk.id
            });
          } else if (content) {
            mode = "message";
            currentMessageId = chunk.id;
            eventStream$.sendTextMessageStart({
              messageId: currentMessageId
            });
          }
        }
        // Forward the chunk's payload into the open span.
        if (mode === "message" && content) {
          eventStream$.sendTextMessageContent({
            messageId: currentMessageId,
            content
          });
        } else if (mode === "function" && ((_b = toolCall == null ? void 0 : toolCall.function) == null ? void 0 : _b.arguments)) {
          eventStream$.sendActionExecutionArgs({
            actionExecutionId: currentToolCallId,
            args: toolCall.function.arguments
          });
        }
      }
      // Close whatever span is still open, then finish the event stream.
      if (mode === "message") {
        eventStream$.sendTextMessageEnd({
          messageId: currentMessageId
        });
      } else if (mode === "function") {
        eventStream$.sendActionExecutionEnd({
          actionExecutionId: currentToolCallId
        });
      }
      eventStream$.complete();
    });
    return {
      threadId: threadId || (0, import_shared5.randomId)()
    };
  }
};
__name(GroqAdapter, "GroqAdapter");
1098
+
1099
+ // src/service-adapters/anthropic/anthropic-adapter.ts
1100
+ var import_sdk = __toESM(require("@anthropic-ai/sdk"));
1101
+
1102
+ // src/service-adapters/anthropic/utils.ts
1103
// Drops the oldest non-system messages until the conversation — together with
// the serialized tool definitions — fits within `maxTokens` (defaults to
// MAX_TOKENS). System messages are always kept and budgeted up front.
// Throws when the tools alone, or the system messages alone, exceed the
// budget. Relative message order is preserved in the returned array.
function limitMessagesToTokenCount2(messages, tools, model, maxTokens) {
  if (!maxTokens) {
    maxTokens = MAX_TOKENS;
  }
  const toolsNumTokens = countToolsTokens2(model, tools);
  if (toolsNumTokens > maxTokens) {
    throw new Error(`Too many tokens in function definitions: ${toolsNumTokens} > ${maxTokens}`);
  }
  // Remaining budget after reserving space for tools and system messages.
  let budget = maxTokens - toolsNumTokens;
  for (const message of messages) {
    if (message.role !== "system") {
      continue;
    }
    budget -= countMessageTokens2(model, message);
    if (budget < 0) {
      throw new Error("Not enough tokens for system message.");
    }
  }
  // Walk newest-to-oldest; once one message doesn't fit, every older
  // non-system message is discarded as well.
  const kept = [];
  let truncated = false;
  for (let i = messages.length - 1; i >= 0; i--) {
    const message = messages[i];
    if (message.role === "system") {
      kept.unshift(message);
      continue;
    }
    if (truncated) {
      continue;
    }
    const cost = countMessageTokens2(model, message);
    if (cost > budget) {
      truncated = true;
      continue;
    }
    kept.unshift(message);
    budget -= cost;
  }
  return kept;
}
__name(limitMessagesToTokenCount2, "limitMessagesToTokenCount");
var MAX_TOKENS = 128e3;
1143
// Approximate token cost of advertising `tools` to the model: the whole
// tools array is serialized to JSON and counted as text. An empty list
// costs nothing.
function countToolsTokens2(model, tools) {
  if (!tools.length) {
    return 0;
  }
  const serialized = JSON.stringify(tools);
  return countTokens2(model, serialized);
}
__name(countToolsTokens2, "countToolsTokens");
1151
// Approximate token cost of one message, based on its serialized content.
// JSON.stringify yields undefined for undefined content, hence the "" fallback.
function countMessageTokens2(model, message) {
  const serialized = JSON.stringify(message.content) || "";
  return countTokens2(model, serialized);
}
__name(countMessageTokens2, "countMessageTokens");
1155
// Crude token estimate: roughly one token per three characters.
// The `model` argument is accepted for interface symmetry but unused here.
function countTokens2(model, text) {
  const CHARS_PER_TOKEN = 3;
  return text.length / CHARS_PER_TOKEN;
}
1158
+ __name(countTokens2, "countTokens");
1159
// Maps a CopilotKit action definition onto Anthropic's tool format.
// `jsonSchema` is a JSON string and is parsed into `input_schema`;
// invalid JSON will surface as a SyntaxError.
function convertActionInputToAnthropicTool(action) {
  const { name, description, jsonSchema } = action;
  return {
    name,
    description,
    input_schema: JSON.parse(jsonSchema)
  };
}
1166
+ __name(convertActionInputToAnthropicTool, "convertActionInputToAnthropicTool");
1167
// Converts a CopilotKit message into an Anthropic MessageParam.
// Text messages map to text blocks (system text is rewritten into an
// assistant message with a marker prefix, since Anthropic has no in-band
// system role). Action executions map to tool_use blocks and results to
// tool_result blocks. Unknown message kinds yield undefined, as before.
function convertMessageToAnthropicMessage(message) {
  if (message.isTextMessage()) {
    if (message.role === "system") {
      return {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "THE FOLLOWING MESSAGE IS A SYSTEM MESSAGE: " + message.content
          }
        ]
      };
    }
    const role = message.role === "user" ? "user" : "assistant";
    return {
      role,
      content: [
        {
          type: "text",
          text: message.content
        }
      ]
    };
  }
  if (message.isActionExecutionMessage()) {
    return {
      role: "assistant",
      content: [
        {
          id: message.id,
          type: "tool_use",
          input: message.arguments,
          name: message.name
        }
      ]
    };
  }
  if (message.isResultMessage()) {
    return {
      role: "user",
      content: [
        {
          type: "tool_result",
          content: message.result,
          tool_use_id: message.actionExecutionId
        }
      ]
    };
  }
}
1215
+ __name(convertMessageToAnthropicMessage, "convertMessageToAnthropicMessage");
1216
// Merges consecutive messages that share a role into a single message whose
// content blocks are concatenated (Anthropic requires alternating roles).
// Each emitted group gets a fresh content array, so inputs are not mutated.
function groupAnthropicMessagesByRole(messageParams) {
  const grouped = [];
  for (const message of messageParams) {
    const previous = grouped[grouped.length - 1];
    if (previous && previous.role === message.role) {
      previous.content = previous.content.concat(message.content);
    } else {
      grouped.push({
        role: message.role,
        content: [
          ...message.content
        ]
      });
    }
  }
  return grouped;
}
1232
+ __name(groupAnthropicMessagesByRole, "groupAnthropicMessagesByRole");
1233
+
1234
+ // src/service-adapters/anthropic/anthropic-adapter.ts
1235
+ var import_shared6 = require("@copilotkit/shared");
1236
+ var DEFAULT_MODEL3 = "claude-3-sonnet-20240229";
1237
// Service adapter for Anthropic's Messages API (Claude). Streams assistant
// text (with a leading <thinking>...</thinking> section filtered out) and
// tool_use blocks as CopilotKit runtime events.
var AnthropicAdapter = class {
  model = DEFAULT_MODEL3;
  _anthropic;
  get anthropic() {
    return this._anthropic;
  }
  constructor(params) {
    // Use an injected client when provided; otherwise construct one with
    // defaults (the SDK resolves credentials from its environment).
    this._anthropic = (params == null ? void 0 : params.anthropic) || new import_sdk.default({});
    if (params == null ? void 0 : params.model) {
      this.model = params.model;
    }
  }
  // Runs one streaming Messages request; returns the (possibly new) threadId.
  async process(request) {
    const { threadId, model = this.model, messages: rawMessages, actions, eventSource, forwardedParameters } = request;
    const tools = actions.map(convertActionInputToAnthropicTool);
    // Copy before shift() so the caller's array is not mutated.
    const messages = [
      ...rawMessages
    ];
    // The first message is assumed to carry the system instructions
    // (TODO confirm: upstream always prepends the system message).
    const instructionsMessage = messages.shift();
    const instructions = instructionsMessage.isTextMessage() ? instructionsMessage.content : "";
    let anthropicMessages = messages.map(convertMessageToAnthropicMessage);
    anthropicMessages = limitMessagesToTokenCount2(anthropicMessages, tools, model);
    // Anthropic requires alternating roles; merge adjacent same-role messages.
    anthropicMessages = groupAnthropicMessagesByRole(anthropicMessages);
    // A "function" tool choice is expanded into Anthropic's structured form.
    let toolChoice = forwardedParameters == null ? void 0 : forwardedParameters.toolChoice;
    if ((forwardedParameters == null ? void 0 : forwardedParameters.toolChoice) === "function") {
      toolChoice = {
        type: "tool",
        name: forwardedParameters.toolChoiceFunctionName
      };
    }
    const stream = this.anthropic.messages.create({
      system: instructions,
      // FIX: was `this.model`, which silently ignored a per-request model
      // override even though the override was already applied to the
      // token-limiting step above.
      model,
      messages: anthropicMessages,
      max_tokens: (forwardedParameters == null ? void 0 : forwardedParameters.maxTokens) || 1024,
      ...(forwardedParameters == null ? void 0 : forwardedParameters.temperature) ? {
        temperature: forwardedParameters.temperature
      } : {},
      ...tools.length > 0 && {
        tools
      },
      ...toolChoice && {
        tool_choice: toolChoice
      },
      stream: true
    });
    eventSource.stream(async (eventStream$) => {
      // `mode` tracks whether the current content block is text or a tool call.
      let mode = null;
      let didOutputText = false;
      let currentMessageId = (0, import_shared6.randomId)();
      let currentToolCallId = (0, import_shared6.randomId)();
      // Strips a leading <thinking>...</thinking> block from streamed text.
      let filterThinkingTextBuffer = new FilterThinkingTextBuffer();
      // `stream` is the unawaited create() promise; await it to iterate.
      for await (const chunk of await stream) {
        if (chunk.type === "message_start") {
          currentMessageId = chunk.message.id;
        } else if (chunk.type === "content_block_start") {
          if (chunk.content_block.type === "text") {
            didOutputText = false;
            filterThinkingTextBuffer.reset();
            mode = "message";
          } else if (chunk.content_block.type === "tool_use") {
            currentToolCallId = chunk.content_block.id;
            eventStream$.sendActionExecutionStart({
              actionExecutionId: currentToolCallId,
              actionName: chunk.content_block.name,
              parentMessageId: currentMessageId
            });
            mode = "function";
          }
        } else if (chunk.type === "content_block_delta") {
          if (chunk.delta.type === "text_delta") {
            const text = filterThinkingTextBuffer.onTextChunk(chunk.delta.text);
            if (text.length > 0) {
              // Defer TextMessageStart until there is visible (non-thinking) text.
              if (!didOutputText) {
                eventStream$.sendTextMessageStart({
                  messageId: currentMessageId
                });
                didOutputText = true;
              }
              eventStream$.sendTextMessageContent({
                messageId: currentMessageId,
                content: text
              });
            }
          } else if (chunk.delta.type === "input_json_delta") {
            eventStream$.sendActionExecutionArgs({
              actionExecutionId: currentToolCallId,
              args: chunk.delta.partial_json
            });
          }
        } else if (chunk.type === "content_block_stop") {
          if (mode === "message") {
            if (didOutputText) {
              eventStream$.sendTextMessageEnd({
                messageId: currentMessageId
              });
            }
          } else if (mode === "function") {
            eventStream$.sendActionExecutionEnd({
              actionExecutionId: currentToolCallId
            });
          }
        }
      }
      eventStream$.complete();
    });
    return {
      threadId: threadId || (0, import_shared6.randomId)()
    };
  }
};
1348
+ __name(AnthropicAdapter, "AnthropicAdapter");
1349
var THINKING_TAG = "<thinking>";
var THINKING_TAG_END = "</thinking>";
// Filters a leading <thinking>...</thinking> section out of a streamed text
// message. Chunks that could still turn into the opening tag are withheld;
// once the closing tag is seen (or the prefix is ruled out), text flows
// through untouched.
// FIX: previously, withheld prefix chunks were silently dropped when the
// buffered text turned out not to start with "<thinking>" (e.g. "<th" then
// "xyz" emitted only "xyz"); the accumulated buffer is now flushed instead.
var FilterThinkingTextBuffer = class FilterThinkingTextBuffer2 {
  // Text accumulated while the thinking-tag decision is still pending.
  buffer;
  // True once filtering is resolved (tag stripped or ruled out); from then
  // on chunks pass through without buffering.
  didFilterThinkingTag = false;
  constructor() {
    this.buffer = "";
  }
  // Returns the portion of `text` that should be shown to the user.
  onTextChunk(text) {
    if (this.didFilterThinkingTag) {
      return text;
    }
    this.buffer += text;
    const potentialTag = this.buffer.slice(0, THINKING_TAG.length);
    if (THINKING_TAG.startsWith(potentialTag)) {
      if (this.buffer.includes(THINKING_TAG_END)) {
        // Complete thinking section: emit only what follows the closing tag.
        const end = this.buffer.indexOf(THINKING_TAG_END);
        const visible = this.buffer.slice(end + THINKING_TAG_END.length);
        this.buffer = visible;
        this.didFilterThinkingTag = true;
        return visible;
      }
      // Might still be an (unfinished) opening tag: withhold output for now.
      return "";
    }
    // The buffer can no longer be a thinking tag; flush everything withheld.
    this.didFilterThinkingTag = true;
    return this.buffer;
  }
  reset() {
    this.buffer = "";
    this.didFilterThinkingTag = false;
  }
};
1381
+
1382
+ // src/service-adapters/experimental/ollama/ollama-adapter.ts
1383
+ var import_ollama = require("@langchain/community/llms/ollama");
1384
+ var import_shared7 = require("@copilotkit/shared");
1385
// Default local Ollama model tag.
var DEFAULT_MODEL4 = "llama3:latest";
// Experimental adapter that pipes the conversation through a locally running
// Ollama model via LangChain's community Ollama LLM wrapper. Text-only:
// actions/tool calls are not supported.
var ExperimentalOllamaAdapter = class {
  model;
  constructor(options) {
    if (options == null ? void 0 : options.model) {
      this.model = options.model;
    } else {
      this.model = DEFAULT_MODEL4;
    }
  }
  // Streams the model's answer as a single text message; returns the threadId.
  async process(request) {
    // NOTE(review): `actions` is destructured but never used — tool calling
    // is not implemented by this adapter.
    const { messages, actions, eventSource } = request;
    const ollama = new import_ollama.Ollama({
      model: this.model
    });
    // Only plain text messages are forwarded; action/result messages are dropped.
    const contents = messages.filter((m) => m.isTextMessage()).map((m) => m.content);
    const _stream = await ollama.stream(contents);
    // Relay the raw text chunks as one streamed text message.
    eventSource.stream(async (eventStream$) => {
      const currentMessageId = (0, import_shared7.randomId)();
      eventStream$.sendTextMessageStart({
        messageId: currentMessageId
      });
      for await (const chunkText of _stream) {
        eventStream$.sendTextMessageContent({
          messageId: currentMessageId,
          content: chunkText
        });
      }
      eventStream$.sendTextMessageEnd({
        messageId: currentMessageId
      });
      eventStream$.complete();
    });
    return {
      threadId: request.threadId || (0, import_shared7.randomId)()
    };
  }
};
__name(ExperimentalOllamaAdapter, "ExperimentalOllamaAdapter");
1424
+
1425
+ // src/service-adapters/experimental/empty/empty-adapter.ts
1426
+ var import_shared8 = require("@copilotkit/shared");
1427
// Minimal no-op adapter: contacts no model and resolves immediately,
// echoing the request's threadId or minting a fresh random one.
var ExperimentalEmptyAdapter = class {
  async process(request) {
    const threadId = request.threadId || (0, import_shared8.randomId)();
    return {
      threadId
    };
  }
};
1434
+ __name(ExperimentalEmptyAdapter, "ExperimentalEmptyAdapter");
1435
+ // Annotate the CommonJS export names for ESM import in node:
1436
+ 0 && (module.exports = {
1437
+ AnthropicAdapter,
1438
+ ExperimentalEmptyAdapter,
1439
+ ExperimentalOllamaAdapter,
1440
+ GoogleGenerativeAIAdapter,
1441
+ GroqAdapter,
1442
+ LangChainAdapter,
1443
+ OpenAIAdapter,
1444
+ OpenAIAssistantAdapter,
1445
+ RemoteChain,
1446
+ UnifyAdapter
1447
+ });
1448
+ //# sourceMappingURL=index.js.map