@copilotkit/runtime 1.50.0-beta.1 → 1.50.0-beta.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (132) hide show
  1. package/CHANGELOG.md +74 -0
  2. package/dist/chunk-2OZAGFV3.mjs +43 -0
  3. package/dist/chunk-2OZAGFV3.mjs.map +1 -0
  4. package/dist/chunk-62NE5S6M.mjs +226 -0
  5. package/dist/chunk-62NE5S6M.mjs.map +1 -0
  6. package/dist/chunk-6XRUR5UK.mjs +1 -0
  7. package/dist/chunk-6XRUR5UK.mjs.map +1 -0
  8. package/dist/chunk-AMUJQ6IR.mjs +50 -0
  9. package/dist/chunk-AMUJQ6IR.mjs.map +1 -0
  10. package/dist/chunk-BJEYMRDD.mjs +25 -0
  11. package/dist/chunk-BJEYMRDD.mjs.map +1 -0
  12. package/dist/chunk-DZV4ZIAR.mjs +3063 -0
  13. package/dist/chunk-DZV4ZIAR.mjs.map +1 -0
  14. package/dist/chunk-FHD4JECV.mjs +33 -0
  15. package/dist/chunk-FHD4JECV.mjs.map +1 -0
  16. package/dist/chunk-FMU55SEU.mjs +25 -0
  17. package/dist/chunk-FMU55SEU.mjs.map +1 -0
  18. package/dist/chunk-OWIGJONH.mjs +275 -0
  19. package/dist/chunk-OWIGJONH.mjs.map +1 -0
  20. package/dist/chunk-SBCOROE4.mjs +1112 -0
  21. package/dist/chunk-SBCOROE4.mjs.map +1 -0
  22. package/dist/chunk-TTUAEJLD.mjs +617 -0
  23. package/dist/chunk-TTUAEJLD.mjs.map +1 -0
  24. package/dist/chunk-XWBDEXDA.mjs +153 -0
  25. package/dist/chunk-XWBDEXDA.mjs.map +1 -0
  26. package/dist/chunk-Z752VE75.mjs +74 -0
  27. package/dist/chunk-Z752VE75.mjs.map +1 -0
  28. package/dist/graphql/message-conversion/index.d.ts +18 -0
  29. package/dist/graphql/message-conversion/index.js +725 -0
  30. package/dist/graphql/message-conversion/index.js.map +1 -0
  31. package/dist/graphql/message-conversion/index.mjs +245 -0
  32. package/dist/graphql/message-conversion/index.mjs.map +1 -0
  33. package/dist/graphql/types/base/index.d.ts +6 -0
  34. package/dist/graphql/types/base/index.js +63 -0
  35. package/dist/graphql/types/base/index.js.map +1 -0
  36. package/dist/graphql/types/base/index.mjs +8 -0
  37. package/dist/graphql/types/base/index.mjs.map +1 -0
  38. package/dist/graphql/types/converted/index.d.ts +2 -0
  39. package/dist/graphql/types/converted/index.js +294 -0
  40. package/dist/graphql/types/converted/index.js.map +1 -0
  41. package/dist/graphql/types/converted/index.mjs +20 -0
  42. package/dist/graphql/types/converted/index.mjs.map +1 -0
  43. package/dist/groq-adapter-50bc6e4a.d.ts +326 -0
  44. package/dist/index-adbd78f1.d.ts +154 -0
  45. package/dist/index.d.ts +136 -287
  46. package/dist/index.js +393 -287
  47. package/dist/index.js.map +1 -1
  48. package/dist/index.mjs +385 -276
  49. package/dist/index.mjs.map +1 -1
  50. package/dist/langgraph.d.ts +284 -0
  51. package/dist/langgraph.js +211 -0
  52. package/dist/langgraph.js.map +1 -0
  53. package/dist/langgraph.mjs +206 -0
  54. package/dist/langgraph.mjs.map +1 -0
  55. package/dist/langserve-74a52292.d.ts +242 -0
  56. package/dist/lib/cloud/index.d.ts +6 -0
  57. package/dist/lib/cloud/index.js +18 -0
  58. package/dist/lib/cloud/index.js.map +1 -0
  59. package/dist/lib/cloud/index.mjs +1 -0
  60. package/dist/lib/cloud/index.mjs.map +1 -0
  61. package/dist/lib/index.d.ts +266 -0
  62. package/dist/lib/index.js +4944 -0
  63. package/dist/lib/index.js.map +1 -0
  64. package/dist/lib/index.mjs +74 -0
  65. package/dist/lib/index.mjs.map +1 -0
  66. package/dist/lib/integrations/index.d.ts +28 -0
  67. package/dist/lib/integrations/index.js +3024 -0
  68. package/dist/lib/integrations/index.js.map +1 -0
  69. package/dist/lib/integrations/index.mjs +36 -0
  70. package/dist/lib/integrations/index.mjs.map +1 -0
  71. package/dist/lib/integrations/nest/index.d.ts +16 -0
  72. package/dist/lib/integrations/nest/index.js +2937 -0
  73. package/dist/lib/integrations/nest/index.js.map +1 -0
  74. package/dist/lib/integrations/nest/index.mjs +13 -0
  75. package/dist/lib/integrations/nest/index.mjs.map +1 -0
  76. package/dist/lib/integrations/node-express/index.d.ts +16 -0
  77. package/dist/lib/integrations/node-express/index.js +2937 -0
  78. package/dist/lib/integrations/node-express/index.js.map +1 -0
  79. package/dist/lib/integrations/node-express/index.mjs +13 -0
  80. package/dist/lib/integrations/node-express/index.mjs.map +1 -0
  81. package/dist/lib/integrations/node-http/index.d.ts +16 -0
  82. package/dist/lib/integrations/node-http/index.js +2923 -0
  83. package/dist/lib/integrations/node-http/index.js.map +1 -0
  84. package/dist/lib/integrations/node-http/index.mjs +12 -0
  85. package/dist/lib/integrations/node-http/index.mjs.map +1 -0
  86. package/dist/service-adapters/index.d.ts +166 -0
  87. package/dist/service-adapters/index.js +1800 -0
  88. package/dist/service-adapters/index.js.map +1 -0
  89. package/dist/service-adapters/index.mjs +36 -0
  90. package/dist/service-adapters/index.mjs.map +1 -0
  91. package/dist/service-adapters/shared/index.d.ts +9 -0
  92. package/dist/service-adapters/shared/index.js +72 -0
  93. package/dist/service-adapters/shared/index.js.map +1 -0
  94. package/dist/service-adapters/shared/index.mjs +8 -0
  95. package/dist/service-adapters/shared/index.mjs.map +1 -0
  96. package/dist/shared-f6d43ef8.d.ts +446 -0
  97. package/dist/utils/index.d.ts +65 -0
  98. package/dist/utils/index.js +175 -0
  99. package/dist/utils/index.js.map +1 -0
  100. package/dist/utils/index.mjs +12 -0
  101. package/dist/utils/index.mjs.map +1 -0
  102. package/dist/v2/index.d.ts +1 -0
  103. package/dist/v2/index.js +7 -0
  104. package/dist/v2/index.js.map +1 -1
  105. package/dist/v2/index.mjs +1 -0
  106. package/dist/v2/index.mjs.map +1 -1
  107. package/package.json +56 -18
  108. package/src/graphql/message-conversion/agui-to-gql.test.ts +2 -2
  109. package/src/graphql/message-conversion/gql-to-agui.test.ts +30 -28
  110. package/src/graphql/message-conversion/roundtrip-conversion.test.ts +8 -8
  111. package/src/langgraph.ts +1 -0
  112. package/src/lib/index.ts +42 -1
  113. package/src/lib/integrations/nextjs/app-router.ts +3 -1
  114. package/src/lib/integrations/node-http/index.ts +132 -11
  115. package/src/lib/integrations/shared.ts +2 -2
  116. package/src/lib/runtime/agent-integrations/{langgraph.agent.ts → langgraph/agent.ts} +5 -30
  117. package/src/lib/runtime/agent-integrations/langgraph/consts.ts +34 -0
  118. package/src/lib/runtime/agent-integrations/langgraph/index.ts +2 -0
  119. package/src/lib/runtime/copilot-runtime.ts +51 -68
  120. package/src/lib/runtime/telemetry-agent-runner.ts +134 -0
  121. package/src/service-adapters/anthropic/anthropic-adapter.ts +16 -3
  122. package/src/service-adapters/bedrock/bedrock-adapter.ts +4 -1
  123. package/src/service-adapters/experimental/ollama/ollama-adapter.ts +2 -1
  124. package/src/service-adapters/google/google-genai-adapter.ts +9 -4
  125. package/src/service-adapters/groq/groq-adapter.ts +16 -3
  126. package/src/service-adapters/langchain/langchain-adapter.ts +5 -3
  127. package/src/service-adapters/langchain/langserve.ts +2 -1
  128. package/src/service-adapters/openai/openai-adapter.ts +17 -3
  129. package/src/service-adapters/openai/openai-assistant-adapter.ts +26 -11
  130. package/src/service-adapters/unify/unify-adapter.ts +3 -1
  131. package/src/v2/index.ts +1 -0
  132. package/tsup.config.ts +5 -2
@@ -0,0 +1,1112 @@
1
+ import {
2
+ convertServiceAdapterError
3
+ } from "./chunk-AMUJQ6IR.mjs";
4
+ import {
5
+ __name
6
+ } from "./chunk-FHD4JECV.mjs";
7
+
8
+ // src/service-adapters/openai/openai-adapter.ts
9
+ import OpenAI from "openai";
10
+
11
+ // src/service-adapters/openai/utils.ts
12
+ import { parseJson } from "@copilotkit/shared";
13
// Trims `messages` so the approximate token total fits the model's context
// window. System/developer messages are always kept (their cost is reserved
// up front); the newest non-system messages are kept until the budget runs
// out, after which all older non-system messages are dropped.
function limitMessagesToTokenCount(messages, tools, model, maxTokens) {
  let budget = maxTokens || maxTokensForOpenAIModel(model);
  const isSystemRole = (role) => role === "system" || role === "developer";
  const toolTokens = countToolsTokens(model, tools);
  if (toolTokens > budget) {
    throw new Error(`Too many tokens in function definitions: ${toolTokens} > ${budget}`);
  }
  budget -= toolTokens;
  // Reserve room for every system/developer message before filling the rest.
  for (const message of messages) {
    if (isSystemRole(message.role)) {
      budget -= countMessageTokens(model, message);
      if (budget < 0) {
        throw new Error("Not enough tokens for system message.");
      }
    }
  }
  // Walk from newest to oldest, unshifting kept messages to preserve order.
  const kept = [];
  let cutoff = false;
  for (let i = messages.length - 1; i >= 0; i--) {
    const message = messages[i];
    if (isSystemRole(message.role)) {
      kept.unshift(message);
      continue;
    }
    if (cutoff) {
      continue;
    }
    const cost = countMessageTokens(model, message);
    if (budget < cost) {
      cutoff = true;
      continue;
    }
    kept.unshift(message);
    budget -= cost;
  }
  return kept;
}
__name(limitMessagesToTokenCount, "limitMessagesToTokenCount");
58
// Returns the context-window size (in tokens) for a known OpenAI model name,
// falling back to DEFAULT_MAX_TOKENS for unknown models.
function maxTokensForOpenAIModel(model) {
  const known = maxTokensByModel[model];
  return known || DEFAULT_MAX_TOKENS;
}
__name(maxTokensForOpenAIModel, "maxTokensForOpenAIModel");
62
// Fallback context-window size (tokens) for models not listed below.
var DEFAULT_MAX_TOKENS = 128e3;
// Context-window sizes (tokens) keyed by OpenAI model name.
// NOTE(review): values mirror published model limits at build time — verify
// against OpenAI's current model documentation before extending this table.
var maxTokensByModel = {
  // o1
  o1: 2e5,
  "o1-2024-12-17": 2e5,
  "o1-mini": 128e3,
  "o1-mini-2024-09-12": 128e3,
  "o1-preview": 128e3,
  "o1-preview-2024-09-12": 128e3,
  // o3-mini
  "o3-mini": 2e5,
  "o3-mini-2025-01-31": 2e5,
  // GPT-4
  "gpt-4o": 128e3,
  "chatgpt-4o-latest": 128e3,
  "gpt-4o-2024-08-06": 128e3,
  "gpt-4o-2024-05-13": 128e3,
  "gpt-4o-mini": 128e3,
  "gpt-4o-mini-2024-07-18": 128e3,
  "gpt-4-turbo": 128e3,
  "gpt-4-turbo-2024-04-09": 128e3,
  "gpt-4-0125-preview": 128e3,
  "gpt-4-turbo-preview": 128e3,
  "gpt-4-1106-preview": 128e3,
  "gpt-4-vision-preview": 128e3,
  "gpt-4-1106-vision-preview": 128e3,
  "gpt-4-32k": 32768,
  "gpt-4-32k-0613": 32768,
  "gpt-4-32k-0314": 32768,
  "gpt-4": 8192,
  "gpt-4-0613": 8192,
  "gpt-4-0314": 8192,
  // GPT-3.5
  "gpt-3.5-turbo-0125": 16385,
  "gpt-3.5-turbo": 16385,
  "gpt-3.5-turbo-1106": 16385,
  "gpt-3.5-turbo-instruct": 4096,
  "gpt-3.5-turbo-16k": 16385,
  "gpt-3.5-turbo-0613": 4096,
  "gpt-3.5-turbo-16k-0613": 16385,
  "gpt-3.5-turbo-0301": 4097
};
104
// Approximate token cost of the serialized tool definitions; 0 when there
// are no tools.
function countToolsTokens(model, tools) {
  if (tools.length === 0) {
    return 0;
  }
  const serialized = JSON.stringify(tools);
  return countTokens(model, serialized);
}
__name(countToolsTokens, "countToolsTokens");
112
// Approximate token cost of one message's content (missing content counts
// as empty).
function countMessageTokens(model, message) {
  const text = message.content || "";
  return countTokens(model, text);
}
__name(countMessageTokens, "countMessageTokens");
116
// Crude token estimate: roughly 3 characters per token. The `model`
// parameter is accepted for interface symmetry but is not used; the result
// may be fractional.
function countTokens(model, text) {
  const charsPerToken = 3;
  return text.length / charsPerToken;
}
119
+ __name(countTokens, "countTokens");
120
// Maps a CopilotKit action definition onto OpenAI's `function` tool shape;
// a malformed JSON schema falls back to an empty parameters object.
function convertActionInputToOpenAITool(action) {
  const parameters = parseJson(action.jsonSchema, {});
  return {
    type: "function",
    function: {
      name: action.name,
      description: action.description,
      parameters
    }
  };
}
__name(convertActionInputToOpenAITool, "convertActionInputToOpenAITool");
131
// Converts an internal message object to the OpenAI chat-completions wire
// format. Text messages with a "system" role are demoted to "developer"
// unless options.keepSystemRole is true. Unknown message kinds yield
// `undefined`.
function convertMessageToOpenAIMessage(message, options) {
  const keepSystemRole = (options || { keepSystemRole: false }).keepSystemRole;
  if (message.isTextMessage()) {
    const demoteSystem = message.role === "system" && !keepSystemRole;
    return {
      role: demoteSystem ? "developer" : message.role,
      content: message.content
    };
  }
  if (message.isImageMessage()) {
    const dataUrl = `data:image/${message.format};base64,${message.bytes}`;
    return {
      role: "user",
      content: [{ type: "image_url", image_url: { url: dataUrl } }]
    };
  }
  if (message.isActionExecutionMessage()) {
    return {
      role: "assistant",
      tool_calls: [
        {
          id: message.id,
          type: "function",
          function: {
            name: message.name,
            arguments: JSON.stringify(message.arguments)
          }
        }
      ]
    };
  }
  if (message.isResultMessage()) {
    return {
      role: "tool",
      content: message.result,
      tool_call_id: message.actionExecutionId
    };
  }
}
178
+ __name(convertMessageToOpenAIMessage, "convertMessageToOpenAIMessage");
179
// The Assistants API does not accept system/developer roles, so such
// messages are rewritten into prefixed assistant messages; everything else
// is shallow-copied unchanged.
function convertSystemMessageToAssistantAPI(message) {
  const copy = { ...message };
  if (message.role === "system" || message.role === "developer") {
    copy.role = "assistant";
    copy.content = "THE FOLLOWING MESSAGE IS A SYSTEM MESSAGE: " + message.content;
  }
  return copy;
}
191
+ __name(convertSystemMessageToAssistantAPI, "convertSystemMessageToAssistantAPI");
192
+
193
+ // src/service-adapters/openai/openai-adapter.ts
194
+ import { randomUUID } from "@copilotkit/shared";
195
+ var DEFAULT_MODEL = "gpt-4o";
196
+ var OpenAIAdapter = class {
197
+ model = DEFAULT_MODEL;
198
+ provider = "openai";
199
+ disableParallelToolCalls = false;
200
+ _openai;
201
+ keepSystemRole = false;
202
+ get openai() {
203
+ return this._openai;
204
+ }
205
+ constructor(params) {
206
+ this._openai = (params == null ? void 0 : params.openai) || new OpenAI({});
207
+ if (params == null ? void 0 : params.model) {
208
+ this.model = params.model;
209
+ }
210
+ this.disableParallelToolCalls = (params == null ? void 0 : params.disableParallelToolCalls) || false;
211
+ this.keepSystemRole = (params == null ? void 0 : params.keepSystemRole) ?? false;
212
+ }
213
+ async process(request) {
214
+ const { threadId: threadIdFromRequest, model = this.model, messages, actions, eventSource, forwardedParameters } = request;
215
+ const tools = actions.map(convertActionInputToOpenAITool);
216
+ const threadId = threadIdFromRequest ?? randomUUID();
217
+ const validToolUseIds = /* @__PURE__ */ new Set();
218
+ for (const message of messages) {
219
+ if (message.isActionExecutionMessage()) {
220
+ validToolUseIds.add(message.id);
221
+ }
222
+ }
223
+ const filteredMessages = messages.filter((message) => {
224
+ if (message.isResultMessage()) {
225
+ if (!validToolUseIds.has(message.actionExecutionId)) {
226
+ return false;
227
+ }
228
+ validToolUseIds.delete(message.actionExecutionId);
229
+ return true;
230
+ }
231
+ return true;
232
+ });
233
+ let openaiMessages = filteredMessages.map((m) => convertMessageToOpenAIMessage(m, {
234
+ keepSystemRole: this.keepSystemRole
235
+ }));
236
+ openaiMessages = limitMessagesToTokenCount(openaiMessages, tools, model);
237
+ let toolChoice = forwardedParameters == null ? void 0 : forwardedParameters.toolChoice;
238
+ if ((forwardedParameters == null ? void 0 : forwardedParameters.toolChoice) === "function") {
239
+ toolChoice = {
240
+ type: "function",
241
+ function: {
242
+ name: forwardedParameters.toolChoiceFunctionName
243
+ }
244
+ };
245
+ }
246
+ try {
247
+ const stream = this.openai.beta.chat.completions.stream({
248
+ model,
249
+ stream: true,
250
+ messages: openaiMessages,
251
+ ...tools.length > 0 && {
252
+ tools
253
+ },
254
+ ...(forwardedParameters == null ? void 0 : forwardedParameters.maxTokens) && {
255
+ max_completion_tokens: forwardedParameters.maxTokens
256
+ },
257
+ ...(forwardedParameters == null ? void 0 : forwardedParameters.stop) && {
258
+ stop: forwardedParameters.stop
259
+ },
260
+ ...toolChoice && {
261
+ tool_choice: toolChoice
262
+ },
263
+ ...this.disableParallelToolCalls && {
264
+ parallel_tool_calls: false
265
+ },
266
+ ...(forwardedParameters == null ? void 0 : forwardedParameters.temperature) && {
267
+ temperature: forwardedParameters.temperature
268
+ }
269
+ });
270
+ eventSource.stream(async (eventStream$) => {
271
+ var _a, _b;
272
+ let mode = null;
273
+ let currentMessageId;
274
+ let currentToolCallId;
275
+ try {
276
+ for await (const chunk of stream) {
277
+ if (chunk.choices.length === 0) {
278
+ continue;
279
+ }
280
+ const toolCall = (_a = chunk.choices[0].delta.tool_calls) == null ? void 0 : _a[0];
281
+ const content = chunk.choices[0].delta.content;
282
+ if (mode === "message" && (toolCall == null ? void 0 : toolCall.id)) {
283
+ mode = null;
284
+ eventStream$.sendTextMessageEnd({
285
+ messageId: currentMessageId
286
+ });
287
+ } else if (mode === "function" && (toolCall === void 0 || (toolCall == null ? void 0 : toolCall.id))) {
288
+ mode = null;
289
+ eventStream$.sendActionExecutionEnd({
290
+ actionExecutionId: currentToolCallId
291
+ });
292
+ }
293
+ if (mode === null) {
294
+ if (toolCall == null ? void 0 : toolCall.id) {
295
+ mode = "function";
296
+ currentToolCallId = toolCall.id;
297
+ eventStream$.sendActionExecutionStart({
298
+ actionExecutionId: currentToolCallId,
299
+ parentMessageId: chunk.id,
300
+ actionName: toolCall.function.name
301
+ });
302
+ } else if (content) {
303
+ mode = "message";
304
+ currentMessageId = chunk.id;
305
+ eventStream$.sendTextMessageStart({
306
+ messageId: currentMessageId
307
+ });
308
+ }
309
+ }
310
+ if (mode === "message" && content) {
311
+ eventStream$.sendTextMessageContent({
312
+ messageId: currentMessageId,
313
+ content
314
+ });
315
+ } else if (mode === "function" && ((_b = toolCall == null ? void 0 : toolCall.function) == null ? void 0 : _b.arguments)) {
316
+ eventStream$.sendActionExecutionArgs({
317
+ actionExecutionId: currentToolCallId,
318
+ args: toolCall.function.arguments
319
+ });
320
+ }
321
+ }
322
+ if (mode === "message") {
323
+ eventStream$.sendTextMessageEnd({
324
+ messageId: currentMessageId
325
+ });
326
+ } else if (mode === "function") {
327
+ eventStream$.sendActionExecutionEnd({
328
+ actionExecutionId: currentToolCallId
329
+ });
330
+ }
331
+ } catch (error) {
332
+ console.error("[OpenAI] Error during API call:", error);
333
+ throw convertServiceAdapterError(error, "OpenAI");
334
+ }
335
+ eventStream$.complete();
336
+ });
337
+ } catch (error) {
338
+ console.error("[OpenAI] Error during API call:", error);
339
+ throw convertServiceAdapterError(error, "OpenAI");
340
+ }
341
+ return {
342
+ threadId
343
+ };
344
+ }
345
+ };
346
+ __name(OpenAIAdapter, "OpenAIAdapter");
347
+
348
+ // src/service-adapters/langchain/utils.ts
349
+ import { AIMessage, HumanMessage, SystemMessage, ToolMessage } from "@langchain/core/messages";
350
+ import { DynamicStructuredTool } from "@langchain/core/tools";
351
+ import { randomId, convertJsonSchemaToZodSchema } from "@copilotkit/shared";
352
// Maps an internal message to the matching LangChain message class.
// Text messages with a role other than user/assistant/system — and unknown
// message kinds — yield `undefined`.
function convertMessageToLangChainMessage(message) {
  if (message.isTextMessage()) {
    switch (message.role) {
      case "user":
        return new HumanMessage(message.content);
      case "assistant":
        return new AIMessage(message.content);
      case "system":
        return new SystemMessage(message.content);
    }
    return;
  }
  if (message.isActionExecutionMessage()) {
    const toolCall = {
      id: message.id,
      args: message.arguments,
      name: message.name
    };
    return new AIMessage({ content: "", tool_calls: [toolCall] });
  }
  if (message.isResultMessage()) {
    return new ToolMessage({
      content: message.result,
      tool_call_id: message.actionExecutionId
    });
  }
}
__name(convertMessageToLangChainMessage, "convertMessageToLangChainMessage");
380
// Wraps an action definition in a LangChain DynamicStructuredTool whose
// implementation is a no-op — the CopilotKit runtime executes the action
// elsewhere; the tool only exists so the model can select it.
function convertActionInputToLangChainTool(actionInput) {
  const schema = convertJsonSchemaToZodSchema(JSON.parse(actionInput.jsonSchema), true);
  return new DynamicStructuredTool({
    ...actionInput,
    name: actionInput.name,
    description: actionInput.description,
    schema,
    func: async () => ""
  });
}
__name(convertActionInputToLangChainTool, "convertActionInputToLangChainTool");
392
// Duck-type check via Symbol.toStringTag so it works across LangChain
// versions without importing the class.
function isAIMessage(message) {
  const tag = Object.prototype.toString.call(message);
  return tag === "[object AIMessage]";
}
395
+ __name(isAIMessage, "isAIMessage");
396
// Duck-type check via Symbol.toStringTag (see isAIMessage).
function isAIMessageChunk(message) {
  const tag = Object.prototype.toString.call(message);
  return tag === "[object AIMessageChunk]";
}
399
+ __name(isAIMessageChunk, "isAIMessageChunk");
400
// Duck-type check via Symbol.toStringTag (see isAIMessage).
function isBaseMessageChunk(message) {
  const tag = Object.prototype.toString.call(message);
  return tag === "[object BaseMessageChunk]";
}
403
+ __name(isBaseMessageChunk, "isBaseMessageChunk");
404
// If a tool call is pending, resolve it with a placeholder result so the
// client does not keep waiting on it while the model answers with a plain
// message. No-op when there is no pending action execution.
function maybeSendActionExecutionResultIsMessage(eventStream$, actionExecution) {
  if (!actionExecution) {
    return;
  }
  eventStream$.sendActionExecutionResult({
    actionExecutionId: actionExecution.id,
    actionName: actionExecution.name,
    result: "Sending a message"
  });
}
413
+ __name(maybeSendActionExecutionResultIsMessage, "maybeSendActionExecutionResultIsMessage");
414
// Bridges a LangChain chainFn result onto the CopilotKit event stream.
// Accepts: a plain string, an AIMessage, a BaseMessageChunk, a
// ReadableStream of message chunks, or (while an action is pending) any
// JSON-serializable value. Always calls eventStream$.complete() on the
// non-throwing paths.
//
// Fix: the original string branch evaluated
// `if (!actionExecution || actionExecution?.returnDirect)` and then
// dereferenced `actionExecution.id`, throwing a TypeError whenever a string
// result arrived with no pending action. That case now just emits the text.
async function streamLangChainResponse({ result, eventStream$, actionExecution }) {
  if (typeof result === "string") {
    if (!actionExecution) {
      // No pending tool call: the string is simply a message.
      eventStream$.sendTextMessage(randomId(), result);
    } else {
      eventStream$.sendActionExecutionResult({
        actionExecutionId: actionExecution.id,
        actionName: actionExecution.name,
        result
      });
      if (actionExecution.returnDirect) {
        // returnDirect: surface the raw result to the user as a message too.
        eventStream$.sendTextMessage(randomId(), result);
      }
    }
  } else if (isAIMessage(result)) {
    maybeSendActionExecutionResultIsMessage(eventStream$, actionExecution);
    if (result.content) {
      eventStream$.sendTextMessage(randomId(), result.content);
    }
    for (const toolCall of result.tool_calls) {
      eventStream$.sendActionExecution({
        actionExecutionId: toolCall.id || randomId(),
        actionName: toolCall.name,
        args: JSON.stringify(toolCall.args)
      });
    }
  } else if (isBaseMessageChunk(result)) {
    maybeSendActionExecutionResultIsMessage(eventStream$, actionExecution);
    if (result.lc_kwargs?.content) {
      eventStream$.sendTextMessage(randomId(), result.content);
    }
    if (result.lc_kwargs?.tool_calls) {
      for (const toolCall of result.lc_kwargs?.tool_calls) {
        eventStream$.sendActionExecution({
          actionExecutionId: toolCall.id || randomId(),
          actionName: toolCall.name,
          args: JSON.stringify(toolCall.args)
        });
      }
    }
  } else if (result && "getReader" in result) {
    maybeSendActionExecutionResultIsMessage(eventStream$, actionExecution);
    const reader = result.getReader();
    // Two-state machine: null (idle), "message" (text open), "function"
    // (tool call open). toolCallDetails accumulates partial tool-call info
    // across chunks; an `index` change signals a new tool call mid-stream.
    let mode = null;
    let currentMessageId;
    const toolCallDetails = {
      name: null,
      id: null,
      index: null,
      prevIndex: null
    };
    while (true) {
      try {
        const { done, value } = await reader.read();
        let toolCallName = void 0;
        let toolCallId = void 0;
        let toolCallArgs = void 0;
        let hasToolCall = false;
        let content = "";
        if (value && value.content) {
          content = Array.isArray(value.content) ? value.content[0]?.text ?? "" : value.content;
        }
        if (isAIMessageChunk(value)) {
          const chunk = value.tool_call_chunks?.[0];
          toolCallArgs = chunk?.args;
          hasToolCall = chunk != void 0;
          if (chunk?.name) toolCallDetails.name = chunk.name;
          if (chunk?.index != null) {
            toolCallDetails.index = chunk.index;
            if (toolCallDetails.prevIndex == null) toolCallDetails.prevIndex = chunk.index;
          }
          if (chunk?.id) {
            // Suffix with the index so parallel tool calls get distinct ids.
            toolCallDetails.id = chunk.index != null ? `${chunk.id}-idx-${chunk.index}` : chunk.id;
          }
          toolCallName = toolCallDetails.name;
          toolCallId = toolCallDetails.id;
        } else if (isBaseMessageChunk(value)) {
          const chunk = value.additional_kwargs?.tool_calls?.[0];
          toolCallName = chunk?.function?.name;
          toolCallId = chunk?.id;
          toolCallArgs = chunk?.function?.arguments;
          hasToolCall = chunk?.function != void 0;
        }
        // Close out the open message/tool call when the stream switches
        // kinds or ends.
        if (mode === "message" && (toolCallId || done)) {
          mode = null;
          eventStream$.sendTextMessageEnd({
            messageId: currentMessageId
          });
        } else if (mode === "function" && (!hasToolCall || done)) {
          mode = null;
          eventStream$.sendActionExecutionEnd({
            actionExecutionId: toolCallId
          });
        }
        if (done) {
          break;
        }
        if (mode === null) {
          if (hasToolCall && toolCallId && toolCallName) {
            mode = "function";
            eventStream$.sendActionExecutionStart({
              actionExecutionId: toolCallId,
              actionName: toolCallName,
              parentMessageId: value.lc_kwargs?.id
            });
          } else if (content) {
            mode = "message";
            currentMessageId = value.lc_kwargs?.id || randomId();
            eventStream$.sendTextMessageStart({
              messageId: currentMessageId
            });
          }
        }
        if (mode === "message" && content) {
          eventStream$.sendTextMessageContent({
            messageId: currentMessageId,
            content
          });
        } else if (mode === "function" && toolCallArgs) {
          if (toolCallDetails.index !== toolCallDetails.prevIndex) {
            // A new tool call started mid-stream: end the old one and open
            // the new one before forwarding its arguments.
            eventStream$.sendActionExecutionEnd({
              actionExecutionId: toolCallId
            });
            eventStream$.sendActionExecutionStart({
              actionExecutionId: toolCallId,
              actionName: toolCallName,
              parentMessageId: value.lc_kwargs?.id
            });
            toolCallDetails.prevIndex = toolCallDetails.index;
          }
          eventStream$.sendActionExecutionArgs({
            actionExecutionId: toolCallId,
            args: toolCallArgs
          });
        }
      } catch (error) {
        // Best-effort: log and stop reading; the stream is still completed.
        console.error("Error reading from stream", error);
        break;
      }
    }
  } else if (actionExecution) {
    // Arbitrary value while an action is pending: serialize it as the result.
    eventStream$.sendActionExecutionResult({
      actionExecutionId: actionExecution.id,
      actionName: actionExecution.name,
      result: encodeResult(result)
    });
  } else {
    throw new Error("Invalid return type from LangChain function.");
  }
  eventStream$.complete();
}
__name(streamLangChainResponse, "streamLangChainResponse");
570
// Serializes an action result for transport: undefined becomes "", strings
// pass through unchanged, everything else is JSON-encoded.
function encodeResult(result) {
  if (result === void 0) {
    return "";
  }
  return typeof result === "string" ? result : JSON.stringify(result);
}
579
+ __name(encodeResult, "encodeResult");
580
+
581
+ // src/service-adapters/langchain/langchain-adapter.ts
582
+ import { randomUUID as randomUUID2 } from "@copilotkit/shared";
583
+ import { awaitAllCallbacks } from "@langchain/core/callbacks/promises";
584
// Service adapter that delegates each turn to a user-supplied LangChain
// chain function and streams its result back to the client.
var LangChainAdapter = class {
  options;
  /**
   * To use LangChain as a backend, provide a handler function to the adapter with your custom LangChain logic.
   */
  constructor(options) {
    this.options = options;
  }
  async process(request) {
    try {
      const { eventSource, model, actions, messages, runId } = request;
      const threadId = request.threadId ?? randomUUID2();
      const chainResult = await this.options.chainFn({
        messages: messages.map(convertMessageToLangChainMessage),
        tools: actions.map(convertActionInputToLangChainTool),
        model,
        threadId,
        runId
      });
      // The stream callback is intentionally not awaited; events are
      // forwarded asynchronously while process() returns the threadId.
      eventSource.stream(async (eventStream$) => {
        await streamLangChainResponse({
          result: chainResult,
          eventStream$
        });
      });
      return { threadId };
    } finally {
      // Ensure all LangChain callback handlers have flushed before returning.
      await awaitAllCallbacks();
    }
  }
};
__name(LangChainAdapter, "LangChainAdapter");
618
+
619
+ // src/service-adapters/google/google-genai-adapter.ts
620
+ import { ChatGoogle } from "@langchain/google-gauth";
621
+ import { AIMessage as AIMessage2 } from "@langchain/core/messages";
622
+ var DEFAULT_MODEL2 = "gemini-1.5-pro";
623
+ var GoogleGenerativeAIAdapter = class extends LangChainAdapter {
624
+ provider = "google";
625
+ model = DEFAULT_MODEL2;
626
+ constructor(options) {
627
+ super({
628
+ chainFn: async ({ messages, tools, threadId }) => {
629
+ const filteredMessages = messages.filter((message) => {
630
+ if (!(message instanceof AIMessage2)) {
631
+ return true;
632
+ }
633
+ return message.content && String(message.content).trim().length > 0 || message.tool_calls && message.tool_calls.length > 0;
634
+ });
635
+ this.model = (options == null ? void 0 : options.model) ?? "gemini-1.5-pro";
636
+ const model = new ChatGoogle({
637
+ apiKey: (options == null ? void 0 : options.apiKey) ?? process.env.GOOGLE_API_KEY,
638
+ modelName: this.model,
639
+ apiVersion: "v1beta"
640
+ }).bindTools(tools);
641
+ return model.stream(filteredMessages, {
642
+ metadata: {
643
+ conversation_id: threadId
644
+ }
645
+ });
646
+ }
647
+ });
648
+ }
649
+ };
650
+ __name(GoogleGenerativeAIAdapter, "GoogleGenerativeAIAdapter");
651
+
652
+ // src/service-adapters/openai/openai-assistant-adapter.ts
653
+ import OpenAI2 from "openai";
654
+ var OpenAIAssistantAdapter = class {
655
// OpenAI client instance used for all Assistants API calls.
openai;
// Whether the assistant may use the code_interpreter tool.
codeInterpreterEnabled;
// ID of the pre-configured OpenAI assistant to run.
assistantId;
// Whether the assistant may use the file_search tool.
fileSearchEnabled;
// When true, requests at most one tool call per run.
disableParallelToolCalls;
// When false (default), "system" roles are rewritten to "developer" during
// message conversion.
keepSystemRole = false;
661
+ constructor(params) {
662
+ this.openai = params.openai || new OpenAI2({});
663
+ this.codeInterpreterEnabled = params.codeInterpreterEnabled === false || true;
664
+ this.fileSearchEnabled = params.fileSearchEnabled === false || true;
665
+ this.assistantId = params.assistantId;
666
+ this.disableParallelToolCalls = (params == null ? void 0 : params.disableParallelToolCalls) || false;
667
+ this.keepSystemRole = (params == null ? void 0 : params.keepSystemRole) ?? false;
668
+ }
669
// Runs one turn against the OpenAI Assistants API. Reuses the thread id from
// request.extensions.openaiAssistantAPI when present, otherwise creates a new
// thread. Depending on the last message, either submits tool outputs to the
// pending run or posts a new user message; the resulting runId/threadId are
// echoed back in extensions for the next round-trip.
async process(request) {
  var _a, _b;
  const { messages, actions, eventSource, runId, forwardedParameters } = request;
  let threadId = (_b = (_a = request.extensions) == null ? void 0 : _a.openaiAssistantAPI) == null ? void 0 : _b.threadId;
  if (!threadId) {
    threadId = (await this.openai.beta.threads.create()).id;
  }
  const lastMessage = messages.at(-1);
  // NOTE(review): assumes `messages` is non-empty — with an empty array
  // `lastMessage` is undefined and the call below throws. Confirm callers
  // guarantee at least one message.
  let nextRunId = void 0;
  if (lastMessage.isResultMessage() && runId) {
    // A tool result arrived for an in-flight run: feed the outputs back in.
    nextRunId = await this.submitToolOutputs(threadId, runId, messages, eventSource);
  } else if (lastMessage.isTextMessage()) {
    nextRunId = await this.submitUserMessage(threadId, messages, actions, eventSource, forwardedParameters);
  } else {
    throw new Error("No actionable message found in the messages");
  }
  return {
    runId: nextRunId,
    threadId,
    extensions: {
      ...request.extensions,
      openaiAssistantAPI: {
        threadId,
        runId: nextRunId
      }
    }
  };
}
697
+ async submitToolOutputs(threadId, runId, messages, eventSource) {
698
+ let run = await this.openai.beta.threads.runs.retrieve(threadId, runId);
699
+ if (!run.required_action) {
700
+ throw new Error("No tool outputs required");
701
+ }
702
+ const toolCallsIds = run.required_action.submit_tool_outputs.tool_calls.map((toolCall) => toolCall.id);
703
+ const resultMessages = messages.filter((message) => message.isResultMessage() && toolCallsIds.includes(message.actionExecutionId));
704
+ if (toolCallsIds.length != resultMessages.length) {
705
+ throw new Error("Number of function results does not match the number of tool calls");
706
+ }
707
+ const toolOutputs = resultMessages.map((message) => {
708
+ return {
709
+ tool_call_id: message.actionExecutionId,
710
+ output: message.result
711
+ };
712
+ });
713
+ const stream = this.openai.beta.threads.runs.submitToolOutputsStream(threadId, runId, {
714
+ tool_outputs: toolOutputs,
715
+ ...this.disableParallelToolCalls && {
716
+ parallel_tool_calls: false
717
+ }
718
+ });
719
+ await this.streamResponse(stream, eventSource);
720
+ return runId;
721
+ }
722
+ async submitUserMessage(threadId, messages, actions, eventSource, forwardedParameters) {
723
+ messages = [
724
+ ...messages
725
+ ];
726
+ const instructionsMessage = messages.shift();
727
+ const instructions = instructionsMessage.isTextMessage() ? instructionsMessage.content : "";
728
+ const userMessage = messages.map((m) => convertMessageToOpenAIMessage(m, {
729
+ keepSystemRole: this.keepSystemRole
730
+ })).map(convertSystemMessageToAssistantAPI).at(-1);
731
+ if (userMessage.role !== "user") {
732
+ throw new Error("No user message found");
733
+ }
734
+ await this.openai.beta.threads.messages.create(threadId, {
735
+ role: "user",
736
+ content: userMessage.content
737
+ });
738
+ const openaiTools = actions.map(convertActionInputToOpenAITool);
739
+ const tools = [
740
+ ...openaiTools,
741
+ ...this.codeInterpreterEnabled ? [
742
+ {
743
+ type: "code_interpreter"
744
+ }
745
+ ] : [],
746
+ ...this.fileSearchEnabled ? [
747
+ {
748
+ type: "file_search"
749
+ }
750
+ ] : []
751
+ ];
752
+ let stream = this.openai.beta.threads.runs.stream(threadId, {
753
+ assistant_id: this.assistantId,
754
+ instructions,
755
+ tools,
756
+ ...(forwardedParameters == null ? void 0 : forwardedParameters.maxTokens) && {
757
+ max_completion_tokens: forwardedParameters.maxTokens
758
+ },
759
+ ...this.disableParallelToolCalls && {
760
+ parallel_tool_calls: false
761
+ }
762
+ });
763
+ await this.streamResponse(stream, eventSource);
764
+ return getRunIdFromStream(stream);
765
+ }
766
+ async streamResponse(stream, eventSource) {
767
+ eventSource.stream(async (eventStream$) => {
768
+ var _a, _b, _c, _d, _e, _f;
769
+ let inFunctionCall = false;
770
+ let currentMessageId;
771
+ let currentToolCallId;
772
+ for await (const chunk of stream) {
773
+ switch (chunk.event) {
774
+ case "thread.message.created":
775
+ if (inFunctionCall) {
776
+ eventStream$.sendActionExecutionEnd({
777
+ actionExecutionId: currentToolCallId
778
+ });
779
+ }
780
+ currentMessageId = chunk.data.id;
781
+ eventStream$.sendTextMessageStart({
782
+ messageId: currentMessageId
783
+ });
784
+ break;
785
+ case "thread.message.delta":
786
+ if (((_a = chunk.data.delta.content) == null ? void 0 : _a[0].type) === "text") {
787
+ eventStream$.sendTextMessageContent({
788
+ messageId: currentMessageId,
789
+ content: (_b = chunk.data.delta.content) == null ? void 0 : _b[0].text.value
790
+ });
791
+ }
792
+ break;
793
+ case "thread.message.completed":
794
+ eventStream$.sendTextMessageEnd({
795
+ messageId: currentMessageId
796
+ });
797
+ break;
798
+ case "thread.run.step.delta":
799
+ let toolCallId;
800
+ let toolCallName;
801
+ let toolCallArgs;
802
+ if (chunk.data.delta.step_details.type === "tool_calls" && ((_c = chunk.data.delta.step_details.tool_calls) == null ? void 0 : _c[0].type) === "function") {
803
+ toolCallId = (_d = chunk.data.delta.step_details.tool_calls) == null ? void 0 : _d[0].id;
804
+ toolCallName = (_e = chunk.data.delta.step_details.tool_calls) == null ? void 0 : _e[0].function.name;
805
+ toolCallArgs = (_f = chunk.data.delta.step_details.tool_calls) == null ? void 0 : _f[0].function.arguments;
806
+ }
807
+ if (toolCallName && toolCallId) {
808
+ if (inFunctionCall) {
809
+ eventStream$.sendActionExecutionEnd({
810
+ actionExecutionId: currentToolCallId
811
+ });
812
+ }
813
+ inFunctionCall = true;
814
+ currentToolCallId = toolCallId;
815
+ eventStream$.sendActionExecutionStart({
816
+ actionExecutionId: currentToolCallId,
817
+ parentMessageId: chunk.data.id,
818
+ actionName: toolCallName
819
+ });
820
+ } else if (toolCallArgs) {
821
+ eventStream$.sendActionExecutionArgs({
822
+ actionExecutionId: currentToolCallId,
823
+ args: toolCallArgs
824
+ });
825
+ }
826
+ break;
827
+ }
828
+ }
829
+ if (inFunctionCall) {
830
+ eventStream$.sendActionExecutionEnd({
831
+ actionExecutionId: currentToolCallId
832
+ });
833
+ }
834
+ eventStream$.complete();
835
+ });
836
+ }
837
+ };
838
+ __name(OpenAIAssistantAdapter, "OpenAIAssistantAdapter");
839
// Resolves with the run id as soon as the stream announces that the run was
// created, then detaches its listener so the stream can keep flowing.
//
// @param stream - an Assistants API event stream (EventEmitter-style).
// @returns {Promise<string>} the new run's id.
// NOTE(review): resolves only on "thread.run.created"; if the stream ends or
// errors before that event fires, the promise stays pending — confirm callers
// can tolerate this (the happy path always emits it first).
function getRunIdFromStream(stream) {
  return new Promise((resolve) => {
    const onEvent = /* @__PURE__ */ __name((event) => {
      if (event.event !== "thread.run.created") {
        return;
      }
      stream.off("event", onEvent);
      resolve(event.data.id);
    }, "runIdGetter");
    stream.on("event", onEvent);
  });
}
__name(getRunIdFromStream, "getRunIdFromStream");
852
+
853
+ // src/service-adapters/unify/unify-adapter.ts
854
+ import OpenAI3 from "openai";
855
+ import { randomId as randomId2, randomUUID as randomUUID3 } from "@copilotkit/shared";
856
// Service adapter that routes chat completions through the Unify gateway
// (OpenAI-compatible API at api.unify.ai) and relays the stream to the
// CopilotKit event stream.
var UnifyAdapter = class {
  apiKey;
  model;
  // True until the first streamed chunk of this adapter instance; used to
  // announce once which concrete model Unify routed to.
  start;
  provider = "unify";
  constructor(options) {
    if (options?.apiKey) {
      this.apiKey = options.apiKey;
    } else {
      // NOTE(review): literal placeholder string, not an env-var lookup —
      // presumably intentional so the gateway rejects unauthenticated calls;
      // verify upstream.
      this.apiKey = "UNIFY_API_KEY";
    }
    this.model = options?.model;
    this.start = true;
  }
  async process(request) {
    const tools = request.actions.map(convertActionInputToOpenAITool);
    const openai = new OpenAI3({
      apiKey: this.apiKey,
      baseURL: "https://api.unify.ai/v0/"
    });
    const forwardedParameters = request.forwardedParameters;
    const messages = request.messages.map((m) => convertMessageToOpenAIMessage(m));
    const stream = await openai.chat.completions.create({
      model: this.model,
      messages,
      stream: true,
      ...tools.length > 0 && {
        tools
      },
      // Null-check (not truthiness) so an explicit temperature of 0 is
      // forwarded instead of being silently dropped.
      ...forwardedParameters?.temperature != null && {
        temperature: forwardedParameters.temperature
      }
    });
    let model = null;
    let currentMessageId;
    let currentToolCallId;
    request.eventSource.stream(async (eventStream$) => {
      // mode tracks whether we are inside a text message or a tool call.
      let mode = null;
      for await (const chunk of stream) {
        if (this.start) {
          // One-time announcement of the concrete model Unify selected.
          model = chunk.model;
          currentMessageId = randomId2();
          eventStream$.sendTextMessageStart({
            messageId: currentMessageId
          });
          eventStream$.sendTextMessageContent({
            messageId: currentMessageId,
            content: `Model used: ${model}\n`
          });
          eventStream$.sendTextMessageEnd({
            messageId: currentMessageId
          });
          this.start = false;
        }
        const toolCall = chunk.choices[0].delta.tool_calls?.[0];
        const content = chunk.choices[0].delta.content;
        // Close the open message / tool call when the stream switches kind.
        if (mode === "message" && toolCall?.id) {
          mode = null;
          eventStream$.sendTextMessageEnd({
            messageId: currentMessageId
          });
        } else if (mode === "function" && (toolCall === undefined || toolCall?.id)) {
          mode = null;
          eventStream$.sendActionExecutionEnd({
            actionExecutionId: currentToolCallId
          });
        }
        if (mode === null) {
          if (toolCall?.id) {
            mode = "function";
            currentToolCallId = toolCall.id;
            eventStream$.sendActionExecutionStart({
              actionExecutionId: currentToolCallId,
              actionName: toolCall.function.name
            });
          } else if (content) {
            mode = "message";
            currentMessageId = chunk.id;
            eventStream$.sendTextMessageStart({
              messageId: currentMessageId
            });
          }
        }
        if (mode === "message" && content) {
          eventStream$.sendTextMessageContent({
            messageId: currentMessageId,
            content
          });
        } else if (mode === "function" && toolCall?.function?.arguments) {
          eventStream$.sendActionExecutionArgs({
            actionExecutionId: currentToolCallId,
            args: toolCall.function.arguments
          });
        }
      }
      // Flush whatever was still open when the stream ended.
      if (mode === "message") {
        eventStream$.sendTextMessageEnd({
          messageId: currentMessageId
        });
      } else if (mode === "function") {
        eventStream$.sendActionExecutionEnd({
          actionExecutionId: currentToolCallId
        });
      }
      eventStream$.complete();
    });
    return {
      threadId: request.threadId || randomUUID3()
    };
  }
};
__name(UnifyAdapter, "UnifyAdapter");
970
+
971
+ // src/service-adapters/groq/groq-adapter.ts
972
+ import { Groq } from "groq-sdk";
973
+ import { randomUUID as randomUUID4 } from "@copilotkit/shared";
974
// Default Groq model used when neither constructor params nor the request
// specify one.
var DEFAULT_MODEL3 = "llama-3.3-70b-versatile";
// Service adapter for Groq chat completions; converts CopilotKit messages
// and actions to the OpenAI-compatible shape and streams the reply back.
var GroqAdapter = class {
  model = DEFAULT_MODEL3;
  provider = "groq";
  disableParallelToolCalls = false;
  _groq;
  get groq() {
    return this._groq;
  }
  constructor(params) {
    // Accept an injected client (useful for testing) or build a default one.
    this._groq = params?.groq || new Groq({});
    if (params?.model) {
      this.model = params.model;
    }
    this.disableParallelToolCalls = params?.disableParallelToolCalls || false;
  }
  async process(request) {
    const { threadId, model = this.model, messages, actions, eventSource, forwardedParameters } = request;
    const tools = actions.map(convertActionInputToOpenAITool);
    let openaiMessages = messages.map((m) => convertMessageToOpenAIMessage(m, {
      keepSystemRole: true
    }));
    // Trim history so the prompt fits the model's context window.
    openaiMessages = limitMessagesToTokenCount(openaiMessages, tools, model);
    let toolChoice = forwardedParameters?.toolChoice;
    if (forwardedParameters?.toolChoice === "function") {
      // "function" shorthand expands to the OpenAI tool_choice object form.
      toolChoice = {
        type: "function",
        function: {
          name: forwardedParameters.toolChoiceFunctionName
        }
      };
    }
    let stream;
    try {
      stream = await this.groq.chat.completions.create({
        model,
        stream: true,
        messages: openaiMessages,
        ...tools.length > 0 && {
          tools
        },
        ...forwardedParameters?.maxTokens && {
          max_tokens: forwardedParameters.maxTokens
        },
        ...forwardedParameters?.stop && {
          stop: forwardedParameters.stop
        },
        ...toolChoice && {
          tool_choice: toolChoice
        },
        ...this.disableParallelToolCalls && {
          parallel_tool_calls: false
        },
        // Null-check (not truthiness) so an explicit temperature of 0 is
        // forwarded instead of being silently dropped.
        ...forwardedParameters?.temperature != null && {
          temperature: forwardedParameters.temperature
        }
      });
    } catch (error) {
      throw convertServiceAdapterError(error, "Groq");
    }
    eventSource.stream(async (eventStream$) => {
      // mode tracks whether we are inside a text message or a tool call.
      let mode = null;
      let currentMessageId;
      let currentToolCallId;
      try {
        for await (const chunk of stream) {
          const toolCall = chunk.choices[0].delta.tool_calls?.[0];
          const content = chunk.choices[0].delta.content;
          // Close the open message / tool call when the stream switches kind.
          if (mode === "message" && toolCall?.id) {
            mode = null;
            eventStream$.sendTextMessageEnd({
              messageId: currentMessageId
            });
          } else if (mode === "function" && (toolCall === undefined || toolCall?.id)) {
            mode = null;
            eventStream$.sendActionExecutionEnd({
              actionExecutionId: currentToolCallId
            });
          }
          if (mode === null) {
            if (toolCall?.id) {
              mode = "function";
              currentToolCallId = toolCall.id;
              eventStream$.sendActionExecutionStart({
                actionExecutionId: currentToolCallId,
                actionName: toolCall.function.name,
                parentMessageId: chunk.id
              });
            } else if (content) {
              mode = "message";
              currentMessageId = chunk.id;
              eventStream$.sendTextMessageStart({
                messageId: currentMessageId
              });
            }
          }
          if (mode === "message" && content) {
            eventStream$.sendTextMessageContent({
              messageId: currentMessageId,
              content
            });
          } else if (mode === "function" && toolCall?.function?.arguments) {
            eventStream$.sendActionExecutionArgs({
              actionExecutionId: currentToolCallId,
              args: toolCall.function.arguments
            });
          }
        }
        // Flush whatever was still open when the stream ended.
        if (mode === "message") {
          eventStream$.sendTextMessageEnd({
            messageId: currentMessageId
          });
        } else if (mode === "function") {
          eventStream$.sendActionExecutionEnd({
            actionExecutionId: currentToolCallId
          });
        }
      } catch (error) {
        throw convertServiceAdapterError(error, "Groq");
      }
      eventStream$.complete();
    });
    return {
      threadId: request.threadId || randomUUID4()
    };
  }
};
__name(GroqAdapter, "GroqAdapter");
1103
+
1104
+ export {
1105
+ OpenAIAdapter,
1106
+ LangChainAdapter,
1107
+ GoogleGenerativeAIAdapter,
1108
+ OpenAIAssistantAdapter,
1109
+ UnifyAdapter,
1110
+ GroqAdapter
1111
+ };
1112
+ //# sourceMappingURL=chunk-SBCOROE4.mjs.map