@copilotkit/runtime 0.0.0-mme-load-agent-state-20250117154700

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (176)
  1. package/.eslintrc.js +7 -0
  2. package/CHANGELOG.md +929 -0
  3. package/README.md +46 -0
  4. package/__snapshots__/schema/schema.graphql +306 -0
  5. package/dist/chunk-44O2JGUY.mjs +12 -0
  6. package/dist/chunk-44O2JGUY.mjs.map +1 -0
  7. package/dist/chunk-4BWLNVK4.mjs +80 -0
  8. package/dist/chunk-4BWLNVK4.mjs.map +1 -0
  9. package/dist/chunk-4QJA7OT2.mjs +3395 -0
  10. package/dist/chunk-4QJA7OT2.mjs.map +1 -0
  11. package/dist/chunk-4YJIXJLO.mjs +25 -0
  12. package/dist/chunk-4YJIXJLO.mjs.map +1 -0
  13. package/dist/chunk-67KK2GZ5.mjs +3765 -0
  14. package/dist/chunk-67KK2GZ5.mjs.map +1 -0
  15. package/dist/chunk-7BOVBWKI.mjs +25 -0
  16. package/dist/chunk-7BOVBWKI.mjs.map +1 -0
  17. package/dist/chunk-B5KHNAW5.mjs +25 -0
  18. package/dist/chunk-B5KHNAW5.mjs.map +1 -0
  19. package/dist/chunk-CLGKEUOA.mjs +1408 -0
  20. package/dist/chunk-CLGKEUOA.mjs.map +1 -0
  21. package/dist/chunk-D2WLFQS6.mjs +43 -0
  22. package/dist/chunk-D2WLFQS6.mjs.map +1 -0
  23. package/dist/chunk-DFOKBSIS.mjs +1 -0
  24. package/dist/chunk-DFOKBSIS.mjs.map +1 -0
  25. package/dist/chunk-DKLATJGV.mjs +25 -0
  26. package/dist/chunk-DKLATJGV.mjs.map +1 -0
  27. package/dist/chunk-FYONHPZL.mjs +3397 -0
  28. package/dist/chunk-FYONHPZL.mjs.map +1 -0
  29. package/dist/chunk-HNUNXFTW.mjs +129 -0
  30. package/dist/chunk-HNUNXFTW.mjs.map +1 -0
  31. package/dist/chunk-OKUXS4SE.mjs +25 -0
  32. package/dist/chunk-OKUXS4SE.mjs.map +1 -0
  33. package/dist/chunk-P4PPTGPJ.mjs +25 -0
  34. package/dist/chunk-P4PPTGPJ.mjs.map +1 -0
  35. package/dist/chunk-RFF5IIZJ.mjs +66 -0
  36. package/dist/chunk-RFF5IIZJ.mjs.map +1 -0
  37. package/dist/chunk-U3V2BCGI.mjs +152 -0
  38. package/dist/chunk-U3V2BCGI.mjs.map +1 -0
  39. package/dist/chunk-U7EKYV47.mjs +80 -0
  40. package/dist/chunk-U7EKYV47.mjs.map +1 -0
  41. package/dist/chunk-XXYYCH4X.mjs +80 -0
  42. package/dist/chunk-XXYYCH4X.mjs.map +1 -0
  43. package/dist/chunk-YT7A6V5T.mjs +1420 -0
  44. package/dist/chunk-YT7A6V5T.mjs.map +1 -0
  45. package/dist/copilot-runtime-36700e00.d.ts +196 -0
  46. package/dist/copilot-runtime-8c442d65.d.ts +209 -0
  47. package/dist/graphql/types/base/index.d.ts +6 -0
  48. package/dist/graphql/types/base/index.js +63 -0
  49. package/dist/graphql/types/base/index.js.map +1 -0
  50. package/dist/graphql/types/base/index.mjs +8 -0
  51. package/dist/graphql/types/base/index.mjs.map +1 -0
  52. package/dist/graphql/types/converted/index.d.ts +2 -0
  53. package/dist/graphql/types/converted/index.js +187 -0
  54. package/dist/graphql/types/converted/index.js.map +1 -0
  55. package/dist/graphql/types/converted/index.mjs +17 -0
  56. package/dist/graphql/types/converted/index.mjs.map +1 -0
  57. package/dist/groq-adapter-696b5d29.d.ts +281 -0
  58. package/dist/groq-adapter-7a82cd22.d.ts +301 -0
  59. package/dist/index-a7f37670.d.ts +103 -0
  60. package/dist/index-cc2b17be.d.ts +87 -0
  61. package/dist/index.d.ts +23 -0
  62. package/dist/index.js +5597 -0
  63. package/dist/index.js.map +1 -0
  64. package/dist/index.mjs +76 -0
  65. package/dist/index.mjs.map +1 -0
  66. package/dist/langserve-9125a12e.d.ts +176 -0
  67. package/dist/langserve-e308c437.d.ts +209 -0
  68. package/dist/lib/cloud/index.d.ts +6 -0
  69. package/dist/lib/cloud/index.js +18 -0
  70. package/dist/lib/cloud/index.js.map +1 -0
  71. package/dist/lib/cloud/index.mjs +1 -0
  72. package/dist/lib/cloud/index.mjs.map +1 -0
  73. package/dist/lib/index.d.ts +20 -0
  74. package/dist/lib/index.js +5256 -0
  75. package/dist/lib/index.js.map +1 -0
  76. package/dist/lib/index.mjs +58 -0
  77. package/dist/lib/index.mjs.map +1 -0
  78. package/dist/lib/integrations/index.d.ts +33 -0
  79. package/dist/lib/integrations/index.js +2488 -0
  80. package/dist/lib/integrations/index.js.map +1 -0
  81. package/dist/lib/integrations/index.mjs +34 -0
  82. package/dist/lib/integrations/index.mjs.map +1 -0
  83. package/dist/lib/integrations/nest/index.d.ts +14 -0
  84. package/dist/lib/integrations/nest/index.js +2397 -0
  85. package/dist/lib/integrations/nest/index.js.map +1 -0
  86. package/dist/lib/integrations/nest/index.mjs +13 -0
  87. package/dist/lib/integrations/nest/index.mjs.map +1 -0
  88. package/dist/lib/integrations/node-express/index.d.ts +14 -0
  89. package/dist/lib/integrations/node-express/index.js +2397 -0
  90. package/dist/lib/integrations/node-express/index.js.map +1 -0
  91. package/dist/lib/integrations/node-express/index.mjs +13 -0
  92. package/dist/lib/integrations/node-express/index.mjs.map +1 -0
  93. package/dist/lib/integrations/node-http/index.d.ts +14 -0
  94. package/dist/lib/integrations/node-http/index.js +2383 -0
  95. package/dist/lib/integrations/node-http/index.js.map +1 -0
  96. package/dist/lib/integrations/node-http/index.mjs +12 -0
  97. package/dist/lib/integrations/node-http/index.mjs.map +1 -0
  98. package/dist/service-adapters/index.d.ts +84 -0
  99. package/dist/service-adapters/index.js +1460 -0
  100. package/dist/service-adapters/index.js.map +1 -0
  101. package/dist/service-adapters/index.mjs +26 -0
  102. package/dist/service-adapters/index.mjs.map +1 -0
  103. package/dist/utils/index.d.ts +49 -0
  104. package/dist/utils/index.js +174 -0
  105. package/dist/utils/index.js.map +1 -0
  106. package/dist/utils/index.mjs +12 -0
  107. package/dist/utils/index.mjs.map +1 -0
  108. package/jest.config.js +5 -0
  109. package/package.json +85 -0
  110. package/scripts/generate-gql-schema.ts +13 -0
  111. package/src/agents/langgraph/event-source.ts +287 -0
  112. package/src/agents/langgraph/events.ts +338 -0
  113. package/src/graphql/inputs/action.input.ts +16 -0
  114. package/src/graphql/inputs/agent-session.input.ts +13 -0
  115. package/src/graphql/inputs/agent-state.input.ts +10 -0
  116. package/src/graphql/inputs/cloud-guardrails.input.ts +16 -0
  117. package/src/graphql/inputs/cloud.input.ts +8 -0
  118. package/src/graphql/inputs/context-property.input.ts +10 -0
  119. package/src/graphql/inputs/custom-property.input.ts +15 -0
  120. package/src/graphql/inputs/extensions.input.ts +21 -0
  121. package/src/graphql/inputs/forwarded-parameters.input.ts +22 -0
  122. package/src/graphql/inputs/frontend.input.ts +14 -0
  123. package/src/graphql/inputs/generate-copilot-response.input.ts +51 -0
  124. package/src/graphql/inputs/load-agent-state.input.ts +10 -0
  125. package/src/graphql/inputs/message.input.ts +92 -0
  126. package/src/graphql/resolvers/copilot.resolver.ts +561 -0
  127. package/src/graphql/resolvers/state.resolver.ts +23 -0
  128. package/src/graphql/types/agents-response.type.ts +19 -0
  129. package/src/graphql/types/base/index.ts +10 -0
  130. package/src/graphql/types/converted/index.ts +136 -0
  131. package/src/graphql/types/copilot-response.type.ts +117 -0
  132. package/src/graphql/types/enums.ts +37 -0
  133. package/src/graphql/types/extensions-response.type.ts +23 -0
  134. package/src/graphql/types/guardrails-result.type.ts +20 -0
  135. package/src/graphql/types/load-agent-state-response.type.ts +17 -0
  136. package/src/graphql/types/message-status.type.ts +40 -0
  137. package/src/graphql/types/response-status.type.ts +66 -0
  138. package/src/index.ts +4 -0
  139. package/src/lib/cloud/index.ts +4 -0
  140. package/src/lib/index.ts +8 -0
  141. package/src/lib/integrations/index.ts +6 -0
  142. package/src/lib/integrations/nest/index.ts +17 -0
  143. package/src/lib/integrations/nextjs/app-router.ts +40 -0
  144. package/src/lib/integrations/nextjs/pages-router.ts +49 -0
  145. package/src/lib/integrations/node-express/index.ts +17 -0
  146. package/src/lib/integrations/node-http/index.ts +34 -0
  147. package/src/lib/integrations/shared.ts +110 -0
  148. package/src/lib/logger.ts +28 -0
  149. package/src/lib/runtime/copilot-runtime.ts +571 -0
  150. package/src/lib/runtime/remote-action-constructors.ts +304 -0
  151. package/src/lib/runtime/remote-actions.ts +174 -0
  152. package/src/lib/runtime/remote-lg-action.ts +669 -0
  153. package/src/lib/telemetry-client.ts +52 -0
  154. package/src/service-adapters/anthropic/anthropic-adapter.ts +204 -0
  155. package/src/service-adapters/anthropic/utils.ts +144 -0
  156. package/src/service-adapters/conversion.ts +64 -0
  157. package/src/service-adapters/events.ts +424 -0
  158. package/src/service-adapters/experimental/empty/empty-adapter.ts +33 -0
  159. package/src/service-adapters/experimental/ollama/ollama-adapter.ts +79 -0
  160. package/src/service-adapters/google/google-genai-adapter.ts +39 -0
  161. package/src/service-adapters/groq/groq-adapter.ts +173 -0
  162. package/src/service-adapters/index.ts +16 -0
  163. package/src/service-adapters/langchain/langchain-adapter.ts +106 -0
  164. package/src/service-adapters/langchain/langserve.ts +87 -0
  165. package/src/service-adapters/langchain/types.ts +14 -0
  166. package/src/service-adapters/langchain/utils.ts +306 -0
  167. package/src/service-adapters/openai/openai-adapter.ts +211 -0
  168. package/src/service-adapters/openai/openai-assistant-adapter.ts +315 -0
  169. package/src/service-adapters/openai/utils.ts +161 -0
  170. package/src/service-adapters/service-adapter.ts +34 -0
  171. package/src/service-adapters/unify/unify-adapter.ts +144 -0
  172. package/src/utils/failed-response-status-reasons.ts +48 -0
  173. package/src/utils/index.ts +1 -0
  174. package/tsconfig.json +11 -0
  175. package/tsup.config.ts +16 -0
  176. package/typedoc.json +4 -0
@@ -0,0 +1,1420 @@
1
+ import {
2
+ __name
3
+ } from "./chunk-44O2JGUY.mjs";
4
+
5
+ // src/service-adapters/langchain/langserve.ts
6
+ import { RemoteRunnable } from "langchain/runnables/remote";
7
+ var RemoteChain = class {
8
+ name;
9
+ description;
10
+ chainUrl;
11
+ parameters;
12
+ parameterType;
13
+ constructor(options) {
14
+ this.name = options.name;
15
+ this.description = options.description;
16
+ this.chainUrl = options.chainUrl;
17
+ this.parameters = options.parameters;
18
+ this.parameterType = options.parameterType || "multi";
19
+ }
20
+ async toAction() {
21
+ if (!this.parameters) {
22
+ await this.inferLangServeParameters();
23
+ }
24
+ return {
25
+ name: this.name,
26
+ description: this.description,
27
+ parameters: this.parameters,
28
+ handler: async (args) => {
29
+ const runnable = new RemoteRunnable({
30
+ url: this.chainUrl
31
+ });
32
+ let input;
33
+ if (this.parameterType === "single") {
34
+ input = args[Object.keys(args)[0]];
35
+ } else {
36
+ input = args;
37
+ }
38
+ return await runnable.invoke(input);
39
+ }
40
+ };
41
+ }
42
+ async inferLangServeParameters() {
43
+ const supportedTypes = [
44
+ "string",
45
+ "number",
46
+ "boolean"
47
+ ];
48
+ let schemaUrl = this.chainUrl.replace(/\/+$/, "") + "/input_schema";
49
+ let schema = await fetch(schemaUrl).then((res) => res.json()).catch(() => {
50
+ throw new Error("Failed to fetch langserve schema at " + schemaUrl);
51
+ });
52
+ if (supportedTypes.includes(schema.type)) {
53
+ this.parameterType = "single";
54
+ this.parameters = [
55
+ {
56
+ name: "input",
57
+ type: schema.type,
58
+ description: "The input to the chain"
59
+ }
60
+ ];
61
+ } else if (schema.type === "object") {
62
+ this.parameterType = "multi";
63
+ this.parameters = Object.keys(schema.properties).map((key) => {
64
+ var _a;
65
+ let property = schema.properties[key];
66
+ if (!supportedTypes.includes(property.type)) {
67
+ throw new Error("Unsupported schema type");
68
+ }
69
+ return {
70
+ name: key,
71
+ type: property.type,
72
+ description: property.description || "",
73
+ required: ((_a = schema.required) == null ? void 0 : _a.includes(key)) || false
74
+ };
75
+ });
76
+ } else {
77
+ throw new Error("Unsupported schema type");
78
+ }
79
+ }
80
+ };
81
+ __name(RemoteChain, "RemoteChain");
82
+
83
+ // src/service-adapters/openai/openai-adapter.ts
84
+ import OpenAI from "openai";
85
+
86
+ // src/service-adapters/openai/utils.ts
87
+ function limitMessagesToTokenCount(messages, tools, model, maxTokens) {
88
+ maxTokens || (maxTokens = maxTokensForOpenAIModel(model));
89
+ const result = [];
90
+ const toolsNumTokens = countToolsTokens(model, tools);
91
+ if (toolsNumTokens > maxTokens) {
92
+ throw new Error(`Too many tokens in function definitions: ${toolsNumTokens} > ${maxTokens}`);
93
+ }
94
+ maxTokens -= toolsNumTokens;
95
+ for (const message of messages) {
96
+ if (message.role === "system") {
97
+ const numTokens = countMessageTokens(model, message);
98
+ maxTokens -= numTokens;
99
+ if (maxTokens < 0) {
100
+ throw new Error("Not enough tokens for system message.");
101
+ }
102
+ }
103
+ }
104
+ let cutoff = false;
105
+ const reversedMessages = [
106
+ ...messages
107
+ ].reverse();
108
+ for (const message of reversedMessages) {
109
+ if (message.role === "system") {
110
+ result.unshift(message);
111
+ continue;
112
+ } else if (cutoff) {
113
+ continue;
114
+ }
115
+ let numTokens = countMessageTokens(model, message);
116
+ if (maxTokens < numTokens) {
117
+ cutoff = true;
118
+ continue;
119
+ }
120
+ result.unshift(message);
121
+ maxTokens -= numTokens;
122
+ }
123
+ return result;
124
+ }
125
+ __name(limitMessagesToTokenCount, "limitMessagesToTokenCount");
126
+ function maxTokensForOpenAIModel(model) {
127
+ return maxTokensByModel[model] || DEFAULT_MAX_TOKENS;
128
+ }
129
+ __name(maxTokensForOpenAIModel, "maxTokensForOpenAIModel");
130
+ var DEFAULT_MAX_TOKENS = 128e3;
131
+ var maxTokensByModel = {
132
+ // GPT-4
133
+ "gpt-4o": 128e3,
134
+ "gpt-4o-2024-05-13": 128e3,
135
+ "gpt-4-turbo": 128e3,
136
+ "gpt-4-turbo-2024-04-09": 128e3,
137
+ "gpt-4-0125-preview": 128e3,
138
+ "gpt-4-turbo-preview": 128e3,
139
+ "gpt-4-1106-preview": 128e3,
140
+ "gpt-4-vision-preview": 128e3,
141
+ "gpt-4-1106-vision-preview": 128e3,
142
+ "gpt-4-32k": 32768,
143
+ "gpt-4-32k-0613": 32768,
144
+ "gpt-4-32k-0314": 32768,
145
+ "gpt-4": 8192,
146
+ "gpt-4-0613": 8192,
147
+ "gpt-4-0314": 8192,
148
+ // GPT-3.5
149
+ "gpt-3.5-turbo-0125": 16385,
150
+ "gpt-3.5-turbo": 16385,
151
+ "gpt-3.5-turbo-1106": 16385,
152
+ "gpt-3.5-turbo-instruct": 4096,
153
+ "gpt-3.5-turbo-16k": 16385,
154
+ "gpt-3.5-turbo-0613": 4096,
155
+ "gpt-3.5-turbo-16k-0613": 16385,
156
+ "gpt-3.5-turbo-0301": 4097
157
+ };
158
+ function countToolsTokens(model, tools) {
159
+ if (tools.length === 0) {
160
+ return 0;
161
+ }
162
+ const json = JSON.stringify(tools);
163
+ return countTokens(model, json);
164
+ }
165
+ __name(countToolsTokens, "countToolsTokens");
166
+ function countMessageTokens(model, message) {
167
+ return countTokens(model, message.content || "");
168
+ }
169
+ __name(countMessageTokens, "countMessageTokens");
170
+ function countTokens(model, text) {
171
+ return text.length / 3;
172
+ }
173
+ __name(countTokens, "countTokens");
174
+ function convertActionInputToOpenAITool(action) {
175
+ return {
176
+ type: "function",
177
+ function: {
178
+ name: action.name,
179
+ description: action.description,
180
+ parameters: JSON.parse(action.jsonSchema)
181
+ }
182
+ };
183
+ }
184
+ __name(convertActionInputToOpenAITool, "convertActionInputToOpenAITool");
185
+ function convertMessageToOpenAIMessage(message) {
186
+ if (message.isTextMessage()) {
187
+ return {
188
+ role: message.role,
189
+ content: message.content
190
+ };
191
+ } else if (message.isActionExecutionMessage()) {
192
+ return {
193
+ role: "assistant",
194
+ tool_calls: [
195
+ {
196
+ id: message.id,
197
+ type: "function",
198
+ function: {
199
+ name: message.name,
200
+ arguments: JSON.stringify(message.arguments)
201
+ }
202
+ }
203
+ ]
204
+ };
205
+ } else if (message.isResultMessage()) {
206
+ return {
207
+ role: "tool",
208
+ content: message.result,
209
+ tool_call_id: message.actionExecutionId
210
+ };
211
+ }
212
+ }
213
+ __name(convertMessageToOpenAIMessage, "convertMessageToOpenAIMessage");
214
+ function convertSystemMessageToAssistantAPI(message) {
215
+ return {
216
+ ...message,
217
+ ...message.role === "system" && {
218
+ role: "assistant",
219
+ content: "THE FOLLOWING MESSAGE IS A SYSTEM MESSAGE: " + message.content
220
+ }
221
+ };
222
+ }
223
+ __name(convertSystemMessageToAssistantAPI, "convertSystemMessageToAssistantAPI");
224
+
225
+ // src/service-adapters/openai/openai-adapter.ts
226
+ import { randomUUID } from "@copilotkit/shared";
227
+ var DEFAULT_MODEL = "gpt-4o";
228
+ var OpenAIAdapter = class {
229
+ model = DEFAULT_MODEL;
230
+ disableParallelToolCalls = false;
231
+ _openai;
232
+ get openai() {
233
+ return this._openai;
234
+ }
235
+ constructor(params) {
236
+ this._openai = (params == null ? void 0 : params.openai) || new OpenAI({});
237
+ if (params == null ? void 0 : params.model) {
238
+ this.model = params.model;
239
+ }
240
+ this.disableParallelToolCalls = (params == null ? void 0 : params.disableParallelToolCalls) || false;
241
+ }
242
+ async process(request) {
243
+ const { threadId: threadIdFromRequest, model = this.model, messages, actions, eventSource, forwardedParameters } = request;
244
+ const tools = actions.map(convertActionInputToOpenAITool);
245
+ const threadId = threadIdFromRequest ?? randomUUID();
246
+ let openaiMessages = messages.map(convertMessageToOpenAIMessage);
247
+ openaiMessages = limitMessagesToTokenCount(openaiMessages, tools, model);
248
+ let toolChoice = forwardedParameters == null ? void 0 : forwardedParameters.toolChoice;
249
+ if ((forwardedParameters == null ? void 0 : forwardedParameters.toolChoice) === "function") {
250
+ toolChoice = {
251
+ type: "function",
252
+ function: {
253
+ name: forwardedParameters.toolChoiceFunctionName
254
+ }
255
+ };
256
+ }
257
+ const stream = this.openai.beta.chat.completions.stream({
258
+ model,
259
+ stream: true,
260
+ messages: openaiMessages,
261
+ ...tools.length > 0 && {
262
+ tools
263
+ },
264
+ ...(forwardedParameters == null ? void 0 : forwardedParameters.maxTokens) && {
265
+ max_tokens: forwardedParameters.maxTokens
266
+ },
267
+ ...(forwardedParameters == null ? void 0 : forwardedParameters.stop) && {
268
+ stop: forwardedParameters.stop
269
+ },
270
+ ...toolChoice && {
271
+ tool_choice: toolChoice
272
+ },
273
+ ...this.disableParallelToolCalls && {
274
+ parallel_tool_calls: false
275
+ },
276
+ ...(forwardedParameters == null ? void 0 : forwardedParameters.temperature) && {
277
+ temperature: forwardedParameters.temperature
278
+ }
279
+ });
280
+ eventSource.stream(async (eventStream$) => {
281
+ var _a, _b;
282
+ let mode = null;
283
+ let currentMessageId;
284
+ let currentToolCallId;
285
+ for await (const chunk of stream) {
286
+ if (chunk.choices.length === 0) {
287
+ continue;
288
+ }
289
+ const toolCall = (_a = chunk.choices[0].delta.tool_calls) == null ? void 0 : _a[0];
290
+ const content = chunk.choices[0].delta.content;
291
+ if (mode === "message" && (toolCall == null ? void 0 : toolCall.id)) {
292
+ mode = null;
293
+ eventStream$.sendTextMessageEnd({
294
+ messageId: currentMessageId
295
+ });
296
+ } else if (mode === "function" && (toolCall === void 0 || (toolCall == null ? void 0 : toolCall.id))) {
297
+ mode = null;
298
+ eventStream$.sendActionExecutionEnd({
299
+ actionExecutionId: currentToolCallId
300
+ });
301
+ }
302
+ if (mode === null) {
303
+ if (toolCall == null ? void 0 : toolCall.id) {
304
+ mode = "function";
305
+ currentToolCallId = toolCall.id;
306
+ eventStream$.sendActionExecutionStart({
307
+ actionExecutionId: currentToolCallId,
308
+ parentMessageId: chunk.id,
309
+ actionName: toolCall.function.name
310
+ });
311
+ } else if (content) {
312
+ mode = "message";
313
+ currentMessageId = chunk.id;
314
+ eventStream$.sendTextMessageStart({
315
+ messageId: currentMessageId
316
+ });
317
+ }
318
+ }
319
+ if (mode === "message" && content) {
320
+ eventStream$.sendTextMessageContent({
321
+ messageId: currentMessageId,
322
+ content
323
+ });
324
+ } else if (mode === "function" && ((_b = toolCall == null ? void 0 : toolCall.function) == null ? void 0 : _b.arguments)) {
325
+ eventStream$.sendActionExecutionArgs({
326
+ actionExecutionId: currentToolCallId,
327
+ args: toolCall.function.arguments
328
+ });
329
+ }
330
+ }
331
+ if (mode === "message") {
332
+ eventStream$.sendTextMessageEnd({
333
+ messageId: currentMessageId
334
+ });
335
+ } else if (mode === "function") {
336
+ eventStream$.sendActionExecutionEnd({
337
+ actionExecutionId: currentToolCallId
338
+ });
339
+ }
340
+ eventStream$.complete();
341
+ });
342
+ return {
343
+ threadId
344
+ };
345
+ }
346
+ };
347
+ __name(OpenAIAdapter, "OpenAIAdapter");
348
+
349
+ // src/service-adapters/langchain/utils.ts
350
+ import { AIMessage, HumanMessage, SystemMessage, ToolMessage } from "@langchain/core/messages";
351
+ import { DynamicStructuredTool } from "@langchain/core/tools";
352
+ import { randomId, convertJsonSchemaToZodSchema } from "@copilotkit/shared";
353
+ function convertMessageToLangChainMessage(message) {
354
+ if (message.isTextMessage()) {
355
+ if (message.role == "user") {
356
+ return new HumanMessage(message.content);
357
+ } else if (message.role == "assistant") {
358
+ return new AIMessage(message.content);
359
+ } else if (message.role === "system") {
360
+ return new SystemMessage(message.content);
361
+ }
362
+ } else if (message.isActionExecutionMessage()) {
363
+ return new AIMessage({
364
+ content: "",
365
+ tool_calls: [
366
+ {
367
+ id: message.id,
368
+ args: message.arguments,
369
+ name: message.name
370
+ }
371
+ ]
372
+ });
373
+ } else if (message.isResultMessage()) {
374
+ return new ToolMessage({
375
+ content: message.result,
376
+ tool_call_id: message.actionExecutionId
377
+ });
378
+ }
379
+ }
380
+ __name(convertMessageToLangChainMessage, "convertMessageToLangChainMessage");
381
+ function convertActionInputToLangChainTool(actionInput) {
382
+ return new DynamicStructuredTool({
383
+ name: actionInput.name,
384
+ description: actionInput.description,
385
+ schema: convertJsonSchemaToZodSchema(JSON.parse(actionInput.jsonSchema), true),
386
+ func: async () => {
387
+ return "";
388
+ }
389
+ });
390
+ }
391
+ __name(convertActionInputToLangChainTool, "convertActionInputToLangChainTool");
392
+ function isAIMessage(message) {
393
+ return Object.prototype.toString.call(message) === "[object AIMessage]";
394
+ }
395
+ __name(isAIMessage, "isAIMessage");
396
+ function isAIMessageChunk(message) {
397
+ return Object.prototype.toString.call(message) === "[object AIMessageChunk]";
398
+ }
399
+ __name(isAIMessageChunk, "isAIMessageChunk");
400
+ function isBaseMessageChunk(message) {
401
+ return Object.prototype.toString.call(message) === "[object BaseMessageChunk]";
402
+ }
403
+ __name(isBaseMessageChunk, "isBaseMessageChunk");
404
+ function maybeSendActionExecutionResultIsMessage(eventStream$, actionExecution) {
405
+ if (actionExecution) {
406
+ eventStream$.sendActionExecutionResult({
407
+ actionExecutionId: actionExecution.id,
408
+ actionName: actionExecution.name,
409
+ result: "Sending a message"
410
+ });
411
+ }
412
+ }
413
+ __name(maybeSendActionExecutionResultIsMessage, "maybeSendActionExecutionResultIsMessage");
414
+ async function streamLangChainResponse({ result, eventStream$, actionExecution }) {
415
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
416
+ if (typeof result === "string") {
417
+ if (!actionExecution) {
418
+ eventStream$.sendTextMessage(randomId(), result);
419
+ } else {
420
+ eventStream$.sendActionExecutionResult({
421
+ actionExecutionId: actionExecution.id,
422
+ actionName: actionExecution.name,
423
+ result
424
+ });
425
+ }
426
+ } else if (isAIMessage(result)) {
427
+ maybeSendActionExecutionResultIsMessage(eventStream$, actionExecution);
428
+ if (result.content) {
429
+ eventStream$.sendTextMessage(randomId(), result.content);
430
+ }
431
+ for (const toolCall of result.tool_calls) {
432
+ eventStream$.sendActionExecution({
433
+ actionExecutionId: toolCall.id || randomId(),
434
+ actionName: toolCall.name,
435
+ args: JSON.stringify(toolCall.args)
436
+ });
437
+ }
438
+ } else if (isBaseMessageChunk(result)) {
439
+ maybeSendActionExecutionResultIsMessage(eventStream$, actionExecution);
440
+ if ((_a = result.lc_kwargs) == null ? void 0 : _a.content) {
441
+ eventStream$.sendTextMessage(randomId(), result.content);
442
+ }
443
+ if ((_b = result.lc_kwargs) == null ? void 0 : _b.tool_calls) {
444
+ for (const toolCall of (_c = result.lc_kwargs) == null ? void 0 : _c.tool_calls) {
445
+ eventStream$.sendActionExecution({
446
+ actionExecutionId: toolCall.id || randomId(),
447
+ actionName: toolCall.name,
448
+ args: JSON.stringify(toolCall.args)
449
+ });
450
+ }
451
+ }
452
+ } else if (result && "getReader" in result) {
453
+ maybeSendActionExecutionResultIsMessage(eventStream$, actionExecution);
454
+ let reader = result.getReader();
455
+ let mode = null;
456
+ let currentMessageId;
457
+ const toolCallDetails = {
458
+ name: null,
459
+ id: null,
460
+ index: null,
461
+ prevIndex: null
462
+ };
463
+ while (true) {
464
+ try {
465
+ const { done, value } = await reader.read();
466
+ let toolCallName = void 0;
467
+ let toolCallId = void 0;
468
+ let toolCallArgs = void 0;
469
+ let hasToolCall = false;
470
+ let content = "";
471
+ if (value && value.content) {
472
+ content = Array.isArray(value.content) ? ((_d = value.content[0]) == null ? void 0 : _d.text) ?? "" : value.content;
473
+ }
474
+ if (isAIMessageChunk(value)) {
475
+ let chunk = (_e = value.tool_call_chunks) == null ? void 0 : _e[0];
476
+ toolCallArgs = chunk == null ? void 0 : chunk.args;
477
+ hasToolCall = chunk != void 0;
478
+ if (chunk == null ? void 0 : chunk.name)
479
+ toolCallDetails.name = chunk.name;
480
+ if ((chunk == null ? void 0 : chunk.index) != null) {
481
+ toolCallDetails.index = chunk.index;
482
+ if (toolCallDetails.prevIndex == null)
483
+ toolCallDetails.prevIndex = chunk.index;
484
+ }
485
+ if (chunk == null ? void 0 : chunk.id)
486
+ toolCallDetails.id = chunk.index != null ? `${chunk.id}-idx-${chunk.index}` : chunk.id;
487
+ toolCallName = toolCallDetails.name;
488
+ toolCallId = toolCallDetails.id;
489
+ } else if (isBaseMessageChunk(value)) {
490
+ let chunk = (_g = (_f = value.additional_kwargs) == null ? void 0 : _f.tool_calls) == null ? void 0 : _g[0];
491
+ toolCallName = (_h = chunk == null ? void 0 : chunk.function) == null ? void 0 : _h.name;
492
+ toolCallId = chunk == null ? void 0 : chunk.id;
493
+ toolCallArgs = (_i = chunk == null ? void 0 : chunk.function) == null ? void 0 : _i.arguments;
494
+ hasToolCall = (chunk == null ? void 0 : chunk.function) != void 0;
495
+ }
496
+ if (mode === "message" && (toolCallId || done)) {
497
+ mode = null;
498
+ eventStream$.sendTextMessageEnd({
499
+ messageId: currentMessageId
500
+ });
501
+ } else if (mode === "function" && (!hasToolCall || done)) {
502
+ mode = null;
503
+ eventStream$.sendActionExecutionEnd({
504
+ actionExecutionId: toolCallId
505
+ });
506
+ }
507
+ if (done) {
508
+ break;
509
+ }
510
+ if (mode === null) {
511
+ if (hasToolCall && toolCallId && toolCallName) {
512
+ mode = "function";
513
+ eventStream$.sendActionExecutionStart({
514
+ actionExecutionId: toolCallId,
515
+ actionName: toolCallName,
516
+ parentMessageId: (_j = value.lc_kwargs) == null ? void 0 : _j.id
517
+ });
518
+ } else if (content) {
519
+ mode = "message";
520
+ currentMessageId = ((_k = value.lc_kwargs) == null ? void 0 : _k.id) || randomId();
521
+ eventStream$.sendTextMessageStart({
522
+ messageId: currentMessageId
523
+ });
524
+ }
525
+ }
526
+ if (mode === "message" && content) {
527
+ eventStream$.sendTextMessageContent({
528
+ messageId: currentMessageId,
529
+ content
530
+ });
531
+ } else if (mode === "function" && toolCallArgs) {
532
+ if (toolCallDetails.index !== toolCallDetails.prevIndex) {
533
+ eventStream$.sendActionExecutionEnd({
534
+ actionExecutionId: toolCallId
535
+ });
536
+ eventStream$.sendActionExecutionStart({
537
+ actionExecutionId: toolCallId,
538
+ actionName: toolCallName,
539
+ parentMessageId: (_l = value.lc_kwargs) == null ? void 0 : _l.id
540
+ });
541
+ toolCallDetails.prevIndex = toolCallDetails.index;
542
+ }
543
+ eventStream$.sendActionExecutionArgs({
544
+ actionExecutionId: toolCallId,
545
+ args: toolCallArgs
546
+ });
547
+ }
548
+ } catch (error) {
549
+ console.error("Error reading from stream", error);
550
+ break;
551
+ }
552
+ }
553
+ } else if (actionExecution) {
554
+ eventStream$.sendActionExecutionResult({
555
+ actionExecutionId: actionExecution.id,
556
+ actionName: actionExecution.name,
557
+ result: encodeResult(result)
558
+ });
559
+ } else {
560
+ throw new Error("Invalid return type from LangChain function.");
561
+ }
562
+ eventStream$.complete();
563
+ }
564
+ __name(streamLangChainResponse, "streamLangChainResponse");
565
+ function encodeResult(result) {
566
+ if (result === void 0) {
567
+ return "";
568
+ } else if (typeof result === "string") {
569
+ return result;
570
+ } else {
571
+ return JSON.stringify(result);
572
+ }
573
+ }
574
+ __name(encodeResult, "encodeResult");
575
+
576
+ // src/service-adapters/langchain/langchain-adapter.ts
577
+ import { randomUUID as randomUUID2 } from "@copilotkit/shared";
578
+ import { awaitAllCallbacks } from "@langchain/core/callbacks/promises";
579
+ var LangChainAdapter = class {
580
+ options;
581
+ /**
582
+ * To use LangChain as a backend, provide a handler function to the adapter with your custom LangChain logic.
583
+ */
584
+ constructor(options) {
585
+ this.options = options;
586
+ }
587
+ async process(request) {
588
+ try {
589
+ const { eventSource, model, actions, messages, runId, threadId: threadIdFromRequest } = request;
590
+ const threadId = threadIdFromRequest ?? randomUUID2();
591
+ const result = await this.options.chainFn({
592
+ messages: messages.map(convertMessageToLangChainMessage),
593
+ tools: actions.map(convertActionInputToLangChainTool),
594
+ model,
595
+ threadId,
596
+ runId
597
+ });
598
+ eventSource.stream(async (eventStream$) => {
599
+ await streamLangChainResponse({
600
+ result,
601
+ eventStream$
602
+ });
603
+ });
604
+ return {
605
+ threadId
606
+ };
607
+ } finally {
608
+ await awaitAllCallbacks();
609
+ }
610
+ }
611
+ };
612
+ __name(LangChainAdapter, "LangChainAdapter");
613
+
614
+ // src/service-adapters/google/google-genai-adapter.ts
615
+ import { ChatGoogle } from "@langchain/google-gauth";
616
+ var GoogleGenerativeAIAdapter = class extends LangChainAdapter {
617
+ constructor(options) {
618
+ super({
619
+ chainFn: async ({ messages, tools, threadId }) => {
620
+ const model = new ChatGoogle({
621
+ modelName: (options == null ? void 0 : options.model) ?? "gemini-1.5-pro",
622
+ apiVersion: "v1beta"
623
+ }).bindTools(tools);
624
+ return model.stream(messages, {
625
+ metadata: {
626
+ conversation_id: threadId
627
+ }
628
+ });
629
+ }
630
+ });
631
+ }
632
+ };
633
+ __name(GoogleGenerativeAIAdapter, "GoogleGenerativeAIAdapter");
634
+
635
+ // src/service-adapters/openai/openai-assistant-adapter.ts
636
+ import OpenAI2 from "openai";
637
// Service adapter for the OpenAI Assistants API (beta). Conversation state
// lives in an OpenAI thread whose id round-trips through
// `request.extensions.openaiAssistantAPI`.
var OpenAIAssistantAdapter = class {
  openai;
  codeInterpreterEnabled;
  assistantId;
  fileSearchEnabled;
  disableParallelToolCalls;
  constructor(params) {
    this.openai = params.openai || new OpenAI2({});
    // Built-in tools default to ON; callers must pass `false` to disable.
    // Fix: the previous `params.x === false || true` expression was always
    // true, which made opting out impossible.
    this.codeInterpreterEnabled = params.codeInterpreterEnabled !== false;
    this.fileSearchEnabled = params.fileSearchEnabled !== false;
    this.assistantId = params.assistantId;
    this.disableParallelToolCalls = params?.disableParallelToolCalls || false;
  }
  /**
   * Routes the request: tool results are submitted to the pending run,
   * otherwise the latest user message starts a new run. Returns the
   * run/thread ids (also mirrored into `extensions`).
   */
  async process(request) {
    const { messages, actions, eventSource, runId, forwardedParameters } = request;
    // Reuse the thread carried in extensions, or create a fresh one.
    let threadId = request.extensions?.openaiAssistantAPI?.threadId;
    if (!threadId) {
      threadId = (await this.openai.beta.threads.create()).id;
    }
    const lastMessage = messages.at(-1);
    // Guard: an empty message list previously crashed with a TypeError.
    if (!lastMessage) {
      throw new Error("No actionable message found in the messages");
    }
    let nextRunId = void 0;
    if (lastMessage.isResultMessage() && runId) {
      nextRunId = await this.submitToolOutputs(threadId, runId, messages, eventSource);
    } else if (lastMessage.isTextMessage()) {
      nextRunId = await this.submitUserMessage(threadId, messages, actions, eventSource, forwardedParameters);
    } else {
      throw new Error("No actionable message found in the messages");
    }
    return {
      runId: nextRunId,
      threadId,
      extensions: {
        ...request.extensions,
        openaiAssistantAPI: {
          threadId,
          runId: nextRunId
        }
      }
    };
  }
  // Feeds action results back into the run awaiting tool outputs and streams
  // the continuation. Returns the (unchanged) run id.
  async submitToolOutputs(threadId, runId, messages, eventSource) {
    const run = await this.openai.beta.threads.runs.retrieve(threadId, runId);
    if (!run.required_action) {
      throw new Error("No tool outputs required");
    }
    const toolCallsIds = run.required_action.submit_tool_outputs.tool_calls.map((toolCall) => toolCall.id);
    const resultMessages = messages.filter(
      (message) => message.isResultMessage() && toolCallsIds.includes(message.actionExecutionId)
    );
    if (toolCallsIds.length != resultMessages.length) {
      throw new Error("Number of function results does not match the number of tool calls");
    }
    const toolOutputs = resultMessages.map((message) => {
      return {
        tool_call_id: message.actionExecutionId,
        output: message.result
      };
    });
    const stream = this.openai.beta.threads.runs.submitToolOutputsStream(threadId, runId, {
      tool_outputs: toolOutputs,
      ...this.disableParallelToolCalls && { parallel_tool_calls: false }
    });
    await this.streamResponse(stream, eventSource);
    return runId;
  }
  // Posts the latest user message to the thread and starts a new run with the
  // configured assistant and tools. Returns the new run id.
  async submitUserMessage(threadId, messages, actions, eventSource, forwardedParameters) {
    messages = [...messages];
    // The first message carries the system instructions for the run.
    const instructionsMessage = messages.shift();
    const instructions = instructionsMessage.isTextMessage() ? instructionsMessage.content : "";
    const userMessage = messages.map(convertMessageToOpenAIMessage).map(convertSystemMessageToAssistantAPI).at(-1);
    // Guard also covers an empty remainder (userMessage === undefined).
    if (!userMessage || userMessage.role !== "user") {
      throw new Error("No user message found");
    }
    await this.openai.beta.threads.messages.create(threadId, {
      role: "user",
      content: userMessage.content
    });
    const openaiTools = actions.map(convertActionInputToOpenAITool);
    const tools = [
      ...openaiTools,
      ...this.codeInterpreterEnabled ? [{ type: "code_interpreter" }] : [],
      ...this.fileSearchEnabled ? [{ type: "file_search" }] : []
    ];
    const stream = this.openai.beta.threads.runs.stream(threadId, {
      assistant_id: this.assistantId,
      instructions,
      tools,
      ...forwardedParameters?.maxTokens && {
        max_completion_tokens: forwardedParameters.maxTokens
      },
      ...this.disableParallelToolCalls && { parallel_tool_calls: false }
    });
    await this.streamResponse(stream, eventSource);
    return getRunIdFromStream(stream);
  }
  // Translates Assistants streaming events into CopilotKit runtime events
  // (text message start/content/end, action execution start/args/end).
  async streamResponse(stream, eventSource) {
    eventSource.stream(async (eventStream$) => {
      let inFunctionCall = false;
      let currentMessageId;
      let currentToolCallId;
      for await (const chunk of stream) {
        switch (chunk.event) {
          case "thread.message.created":
            if (inFunctionCall) {
              // A new message implicitly closes the pending tool call.
              eventStream$.sendActionExecutionEnd({ actionExecutionId: currentToolCallId });
            }
            currentMessageId = chunk.data.id;
            eventStream$.sendTextMessageStart({ messageId: currentMessageId });
            break;
          case "thread.message.delta":
            if (chunk.data.delta.content?.[0].type === "text") {
              eventStream$.sendTextMessageContent({
                messageId: currentMessageId,
                content: chunk.data.delta.content?.[0].text.value
              });
            }
            break;
          case "thread.message.completed":
            eventStream$.sendTextMessageEnd({ messageId: currentMessageId });
            break;
          case "thread.run.step.delta": {
            let toolCallId;
            let toolCallName;
            let toolCallArgs;
            if (chunk.data.delta.step_details.type === "tool_calls" && chunk.data.delta.step_details.tool_calls?.[0].type === "function") {
              toolCallId = chunk.data.delta.step_details.tool_calls?.[0].id;
              toolCallName = chunk.data.delta.step_details.tool_calls?.[0].function.name;
              toolCallArgs = chunk.data.delta.step_details.tool_calls?.[0].function.arguments;
            }
            // A delta carrying id+name opens a new tool call; args-only
            // deltas extend the current one.
            if (toolCallName && toolCallId) {
              if (inFunctionCall) {
                eventStream$.sendActionExecutionEnd({ actionExecutionId: currentToolCallId });
              }
              inFunctionCall = true;
              currentToolCallId = toolCallId;
              eventStream$.sendActionExecutionStart({
                actionExecutionId: currentToolCallId,
                parentMessageId: chunk.data.id,
                actionName: toolCallName
              });
            } else if (toolCallArgs) {
              eventStream$.sendActionExecutionArgs({
                actionExecutionId: currentToolCallId,
                args: toolCallArgs
              });
            }
            break;
          }
        }
      }
      // Close a tool call left open when the stream ended.
      if (inFunctionCall) {
        eventStream$.sendActionExecutionEnd({ actionExecutionId: currentToolCallId });
      }
      eventStream$.complete();
    });
  }
};
__name(OpenAIAssistantAdapter, "OpenAIAssistantAdapter");
818
+ function getRunIdFromStream(stream) {
819
+ return new Promise((resolve, reject) => {
820
+ let runIdGetter = /* @__PURE__ */ __name((event) => {
821
+ if (event.event === "thread.run.created") {
822
+ const runId = event.data.id;
823
+ stream.off("event", runIdGetter);
824
+ resolve(runId);
825
+ }
826
+ }, "runIdGetter");
827
+ stream.on("event", runIdGetter);
828
+ });
829
+ }
830
+ __name(getRunIdFromStream, "getRunIdFromStream");
831
+
832
+ // src/service-adapters/unify/unify-adapter.ts
833
+ import OpenAI3 from "openai";
834
+ import { randomId as randomId2, randomUUID as randomUUID3 } from "@copilotkit/shared";
835
// Service adapter for Unify (unify.ai), which routes OpenAI-compatible chat
// completions across providers. The adapter additionally announces, once per
// adapter instance, which concrete model served the request.
var UnifyAdapter = class {
  // Unify API key. NOTE(review): the fallback is the literal string
  // "UNIFY_API_KEY", not an environment lookup — looks like a placeholder;
  // verify against the intended configuration source.
  apiKey;
  // Unify model/router identifier to request.
  model;
  // True until the first streamed chunk seen by this instance; the
  // "Model used" banner is emitted only while this is true.
  start;
  constructor(options) {
    if (options == null ? void 0 : options.apiKey) {
      this.apiKey = options.apiKey;
    } else {
      this.apiKey = "UNIFY_API_KEY";
    }
    this.model = options == null ? void 0 : options.model;
    this.start = true;
  }
  // Streams a chat completion through Unify's OpenAI-compatible endpoint and
  // re-emits it as CopilotKit runtime events. Returns the thread id.
  async process(request) {
    const tools = request.actions.map(convertActionInputToOpenAITool);
    // Unify speaks the OpenAI protocol; reuse the OpenAI client with a
    // custom base URL.
    const openai = new OpenAI3({
      apiKey: this.apiKey,
      baseURL: "https://api.unify.ai/v0/"
    });
    const forwardedParameters = request.forwardedParameters;
    const messages = request.messages.map(convertMessageToOpenAIMessage);
    const stream = await openai.chat.completions.create({
      model: this.model,
      messages,
      stream: true,
      ...tools.length > 0 && {
        tools
      },
      // NOTE(review): truthiness check silently drops temperature === 0.
      ...(forwardedParameters == null ? void 0 : forwardedParameters.temperature) && {
        temperature: forwardedParameters.temperature
      }
    });
    let model = null;
    let currentMessageId;
    let currentToolCallId;
    request.eventSource.stream(async (eventStream$) => {
      var _a, _b;
      // mode tracks what is currently open on the event stream:
      // "message" (text started) | "function" (tool call started) | null.
      let mode = null;
      for await (const chunk of stream) {
        // On the very first chunk, emit a standalone text message naming the
        // concrete model Unify selected.
        if (this.start) {
          model = chunk.model;
          currentMessageId = randomId2();
          eventStream$.sendTextMessageStart({
            messageId: currentMessageId
          });
          eventStream$.sendTextMessageContent({
            messageId: currentMessageId,
            content: `Model used: ${model}
`
          });
          eventStream$.sendTextMessageEnd({
            messageId: currentMessageId
          });
          this.start = false;
        }
        const toolCall = (_a = chunk.choices[0].delta.tool_calls) == null ? void 0 : _a[0];
        const content = chunk.choices[0].delta.content;
        // Close the open message/tool call when the chunk switches kind
        // (a new tool call id, or tool-call deltas ending).
        if (mode === "message" && (toolCall == null ? void 0 : toolCall.id)) {
          mode = null;
          eventStream$.sendTextMessageEnd({
            messageId: currentMessageId
          });
        } else if (mode === "function" && (toolCall === void 0 || (toolCall == null ? void 0 : toolCall.id))) {
          mode = null;
          eventStream$.sendActionExecutionEnd({
            actionExecutionId: currentToolCallId
          });
        }
        // Open a new message or tool call if nothing is in progress.
        if (mode === null) {
          if (toolCall == null ? void 0 : toolCall.id) {
            mode = "function";
            currentToolCallId = toolCall.id;
            eventStream$.sendActionExecutionStart({
              actionExecutionId: currentToolCallId,
              actionName: toolCall.function.name
            });
          } else if (content) {
            mode = "message";
            currentMessageId = chunk.id;
            eventStream$.sendTextMessageStart({
              messageId: currentMessageId
            });
          }
        }
        // Forward the chunk's payload to whatever is open.
        if (mode === "message" && content) {
          eventStream$.sendTextMessageContent({
            messageId: currentMessageId,
            content
          });
        } else if (mode === "function" && ((_b = toolCall == null ? void 0 : toolCall.function) == null ? void 0 : _b.arguments)) {
          eventStream$.sendActionExecutionArgs({
            actionExecutionId: currentToolCallId,
            args: toolCall.function.arguments
          });
        }
      }
      // Close whatever remained open when the stream ended.
      if (mode === "message") {
        eventStream$.sendTextMessageEnd({
          messageId: currentMessageId
        });
      } else if (mode === "function") {
        eventStream$.sendActionExecutionEnd({
          actionExecutionId: currentToolCallId
        });
      }
      eventStream$.complete();
    });
    return {
      threadId: request.threadId || randomUUID3()
    };
  }
};
__name(UnifyAdapter, "UnifyAdapter");
948
+
949
+ // src/service-adapters/groq/groq-adapter.ts
950
+ import { Groq } from "groq-sdk";
951
+ import { randomUUID as randomUUID4 } from "@copilotkit/shared";
952
var DEFAULT_MODEL2 = "llama3-groq-70b-8192-tool-use-preview";
// Service adapter for the Groq chat completions API. Groq exposes an
// OpenAI-compatible interface, so messages/tools are converted with the
// shared OpenAI helpers and the streamed chunks are re-emitted as CopilotKit
// runtime events.
var GroqAdapter = class {
  // Model to request; can be overridden per request via `request.model`.
  model = DEFAULT_MODEL2;
  // When true, `parallel_tool_calls: false` is forwarded to the API.
  disableParallelToolCalls = false;
  _groq;
  get groq() {
    return this._groq;
  }
  constructor(params) {
    this._groq = (params == null ? void 0 : params.groq) || new Groq({});
    if (params == null ? void 0 : params.model) {
      this.model = params.model;
    }
    this.disableParallelToolCalls = (params == null ? void 0 : params.disableParallelToolCalls) || false;
  }
  // Converts the request, trims history to the token budget, streams the
  // completion, and returns the (possibly generated) thread id.
  async process(request) {
    const { threadId, model = this.model, messages, actions, eventSource, forwardedParameters } = request;
    const tools = actions.map(convertActionInputToOpenAITool);
    let openaiMessages = messages.map(convertMessageToOpenAIMessage);
    // Drop oldest messages that would overflow the model's context window.
    openaiMessages = limitMessagesToTokenCount(openaiMessages, tools, model);
    let toolChoice = forwardedParameters == null ? void 0 : forwardedParameters.toolChoice;
    if ((forwardedParameters == null ? void 0 : forwardedParameters.toolChoice) === "function") {
      // "function" tool choice needs the OpenAI object form carrying the name.
      toolChoice = {
        type: "function",
        function: {
          name: forwardedParameters.toolChoiceFunctionName
        }
      };
    }
    const stream = await this.groq.chat.completions.create({
      model,
      stream: true,
      messages: openaiMessages,
      ...tools.length > 0 && {
        tools
      },
      ...(forwardedParameters == null ? void 0 : forwardedParameters.maxTokens) && {
        max_tokens: forwardedParameters.maxTokens
      },
      ...(forwardedParameters == null ? void 0 : forwardedParameters.stop) && {
        stop: forwardedParameters.stop
      },
      ...toolChoice && {
        tool_choice: toolChoice
      },
      ...this.disableParallelToolCalls && {
        parallel_tool_calls: false
      },
      // NOTE(review): truthiness check silently drops temperature === 0.
      ...(forwardedParameters == null ? void 0 : forwardedParameters.temperature) && {
        temperature: forwardedParameters.temperature
      }
    });
    eventSource.stream(async (eventStream$) => {
      var _a, _b;
      // mode tracks what is currently open on the event stream:
      // "message" (text started) | "function" (tool call started) | null.
      let mode = null;
      let currentMessageId;
      let currentToolCallId;
      for await (const chunk of stream) {
        const toolCall = (_a = chunk.choices[0].delta.tool_calls) == null ? void 0 : _a[0];
        const content = chunk.choices[0].delta.content;
        // Close the open message/tool call when the chunk switches kind.
        if (mode === "message" && (toolCall == null ? void 0 : toolCall.id)) {
          mode = null;
          eventStream$.sendTextMessageEnd({
            messageId: currentMessageId
          });
        } else if (mode === "function" && (toolCall === void 0 || (toolCall == null ? void 0 : toolCall.id))) {
          mode = null;
          eventStream$.sendActionExecutionEnd({
            actionExecutionId: currentToolCallId
          });
        }
        // Open a new message or tool call if nothing is in progress.
        if (mode === null) {
          if (toolCall == null ? void 0 : toolCall.id) {
            mode = "function";
            currentToolCallId = toolCall.id;
            eventStream$.sendActionExecutionStart({
              actionExecutionId: currentToolCallId,
              actionName: toolCall.function.name,
              parentMessageId: chunk.id
            });
          } else if (content) {
            mode = "message";
            currentMessageId = chunk.id;
            eventStream$.sendTextMessageStart({
              messageId: currentMessageId
            });
          }
        }
        // Forward the chunk's payload to whatever is open.
        if (mode === "message" && content) {
          eventStream$.sendTextMessageContent({
            messageId: currentMessageId,
            content
          });
        } else if (mode === "function" && ((_b = toolCall == null ? void 0 : toolCall.function) == null ? void 0 : _b.arguments)) {
          eventStream$.sendActionExecutionArgs({
            actionExecutionId: currentToolCallId,
            args: toolCall.function.arguments
          });
        }
      }
      // Close whatever remained open when the stream ended.
      if (mode === "message") {
        eventStream$.sendTextMessageEnd({
          messageId: currentMessageId
        });
      } else if (mode === "function") {
        eventStream$.sendActionExecutionEnd({
          actionExecutionId: currentToolCallId
        });
      }
      eventStream$.complete();
    });
    return {
      threadId: request.threadId || randomUUID4()
    };
  }
};
__name(GroqAdapter, "GroqAdapter");
1069
+
1070
+ // src/service-adapters/anthropic/anthropic-adapter.ts
1071
+ import Anthropic from "@anthropic-ai/sdk";
1072
+
1073
+ // src/service-adapters/anthropic/utils.ts
1074
// Trims `messages` so the total (approximate) token count fits `maxTokens`,
// dropping the oldest non-system messages first. System messages are always
// kept; their cost and the tool definitions' cost are budgeted up front.
// Throws when tools or system messages alone exceed the budget.
function limitMessagesToTokenCount2(messages, tools, model, maxTokens) {
  if (!maxTokens) {
    maxTokens = MAX_TOKENS;
  }
  const toolsNumTokens = countToolsTokens2(model, tools);
  if (toolsNumTokens > maxTokens) {
    throw new Error(`Too many tokens in function definitions: ${toolsNumTokens} > ${maxTokens}`);
  }
  maxTokens -= toolsNumTokens;
  // Reserve budget for every system message before considering the rest.
  for (const message of messages) {
    if (message.role === "system") {
      maxTokens -= countMessageTokens2(model, message);
      if (maxTokens < 0) {
        throw new Error("Not enough tokens for system message.");
      }
    }
  }
  // Walk newest-to-oldest; once one message doesn't fit, all older
  // non-system messages are skipped too.
  const kept = [];
  let budgetExhausted = false;
  for (let i = messages.length - 1; i >= 0; i--) {
    const message = messages[i];
    if (message.role === "system") {
      kept.unshift(message);
      continue;
    }
    if (budgetExhausted) {
      continue;
    }
    const numTokens = countMessageTokens2(model, message);
    if (maxTokens < numTokens) {
      budgetExhausted = true;
      continue;
    }
    kept.unshift(message);
    maxTokens -= numTokens;
  }
  return kept;
}
__name(limitMessagesToTokenCount2, "limitMessagesToTokenCount");
var MAX_TOKENS = 128e3;
1114
// Approximate token cost of the tool definitions: zero when there are none,
// otherwise the token estimate of their JSON serialization.
function countToolsTokens2(model, tools) {
  if (!tools.length) {
    return 0;
  }
  return countTokens2(model, JSON.stringify(tools));
}
__name(countToolsTokens2, "countToolsTokens");
1122
// Approximate token cost of one message, based on its serialized content.
// (JSON.stringify(undefined) yields undefined, hence the "" fallback.)
function countMessageTokens2(model, message) {
  const serialized = JSON.stringify(message.content) || "";
  return countTokens2(model, serialized);
}
__name(countMessageTokens2, "countMessageTokens");
1126
// Crude token estimate: roughly one token per three characters. The model
// argument is accepted for interface parity but does not affect the result.
function countTokens2(model, text) {
  const APPROX_CHARS_PER_TOKEN = 3;
  return text.length / APPROX_CHARS_PER_TOKEN;
}
1129
+ __name(countTokens2, "countTokens");
1130
// Maps a CopilotKit action input to the Anthropic tool format; the JSON
// schema string is parsed into the `input_schema` object Anthropic expects.
function convertActionInputToAnthropicTool(action) {
  const { name, description, jsonSchema } = action;
  return {
    name,
    description,
    input_schema: JSON.parse(jsonSchema)
  };
}
1137
+ __name(convertActionInputToAnthropicTool, "convertActionInputToAnthropicTool");
1138
+ function convertMessageToAnthropicMessage(message) {
1139
+ if (message.isTextMessage()) {
1140
+ if (message.role === "system") {
1141
+ return {
1142
+ role: "assistant",
1143
+ content: [
1144
+ {
1145
+ type: "text",
1146
+ text: "THE FOLLOWING MESSAGE IS A SYSTEM MESSAGE: " + message.content
1147
+ }
1148
+ ]
1149
+ };
1150
+ } else {
1151
+ return {
1152
+ role: message.role === "user" ? "user" : "assistant",
1153
+ content: [
1154
+ {
1155
+ type: "text",
1156
+ text: message.content
1157
+ }
1158
+ ]
1159
+ };
1160
+ }
1161
+ } else if (message.isActionExecutionMessage()) {
1162
+ return {
1163
+ role: "assistant",
1164
+ content: [
1165
+ {
1166
+ id: message.id,
1167
+ type: "tool_use",
1168
+ input: message.arguments,
1169
+ name: message.name
1170
+ }
1171
+ ]
1172
+ };
1173
+ } else if (message.isResultMessage()) {
1174
+ return {
1175
+ role: "user",
1176
+ content: [
1177
+ {
1178
+ type: "tool_result",
1179
+ content: message.result,
1180
+ tool_use_id: message.actionExecutionId
1181
+ }
1182
+ ]
1183
+ };
1184
+ }
1185
+ }
1186
+ __name(convertMessageToAnthropicMessage, "convertMessageToAnthropicMessage");
1187
+ function groupAnthropicMessagesByRole(messageParams) {
1188
+ return messageParams.reduce((acc, message) => {
1189
+ const lastGroup = acc[acc.length - 1];
1190
+ if (lastGroup && lastGroup.role === message.role) {
1191
+ lastGroup.content = lastGroup.content.concat(message.content);
1192
+ } else {
1193
+ acc.push({
1194
+ role: message.role,
1195
+ content: [
1196
+ ...message.content
1197
+ ]
1198
+ });
1199
+ }
1200
+ return acc;
1201
+ }, []);
1202
+ }
1203
+ __name(groupAnthropicMessagesByRole, "groupAnthropicMessagesByRole");
1204
+
1205
+ // src/service-adapters/anthropic/anthropic-adapter.ts
1206
+ import { randomId as randomId3, randomUUID as randomUUID5 } from "@copilotkit/shared";
1207
var DEFAULT_MODEL3 = "claude-3-sonnet-20240229";
// Service adapter for the Anthropic Messages API. Converts CopilotKit
// messages/actions to Anthropic format, trims them to the token budget, and
// re-emits the streamed response as CopilotKit runtime events.
var AnthropicAdapter = class {
  model = DEFAULT_MODEL3;
  _anthropic;
  get anthropic() {
    return this._anthropic;
  }
  constructor(params) {
    this._anthropic = params?.anthropic || new Anthropic({});
    if (params?.model) {
      this.model = params.model;
    }
  }
  async process(request) {
    const { threadId, model = this.model, messages: rawMessages, actions, eventSource, forwardedParameters } = request;
    const tools = actions.map(convertActionInputToAnthropicTool);
    const messages = [...rawMessages];
    // The first message is expected to carry the system instructions.
    const instructionsMessage = messages.shift();
    const instructions = instructionsMessage.isTextMessage() ? instructionsMessage.content : "";
    let anthropicMessages = messages.map(convertMessageToAnthropicMessage);
    anthropicMessages = limitMessagesToTokenCount2(anthropicMessages, tools, model);
    // Anthropic requires alternating roles; merge adjacent same-role messages.
    anthropicMessages = groupAnthropicMessagesByRole(anthropicMessages);
    let toolChoice = forwardedParameters?.toolChoice;
    if (forwardedParameters?.toolChoice === "function") {
      toolChoice = {
        type: "tool",
        name: forwardedParameters.toolChoiceFunctionName
      };
    }
    const stream = this.anthropic.messages.create({
      system: instructions,
      // Fix: honor the per-request model override. This was `this.model`,
      // silently ignoring the `model` destructured from the request.
      model,
      messages: anthropicMessages,
      max_tokens: forwardedParameters?.maxTokens || 1024,
      ...forwardedParameters?.temperature ? { temperature: forwardedParameters.temperature } : {},
      ...tools.length > 0 && { tools },
      ...toolChoice && { tool_choice: toolChoice },
      stream: true
    });
    eventSource.stream(async (eventStream$) => {
      let mode = null;
      let didOutputText = false;
      let currentMessageId = randomId3();
      let currentToolCallId = randomId3();
      const filterThinkingTextBuffer = new FilterThinkingTextBuffer();
      for await (const chunk of await stream) {
        if (chunk.type === "message_start") {
          currentMessageId = chunk.message.id;
        } else if (chunk.type === "content_block_start") {
          if (chunk.content_block.type === "text") {
            didOutputText = false;
            filterThinkingTextBuffer.reset();
            mode = "message";
          } else if (chunk.content_block.type === "tool_use") {
            currentToolCallId = chunk.content_block.id;
            eventStream$.sendActionExecutionStart({
              actionExecutionId: currentToolCallId,
              actionName: chunk.content_block.name,
              parentMessageId: currentMessageId
            });
            mode = "function";
          }
        } else if (chunk.type === "content_block_delta") {
          if (chunk.delta.type === "text_delta") {
            // Strip a leading <thinking>…</thinking> block before forwarding.
            const text = filterThinkingTextBuffer.onTextChunk(chunk.delta.text);
            if (text.length > 0) {
              if (!didOutputText) {
                // Delay the "start" event until there is visible text.
                eventStream$.sendTextMessageStart({ messageId: currentMessageId });
                didOutputText = true;
              }
              eventStream$.sendTextMessageContent({
                messageId: currentMessageId,
                content: text
              });
            }
          } else if (chunk.delta.type === "input_json_delta") {
            eventStream$.sendActionExecutionArgs({
              actionExecutionId: currentToolCallId,
              args: chunk.delta.partial_json
            });
          }
        } else if (chunk.type === "content_block_stop") {
          if (mode === "message") {
            if (didOutputText) {
              eventStream$.sendTextMessageEnd({ messageId: currentMessageId });
            }
          } else if (mode === "function") {
            eventStream$.sendActionExecutionEnd({ actionExecutionId: currentToolCallId });
          }
        }
      }
      eventStream$.complete();
    });
    return {
      threadId: threadId || randomUUID5()
    };
  }
};
__name(AnthropicAdapter, "AnthropicAdapter");
1320
var THINKING_TAG = "<thinking>";
var THINKING_TAG_END = "</thinking>";
// Buffers streamed text and strips a leading <thinking>…</thinking> block
// before any text is surfaced. While the accumulated text could still turn
// out to be the opening tag, chunks are withheld (returned as "").
var FilterThinkingTextBuffer = class FilterThinkingTextBuffer {
  buffer;
  didFilterThinkingTag;
  constructor() {
    this.buffer = "";
    this.didFilterThinkingTag = false;
  }
  onTextChunk(text) {
    this.buffer += text;
    // Once the tag has been handled (or ruled out), pass chunks through.
    if (this.didFilterThinkingTag) {
      return text;
    }
    const potentialTag = this.buffer.slice(0, THINKING_TAG.length);
    if (THINKING_TAG.startsWith(potentialTag)) {
      if (this.buffer.includes(THINKING_TAG_END)) {
        // Complete thinking block: drop it and emit whatever follows.
        const end = this.buffer.indexOf(THINKING_TAG_END);
        const filteredText = this.buffer.slice(end + THINKING_TAG_END.length);
        this.buffer = filteredText;
        this.didFilterThinkingTag = true;
        return filteredText;
      }
      // Might still be an (unfinished) thinking block: withhold output.
      return "";
    }
    // Fix: the tag speculation failed, so flush the ENTIRE buffer — not just
    // the current chunk. Previously, prefix chunks withheld while waiting
    // (e.g. "<thi") were silently dropped. Stop filtering from here on.
    this.didFilterThinkingTag = true;
    return this.buffer;
  }
  reset() {
    this.buffer = "";
    this.didFilterThinkingTag = false;
  }
};
1352
+
1353
+ // src/service-adapters/experimental/ollama/ollama-adapter.ts
1354
+ import { Ollama } from "@langchain/community/llms/ollama";
1355
+ import { randomId as randomId4, randomUUID as randomUUID6 } from "@copilotkit/shared";
1356
var DEFAULT_MODEL4 = "llama3:latest";
// Experimental adapter that streams completions from a local Ollama server.
// Only the conversation's text messages are forwarded; actions/tools are not
// wired up by this adapter.
var ExperimentalOllamaAdapter = class {
  model;
  constructor(options) {
    this.model = options?.model || DEFAULT_MODEL4;
  }
  async process(request) {
    const { messages, eventSource } = request;
    const ollama = new Ollama({
      model: this.model
    });
    // Flatten the conversation into plain text prompts for the LLM interface.
    const prompts = messages.filter((m) => m.isTextMessage()).map((m) => m.content);
    const completionStream = await ollama.stream(prompts);
    eventSource.stream(async (eventStream$) => {
      const messageId = randomId4();
      eventStream$.sendTextMessageStart({ messageId });
      // Relay each streamed token as message content.
      for await (const token of completionStream) {
        eventStream$.sendTextMessageContent({ messageId, content: token });
      }
      eventStream$.sendTextMessageEnd({ messageId });
      eventStream$.complete();
    });
    return {
      threadId: request.threadId || randomUUID6()
    };
  }
};
__name(ExperimentalOllamaAdapter, "ExperimentalOllamaAdapter");
1395
+
1396
+ // src/service-adapters/experimental/empty/empty-adapter.ts
1397
+ import { randomUUID as randomUUID7 } from "@copilotkit/shared";
1398
+ var ExperimentalEmptyAdapter = class {
1399
+ async process(request) {
1400
+ return {
1401
+ threadId: request.threadId || randomUUID7()
1402
+ };
1403
+ }
1404
+ };
1405
+ __name(ExperimentalEmptyAdapter, "ExperimentalEmptyAdapter");
1406
+
1407
// Public surface of this chunk: the service adapters and LangChain helpers
// defined above.
export {
  RemoteChain,
  OpenAIAdapter,
  streamLangChainResponse,
  LangChainAdapter,
  GoogleGenerativeAIAdapter,
  OpenAIAssistantAdapter,
  UnifyAdapter,
  GroqAdapter,
  AnthropicAdapter,
  ExperimentalOllamaAdapter,
  ExperimentalEmptyAdapter
};
1420
+ //# sourceMappingURL=chunk-YT7A6V5T.mjs.map