@copilotkit/runtime 1.50.0-beta.8 → 1.50.0-beta.9

This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (96)
  1. package/CHANGELOG.md +11 -0
  2. package/dist/chunk-2OZAGFV3.mjs +43 -0
  3. package/dist/chunk-2OZAGFV3.mjs.map +1 -0
  4. package/dist/chunk-62NE5S6M.mjs +226 -0
  5. package/dist/chunk-62NE5S6M.mjs.map +1 -0
  6. package/dist/chunk-6XRUR5UK.mjs +1 -0
  7. package/dist/chunk-6XRUR5UK.mjs.map +1 -0
  8. package/dist/chunk-AMUJQ6IR.mjs +50 -0
  9. package/dist/chunk-AMUJQ6IR.mjs.map +1 -0
  10. package/dist/chunk-BJEYMRDD.mjs +25 -0
  11. package/dist/chunk-BJEYMRDD.mjs.map +1 -0
  12. package/dist/chunk-DZV4ZIAR.mjs +3063 -0
  13. package/dist/chunk-DZV4ZIAR.mjs.map +1 -0
  14. package/dist/chunk-FHD4JECV.mjs +33 -0
  15. package/dist/chunk-FHD4JECV.mjs.map +1 -0
  16. package/dist/chunk-FMU55SEU.mjs +25 -0
  17. package/dist/chunk-FMU55SEU.mjs.map +1 -0
  18. package/dist/chunk-OWIGJONH.mjs +275 -0
  19. package/dist/chunk-OWIGJONH.mjs.map +1 -0
  20. package/dist/chunk-SBCOROE4.mjs +1112 -0
  21. package/dist/chunk-SBCOROE4.mjs.map +1 -0
  22. package/dist/chunk-TTUAEJLD.mjs +617 -0
  23. package/dist/chunk-TTUAEJLD.mjs.map +1 -0
  24. package/dist/chunk-XWBDEXDA.mjs +153 -0
  25. package/dist/chunk-XWBDEXDA.mjs.map +1 -0
  26. package/dist/chunk-Z752VE75.mjs +74 -0
  27. package/dist/chunk-Z752VE75.mjs.map +1 -0
  28. package/dist/graphql/message-conversion/index.d.ts +18 -0
  29. package/dist/graphql/message-conversion/index.js +725 -0
  30. package/dist/graphql/message-conversion/index.js.map +1 -0
  31. package/dist/graphql/message-conversion/index.mjs +245 -0
  32. package/dist/graphql/message-conversion/index.mjs.map +1 -0
  33. package/dist/graphql/types/base/index.d.ts +6 -0
  34. package/dist/graphql/types/base/index.js +63 -0
  35. package/dist/graphql/types/base/index.js.map +1 -0
  36. package/dist/graphql/types/base/index.mjs +8 -0
  37. package/dist/graphql/types/base/index.mjs.map +1 -0
  38. package/dist/graphql/types/converted/index.d.ts +2 -0
  39. package/dist/graphql/types/converted/index.js +294 -0
  40. package/dist/graphql/types/converted/index.js.map +1 -0
  41. package/dist/graphql/types/converted/index.mjs +20 -0
  42. package/dist/graphql/types/converted/index.mjs.map +1 -0
  43. package/dist/groq-adapter-50bc6e4a.d.ts +326 -0
  44. package/dist/index-adbd78f1.d.ts +154 -0
  45. package/dist/index.js +1 -1
  46. package/dist/index.js.map +1 -1
  47. package/dist/index.mjs +1 -1
  48. package/dist/index.mjs.map +1 -1
  49. package/dist/langserve-74a52292.d.ts +242 -0
  50. package/dist/lib/cloud/index.d.ts +6 -0
  51. package/dist/lib/cloud/index.js +18 -0
  52. package/dist/lib/cloud/index.js.map +1 -0
  53. package/dist/lib/cloud/index.mjs +1 -0
  54. package/dist/lib/cloud/index.mjs.map +1 -0
  55. package/dist/lib/index.d.ts +266 -0
  56. package/dist/lib/index.js +4944 -0
  57. package/dist/lib/index.js.map +1 -0
  58. package/dist/lib/index.mjs +74 -0
  59. package/dist/lib/index.mjs.map +1 -0
  60. package/dist/lib/integrations/index.d.ts +28 -0
  61. package/dist/lib/integrations/index.js +3024 -0
  62. package/dist/lib/integrations/index.js.map +1 -0
  63. package/dist/lib/integrations/index.mjs +36 -0
  64. package/dist/lib/integrations/index.mjs.map +1 -0
  65. package/dist/lib/integrations/nest/index.d.ts +16 -0
  66. package/dist/lib/integrations/nest/index.js +2937 -0
  67. package/dist/lib/integrations/nest/index.js.map +1 -0
  68. package/dist/lib/integrations/nest/index.mjs +13 -0
  69. package/dist/lib/integrations/nest/index.mjs.map +1 -0
  70. package/dist/lib/integrations/node-express/index.d.ts +16 -0
  71. package/dist/lib/integrations/node-express/index.js +2937 -0
  72. package/dist/lib/integrations/node-express/index.js.map +1 -0
  73. package/dist/lib/integrations/node-express/index.mjs +13 -0
  74. package/dist/lib/integrations/node-express/index.mjs.map +1 -0
  75. package/dist/lib/integrations/node-http/index.d.ts +16 -0
  76. package/dist/lib/integrations/node-http/index.js +2923 -0
  77. package/dist/lib/integrations/node-http/index.js.map +1 -0
  78. package/dist/lib/integrations/node-http/index.mjs +12 -0
  79. package/dist/lib/integrations/node-http/index.mjs.map +1 -0
  80. package/dist/service-adapters/index.d.ts +166 -0
  81. package/dist/service-adapters/index.js +1800 -0
  82. package/dist/service-adapters/index.js.map +1 -0
  83. package/dist/service-adapters/index.mjs +36 -0
  84. package/dist/service-adapters/index.mjs.map +1 -0
  85. package/dist/service-adapters/shared/index.d.ts +9 -0
  86. package/dist/service-adapters/shared/index.js +72 -0
  87. package/dist/service-adapters/shared/index.js.map +1 -0
  88. package/dist/service-adapters/shared/index.mjs +8 -0
  89. package/dist/service-adapters/shared/index.mjs.map +1 -0
  90. package/dist/shared-f6d43ef8.d.ts +446 -0
  91. package/dist/utils/index.d.ts +65 -0
  92. package/dist/utils/index.js +175 -0
  93. package/dist/utils/index.js.map +1 -0
  94. package/dist/utils/index.mjs +12 -0
  95. package/dist/utils/index.mjs.map +1 -0
  96. package/package.json +2 -2
package/dist/service-adapters/index.js
@@ -0,0 +1,1800 @@
+ var __create = Object.create;
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __getProtoOf = Object.getPrototypeOf;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
+ var __export = (target, all) => {
+   for (var name in all)
+     __defProp(target, name, { get: all[name], enumerable: true });
+ };
+ var __copyProps = (to, from, except, desc) => {
+   if (from && typeof from === "object" || typeof from === "function") {
+     for (let key of __getOwnPropNames(from))
+       if (!__hasOwnProp.call(to, key) && key !== except)
+         __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+   }
+   return to;
+ };
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
+   // If the importer is in node compatibility mode or this is not an ESM
+   // file that has been converted to a CommonJS file using a Babel-
+   // compatible transform (i.e. "__esModule" has not been set), then set
+   // "default" to the CommonJS "module.exports" for node compatibility.
+   isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
+   mod
+ ));
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+ // src/service-adapters/index.ts
+ var service_adapters_exports = {};
+ __export(service_adapters_exports, {
+   AnthropicAdapter: () => AnthropicAdapter,
+   BedrockAdapter: () => BedrockAdapter,
+   EmptyAdapter: () => EmptyAdapter,
+   ExperimentalEmptyAdapter: () => ExperimentalEmptyAdapter,
+   ExperimentalOllamaAdapter: () => ExperimentalOllamaAdapter,
+   GoogleGenerativeAIAdapter: () => GoogleGenerativeAIAdapter,
+   GroqAdapter: () => GroqAdapter,
+   LangChainAdapter: () => LangChainAdapter,
+   OpenAIAdapter: () => OpenAIAdapter,
+   OpenAIAssistantAdapter: () => OpenAIAssistantAdapter,
+   RemoteChain: () => RemoteChain,
+   UnifyAdapter: () => UnifyAdapter,
+   convertServiceAdapterError: () => convertServiceAdapterError
+ });
+ module.exports = __toCommonJS(service_adapters_exports);
+
+ // src/service-adapters/langchain/langserve.ts
+ var import_remote = require("langchain/runnables/remote");
+ var RemoteChain = class {
+   name;
+   description;
+   chainUrl;
+   parameters;
+   parameterType;
+   constructor(options) {
+     this.name = options.name;
+     this.description = options.description;
+     this.chainUrl = options.chainUrl;
+     this.parameters = options.parameters;
+     this.parameterType = options.parameterType || "multi";
+   }
+   async toAction() {
+     if (!this.parameters) {
+       await this.inferLangServeParameters();
+     }
+     return {
+       name: this.name,
+       description: this.description,
+       parameters: this.parameters,
+       handler: async (args) => {
+         const runnable = new import_remote.RemoteRunnable({
+           url: this.chainUrl
+         });
+         let input;
+         if (this.parameterType === "single") {
+           input = args[Object.keys(args)[0]];
+         } else {
+           input = args;
+         }
+         return await runnable.invoke(input);
+       }
+     };
+   }
+   async inferLangServeParameters() {
+     const supportedTypes = [
+       "string",
+       "number",
+       "boolean"
+     ];
+     let schemaUrl = this.chainUrl.replace(/\/+$/, "") + "/input_schema";
+     let schema = await fetch(schemaUrl).then((res) => res.json()).catch(() => {
+       throw new Error("Failed to fetch langserve schema at " + schemaUrl);
+     });
+     if (supportedTypes.includes(schema.type)) {
+       this.parameterType = "single";
+       this.parameters = [
+         {
+           name: "input",
+           type: schema.type,
+           description: "The input to the chain"
+         }
+       ];
+     } else if (schema.type === "object") {
+       this.parameterType = "multi";
+       this.parameters = Object.keys(schema.properties).map((key) => {
+         var _a;
+         let property = schema.properties[key];
+         if (!supportedTypes.includes(property.type)) {
+           throw new Error("Unsupported schema type");
+         }
+         return {
+           name: key,
+           type: property.type,
+           description: property.description || "",
+           required: ((_a = schema.required) == null ? void 0 : _a.includes(key)) || false
+         };
+       });
+     } else {
+       throw new Error("Unsupported schema type");
+     }
+   }
+ };
+ __name(RemoteChain, "RemoteChain");
+
+ // src/service-adapters/shared/error-utils.ts
+ var import_shared = require("@copilotkit/shared");
+ function convertServiceAdapterError(error, adapterName) {
+   var _a, _b, _c;
+   const errorName = ((_a = error == null ? void 0 : error.constructor) == null ? void 0 : _a.name) || error.name;
+   const errorMessage = (error == null ? void 0 : error.message) || String(error);
+   const statusCode = error.status || error.statusCode || ((_b = error.response) == null ? void 0 : _b.status);
+   const responseData = error.error || ((_c = error.response) == null ? void 0 : _c.data) || error.data;
+   const structuredError = new import_shared.CopilotKitLowLevelError({
+     error: error instanceof Error ? error : new Error(errorMessage),
+     url: `${adapterName} service adapter`,
+     message: `${adapterName} API error: ${errorMessage}`
+   });
+   if (statusCode) {
+     structuredError.statusCode = statusCode;
+   }
+   if (responseData) {
+     structuredError.responseData = responseData;
+   }
+   if (errorName) {
+     structuredError.originalErrorType = errorName;
+   }
+   let newCode;
+   if (statusCode === 401) {
+     newCode = import_shared.CopilotKitErrorCode.AUTHENTICATION_ERROR;
+   } else if (statusCode >= 400 && statusCode < 500) {
+     newCode = import_shared.CopilotKitErrorCode.CONFIGURATION_ERROR;
+   } else if (statusCode >= 500) {
+     newCode = import_shared.CopilotKitErrorCode.NETWORK_ERROR;
+   } else if (statusCode) {
+     newCode = import_shared.CopilotKitErrorCode.CONFIGURATION_ERROR;
+   } else {
+     newCode = import_shared.CopilotKitErrorCode.NETWORK_ERROR;
+   }
+   structuredError.code = newCode;
+   if (structuredError.extensions) {
+     structuredError.extensions.code = newCode;
+   }
+   return structuredError;
+ }
+ __name(convertServiceAdapterError, "convertServiceAdapterError");
+
+ // src/service-adapters/openai/openai-adapter.ts
+ var import_openai = __toESM(require("openai"));
+
+ // src/service-adapters/openai/utils.ts
+ var import_shared2 = require("@copilotkit/shared");
+ function limitMessagesToTokenCount(messages, tools, model, maxTokens) {
+   maxTokens || (maxTokens = maxTokensForOpenAIModel(model));
+   const result = [];
+   const toolsNumTokens = countToolsTokens(model, tools);
+   if (toolsNumTokens > maxTokens) {
+     throw new Error(`Too many tokens in function definitions: ${toolsNumTokens} > ${maxTokens}`);
+   }
+   maxTokens -= toolsNumTokens;
+   for (const message of messages) {
+     if ([
+       "system",
+       "developer"
+     ].includes(message.role)) {
+       const numTokens = countMessageTokens(model, message);
+       maxTokens -= numTokens;
+       if (maxTokens < 0) {
+         throw new Error("Not enough tokens for system message.");
+       }
+     }
+   }
+   let cutoff = false;
+   const reversedMessages = [
+     ...messages
+   ].reverse();
+   for (const message of reversedMessages) {
+     if ([
+       "system",
+       "developer"
+     ].includes(message.role)) {
+       result.unshift(message);
+       continue;
+     } else if (cutoff) {
+       continue;
+     }
+     let numTokens = countMessageTokens(model, message);
+     if (maxTokens < numTokens) {
+       cutoff = true;
+       continue;
+     }
+     result.unshift(message);
+     maxTokens -= numTokens;
+   }
+   return result;
+ }
+ __name(limitMessagesToTokenCount, "limitMessagesToTokenCount");
+ function maxTokensForOpenAIModel(model) {
+   return maxTokensByModel[model] || DEFAULT_MAX_TOKENS;
+ }
+ __name(maxTokensForOpenAIModel, "maxTokensForOpenAIModel");
+ var DEFAULT_MAX_TOKENS = 128e3;
+ var maxTokensByModel = {
+   // o1
+   o1: 2e5,
+   "o1-2024-12-17": 2e5,
+   "o1-mini": 128e3,
+   "o1-mini-2024-09-12": 128e3,
+   "o1-preview": 128e3,
+   "o1-preview-2024-09-12": 128e3,
+   // o3-mini
+   "o3-mini": 2e5,
+   "o3-mini-2025-01-31": 2e5,
+   // GPT-4
+   "gpt-4o": 128e3,
+   "chatgpt-4o-latest": 128e3,
+   "gpt-4o-2024-08-06": 128e3,
+   "gpt-4o-2024-05-13": 128e3,
+   "gpt-4o-mini": 128e3,
+   "gpt-4o-mini-2024-07-18": 128e3,
+   "gpt-4-turbo": 128e3,
+   "gpt-4-turbo-2024-04-09": 128e3,
+   "gpt-4-0125-preview": 128e3,
+   "gpt-4-turbo-preview": 128e3,
+   "gpt-4-1106-preview": 128e3,
+   "gpt-4-vision-preview": 128e3,
+   "gpt-4-1106-vision-preview": 128e3,
+   "gpt-4-32k": 32768,
+   "gpt-4-32k-0613": 32768,
+   "gpt-4-32k-0314": 32768,
+   "gpt-4": 8192,
+   "gpt-4-0613": 8192,
+   "gpt-4-0314": 8192,
+   // GPT-3.5
+   "gpt-3.5-turbo-0125": 16385,
+   "gpt-3.5-turbo": 16385,
+   "gpt-3.5-turbo-1106": 16385,
+   "gpt-3.5-turbo-instruct": 4096,
+   "gpt-3.5-turbo-16k": 16385,
+   "gpt-3.5-turbo-0613": 4096,
+   "gpt-3.5-turbo-16k-0613": 16385,
+   "gpt-3.5-turbo-0301": 4097
+ };
+ function countToolsTokens(model, tools) {
+   if (tools.length === 0) {
+     return 0;
+   }
+   const json = JSON.stringify(tools);
+   return countTokens(model, json);
+ }
+ __name(countToolsTokens, "countToolsTokens");
+ function countMessageTokens(model, message) {
+   return countTokens(model, message.content || "");
+ }
+ __name(countMessageTokens, "countMessageTokens");
+ function countTokens(model, text) {
+   return text.length / 3;
+ }
+ __name(countTokens, "countTokens");
+ function convertActionInputToOpenAITool(action) {
+   return {
+     type: "function",
+     function: {
+       name: action.name,
+       description: action.description,
+       parameters: (0, import_shared2.parseJson)(action.jsonSchema, {})
+     }
+   };
+ }
+ __name(convertActionInputToOpenAITool, "convertActionInputToOpenAITool");
+ function convertMessageToOpenAIMessage(message, options) {
+   const { keepSystemRole } = options || {
+     keepSystemRole: false
+   };
+   if (message.isTextMessage()) {
+     let role = message.role;
+     if (message.role === "system" && !keepSystemRole) {
+       role = "developer";
+     }
+     return {
+       role,
+       content: message.content
+     };
+   } else if (message.isImageMessage()) {
+     return {
+       role: "user",
+       content: [
+         {
+           type: "image_url",
+           image_url: {
+             url: `data:image/${message.format};base64,${message.bytes}`
+           }
+         }
+       ]
+     };
+   } else if (message.isActionExecutionMessage()) {
+     return {
+       role: "assistant",
+       tool_calls: [
+         {
+           id: message.id,
+           type: "function",
+           function: {
+             name: message.name,
+             arguments: JSON.stringify(message.arguments)
+           }
+         }
+       ]
+     };
+   } else if (message.isResultMessage()) {
+     return {
+       role: "tool",
+       content: message.result,
+       tool_call_id: message.actionExecutionId
+     };
+   }
+ }
+ __name(convertMessageToOpenAIMessage, "convertMessageToOpenAIMessage");
+ function convertSystemMessageToAssistantAPI(message) {
+   return {
+     ...message,
+     ...[
+       "system",
+       "developer"
+     ].includes(message.role) && {
+       role: "assistant",
+       content: "THE FOLLOWING MESSAGE IS A SYSTEM MESSAGE: " + message.content
+     }
+   };
+ }
+ __name(convertSystemMessageToAssistantAPI, "convertSystemMessageToAssistantAPI");
+
+ // src/service-adapters/openai/openai-adapter.ts
+ var import_shared3 = require("@copilotkit/shared");
+ var DEFAULT_MODEL = "gpt-4o";
+ var OpenAIAdapter = class {
+   model = DEFAULT_MODEL;
+   provider = "openai";
+   disableParallelToolCalls = false;
+   _openai;
+   keepSystemRole = false;
+   get openai() {
+     return this._openai;
+   }
+   constructor(params) {
+     this._openai = (params == null ? void 0 : params.openai) || new import_openai.default({});
+     if (params == null ? void 0 : params.model) {
+       this.model = params.model;
+     }
+     this.disableParallelToolCalls = (params == null ? void 0 : params.disableParallelToolCalls) || false;
+     this.keepSystemRole = (params == null ? void 0 : params.keepSystemRole) ?? false;
+   }
+   async process(request) {
+     const { threadId: threadIdFromRequest, model = this.model, messages, actions, eventSource, forwardedParameters } = request;
+     const tools = actions.map(convertActionInputToOpenAITool);
+     const threadId = threadIdFromRequest ?? (0, import_shared3.randomUUID)();
+     const validToolUseIds = /* @__PURE__ */ new Set();
+     for (const message of messages) {
+       if (message.isActionExecutionMessage()) {
+         validToolUseIds.add(message.id);
+       }
+     }
+     const filteredMessages = messages.filter((message) => {
+       if (message.isResultMessage()) {
+         if (!validToolUseIds.has(message.actionExecutionId)) {
+           return false;
+         }
+         validToolUseIds.delete(message.actionExecutionId);
+         return true;
+       }
+       return true;
+     });
+     let openaiMessages = filteredMessages.map((m) => convertMessageToOpenAIMessage(m, {
+       keepSystemRole: this.keepSystemRole
+     }));
+     openaiMessages = limitMessagesToTokenCount(openaiMessages, tools, model);
+     let toolChoice = forwardedParameters == null ? void 0 : forwardedParameters.toolChoice;
+     if ((forwardedParameters == null ? void 0 : forwardedParameters.toolChoice) === "function") {
+       toolChoice = {
+         type: "function",
+         function: {
+           name: forwardedParameters.toolChoiceFunctionName
+         }
+       };
+     }
+     try {
+       const stream = this.openai.beta.chat.completions.stream({
+         model,
+         stream: true,
+         messages: openaiMessages,
+         ...tools.length > 0 && {
+           tools
+         },
+         ...(forwardedParameters == null ? void 0 : forwardedParameters.maxTokens) && {
+           max_completion_tokens: forwardedParameters.maxTokens
+         },
+         ...(forwardedParameters == null ? void 0 : forwardedParameters.stop) && {
+           stop: forwardedParameters.stop
+         },
+         ...toolChoice && {
+           tool_choice: toolChoice
+         },
+         ...this.disableParallelToolCalls && {
+           parallel_tool_calls: false
+         },
+         ...(forwardedParameters == null ? void 0 : forwardedParameters.temperature) && {
+           temperature: forwardedParameters.temperature
+         }
+       });
+       eventSource.stream(async (eventStream$) => {
+         var _a, _b;
+         let mode = null;
+         let currentMessageId;
+         let currentToolCallId;
+         try {
+           for await (const chunk of stream) {
+             if (chunk.choices.length === 0) {
+               continue;
+             }
+             const toolCall = (_a = chunk.choices[0].delta.tool_calls) == null ? void 0 : _a[0];
+             const content = chunk.choices[0].delta.content;
+             if (mode === "message" && (toolCall == null ? void 0 : toolCall.id)) {
+               mode = null;
+               eventStream$.sendTextMessageEnd({
+                 messageId: currentMessageId
+               });
+             } else if (mode === "function" && (toolCall === void 0 || (toolCall == null ? void 0 : toolCall.id))) {
+               mode = null;
+               eventStream$.sendActionExecutionEnd({
+                 actionExecutionId: currentToolCallId
+               });
+             }
+             if (mode === null) {
+               if (toolCall == null ? void 0 : toolCall.id) {
+                 mode = "function";
+                 currentToolCallId = toolCall.id;
+                 eventStream$.sendActionExecutionStart({
+                   actionExecutionId: currentToolCallId,
+                   parentMessageId: chunk.id,
+                   actionName: toolCall.function.name
+                 });
+               } else if (content) {
+                 mode = "message";
+                 currentMessageId = chunk.id;
+                 eventStream$.sendTextMessageStart({
+                   messageId: currentMessageId
+                 });
+               }
+             }
+             if (mode === "message" && content) {
+               eventStream$.sendTextMessageContent({
+                 messageId: currentMessageId,
+                 content
+               });
+             } else if (mode === "function" && ((_b = toolCall == null ? void 0 : toolCall.function) == null ? void 0 : _b.arguments)) {
+               eventStream$.sendActionExecutionArgs({
+                 actionExecutionId: currentToolCallId,
+                 args: toolCall.function.arguments
+               });
+             }
+           }
+           if (mode === "message") {
+             eventStream$.sendTextMessageEnd({
+               messageId: currentMessageId
+             });
+           } else if (mode === "function") {
+             eventStream$.sendActionExecutionEnd({
+               actionExecutionId: currentToolCallId
+             });
+           }
+         } catch (error) {
+           console.error("[OpenAI] Error during API call:", error);
+           throw convertServiceAdapterError(error, "OpenAI");
+         }
+         eventStream$.complete();
+       });
+     } catch (error) {
+       console.error("[OpenAI] Error during API call:", error);
+       throw convertServiceAdapterError(error, "OpenAI");
+     }
+     return {
+       threadId
+     };
+   }
+ };
+ __name(OpenAIAdapter, "OpenAIAdapter");
+
+ // src/service-adapters/langchain/utils.ts
+ var import_messages = require("@langchain/core/messages");
+ var import_tools = require("@langchain/core/tools");
+ var import_shared5 = require("@copilotkit/shared");
+ function convertMessageToLangChainMessage(message) {
+   if (message.isTextMessage()) {
+     if (message.role == "user") {
+       return new import_messages.HumanMessage(message.content);
+     } else if (message.role == "assistant") {
+       return new import_messages.AIMessage(message.content);
+     } else if (message.role === "system") {
+       return new import_messages.SystemMessage(message.content);
+     }
+   } else if (message.isActionExecutionMessage()) {
+     return new import_messages.AIMessage({
+       content: "",
+       tool_calls: [
+         {
+           id: message.id,
+           args: message.arguments,
+           name: message.name
+         }
+       ]
+     });
+   } else if (message.isResultMessage()) {
+     return new import_messages.ToolMessage({
+       content: message.result,
+       tool_call_id: message.actionExecutionId
+     });
+   }
+ }
+ __name(convertMessageToLangChainMessage, "convertMessageToLangChainMessage");
+ function convertActionInputToLangChainTool(actionInput) {
+   return new import_tools.DynamicStructuredTool({
+     ...actionInput,
+     name: actionInput.name,
+     description: actionInput.description,
+     schema: (0, import_shared5.convertJsonSchemaToZodSchema)(JSON.parse(actionInput.jsonSchema), true),
+     func: async () => {
+       return "";
+     }
+   });
+ }
+ __name(convertActionInputToLangChainTool, "convertActionInputToLangChainTool");
+ function isAIMessage(message) {
+   return Object.prototype.toString.call(message) === "[object AIMessage]";
+ }
+ __name(isAIMessage, "isAIMessage");
+ function isAIMessageChunk(message) {
+   return Object.prototype.toString.call(message) === "[object AIMessageChunk]";
+ }
+ __name(isAIMessageChunk, "isAIMessageChunk");
+ function isBaseMessageChunk(message) {
+   return Object.prototype.toString.call(message) === "[object BaseMessageChunk]";
+ }
+ __name(isBaseMessageChunk, "isBaseMessageChunk");
+ function maybeSendActionExecutionResultIsMessage(eventStream$, actionExecution) {
+   if (actionExecution) {
+     eventStream$.sendActionExecutionResult({
+       actionExecutionId: actionExecution.id,
+       actionName: actionExecution.name,
+       result: "Sending a message"
+     });
+   }
+ }
+ __name(maybeSendActionExecutionResultIsMessage, "maybeSendActionExecutionResultIsMessage");
+ async function streamLangChainResponse({ result, eventStream$, actionExecution }) {
+   var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+   if (typeof result === "string") {
+     if (!actionExecution || (actionExecution == null ? void 0 : actionExecution.returnDirect)) {
+       eventStream$.sendActionExecutionResult({
+         actionExecutionId: actionExecution.id,
+         actionName: actionExecution.name,
+         result
+       });
+       eventStream$.sendTextMessage((0, import_shared5.randomId)(), result);
+     } else {
+       eventStream$.sendActionExecutionResult({
+         actionExecutionId: actionExecution.id,
+         actionName: actionExecution.name,
+         result
+       });
+     }
+   } else if (isAIMessage(result)) {
+     maybeSendActionExecutionResultIsMessage(eventStream$, actionExecution);
+     if (result.content) {
+       eventStream$.sendTextMessage((0, import_shared5.randomId)(), result.content);
+     }
+     for (const toolCall of result.tool_calls) {
+       eventStream$.sendActionExecution({
+         actionExecutionId: toolCall.id || (0, import_shared5.randomId)(),
+         actionName: toolCall.name,
+         args: JSON.stringify(toolCall.args)
+       });
+     }
+   } else if (isBaseMessageChunk(result)) {
+     maybeSendActionExecutionResultIsMessage(eventStream$, actionExecution);
+     if ((_a = result.lc_kwargs) == null ? void 0 : _a.content) {
+       eventStream$.sendTextMessage((0, import_shared5.randomId)(), result.content);
+     }
+     if ((_b = result.lc_kwargs) == null ? void 0 : _b.tool_calls) {
+       for (const toolCall of (_c = result.lc_kwargs) == null ? void 0 : _c.tool_calls) {
+         eventStream$.sendActionExecution({
+           actionExecutionId: toolCall.id || (0, import_shared5.randomId)(),
+           actionName: toolCall.name,
+           args: JSON.stringify(toolCall.args)
+         });
+       }
+     }
+   } else if (result && "getReader" in result) {
+     maybeSendActionExecutionResultIsMessage(eventStream$, actionExecution);
+     let reader = result.getReader();
+     let mode = null;
+     let currentMessageId;
+     const toolCallDetails = {
+       name: null,
+       id: null,
+       index: null,
+       prevIndex: null
+     };
+     while (true) {
+       try {
+         const { done, value } = await reader.read();
+         let toolCallName = void 0;
+         let toolCallId = void 0;
+         let toolCallArgs = void 0;
+         let hasToolCall = false;
+         let content = "";
+         if (value && value.content) {
+           content = Array.isArray(value.content) ? ((_d = value.content[0]) == null ? void 0 : _d.text) ?? "" : value.content;
+         }
+         if (isAIMessageChunk(value)) {
+           let chunk = (_e = value.tool_call_chunks) == null ? void 0 : _e[0];
+           toolCallArgs = chunk == null ? void 0 : chunk.args;
+           hasToolCall = chunk != void 0;
+           if (chunk == null ? void 0 : chunk.name)
+             toolCallDetails.name = chunk.name;
+           if ((chunk == null ? void 0 : chunk.index) != null) {
+             toolCallDetails.index = chunk.index;
+             if (toolCallDetails.prevIndex == null)
+               toolCallDetails.prevIndex = chunk.index;
+           }
+           if (chunk == null ? void 0 : chunk.id)
+             toolCallDetails.id = chunk.index != null ? `${chunk.id}-idx-${chunk.index}` : chunk.id;
+           toolCallName = toolCallDetails.name;
+           toolCallId = toolCallDetails.id;
+         } else if (isBaseMessageChunk(value)) {
+           let chunk = (_g = (_f = value.additional_kwargs) == null ? void 0 : _f.tool_calls) == null ? void 0 : _g[0];
+           toolCallName = (_h = chunk == null ? void 0 : chunk.function) == null ? void 0 : _h.name;
+           toolCallId = chunk == null ? void 0 : chunk.id;
+           toolCallArgs = (_i = chunk == null ? void 0 : chunk.function) == null ? void 0 : _i.arguments;
+           hasToolCall = (chunk == null ? void 0 : chunk.function) != void 0;
+         }
+         if (mode === "message" && (toolCallId || done)) {
+           mode = null;
+           eventStream$.sendTextMessageEnd({
+             messageId: currentMessageId
+           });
+         } else if (mode === "function" && (!hasToolCall || done)) {
+           mode = null;
+           eventStream$.sendActionExecutionEnd({
+             actionExecutionId: toolCallId
+           });
+         }
+         if (done) {
+           break;
+         }
+         if (mode === null) {
+           if (hasToolCall && toolCallId && toolCallName) {
+             mode = "function";
+             eventStream$.sendActionExecutionStart({
+               actionExecutionId: toolCallId,
+               actionName: toolCallName,
+               parentMessageId: (_j = value.lc_kwargs) == null ? void 0 : _j.id
+             });
+           } else if (content) {
+             mode = "message";
+             currentMessageId = ((_k = value.lc_kwargs) == null ? void 0 : _k.id) || (0, import_shared5.randomId)();
+             eventStream$.sendTextMessageStart({
+               messageId: currentMessageId
+             });
+           }
+         }
+         if (mode === "message" && content) {
+           eventStream$.sendTextMessageContent({
+             messageId: currentMessageId,
+             content
+           });
+         } else if (mode === "function" && toolCallArgs) {
+           if (toolCallDetails.index !== toolCallDetails.prevIndex) {
+             eventStream$.sendActionExecutionEnd({
+               actionExecutionId: toolCallId
+             });
+             eventStream$.sendActionExecutionStart({
+               actionExecutionId: toolCallId,
+               actionName: toolCallName,
+               parentMessageId: (_l = value.lc_kwargs) == null ? void 0 : _l.id
+             });
+             toolCallDetails.prevIndex = toolCallDetails.index;
+           }
+           eventStream$.sendActionExecutionArgs({
+             actionExecutionId: toolCallId,
+             args: toolCallArgs
+           });
+         }
+       } catch (error) {
+         console.error("Error reading from stream", error);
+         break;
+       }
+     }
+   } else if (actionExecution) {
+     eventStream$.sendActionExecutionResult({
+       actionExecutionId: actionExecution.id,
+       actionName: actionExecution.name,
+       result: encodeResult(result)
+     });
+   } else {
+     throw new Error("Invalid return type from LangChain function.");
+   }
+   eventStream$.complete();
+ }
+ __name(streamLangChainResponse, "streamLangChainResponse");
+ function encodeResult(result) {
+   if (result === void 0) {
+     return "";
+   } else if (typeof result === "string") {
+     return result;
+   } else {
+     return JSON.stringify(result);
+   }
+ }
+ __name(encodeResult, "encodeResult");
+
+ // src/service-adapters/langchain/langchain-adapter.ts
+ var import_shared6 = require("@copilotkit/shared");
+ var import_promises = require("@langchain/core/callbacks/promises");
+ var LangChainAdapter = class {
+   options;
+   /**
+    * To use LangChain as a backend, provide a handler function to the adapter with your custom LangChain logic.
+    */
+   constructor(options) {
+     this.options = options;
+   }
+   async process(request) {
+     try {
+       const { eventSource, model, actions, messages, runId, threadId: threadIdFromRequest } = request;
+       const threadId = threadIdFromRequest ?? (0, import_shared6.randomUUID)();
+       const result = await this.options.chainFn({
+         messages: messages.map(convertMessageToLangChainMessage),
+         tools: actions.map(convertActionInputToLangChainTool),
+         model,
+         threadId,
+         runId
+       });
+       eventSource.stream(async (eventStream$) => {
+         await streamLangChainResponse({
+           result,
+           eventStream$
+         });
+       });
+       return {
+         threadId
+       };
+     } finally {
+       await (0, import_promises.awaitAllCallbacks)();
+     }
+   }
+ };
+ __name(LangChainAdapter, "LangChainAdapter");
+
+ // src/service-adapters/google/google-genai-adapter.ts
+ var import_google_gauth = require("@langchain/google-gauth");
+ var import_messages2 = require("@langchain/core/messages");
+ var DEFAULT_MODEL2 = "gemini-1.5-pro";
+ var GoogleGenerativeAIAdapter = class extends LangChainAdapter {
+   provider = "google";
+   model = DEFAULT_MODEL2;
+   constructor(options) {
+     super({
+       chainFn: async ({ messages, tools, threadId }) => {
+         const filteredMessages = messages.filter((message) => {
+           if (!(message instanceof import_messages2.AIMessage)) {
+             return true;
+           }
+           return message.content && String(message.content).trim().length > 0 || message.tool_calls && message.tool_calls.length > 0;
+         });
+         this.model = (options == null ? void 0 : options.model) ?? "gemini-1.5-pro";
+         const model = new import_google_gauth.ChatGoogle({
+           apiKey: (options == null ? void 0 : options.apiKey) ?? process.env.GOOGLE_API_KEY,
+           modelName: this.model,
+           apiVersion: "v1beta"
+         }).bindTools(tools);
+         return model.stream(filteredMessages, {
+           metadata: {
+             conversation_id: threadId
+           }
+         });
+       }
+     });
+   }
+ };
+ __name(GoogleGenerativeAIAdapter, "GoogleGenerativeAIAdapter");
+
+ // src/service-adapters/openai/openai-assistant-adapter.ts
+ var import_openai2 = __toESM(require("openai"));
+ var OpenAIAssistantAdapter = class {
+   openai;
+   codeInterpreterEnabled;
+   assistantId;
+   fileSearchEnabled;
+   disableParallelToolCalls;
+   keepSystemRole = false;
+   constructor(params) {
+     this.openai = params.openai || new import_openai2.default({});
+     this.codeInterpreterEnabled = params.codeInterpreterEnabled === false || true;
+     this.fileSearchEnabled = params.fileSearchEnabled === false || true;
+     this.assistantId = params.assistantId;
+     this.disableParallelToolCalls = (params == null ? void 0 : params.disableParallelToolCalls) || false;
+     this.keepSystemRole = (params == null ? void 0 : params.keepSystemRole) ?? false;
+   }
+   async process(request) {
+     var _a, _b;
+     const { messages, actions, eventSource, runId, forwardedParameters } = request;
+     let threadId = (_b = (_a = request.extensions) == null ? void 0 : _a.openaiAssistantAPI) == null ? void 0 : _b.threadId;
+     if (!threadId) {
+       threadId = (await this.openai.beta.threads.create()).id;
+     }
+     const lastMessage = messages.at(-1);
+     let nextRunId = void 0;
+     if (lastMessage.isResultMessage() && runId) {
+       nextRunId = await this.submitToolOutputs(threadId, runId, messages, eventSource);
+     } else if (lastMessage.isTextMessage()) {
+       nextRunId = await this.submitUserMessage(threadId, messages, actions, eventSource, forwardedParameters);
+     } else {
+       throw new Error("No actionable message found in the messages");
+     }
+     return {
+       runId: nextRunId,
+       threadId,
+       extensions: {
+         ...request.extensions,
+         openaiAssistantAPI: {
+           threadId,
+           runId: nextRunId
+         }
+       }
+     };
+   }
+   async submitToolOutputs(threadId, runId, messages, eventSource) {
+     let run = await this.openai.beta.threads.runs.retrieve(threadId, runId);
+     if (!run.required_action) {
+       throw new Error("No tool outputs required");
+     }
+     const toolCallsIds = run.required_action.submit_tool_outputs.tool_calls.map((toolCall) => toolCall.id);
+     const resultMessages = messages.filter((message) => message.isResultMessage() && toolCallsIds.includes(message.actionExecutionId));
+     if (toolCallsIds.length != resultMessages.length) {
+       throw new Error("Number of function results does not match the number of tool calls");
+     }
+     const toolOutputs = resultMessages.map((message) => {
+       return {
+         tool_call_id: message.actionExecutionId,
+         output: message.result
+       };
+     });
+     const stream = this.openai.beta.threads.runs.submitToolOutputsStream(threadId, runId, {
+       tool_outputs: toolOutputs,
+       ...this.disableParallelToolCalls && {
+         parallel_tool_calls: false
+       }
+     });
+     await this.streamResponse(stream, eventSource);
+     return runId;
+   }
+   async submitUserMessage(threadId, messages, actions, eventSource, forwardedParameters) {
+     messages = [
+       ...messages
+     ];
+     const instructionsMessage = messages.shift();
+     const instructions = instructionsMessage.isTextMessage() ? instructionsMessage.content : "";
+     const userMessage = messages.map((m) => convertMessageToOpenAIMessage(m, {
+       keepSystemRole: this.keepSystemRole
+     })).map(convertSystemMessageToAssistantAPI).at(-1);
+     if (userMessage.role !== "user") {
+       throw new Error("No user message found");
+     }
+     await this.openai.beta.threads.messages.create(threadId, {
+       role: "user",
+       content: userMessage.content
+     });
+     const openaiTools = actions.map(convertActionInputToOpenAITool);
+     const tools = [
+       ...openaiTools,
+       ...this.codeInterpreterEnabled ? [
+         {
+           type: "code_interpreter"
+         }
+       ] : [],
+       ...this.fileSearchEnabled ? [
+         {
+           type: "file_search"
+         }
+       ] : []
+     ];
+     let stream = this.openai.beta.threads.runs.stream(threadId, {
+       assistant_id: this.assistantId,
+       instructions,
+       tools,
+       ...(forwardedParameters == null ? void 0 : forwardedParameters.maxTokens) && {
+         max_completion_tokens: forwardedParameters.maxTokens
+       },
+       ...this.disableParallelToolCalls && {
+         parallel_tool_calls: false
+       }
+     });
+     await this.streamResponse(stream, eventSource);
+     return getRunIdFromStream(stream);
+   }
+   async streamResponse(stream, eventSource) {
+     eventSource.stream(async (eventStream$) => {
+       var _a, _b, _c, _d, _e, _f;
+       let inFunctionCall = false;
+       let currentMessageId;
+       let currentToolCallId;
+       for await (const chunk of stream) {
+         switch (chunk.event) {
+           case "thread.message.created":
+             if (inFunctionCall) {
+               eventStream$.sendActionExecutionEnd({
+                 actionExecutionId: currentToolCallId
+               });
+             }
+             currentMessageId = chunk.data.id;
+             eventStream$.sendTextMessageStart({
+               messageId: currentMessageId
+             });
+             break;
+           case "thread.message.delta":
+             if (((_a = chunk.data.delta.content) == null ? void 0 : _a[0].type) === "text") {
+               eventStream$.sendTextMessageContent({
+                 messageId: currentMessageId,
+                 content: (_b = chunk.data.delta.content) == null ? void 0 : _b[0].text.value
+               });
+             }
+             break;
+           case "thread.message.completed":
+             eventStream$.sendTextMessageEnd({
+               messageId: currentMessageId
+             });
+             break;
+           case "thread.run.step.delta":
+             let toolCallId;
+             let toolCallName;
+             let toolCallArgs;
+             if (chunk.data.delta.step_details.type === "tool_calls" && ((_c = chunk.data.delta.step_details.tool_calls) == null ? void 0 : _c[0].type) === "function") {
+               toolCallId = (_d = chunk.data.delta.step_details.tool_calls) == null ? void 0 : _d[0].id;
+               toolCallName = (_e = chunk.data.delta.step_details.tool_calls) == null ? void 0 : _e[0].function.name;
+               toolCallArgs = (_f = chunk.data.delta.step_details.tool_calls) == null ? void 0 : _f[0].function.arguments;
+             }
+             if (toolCallName && toolCallId) {
+               if (inFunctionCall) {
+                 eventStream$.sendActionExecutionEnd({
+                   actionExecutionId: currentToolCallId
+                 });
+               }
+               inFunctionCall = true;
+               currentToolCallId = toolCallId;
+               eventStream$.sendActionExecutionStart({
+                 actionExecutionId: currentToolCallId,
+                 parentMessageId: chunk.data.id,
+                 actionName: toolCallName
+               });
+             } else if (toolCallArgs) {
+               eventStream$.sendActionExecutionArgs({
+                 actionExecutionId: currentToolCallId,
+                 args: toolCallArgs
+               });
+             }
+             break;
+         }
+       }
+       if (inFunctionCall) {
+         eventStream$.sendActionExecutionEnd({
+           actionExecutionId: currentToolCallId
+         });
+       }
+       eventStream$.complete();
+     });
+   }
+ };
+ __name(OpenAIAssistantAdapter, "OpenAIAssistantAdapter");
+ function getRunIdFromStream(stream) {
+   return new Promise((resolve, reject) => {
+     let runIdGetter = /* @__PURE__ */ __name((event) => {
+       if (event.event === "thread.run.created") {
+         const runId = event.data.id;
+         stream.off("event", runIdGetter);
+         resolve(runId);
+       }
+     }, "runIdGetter");
+     stream.on("event", runIdGetter);
+   });
+ }
+ __name(getRunIdFromStream, "getRunIdFromStream");
+
+ // src/service-adapters/unify/unify-adapter.ts
+ var import_openai3 = __toESM(require("openai"));
+ var import_shared7 = require("@copilotkit/shared");
+ var UnifyAdapter = class {
+   apiKey;
+   model;
+   start;
+   provider = "unify";
+   constructor(options) {
+     if (options == null ? void 0 : options.apiKey) {
+       this.apiKey = options.apiKey;
+     } else {
+       this.apiKey = "UNIFY_API_KEY";
+     }
+     this.model = options == null ? void 0 : options.model;
+     this.start = true;
+   }
+   async process(request) {
+     const tools = request.actions.map(convertActionInputToOpenAITool);
+     const openai = new import_openai3.default({
+       apiKey: this.apiKey,
+       baseURL: "https://api.unify.ai/v0/"
+     });
+     const forwardedParameters = request.forwardedParameters;
+     const messages = request.messages.map((m) => convertMessageToOpenAIMessage(m));
+     const stream = await openai.chat.completions.create({
+       model: this.model,
+       messages,
+       stream: true,
+       ...tools.length > 0 && {
+         tools
+       },
+       ...(forwardedParameters == null ? void 0 : forwardedParameters.temperature) && {
+         temperature: forwardedParameters.temperature
+       }
+     });
+     let model = null;
+     let currentMessageId;
+     let currentToolCallId;
+     request.eventSource.stream(async (eventStream$) => {
+       var _a, _b;
+       let mode = null;
+       for await (const chunk of stream) {
+         if (this.start) {
+           model = chunk.model;
+           currentMessageId = (0, import_shared7.randomId)();
+           eventStream$.sendTextMessageStart({
+             messageId: currentMessageId
+           });
+           eventStream$.sendTextMessageContent({
+             messageId: currentMessageId,
+             content: `Model used: ${model}
+ `
+           });
+           eventStream$.sendTextMessageEnd({
+             messageId: currentMessageId
+           });
+           this.start = false;
+         }
+         const toolCall = (_a = chunk.choices[0].delta.tool_calls) == null ? void 0 : _a[0];
+         const content = chunk.choices[0].delta.content;
+         if (mode === "message" && (toolCall == null ? void 0 : toolCall.id)) {
+           mode = null;
+           eventStream$.sendTextMessageEnd({
+             messageId: currentMessageId
+           });
+         } else if (mode === "function" && (toolCall === void 0 || (toolCall == null ? void 0 : toolCall.id))) {
+           mode = null;
+           eventStream$.sendActionExecutionEnd({
+             actionExecutionId: currentToolCallId
+           });
+         }
+         if (mode === null) {
+           if (toolCall == null ? void 0 : toolCall.id) {
+             mode = "function";
+             currentToolCallId = toolCall.id;
+             eventStream$.sendActionExecutionStart({
+               actionExecutionId: currentToolCallId,
+               actionName: toolCall.function.name
+             });
+           } else if (content) {
+             mode = "message";
+             currentMessageId = chunk.id;
+             eventStream$.sendTextMessageStart({
+               messageId: currentMessageId
+             });
+           }
+         }
+         if (mode === "message" && content) {
+           eventStream$.sendTextMessageContent({
+             messageId: currentMessageId,
+             content
+           });
+         } else if (mode === "function" && ((_b = toolCall == null ? void 0 : toolCall.function) == null ? void 0 : _b.arguments)) {
+           eventStream$.sendActionExecutionArgs({
+             actionExecutionId: currentToolCallId,
+             args: toolCall.function.arguments
+           });
+         }
+       }
+       if (mode === "message") {
+         eventStream$.sendTextMessageEnd({
+           messageId: currentMessageId
+         });
+       } else if (mode === "function") {
+         eventStream$.sendActionExecutionEnd({
+           actionExecutionId: currentToolCallId
+         });
+       }
+       eventStream$.complete();
+     });
+     return {
+       threadId: request.threadId || (0, import_shared7.randomUUID)()
+     };
+   }
+ };
+ __name(UnifyAdapter, "UnifyAdapter");
+
+ // src/service-adapters/groq/groq-adapter.ts
+ var import_groq_sdk = require("groq-sdk");
+ var import_shared8 = require("@copilotkit/shared");
+ var DEFAULT_MODEL3 = "llama-3.3-70b-versatile";
+ var GroqAdapter = class {
+   model = DEFAULT_MODEL3;
+   provider = "groq";
+   disableParallelToolCalls = false;
+   _groq;
+   get groq() {
+     return this._groq;
+   }
+   constructor(params) {
+     this._groq = (params == null ? void 0 : params.groq) || new import_groq_sdk.Groq({});
+     if (params == null ? void 0 : params.model) {
+       this.model = params.model;
+     }
+     this.disableParallelToolCalls = (params == null ? void 0 : params.disableParallelToolCalls) || false;
+   }
+   async process(request) {
+     const { threadId, model = this.model, messages, actions, eventSource, forwardedParameters } = request;
+     const tools = actions.map(convertActionInputToOpenAITool);
+     let openaiMessages = messages.map((m) => convertMessageToOpenAIMessage(m, {
+       keepSystemRole: true
+     }));
+     openaiMessages = limitMessagesToTokenCount(openaiMessages, tools, model);
+     let toolChoice = forwardedParameters == null ? void 0 : forwardedParameters.toolChoice;
+     if ((forwardedParameters == null ? void 0 : forwardedParameters.toolChoice) === "function") {
+       toolChoice = {
+         type: "function",
+         function: {
+           name: forwardedParameters.toolChoiceFunctionName
+         }
+       };
+     }
+     let stream;
+     try {
+       stream = await this.groq.chat.completions.create({
+         model,
+         stream: true,
+         messages: openaiMessages,
+         ...tools.length > 0 && {
+           tools
+         },
+         ...(forwardedParameters == null ? void 0 : forwardedParameters.maxTokens) && {
+           max_tokens: forwardedParameters.maxTokens
+         },
+         ...(forwardedParameters == null ? void 0 : forwardedParameters.stop) && {
+           stop: forwardedParameters.stop
+         },
+         ...toolChoice && {
+           tool_choice: toolChoice
+         },
+         ...this.disableParallelToolCalls && {
+           parallel_tool_calls: false
+         },
+         ...(forwardedParameters == null ? void 0 : forwardedParameters.temperature) && {
+           temperature: forwardedParameters.temperature
+         }
+       });
+     } catch (error) {
+       throw convertServiceAdapterError(error, "Groq");
+     }
+     eventSource.stream(async (eventStream$) => {
+       var _a, _b;
+       let mode = null;
+       let currentMessageId;
+       let currentToolCallId;
+       try {
+         for await (const chunk of stream) {
+           const toolCall = (_a = chunk.choices[0].delta.tool_calls) == null ? void 0 : _a[0];
+           const content = chunk.choices[0].delta.content;
+           if (mode === "message" && (toolCall == null ? void 0 : toolCall.id)) {
+             mode = null;
+             eventStream$.sendTextMessageEnd({
+               messageId: currentMessageId
+             });
+           } else if (mode === "function" && (toolCall === void 0 || (toolCall == null ? void 0 : toolCall.id))) {
+             mode = null;
+             eventStream$.sendActionExecutionEnd({
+               actionExecutionId: currentToolCallId
+             });
+           }
+           if (mode === null) {
+             if (toolCall == null ? void 0 : toolCall.id) {
+               mode = "function";
+               currentToolCallId = toolCall.id;
+               eventStream$.sendActionExecutionStart({
+                 actionExecutionId: currentToolCallId,
+                 actionName: toolCall.function.name,
+                 parentMessageId: chunk.id
+               });
+             } else if (content) {
+               mode = "message";
+               currentMessageId = chunk.id;
+               eventStream$.sendTextMessageStart({
+                 messageId: currentMessageId
+               });
+             }
+           }
+           if (mode === "message" && content) {
+             eventStream$.sendTextMessageContent({
+               messageId: currentMessageId,
+               content
+             });
+           } else if (mode === "function" && ((_b = toolCall == null ? void 0 : toolCall.function) == null ? void 0 : _b.arguments)) {
+             eventStream$.sendActionExecutionArgs({
+               actionExecutionId: currentToolCallId,
+               args: toolCall.function.arguments
+             });
+           }
+         }
+         if (mode === "message") {
+           eventStream$.sendTextMessageEnd({
+             messageId: currentMessageId
+           });
+         } else if (mode === "function") {
+           eventStream$.sendActionExecutionEnd({
+             actionExecutionId: currentToolCallId
+           });
+         }
+       } catch (error) {
+         throw convertServiceAdapterError(error, "Groq");
+       }
+       eventStream$.complete();
+     });
+     return {
+       threadId: request.threadId || (0, import_shared8.randomUUID)()
+     };
+   }
+ };
+ __name(GroqAdapter, "GroqAdapter");
+
+ // src/service-adapters/anthropic/anthropic-adapter.ts
+ var import_sdk = __toESM(require("@anthropic-ai/sdk"));
+
+ // src/service-adapters/anthropic/utils.ts
+ function limitMessagesToTokenCount2(messages, tools, model, maxTokens) {
+   maxTokens || (maxTokens = MAX_TOKENS);
+   const result = [];
+   const toolsNumTokens = countToolsTokens2(model, tools);
+   if (toolsNumTokens > maxTokens) {
+     throw new Error(`Too many tokens in function definitions: ${toolsNumTokens} > ${maxTokens}`);
+   }
+   maxTokens -= toolsNumTokens;
+   for (const message of messages) {
+     if (message.role === "system") {
+       const numTokens = countMessageTokens2(model, message);
+       maxTokens -= numTokens;
+       if (maxTokens < 0) {
+         throw new Error("Not enough tokens for system message.");
+       }
+     }
+   }
+   let cutoff = false;
+   const reversedMessages = [
+     ...messages
+   ].reverse();
+   for (const message of reversedMessages) {
+     if (message.role === "system") {
+       result.unshift(message);
+       continue;
+     } else if (cutoff) {
+       continue;
+     }
+     let numTokens = countMessageTokens2(model, message);
+     if (maxTokens < numTokens) {
+       cutoff = true;
+       continue;
+     }
+     result.unshift(message);
+     maxTokens -= numTokens;
+   }
+   return result;
+ }
+ __name(limitMessagesToTokenCount2, "limitMessagesToTokenCount");
+ var MAX_TOKENS = 128e3;
+ function countToolsTokens2(model, tools) {
+   if (tools.length === 0) {
+     return 0;
+   }
+   const json = JSON.stringify(tools);
+   return countTokens2(model, json);
+ }
+ __name(countToolsTokens2, "countToolsTokens");
+ function countMessageTokens2(model, message) {
+   return countTokens2(model, JSON.stringify(message.content) || "");
+ }
+ __name(countMessageTokens2, "countMessageTokens");
+ function countTokens2(model, text) {
+   return text.length / 3;
+ }
+ __name(countTokens2, "countTokens");
+ function convertActionInputToAnthropicTool(action) {
+   return {
+     name: action.name,
+     description: action.description,
+     input_schema: JSON.parse(action.jsonSchema)
+   };
+ }
+ __name(convertActionInputToAnthropicTool, "convertActionInputToAnthropicTool");
+ function convertMessageToAnthropicMessage(message) {
+   if (message.isTextMessage()) {
+     if (message.role === "system") {
+       return {
+         role: "assistant",
+         content: [
+           {
+             type: "text",
+             text: "THE FOLLOWING MESSAGE IS A SYSTEM MESSAGE: " + message.content
+           }
+         ]
+       };
+     } else {
+       return {
+         role: message.role === "user" ? "user" : "assistant",
+         content: [
+           {
+             type: "text",
+             text: message.content
+           }
+         ]
+       };
+     }
+   } else if (message.isImageMessage()) {
+     let mediaType;
+     switch (message.format) {
+       case "jpeg":
+         mediaType = "image/jpeg";
+         break;
+       case "png":
+         mediaType = "image/png";
+         break;
+       case "webp":
+         mediaType = "image/webp";
+         break;
+       case "gif":
+         mediaType = "image/gif";
+         break;
+       default:
+         throw new Error(`Unsupported image format: ${message.format}`);
+     }
+     return {
+       role: "user",
+       content: [
+         {
+           type: "image",
+           source: {
+             type: "base64",
+             media_type: mediaType,
+             data: message.bytes
+           }
+         }
+       ]
+     };
+   } else if (message.isActionExecutionMessage()) {
+     return {
+       role: "assistant",
+       content: [
+         {
+           id: message.id,
+           type: "tool_use",
+           input: message.arguments,
+           name: message.name
+         }
+       ]
+     };
+   } else if (message.isResultMessage()) {
+     return {
+       role: "user",
+       content: [
+         {
+           type: "tool_result",
+           content: message.result || "Action completed successfully",
+           tool_use_id: message.actionExecutionId
+         }
+       ]
+     };
+   }
+ }
+ __name(convertMessageToAnthropicMessage, "convertMessageToAnthropicMessage");
+
+ // src/service-adapters/anthropic/anthropic-adapter.ts
+ var import_shared10 = require("@copilotkit/shared");
+ var DEFAULT_MODEL4 = "claude-3-5-sonnet-latest";
+ var AnthropicAdapter = class {
+   model = DEFAULT_MODEL4;
+   provider = "anthropic";
+   promptCaching;
+   _anthropic;
+   get anthropic() {
+     return this._anthropic;
+   }
+   constructor(params) {
+     this._anthropic = (params == null ? void 0 : params.anthropic) || new import_sdk.default({});
+     if (params == null ? void 0 : params.model) {
+       this.model = params.model;
+     }
+     this.promptCaching = (params == null ? void 0 : params.promptCaching) || {
+       enabled: false
+     };
+   }
+   /**
+    * Adds cache control to system prompt
+    */
+   addSystemPromptCaching(system, debug = false) {
+     if (!this.promptCaching.enabled || !system) {
+       return system;
+     }
+     const originalTextLength = system.length;
+     if (debug) {
+       console.log(`[ANTHROPIC CACHE DEBUG] Added cache control to system prompt (${originalTextLength} chars).`);
+     }
+     return [
+       {
+         type: "text",
+         text: system,
+         cache_control: {
+           type: "ephemeral"
+         }
+       }
+     ];
+   }
+   /**
+    * Adds cache control to the final message
+    */
+   addIncrementalMessageCaching(messages, debug = false) {
+     if (!this.promptCaching.enabled || messages.length === 0) {
+       return messages;
+     }
+     const finalMessage = messages[messages.length - 1];
+     const messageNumber = messages.length;
+     if (Array.isArray(finalMessage.content) && finalMessage.content.length > 0) {
+       const finalBlock = finalMessage.content[finalMessage.content.length - 1];
+       const updatedMessages = [
+         ...messages.slice(0, -1),
+         {
+           ...finalMessage,
+           content: [
+             ...finalMessage.content.slice(0, -1),
+             {
+               ...finalBlock,
+               cache_control: {
+                 type: "ephemeral"
+               }
+             }
+           ]
+         }
+       ];
+       if (debug) {
+         console.log(`[ANTHROPIC CACHE DEBUG] Added cache control to final message (message ${messageNumber}).`);
+       }
+       return updatedMessages;
+     }
+     return messages;
+   }
+   shouldGenerateFallbackResponse(messages) {
+     var _a, _b, _c;
+     if (messages.length === 0)
+       return false;
+     const lastMessage = messages[messages.length - 1];
+     const endsWithToolResult = lastMessage.role === "user" && Array.isArray(lastMessage.content) && lastMessage.content.some((content) => content.type === "tool_result");
+     if (messages.length >= 3 && endsWithToolResult) {
+       const lastThree = messages.slice(-3);
+       const hasRecentToolPattern = ((_a = lastThree[0]) == null ? void 0 : _a.role) === "user" && // Initial user message
+       ((_b = lastThree[1]) == null ? void 0 : _b.role) === "assistant" && // Assistant tool use
+       Array.isArray(lastThree[1].content) && lastThree[1].content.some((content) => content.type === "tool_use") && ((_c = lastThree[2]) == null ? void 0 : _c.role) === "user" && // Tool result
+       Array.isArray(lastThree[2].content) && lastThree[2].content.some((content) => content.type === "tool_result");
+       return hasRecentToolPattern;
+     }
+     return endsWithToolResult;
+   }
+   async process(request) {
+     const { threadId, model = this.model, messages: rawMessages, actions, eventSource, forwardedParameters } = request;
+     const tools = actions.map(convertActionInputToAnthropicTool);
+     const messages = [
+       ...rawMessages
+     ];
+     const instructionsMessage = messages.shift();
+     const instructions = instructionsMessage.isTextMessage() ? instructionsMessage.content : "";
+     const validToolUseIds = /* @__PURE__ */ new Set();
+     for (const message of messages) {
+       if (message.isActionExecutionMessage()) {
+         validToolUseIds.add(message.id);
+       }
+     }
+     const processedToolResultIds = /* @__PURE__ */ new Set();
+     const anthropicMessages = messages.map((message) => {
+       if (message.isResultMessage()) {
+         if (!validToolUseIds.has(message.actionExecutionId)) {
+           return null;
+         }
+         if (processedToolResultIds.has(message.actionExecutionId)) {
+           return null;
+         }
+         processedToolResultIds.add(message.actionExecutionId);
+         return {
+           role: "user",
+           content: [
+             {
+               type: "tool_result",
+               content: message.result || "Action completed successfully",
+               tool_use_id: message.actionExecutionId
+             }
+           ]
+         };
+       }
+       return convertMessageToAnthropicMessage(message);
+     }).filter(Boolean).filter((msg) => {
+       if (msg.role === "assistant" && Array.isArray(msg.content)) {
+         const hasEmptyTextOnly = msg.content.length === 1 && msg.content[0].type === "text" && (!msg.content[0].text || msg.content[0].text.trim() === "");
+         return !hasEmptyTextOnly;
+       }
+       return true;
+     });
+     const limitedMessages = limitMessagesToTokenCount2(anthropicMessages, tools, model);
+     const cachedSystemPrompt = this.addSystemPromptCaching(instructions, this.promptCaching.debug);
+     const cachedMessages = this.addIncrementalMessageCaching(limitedMessages, this.promptCaching.debug);
+     let toolChoice = forwardedParameters == null ? void 0 : forwardedParameters.toolChoice;
+     if ((forwardedParameters == null ? void 0 : forwardedParameters.toolChoice) === "function") {
+       toolChoice = {
+         type: "tool",
+         name: forwardedParameters.toolChoiceFunctionName
+       };
+     }
+     try {
+       const createParams = {
+         system: cachedSystemPrompt,
1560
+ model: this.model,
1561
+ messages: cachedMessages,
1562
+ max_tokens: (forwardedParameters == null ? void 0 : forwardedParameters.maxTokens) || 1024,
1563
+ ...(forwardedParameters == null ? void 0 : forwardedParameters.temperature) ? {
1564
+ temperature: forwardedParameters.temperature
1565
+ } : {},
1566
+ ...tools.length > 0 && {
1567
+ tools
1568
+ },
1569
+ ...toolChoice && {
1570
+ tool_choice: toolChoice
1571
+ },
1572
+ stream: true
1573
+ };
1574
+ const stream = await this.anthropic.messages.create(createParams);
1575
+ eventSource.stream(async (eventStream$) => {
1576
+ let mode = null;
1577
+ let didOutputText = false;
1578
+ let currentMessageId = (0, import_shared10.randomId)();
1579
+ let currentToolCallId = (0, import_shared10.randomId)();
1580
+ let filterThinkingTextBuffer = new FilterThinkingTextBuffer();
1581
+ let hasReceivedContent = false;
1582
+ try {
1583
+ for await (const chunk of stream) {
1584
+ if (chunk.type === "message_start") {
1585
+ currentMessageId = chunk.message.id;
1586
+ } else if (chunk.type === "content_block_start") {
1587
+ hasReceivedContent = true;
1588
+ if (chunk.content_block.type === "text") {
1589
+ didOutputText = false;
1590
+ filterThinkingTextBuffer.reset();
1591
+ mode = "message";
1592
+ } else if (chunk.content_block.type === "tool_use") {
1593
+ currentToolCallId = chunk.content_block.id;
1594
+ eventStream$.sendActionExecutionStart({
1595
+ actionExecutionId: currentToolCallId,
1596
+ actionName: chunk.content_block.name,
1597
+ parentMessageId: currentMessageId
1598
+ });
1599
+ mode = "function";
1600
+ }
1601
+ } else if (chunk.type === "content_block_delta") {
1602
+ if (chunk.delta.type === "text_delta") {
1603
+ const text = filterThinkingTextBuffer.onTextChunk(chunk.delta.text);
1604
+ if (text.length > 0) {
1605
+ if (!didOutputText) {
1606
+ eventStream$.sendTextMessageStart({
1607
+ messageId: currentMessageId
1608
+ });
1609
+ didOutputText = true;
1610
+ }
1611
+ eventStream$.sendTextMessageContent({
1612
+ messageId: currentMessageId,
1613
+ content: text
1614
+ });
1615
+ }
1616
+ } else if (chunk.delta.type === "input_json_delta") {
1617
+ eventStream$.sendActionExecutionArgs({
1618
+ actionExecutionId: currentToolCallId,
1619
+ args: chunk.delta.partial_json
1620
+ });
1621
+ }
1622
+ } else if (chunk.type === "content_block_stop") {
1623
+ if (mode === "message") {
1624
+ if (didOutputText) {
1625
+ eventStream$.sendTextMessageEnd({
1626
+ messageId: currentMessageId
1627
+ });
1628
+ }
1629
+ } else if (mode === "function") {
1630
+ eventStream$.sendActionExecutionEnd({
1631
+ actionExecutionId: currentToolCallId
1632
+ });
1633
+ }
1634
+ }
1635
+ }
1636
+ } catch (error) {
1637
+ throw convertServiceAdapterError(error, "Anthropic");
1638
+ }
1639
+ if (!hasReceivedContent && this.shouldGenerateFallbackResponse(cachedMessages)) {
1640
+ let fallbackContent = "Task completed successfully.";
1641
+ const lastMessage = cachedMessages[cachedMessages.length - 1];
1642
+ if ((lastMessage == null ? void 0 : lastMessage.role) === "user" && Array.isArray(lastMessage.content)) {
1643
+ const toolResult = lastMessage.content.find((c) => c.type === "tool_result");
1644
+ if ((toolResult == null ? void 0 : toolResult.content) && toolResult.content !== "Action completed successfully") {
1645
+ fallbackContent = toolResult.content;
1646
+ }
1647
+ }
1648
+ currentMessageId = (0, import_shared10.randomId)();
1649
+ eventStream$.sendTextMessageStart({
1650
+ messageId: currentMessageId
1651
+ });
1652
+ eventStream$.sendTextMessageContent({
1653
+ messageId: currentMessageId,
1654
+ content: fallbackContent
1655
+ });
1656
+ eventStream$.sendTextMessageEnd({
1657
+ messageId: currentMessageId
1658
+ });
1659
+ }
1660
+ eventStream$.complete();
1661
+ });
1662
+ } catch (error) {
1663
+ throw convertServiceAdapterError(error, "Anthropic");
1664
+ }
1665
+ return {
1666
+ threadId: threadId || (0, import_shared10.randomUUID)()
1667
+ };
1668
+ }
1669
+ };
1670
+ __name(AnthropicAdapter, "AnthropicAdapter");
1671
+ var THINKING_TAG = "<thinking>";
1672
+ var THINKING_TAG_END = "</thinking>";
1673
+ var FilterThinkingTextBuffer = /* @__PURE__ */ __name(class FilterThinkingTextBuffer2 {
1674
+ buffer;
1675
+ didFilterThinkingTag = false;
1676
+ constructor() {
1677
+ this.buffer = "";
1678
+ }
1679
+ onTextChunk(text) {
1680
+ this.buffer += text;
1681
+ if (this.didFilterThinkingTag) {
1682
+ return text;
1683
+ }
1684
+ const potentialTag = this.buffer.slice(0, THINKING_TAG.length);
1685
+ if (THINKING_TAG.startsWith(potentialTag)) {
1686
+ if (this.buffer.includes(THINKING_TAG_END)) {
1687
+ const end = this.buffer.indexOf(THINKING_TAG_END);
1688
+ const filteredText = this.buffer.slice(end + THINKING_TAG_END.length);
1689
+ this.buffer = filteredText;
1690
+ this.didFilterThinkingTag = true;
1691
+ return filteredText;
1692
+ } else {
1693
+ return "";
1694
+ }
1695
+ }
1696
+ return text;
1697
+ }
1698
+ reset() {
1699
+ this.buffer = "";
1700
+ this.didFilterThinkingTag = false;
1701
+ }
1702
+ }, "FilterThinkingTextBuffer");
1703
+
1704
+ // src/service-adapters/experimental/ollama/ollama-adapter.ts
1705
+ var import_ollama = require("@langchain/community/llms/ollama");
1706
+ var import_shared12 = require("@copilotkit/shared");
1707
+ var DEFAULT_MODEL5 = "llama3:latest";
1708
+ var ExperimentalOllamaAdapter = class {
1709
+ model;
1710
+ provider = "ollama";
1711
+ constructor(options) {
1712
+ if (options == null ? void 0 : options.model) {
1713
+ this.model = options.model;
1714
+ } else {
1715
+ this.model = DEFAULT_MODEL5;
1716
+ }
1717
+ }
1718
+ async process(request) {
1719
+ const { messages, actions, eventSource } = request;
1720
+ const ollama = new import_ollama.Ollama({
1721
+ model: this.model
1722
+ });
1723
+ const contents = messages.filter((m) => m.isTextMessage()).map((m) => m.content);
1724
+ const _stream = await ollama.stream(contents);
1725
+ eventSource.stream(async (eventStream$) => {
1726
+ const currentMessageId = (0, import_shared12.randomId)();
1727
+ eventStream$.sendTextMessageStart({
1728
+ messageId: currentMessageId
1729
+ });
1730
+ for await (const chunkText of _stream) {
1731
+ eventStream$.sendTextMessageContent({
1732
+ messageId: currentMessageId,
1733
+ content: chunkText
1734
+ });
1735
+ }
1736
+ eventStream$.sendTextMessageEnd({
1737
+ messageId: currentMessageId
1738
+ });
1739
+ eventStream$.complete();
1740
+ });
1741
+ return {
1742
+ threadId: request.threadId || (0, import_shared12.randomUUID)()
1743
+ };
1744
+ }
1745
+ };
1746
+ __name(ExperimentalOllamaAdapter, "ExperimentalOllamaAdapter");
1747
+
1748
+ // src/service-adapters/bedrock/bedrock-adapter.ts
1749
+ var import_aws = require("@langchain/aws");
1750
+ var DEFAULT_MODEL6 = "amazon.nova-lite-v1:0";
1751
+ var BedrockAdapter = class extends LangChainAdapter {
1752
+ provider = "bedrock";
1753
+ model = DEFAULT_MODEL6;
1754
+ constructor(options) {
1755
+ super({
1756
+ chainFn: async ({ messages, tools, threadId }) => {
1757
+ this.model = (options == null ? void 0 : options.model) ?? "amazon.nova-lite-v1:0";
1758
+ const model = new import_aws.ChatBedrockConverse({
1759
+ model: this.model,
1760
+ region: (options == null ? void 0 : options.region) ?? "us-east-1",
1761
+ credentials: (options == null ? void 0 : options.credentials) ? {
1762
+ accessKeyId: options.credentials.accessKeyId,
1763
+ secretAccessKey: options.credentials.secretAccessKey
1764
+ } : void 0
1765
+ }).bindTools(tools);
1766
+ return model.stream(messages);
1767
+ }
1768
+ });
1769
+ }
1770
+ };
1771
+ __name(BedrockAdapter, "BedrockAdapter");
1772
+
1773
+ // src/service-adapters/empty/empty-adapter.ts
1774
+ var import_shared13 = require("@copilotkit/shared");
1775
+ var EmptyAdapter = class {
1776
+ async process(request) {
1777
+ return {
1778
+ threadId: request.threadId || (0, import_shared13.randomUUID)()
1779
+ };
1780
+ }
1781
+ };
1782
+ __name(EmptyAdapter, "EmptyAdapter");
1783
+ var ExperimentalEmptyAdapter = EmptyAdapter;
1784
+ // Annotate the CommonJS export names for ESM import in node:
1785
+ 0 && (module.exports = {
1786
+ AnthropicAdapter,
1787
+ BedrockAdapter,
1788
+ EmptyAdapter,
1789
+ ExperimentalEmptyAdapter,
1790
+ ExperimentalOllamaAdapter,
1791
+ GoogleGenerativeAIAdapter,
1792
+ GroqAdapter,
1793
+ LangChainAdapter,
1794
+ OpenAIAdapter,
1795
+ OpenAIAssistantAdapter,
1796
+ RemoteChain,
1797
+ UnifyAdapter,
1798
+ convertServiceAdapterError
1799
+ });
1800
+ //# sourceMappingURL=index.js.map