@copilotkit/runtime 0.37.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. package/.eslintrc.js +7 -0
  2. package/.turbo/turbo-build.log +70 -0
  3. package/CHANGELOG.md +1 -0
  4. package/__snapshots__/schema/schema.graphql +178 -0
  5. package/dist/chunk-2CCVVJDU.mjs +56 -0
  6. package/dist/chunk-2CCVVJDU.mjs.map +1 -0
  7. package/dist/chunk-4UA4RB4C.mjs +185 -0
  8. package/dist/chunk-4UA4RB4C.mjs.map +1 -0
  9. package/dist/chunk-5HGYI6EG.mjs +678 -0
  10. package/dist/chunk-5HGYI6EG.mjs.map +1 -0
  11. package/dist/chunk-7IFP53C6.mjs +169 -0
  12. package/dist/chunk-7IFP53C6.mjs.map +1 -0
  13. package/dist/chunk-BLTAUVRP.mjs +30 -0
  14. package/dist/chunk-BLTAUVRP.mjs.map +1 -0
  15. package/dist/chunk-NFCPM5AM.mjs +43 -0
  16. package/dist/chunk-NFCPM5AM.mjs.map +1 -0
  17. package/dist/chunk-XPAUPJMW.mjs +1051 -0
  18. package/dist/chunk-XPAUPJMW.mjs.map +1 -0
  19. package/dist/graphql/types/base/index.d.ts +6 -0
  20. package/dist/graphql/types/base/index.js +63 -0
  21. package/dist/graphql/types/base/index.js.map +1 -0
  22. package/dist/graphql/types/base/index.mjs +7 -0
  23. package/dist/graphql/types/base/index.mjs.map +1 -0
  24. package/dist/graphql/types/converted/index.d.ts +2 -0
  25. package/dist/graphql/types/converted/index.js +88 -0
  26. package/dist/graphql/types/converted/index.js.map +1 -0
  27. package/dist/graphql/types/converted/index.mjs +12 -0
  28. package/dist/graphql/types/converted/index.mjs.map +1 -0
  29. package/dist/index-aa091e3c.d.ts +49 -0
  30. package/dist/index-f0875df3.d.ts +197 -0
  31. package/dist/index.d.ts +15 -0
  32. package/dist/index.js +2171 -0
  33. package/dist/index.js.map +1 -0
  34. package/dist/index.mjs +49 -0
  35. package/dist/index.mjs.map +1 -0
  36. package/dist/langchain-adapter-9ce103f3.d.ts +200 -0
  37. package/dist/langserve-fd5066ee.d.ts +96 -0
  38. package/dist/lib/index.d.ts +15 -0
  39. package/dist/lib/index.js +2170 -0
  40. package/dist/lib/index.js.map +1 -0
  41. package/dist/lib/index.mjs +46 -0
  42. package/dist/lib/index.mjs.map +1 -0
  43. package/dist/lib/integrations/index.d.ts +9 -0
  44. package/dist/lib/integrations/index.js +1024 -0
  45. package/dist/lib/integrations/index.js.map +1 -0
  46. package/dist/lib/integrations/index.mjs +24 -0
  47. package/dist/lib/integrations/index.mjs.map +1 -0
  48. package/dist/lib/integrations/node-http/index.d.ts +8 -0
  49. package/dist/lib/integrations/node-http/index.js +969 -0
  50. package/dist/lib/integrations/node-http/index.js.map +1 -0
  51. package/dist/lib/integrations/node-http/index.mjs +10 -0
  52. package/dist/lib/integrations/node-http/index.mjs.map +1 -0
  53. package/dist/pages-router-b6bc6c60.d.ts +30 -0
  54. package/dist/service-adapters/index.d.ts +11 -0
  55. package/dist/service-adapters/index.js +912 -0
  56. package/dist/service-adapters/index.js.map +1 -0
  57. package/dist/service-adapters/index.mjs +18 -0
  58. package/dist/service-adapters/index.mjs.map +1 -0
  59. package/jest.config.js +5 -0
  60. package/package.json +63 -0
  61. package/scripts/generate-gql-schema.ts +13 -0
  62. package/src/graphql/inputs/action.input.ts +13 -0
  63. package/src/graphql/inputs/cloud-guardrails.input.ts +19 -0
  64. package/src/graphql/inputs/cloud.input.ts +8 -0
  65. package/src/graphql/inputs/context-property.input.ts +10 -0
  66. package/src/graphql/inputs/custom-property.input.ts +15 -0
  67. package/src/graphql/inputs/frontend.input.ts +11 -0
  68. package/src/graphql/inputs/generate-copilot-response.input.ts +22 -0
  69. package/src/graphql/inputs/message.input.ts +50 -0
  70. package/src/graphql/resolvers/copilot.resolver.ts +147 -0
  71. package/src/graphql/types/base/index.ts +10 -0
  72. package/src/graphql/types/converted/index.ts +29 -0
  73. package/src/graphql/types/copilot-response.type.ts +75 -0
  74. package/src/graphql/types/enums.ts +22 -0
  75. package/src/graphql/types/guardrails-result.type.ts +20 -0
  76. package/src/graphql/types/message-status.type.ts +40 -0
  77. package/src/graphql/types/response-status.type.ts +52 -0
  78. package/src/index.ts +2 -0
  79. package/src/lib/copilot-cloud.ts +63 -0
  80. package/src/lib/copilot-runtime.ts +261 -0
  81. package/src/lib/guardrails.ts +3 -0
  82. package/src/lib/index.ts +7 -0
  83. package/src/lib/integrations/index.ts +4 -0
  84. package/src/lib/integrations/nextjs/app-router.ts +29 -0
  85. package/src/lib/integrations/nextjs/pages-router.ts +36 -0
  86. package/src/lib/integrations/node-http/index.ts +23 -0
  87. package/src/lib/integrations/shared.ts +68 -0
  88. package/src/service-adapters/conversion.ts +47 -0
  89. package/src/service-adapters/events.ts +197 -0
  90. package/src/service-adapters/experimental/groq/groq-adapter.ts +124 -0
  91. package/src/service-adapters/experimental/ollama/ollama-adapter.ts +75 -0
  92. package/src/service-adapters/google/google-genai-adapter.ts +149 -0
  93. package/src/service-adapters/google/utils.ts +94 -0
  94. package/src/service-adapters/index.ts +6 -0
  95. package/src/service-adapters/langchain/langchain-adapter.ts +82 -0
  96. package/src/service-adapters/langchain/langserve.ts +81 -0
  97. package/src/service-adapters/langchain/types.ts +14 -0
  98. package/src/service-adapters/langchain/utils.ts +235 -0
  99. package/src/service-adapters/openai/openai-adapter.ts +142 -0
  100. package/src/service-adapters/openai/openai-assistant-adapter.ts +260 -0
  101. package/src/service-adapters/openai/utils.ts +164 -0
  102. package/src/service-adapters/service-adapter.ts +29 -0
  103. package/tsconfig.json +11 -0
  104. package/tsup.config.ts +17 -0
  105. package/typedoc.json +4 -0
@@ -0,0 +1,678 @@
1
+ import {
2
+ convertActionInputToLangChainTool,
3
+ convertMessageToLangChainMessage,
4
+ streamLangChainResponse
5
+ } from "./chunk-7IFP53C6.mjs";
6
+ import {
7
+ ActionExecutionMessage,
8
+ ResultMessage,
9
+ TextMessage
10
+ } from "./chunk-BLTAUVRP.mjs";
11
+ import {
12
+ __name
13
+ } from "./chunk-NFCPM5AM.mjs";
14
+
15
+ // src/service-adapters/openai/openai-adapter.ts
16
+ import OpenAI from "openai";
17
+
18
+ // src/service-adapters/openai/utils.ts
19
+ import { encodingForModel } from "js-tiktoken";
20
+ function limitMessagesToTokenCount(messages, tools, model, maxTokens) {
21
+ maxTokens || (maxTokens = maxTokensForOpenAIModel(model));
22
+ const result = [];
23
+ const toolsNumTokens = countToolsTokens(model, tools);
24
+ if (toolsNumTokens > maxTokens) {
25
+ throw new Error(`Too many tokens in function definitions: ${toolsNumTokens} > ${maxTokens}`);
26
+ }
27
+ maxTokens -= toolsNumTokens;
28
+ for (const message of messages) {
29
+ if (message.role === "system") {
30
+ const numTokens = countMessageTokens(model, message);
31
+ maxTokens -= numTokens;
32
+ if (maxTokens < 0) {
33
+ throw new Error("Not enough tokens for system message.");
34
+ }
35
+ }
36
+ }
37
+ let cutoff = false;
38
+ const reversedMessages = [
39
+ ...messages
40
+ ].reverse();
41
+ for (const message of reversedMessages) {
42
+ if (message.role === "system") {
43
+ result.unshift(message);
44
+ continue;
45
+ } else if (cutoff) {
46
+ continue;
47
+ }
48
+ let numTokens = countMessageTokens(model, message);
49
+ if (maxTokens < numTokens) {
50
+ cutoff = true;
51
+ continue;
52
+ }
53
+ result.unshift(message);
54
+ maxTokens -= numTokens;
55
+ }
56
+ return result;
57
+ }
58
+ __name(limitMessagesToTokenCount, "limitMessagesToTokenCount");
59
+ function maxTokensForOpenAIModel(model) {
60
+ return maxTokensByModel[model] || DEFAULT_MAX_TOKENS;
61
+ }
62
+ __name(maxTokensForOpenAIModel, "maxTokensForOpenAIModel");
63
+ var DEFAULT_MAX_TOKENS = 128e3;
64
+ var maxTokensByModel = {
65
+ // GPT-4
66
+ "gpt-4o": 128e3,
67
+ "gpt-4o-2024-05-13": 128e3,
68
+ "gpt-4-turbo": 128e3,
69
+ "gpt-4-turbo-2024-04-09": 128e3,
70
+ "gpt-4-0125-preview": 128e3,
71
+ "gpt-4-turbo-preview": 128e3,
72
+ "gpt-4-1106-preview": 128e3,
73
+ "gpt-4-vision-preview": 128e3,
74
+ "gpt-4-1106-vision-preview": 128e3,
75
+ "gpt-4-32k": 32768,
76
+ "gpt-4-32k-0613": 32768,
77
+ "gpt-4-32k-0314": 32768,
78
+ "gpt-4": 8192,
79
+ "gpt-4-0613": 8192,
80
+ "gpt-4-0314": 8192,
81
+ // GPT-3.5
82
+ "gpt-3.5-turbo-0125": 16385,
83
+ "gpt-3.5-turbo": 16385,
84
+ "gpt-3.5-turbo-1106": 16385,
85
+ "gpt-3.5-turbo-instruct": 4096,
86
+ "gpt-3.5-turbo-16k": 16385,
87
+ "gpt-3.5-turbo-0613": 4096,
88
+ "gpt-3.5-turbo-16k-0613": 16385,
89
+ "gpt-3.5-turbo-0301": 4097
90
+ };
91
+ function countToolsTokens(model, tools) {
92
+ if (tools.length === 0) {
93
+ return 0;
94
+ }
95
+ const json = JSON.stringify(tools);
96
+ return countTokens(model, json);
97
+ }
98
+ __name(countToolsTokens, "countToolsTokens");
99
+ function countMessageTokens(model, message) {
100
+ return countTokens(model, message.content || "");
101
+ }
102
+ __name(countMessageTokens, "countMessageTokens");
103
+ function countTokens(model, text) {
104
+ let enc;
105
+ try {
106
+ enc = encodingForModel(model);
107
+ } catch (e) {
108
+ enc = encodingForModel("gpt-4");
109
+ }
110
+ return enc.encode(text).length;
111
+ }
112
+ __name(countTokens, "countTokens");
113
+ function convertActionInputToOpenAITool(action) {
114
+ return {
115
+ type: "function",
116
+ function: {
117
+ name: action.name,
118
+ description: action.description,
119
+ parameters: JSON.parse(action.jsonSchema)
120
+ }
121
+ };
122
+ }
123
+ __name(convertActionInputToOpenAITool, "convertActionInputToOpenAITool");
124
+ function convertMessageToOpenAIMessage(message) {
125
+ if (message instanceof TextMessage) {
126
+ return {
127
+ role: message.role,
128
+ content: message.content
129
+ };
130
+ } else if (message instanceof ActionExecutionMessage) {
131
+ return {
132
+ role: "assistant",
133
+ tool_calls: [
134
+ {
135
+ id: message.id,
136
+ type: "function",
137
+ function: {
138
+ name: message.name,
139
+ arguments: JSON.stringify(message.arguments)
140
+ }
141
+ }
142
+ ]
143
+ };
144
+ } else if (message instanceof ResultMessage) {
145
+ return {
146
+ role: "tool",
147
+ content: message.result,
148
+ tool_call_id: message.actionExecutionId
149
+ };
150
+ }
151
+ }
152
+ __name(convertMessageToOpenAIMessage, "convertMessageToOpenAIMessage");
153
+ function convertSystemMessageToAssistantAPI(message) {
154
+ return {
155
+ ...message,
156
+ ...message.role === "system" && {
157
+ role: "assistant",
158
+ content: "THE FOLLOWING MESSAGE IS A SYSTEM MESSAGE: " + message.content
159
+ }
160
+ };
161
+ }
162
+ __name(convertSystemMessageToAssistantAPI, "convertSystemMessageToAssistantAPI");
163
+
164
+ // src/service-adapters/openai/openai-adapter.ts
165
+ var DEFAULT_MODEL = "gpt-4o";
166
+ var OpenAIAdapter = class {
167
+ model = DEFAULT_MODEL;
168
+ _openai;
169
+ get openai() {
170
+ return this._openai;
171
+ }
172
+ constructor(params) {
173
+ this._openai = (params == null ? void 0 : params.openai) || new OpenAI({});
174
+ if (params == null ? void 0 : params.model) {
175
+ this.model = params.model;
176
+ }
177
+ }
178
+ async process({ model = this.model, messages, actions, eventSource }) {
179
+ const tools = actions.map(convertActionInputToOpenAITool);
180
+ let openaiMessages = messages.map(convertMessageToOpenAIMessage);
181
+ openaiMessages = limitMessagesToTokenCount(openaiMessages, tools, model);
182
+ const stream = this.openai.beta.chat.completions.stream({
183
+ model,
184
+ stream: true,
185
+ messages: openaiMessages,
186
+ ...tools.length > 0 && {
187
+ tools
188
+ }
189
+ });
190
+ eventSource.stream(async (eventStream$) => {
191
+ var _a, _b;
192
+ let mode = null;
193
+ for await (const chunk of stream) {
194
+ const toolCall = (_a = chunk.choices[0].delta.tool_calls) == null ? void 0 : _a[0];
195
+ const content = chunk.choices[0].delta.content;
196
+ if (mode === "message" && (toolCall == null ? void 0 : toolCall.id)) {
197
+ mode = null;
198
+ eventStream$.sendTextMessageEnd();
199
+ } else if (mode === "function" && (toolCall === void 0 || (toolCall == null ? void 0 : toolCall.id))) {
200
+ mode = null;
201
+ eventStream$.sendActionExecutionEnd();
202
+ }
203
+ if (mode === null) {
204
+ if (toolCall == null ? void 0 : toolCall.id) {
205
+ mode = "function";
206
+ eventStream$.sendActionExecutionStart(toolCall.id, toolCall.function.name);
207
+ } else if (content) {
208
+ mode = "message";
209
+ eventStream$.sendTextMessageStart(chunk.id);
210
+ }
211
+ }
212
+ if (mode === "message" && content) {
213
+ eventStream$.sendTextMessageContent(content);
214
+ } else if (mode === "function" && ((_b = toolCall == null ? void 0 : toolCall.function) == null ? void 0 : _b.arguments)) {
215
+ eventStream$.sendActionExecutionArgs(toolCall.function.arguments);
216
+ }
217
+ }
218
+ if (mode === "message") {
219
+ eventStream$.sendTextMessageEnd();
220
+ } else if (mode === "function") {
221
+ eventStream$.sendActionExecutionEnd();
222
+ }
223
+ eventStream$.complete();
224
+ });
225
+ return {};
226
+ }
227
+ };
228
+ __name(OpenAIAdapter, "OpenAIAdapter");
229
+
230
+ // src/service-adapters/openai/openai-assistant-adapter.ts
231
+ import OpenAI2 from "openai";
232
+ var OpenAIAssistantAdapter = class {
233
+ openai;
234
+ codeInterpreterEnabled;
235
+ assistantId;
236
+ fileSearchEnabled;
237
+ constructor(params) {
238
+ this.openai = params.openai || new OpenAI2({});
239
+ this.codeInterpreterEnabled = params.codeInterpreterEnabled === false || true;
240
+ this.fileSearchEnabled = params.fileSearchEnabled === false || true;
241
+ this.assistantId = params.assistantId;
242
+ }
243
+ async process({ messages, actions, eventSource, threadId, runId }) {
244
+ threadId || (threadId = (await this.openai.beta.threads.create()).id);
245
+ const lastMessage = messages.at(-1);
246
+ let nextRunId = void 0;
247
+ if (lastMessage instanceof ResultMessage && runId) {
248
+ nextRunId = await this.submitToolOutputs(threadId, runId, messages, eventSource);
249
+ } else if (lastMessage instanceof TextMessage) {
250
+ nextRunId = await this.submitUserMessage(threadId, messages, actions, eventSource);
251
+ } else {
252
+ throw new Error("No actionable message found in the messages");
253
+ }
254
+ return {
255
+ threadId,
256
+ runId: nextRunId
257
+ };
258
+ }
259
+ async submitToolOutputs(threadId, runId, messages, eventSource) {
260
+ let run = await this.openai.beta.threads.runs.retrieve(threadId, runId);
261
+ if (!run.required_action) {
262
+ throw new Error("No tool outputs required");
263
+ }
264
+ const toolCallsIds = run.required_action.submit_tool_outputs.tool_calls.map((toolCall) => toolCall.id);
265
+ const resultMessages = messages.filter((message) => message instanceof ResultMessage && toolCallsIds.includes(message.actionExecutionId));
266
+ if (toolCallsIds.length != resultMessages.length) {
267
+ throw new Error("Number of function results does not match the number of tool calls");
268
+ }
269
+ const toolOutputs = resultMessages.map((message) => {
270
+ return {
271
+ tool_call_id: message.actionExecutionId,
272
+ output: message.result
273
+ };
274
+ });
275
+ const stream = this.openai.beta.threads.runs.submitToolOutputsStream(threadId, runId, {
276
+ tool_outputs: toolOutputs
277
+ });
278
+ await this.streamResponse(stream, eventSource);
279
+ return runId;
280
+ }
281
+ async submitUserMessage(threadId, messages, actions, eventSource) {
282
+ messages = [
283
+ ...messages
284
+ ];
285
+ const instructionsMessage = messages.shift();
286
+ const instructions = instructionsMessage instanceof TextMessage ? instructionsMessage.content : "";
287
+ const userMessage = messages.map(convertMessageToOpenAIMessage).map(convertSystemMessageToAssistantAPI).at(-1);
288
+ if (userMessage.role !== "user") {
289
+ throw new Error("No user message found");
290
+ }
291
+ await this.openai.beta.threads.messages.create(threadId, {
292
+ role: "user",
293
+ content: userMessage.content
294
+ });
295
+ const openaiTools = actions.map(convertActionInputToOpenAITool);
296
+ const tools = [
297
+ ...openaiTools,
298
+ ...this.codeInterpreterEnabled ? [
299
+ {
300
+ type: "code_interpreter"
301
+ }
302
+ ] : [],
303
+ ...this.fileSearchEnabled ? [
304
+ {
305
+ type: "file_search"
306
+ }
307
+ ] : []
308
+ ];
309
+ let stream = this.openai.beta.threads.runs.stream(threadId, {
310
+ assistant_id: this.assistantId,
311
+ instructions,
312
+ tools
313
+ });
314
+ await this.streamResponse(stream, eventSource);
315
+ return getRunIdFromStream(stream);
316
+ }
317
+ async streamResponse(stream, eventSource) {
318
+ eventSource.stream(async (eventStream$) => {
319
+ var _a, _b, _c, _d, _e, _f;
320
+ let inFunctionCall = false;
321
+ for await (const chunk of stream) {
322
+ switch (chunk.event) {
323
+ case "thread.message.created":
324
+ if (inFunctionCall) {
325
+ eventStream$.sendActionExecutionEnd();
326
+ }
327
+ eventStream$.sendTextMessageStart(chunk.data.id);
328
+ break;
329
+ case "thread.message.delta":
330
+ if (((_a = chunk.data.delta.content) == null ? void 0 : _a[0].type) === "text") {
331
+ eventStream$.sendTextMessageContent((_b = chunk.data.delta.content) == null ? void 0 : _b[0].text.value);
332
+ }
333
+ break;
334
+ case "thread.message.completed":
335
+ eventStream$.sendTextMessageEnd();
336
+ break;
337
+ case "thread.run.step.delta":
338
+ let toolCallId;
339
+ let toolCallName;
340
+ let toolCallArgs;
341
+ if (chunk.data.delta.step_details.type === "tool_calls" && ((_c = chunk.data.delta.step_details.tool_calls) == null ? void 0 : _c[0].type) === "function") {
342
+ toolCallId = (_d = chunk.data.delta.step_details.tool_calls) == null ? void 0 : _d[0].id;
343
+ toolCallName = (_e = chunk.data.delta.step_details.tool_calls) == null ? void 0 : _e[0].function.name;
344
+ toolCallArgs = (_f = chunk.data.delta.step_details.tool_calls) == null ? void 0 : _f[0].function.arguments;
345
+ }
346
+ if (toolCallName && toolCallId) {
347
+ if (inFunctionCall) {
348
+ eventStream$.sendActionExecutionEnd();
349
+ }
350
+ inFunctionCall = true;
351
+ eventStream$.sendActionExecutionStart(toolCallId, toolCallName);
352
+ } else if (toolCallArgs) {
353
+ eventStream$.sendActionExecutionArgs(toolCallArgs);
354
+ }
355
+ break;
356
+ }
357
+ }
358
+ if (inFunctionCall) {
359
+ eventStream$.sendActionExecutionEnd();
360
+ }
361
+ eventStream$.complete();
362
+ });
363
+ }
364
+ };
365
+ __name(OpenAIAssistantAdapter, "OpenAIAssistantAdapter");
366
+ function getRunIdFromStream(stream) {
367
+ return new Promise((resolve, reject) => {
368
+ let runIdGetter = /* @__PURE__ */ __name((event) => {
369
+ if (event.event === "thread.run.created") {
370
+ const runId = event.data.id;
371
+ stream.off("event", runIdGetter);
372
+ resolve(runId);
373
+ }
374
+ }, "runIdGetter");
375
+ stream.on("event", runIdGetter);
376
+ });
377
+ }
378
+ __name(getRunIdFromStream, "getRunIdFromStream");
379
+
380
+ // src/service-adapters/google/google-genai-adapter.ts
381
+ import { GoogleGenerativeAI } from "@google/generative-ai";
382
+
383
+ // src/service-adapters/google/utils.ts
384
+ function convertMessageToGoogleGenAIMessage(message) {
385
+ if (message instanceof TextMessage) {
386
+ const role = {
387
+ user: "user",
388
+ assistant: "model",
389
+ system: "user"
390
+ }[message.role];
391
+ const text = message.role === "system" ? "THE FOLLOWING MESSAGE IS A SYSTEM MESSAGE: " + message.content : message.content;
392
+ return {
393
+ role,
394
+ parts: [
395
+ {
396
+ text
397
+ }
398
+ ]
399
+ };
400
+ } else if (message instanceof ActionExecutionMessage) {
401
+ return {
402
+ role: "model",
403
+ parts: [
404
+ {
405
+ functionCall: {
406
+ name: message.name,
407
+ args: message.arguments
408
+ }
409
+ }
410
+ ]
411
+ };
412
+ } else if (message instanceof ResultMessage) {
413
+ return {
414
+ role: "model",
415
+ parts: [
416
+ {
417
+ functionResponse: {
418
+ name: message.actionName,
419
+ response: {
420
+ name: message.actionName,
421
+ content: tryParseJson(message.result)
422
+ }
423
+ }
424
+ }
425
+ ]
426
+ };
427
+ }
428
+ }
429
+ __name(convertMessageToGoogleGenAIMessage, "convertMessageToGoogleGenAIMessage");
430
+ function transformActionToGoogleGenAITool(action) {
431
+ const name = action.name;
432
+ const description = action.description;
433
+ const parameters = JSON.parse(action.jsonSchema);
434
+ const transformProperties = /* @__PURE__ */ __name((props) => {
435
+ for (const key in props) {
436
+ if (props[key].type) {
437
+ props[key].type = props[key].type.toUpperCase();
438
+ }
439
+ if (props[key].properties) {
440
+ transformProperties(props[key].properties);
441
+ }
442
+ }
443
+ }, "transformProperties");
444
+ transformProperties(parameters);
445
+ return {
446
+ functionDeclarations: [
447
+ {
448
+ name,
449
+ description,
450
+ parameters
451
+ }
452
+ ]
453
+ };
454
+ }
455
+ __name(transformActionToGoogleGenAITool, "transformActionToGoogleGenAITool");
456
+ function tryParseJson(str) {
457
+ if (!str) {
458
+ return "";
459
+ }
460
+ try {
461
+ return JSON.parse(str);
462
+ } catch (e) {
463
+ return str;
464
+ }
465
+ }
466
+ __name(tryParseJson, "tryParseJson");
467
+
468
+ // src/service-adapters/google/google-genai-adapter.ts
469
+ import { nanoid } from "nanoid";
470
+ var GoogleGenerativeAIAdapter = class {
471
+ model;
472
+ constructor(options) {
473
+ if (options == null ? void 0 : options.model) {
474
+ this.model = options.model;
475
+ } else {
476
+ const genAI = new GoogleGenerativeAI(process.env["GOOGLE_API_KEY"]);
477
+ this.model = genAI.getGenerativeModel({
478
+ model: "gemini-pro"
479
+ });
480
+ }
481
+ }
482
+ async process(request) {
483
+ const { messages, actions, eventSource } = request;
484
+ const history = messages.slice(1, -1).map(convertMessageToGoogleGenAIMessage);
485
+ const currentMessage = convertMessageToGoogleGenAIMessage(messages.at(-1));
486
+ if (!currentMessage) {
487
+ throw new Error("No current message");
488
+ }
489
+ let systemMessage;
490
+ const firstMessage = messages.at(0);
491
+ if (firstMessage instanceof TextMessage && firstMessage.role === "system") {
492
+ systemMessage = firstMessage.content.trim();
493
+ } else {
494
+ throw new Error("First message is not a system message");
495
+ }
496
+ const tools = actions.map(transformActionToGoogleGenAITool);
497
+ const isFirstGenGeminiPro = this.model.model === "gemini-pro" || this.model.model === "models/gemini-pro";
498
+ const chat = this.model.startChat({
499
+ history: [
500
+ ...history,
501
+ // gemini-pro does not support system instructions, so we need to add them to the history
502
+ ...isFirstGenGeminiPro ? [
503
+ {
504
+ role: "user",
505
+ parts: [
506
+ {
507
+ text: systemMessage
508
+ }
509
+ ]
510
+ }
511
+ ] : []
512
+ ],
513
+ // only gemini-1.5-pro-latest and later supports setting system instructions
514
+ ...isFirstGenGeminiPro ? {} : {
515
+ systemInstruction: {
516
+ role: "user",
517
+ parts: [
518
+ {
519
+ text: systemMessage
520
+ }
521
+ ]
522
+ }
523
+ },
524
+ tools
525
+ });
526
+ const result = await chat.sendMessageStream(currentMessage.parts);
527
+ eventSource.stream(async (eventStream$) => {
528
+ let isTextMessage = false;
529
+ for await (const chunk of result.stream) {
530
+ const chunkText = chunk.text();
531
+ if (!isTextMessage) {
532
+ isTextMessage = true;
533
+ eventStream$.sendTextMessageStart(nanoid());
534
+ }
535
+ eventStream$.sendTextMessageContent(chunkText);
536
+ }
537
+ if (isTextMessage) {
538
+ eventStream$.sendTextMessageEnd();
539
+ }
540
+ let calls = (await result.response).functionCalls();
541
+ if (calls) {
542
+ for (let call of calls) {
543
+ eventStream$.sendActionExecution(nanoid(), call.name, JSON.stringify(replaceNewlinesInObject(call.args)));
544
+ }
545
+ }
546
+ eventStream$.complete();
547
+ });
548
+ return {};
549
+ }
550
+ };
551
+ __name(GoogleGenerativeAIAdapter, "GoogleGenerativeAIAdapter");
552
+ function replaceNewlinesInObject(obj) {
553
+ if (typeof obj === "string") {
554
+ return obj.replace(/\\\\n/g, "\n");
555
+ } else if (Array.isArray(obj)) {
556
+ return obj.map(replaceNewlinesInObject);
557
+ } else if (typeof obj === "object" && obj !== null) {
558
+ const newObj = {};
559
+ for (const key in obj) {
560
+ if (obj.hasOwnProperty(key)) {
561
+ newObj[key] = replaceNewlinesInObject(obj[key]);
562
+ }
563
+ }
564
+ return newObj;
565
+ }
566
+ return obj;
567
+ }
568
+ __name(replaceNewlinesInObject, "replaceNewlinesInObject");
569
+
570
+ // src/service-adapters/langchain/langchain-adapter.ts
571
+ var LangChainAdapter = class {
572
+ options;
573
+ /**
574
+ * To use LangChain as a backend, provide a handler function to the adapter with your custom LangChain logic.
575
+ */
576
+ constructor(options) {
577
+ this.options = options;
578
+ }
579
+ async process({ eventSource, model, actions, messages, threadId, runId }) {
580
+ const result = await this.options.chainFn({
581
+ messages: messages.map(convertMessageToLangChainMessage),
582
+ tools: actions.map(convertActionInputToLangChainTool),
583
+ model,
584
+ threadId,
585
+ runId
586
+ });
587
+ eventSource.stream(async (eventStream$) => {
588
+ await streamLangChainResponse({
589
+ result,
590
+ eventStream$
591
+ });
592
+ });
593
+ return {};
594
+ }
595
+ };
596
+ __name(LangChainAdapter, "LangChainAdapter");
597
+
598
+ // src/service-adapters/langchain/langserve.ts
599
+ import { RemoteRunnable } from "langchain/runnables/remote";
600
+ var RemoteChain = class {
601
+ constructor(options) {
602
+ this.name = options.name;
603
+ this.description = options.description;
604
+ this.chainUrl = options.chainUrl;
605
+ this.parameters = options.parameters;
606
+ this.parameterType = options.parameterType || "multi";
607
+ }
608
+ async toAction() {
609
+ if (!this.parameters) {
610
+ await this.inferLangServeParameters();
611
+ }
612
+ return {
613
+ name: this.name,
614
+ description: this.description,
615
+ parameters: this.parameters,
616
+ handler: async (args) => {
617
+ const runnable = new RemoteRunnable({
618
+ url: this.chainUrl
619
+ });
620
+ let input;
621
+ if (this.parameterType === "single") {
622
+ input = args[Object.keys(args)[0]];
623
+ } else {
624
+ input = args;
625
+ }
626
+ return await runnable.invoke(input);
627
+ }
628
+ };
629
+ }
630
+ async inferLangServeParameters() {
631
+ const supportedTypes = [
632
+ "string",
633
+ "number",
634
+ "boolean"
635
+ ];
636
+ let schemaUrl = this.chainUrl.replace(/\/+$/, "") + "/input_schema";
637
+ let schema = await fetch(schemaUrl).then((res) => res.json()).catch(() => {
638
+ throw new Error("Failed to fetch langserve schema at " + schemaUrl);
639
+ });
640
+ if (supportedTypes.includes(schema.type)) {
641
+ this.parameterType = "single";
642
+ this.parameters = [
643
+ {
644
+ name: "input",
645
+ type: schema.type,
646
+ description: "The input to the chain"
647
+ }
648
+ ];
649
+ } else if (schema.type === "object") {
650
+ this.parameterType = "multi";
651
+ this.parameters = Object.keys(schema.properties).map((key) => {
652
+ var _a;
653
+ let property = schema.properties[key];
654
+ if (!supportedTypes.includes(property.type)) {
655
+ throw new Error("Unsupported schema type");
656
+ }
657
+ return {
658
+ name: key,
659
+ type: property.type,
660
+ description: property.description || "",
661
+ required: ((_a = schema.required) == null ? void 0 : _a.includes(key)) || false
662
+ };
663
+ });
664
+ } else {
665
+ throw new Error("Unsupported schema type");
666
+ }
667
+ }
668
+ };
669
+ __name(RemoteChain, "RemoteChain");
670
+
671
+ export {
672
+ OpenAIAdapter,
673
+ OpenAIAssistantAdapter,
674
+ GoogleGenerativeAIAdapter,
675
+ LangChainAdapter,
676
+ RemoteChain
677
+ };
678
+ //# sourceMappingURL=chunk-5HGYI6EG.mjs.map