@yourgpt/llm-sdk 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,4216 @@
1
+ 'use strict';
2
+
3
+ var core = require('@yourgpt/copilot-sdk/core');
4
+ var hono = require('hono');
5
+ var cors = require('hono/cors');
6
+
7
+ // src/server/runtime.ts
8
+
9
+ // src/adapters/base.ts
10
// Normalize chat messages into a flat { role, content } list, optionally
// prepending a system prompt. Null/undefined content becomes "".
function formatMessages(messages, systemPrompt) {
  const head = systemPrompt ? [{ role: "system", content: systemPrompt }] : [];
  const body = messages.map((m) => ({ role: m.role, content: m.content ?? "" }));
  return [...head, ...body];
}
23
// Recursively convert an action parameter definition into a JSON Schema
// fragment. Array item schemas and object property schemas are converted
// depth-first; description/enum are copied only when present.
function parameterToJsonSchema(param) {
  const schema = { type: param.type };
  if (param.description) schema.description = param.description;
  if (param.enum) schema.enum = param.enum;
  if (param.type === "array" && param.items) {
    schema.items = parameterToJsonSchema(param.items);
  }
  if (param.type === "object" && param.properties) {
    const properties = {};
    for (const [key, prop] of Object.entries(param.properties)) {
      properties[key] = parameterToJsonSchema(prop);
    }
    schema.properties = properties;
  }
  return schema;
}
50
// Convert action definitions into OpenAI-style tool descriptors.
// Actions without parameters get an empty object schema; parameters flagged
// `required` are collected into the schema's `required` list.
function formatTools(actions) {
  return actions.map((action) => {
    const params = action.parameters ?? {};
    const properties = {};
    const required = [];
    for (const [key, param] of Object.entries(params)) {
      properties[key] = parameterToJsonSchema(param);
      if (param.required) required.push(key);
    }
    return {
      type: "function",
      function: {
        name: action.name,
        description: action.description,
        parameters: { type: "object", properties, required }
      }
    };
  });
}
69
// True when the message metadata carries at least one image attachment.
function hasImageAttachments(message) {
  const attachments = message.metadata?.attachments;
  if (!attachments) return false;
  return attachments.some((a) => a.type === "image");
}
73
// True when the message carries media Anthropic can ingest as content
// blocks: any image, or a file attachment whose mimeType is PDF.
function hasMediaAttachments(message) {
  const attachments = message.metadata?.attachments;
  if (!attachments) return false;
  return attachments.some(
    (a) => a.type === "image" || (a.type === "file" && a.mimeType === "application/pdf")
  );
}
79
// Build an Anthropic image content block from an attachment. A URL source
// takes priority; otherwise base64 data is used with any data-URL prefix
// stripped. Returns null for non-images or when no usable source exists.
function attachmentToAnthropicImage(attachment) {
  if (attachment.type !== "image") return null;
  if (attachment.url) {
    return { type: "image", source: { type: "url", url: attachment.url } };
  }
  if (!attachment.data) return null;
  let data = attachment.data;
  if (data.startsWith("data:")) {
    const comma = data.indexOf(",");
    if (comma !== -1) data = data.slice(comma + 1);
  }
  return {
    type: "image",
    source: {
      type: "base64",
      media_type: attachment.mimeType || "image/png",
      data
    }
  };
}
107
// Build an OpenAI image_url content part from an attachment. URLs pass
// through untouched; raw base64 data is wrapped in a data URL. Returns
// null for non-images or when no usable source exists.
function attachmentToOpenAIImage(attachment) {
  if (attachment.type !== "image") return null;
  let url;
  if (attachment.url) {
    url = attachment.url;
  } else if (attachment.data) {
    url = attachment.data.startsWith("data:")
      ? attachment.data
      : `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
  } else {
    return null;
  }
  return { type: "image_url", image_url: { url, detail: "auto" } };
}
125
// Build an Anthropic document content block from a PDF file attachment.
// A URL source takes priority; otherwise base64 data is used with any
// data-URL prefix stripped. Returns null for anything that is not a PDF
// file or has no usable source.
function attachmentToAnthropicDocument(attachment) {
  const isPdf = attachment.type === "file" && attachment.mimeType === "application/pdf";
  if (!isPdf) return null;
  if (attachment.url) {
    return { type: "document", source: { type: "url", url: attachment.url } };
  }
  if (!attachment.data) return null;
  let data = attachment.data;
  if (data.startsWith("data:")) {
    const comma = data.indexOf(",");
    if (comma !== -1) data = data.slice(comma + 1);
  }
  return {
    type: "document",
    source: { type: "base64", media_type: "application/pdf", data }
  };
}
155
// Render a message as Anthropic content: a plain string when it carries no
// supported media, otherwise an array of image/document blocks followed by
// a trailing text block (when there is text).
function messageToAnthropicContent(message) {
  const text = message.content ?? "";
  if (!hasMediaAttachments(message)) return text;
  const blocks = [];
  for (const attachment of message.metadata?.attachments ?? []) {
    const block =
      attachmentToAnthropicImage(attachment) ?? attachmentToAnthropicDocument(attachment);
    if (block) blocks.push(block);
  }
  if (text) blocks.push({ type: "text", text });
  return blocks;
}
180
// Render a message as OpenAI content: a plain string when it carries no
// image attachments, otherwise an array of a leading text part (when there
// is text) followed by image_url parts.
function messageToOpenAIContent(message) {
  const text = message.content ?? "";
  if (!hasImageAttachments(message)) return text;
  const parts = [];
  if (text) parts.push({ type: "text", text });
  for (const attachment of message.metadata?.attachments ?? []) {
    const imagePart = attachmentToOpenAIImage(attachment);
    if (imagePart) parts.push(imagePart);
  }
  return parts;
}
200
// Convert OpenAI-style messages into Anthropic's { system, messages } shape.
// - system messages are dropped from the list (Anthropic takes a top-level
//   `system` string, returned here from the systemPrompt argument);
// - assistant tool_calls become tool_use content blocks;
// - consecutive "tool" messages are coalesced into a single user message of
//   tool_result blocks, as the Anthropic Messages API requires.
function formatMessagesForAnthropic(messages, systemPrompt) {
  const formatted = [];
  for (let i = 0; i < messages.length; i++) {
    const msg = messages[i];
    if (msg.role === "system") continue;
    if (msg.role === "assistant") {
      const content = [];
      if (msg.content) {
        content.push({ type: "text", text: msg.content });
      }
      if (msg.tool_calls && msg.tool_calls.length > 0) {
        for (const tc of msg.tool_calls) {
          // Fix: tolerate malformed JSON arguments (e.g. truncated streams)
          // instead of throwing mid-conversion, matching
          // convertToAnthropicMessages.
          let input = {};
          try {
            input = JSON.parse(tc.function.arguments);
          } catch {
          }
          content.push({
            type: "tool_use",
            id: tc.id,
            name: tc.function.name,
            input
          });
        }
      }
      formatted.push({
        role: "assistant",
        // A lone text block collapses to a plain string.
        content:
          content.length === 1 && content[0].type === "text"
            ? content[0].text
            : content
      });
    } else if (msg.role === "tool" && msg.tool_call_id) {
      const toolResults = [
        {
          type: "tool_result",
          tool_use_id: msg.tool_call_id,
          content: msg.content ?? ""
        }
      ];
      // Fold any directly-following tool messages into the same user turn.
      while (i + 1 < messages.length && messages[i + 1].role === "tool") {
        i++;
        const nextTool = messages[i];
        if (nextTool.tool_call_id) {
          toolResults.push({
            type: "tool_result",
            tool_use_id: nextTool.tool_call_id,
            content: nextTool.content ?? ""
          });
        }
      }
      formatted.push({ role: "user", content: toolResults });
    } else if (msg.role === "user") {
      formatted.push({ role: "user", content: messageToAnthropicContent(msg) });
    }
  }
  return {
    // Fix: the systemPrompt argument was previously ignored and `system`
    // was always "". Callers passing no prompt still get "".
    system: systemPrompt ?? "",
    messages: formatted
  };
}
259
// Convert internal messages to OpenAI chat format, optionally prepending a
// system prompt. Tool messages without a tool_call_id are dropped.
function formatMessagesForOpenAI(messages, systemPrompt) {
  const formatted = systemPrompt
    ? [{ role: "system", content: systemPrompt }]
    : [];
  for (const msg of messages) {
    switch (msg.role) {
      case "system":
        formatted.push({ role: "system", content: msg.content ?? "" });
        break;
      case "user":
        formatted.push({ role: "user", content: messageToOpenAIContent(msg) });
        break;
      case "assistant": {
        const entry = { role: "assistant", content: msg.content };
        if (msg.tool_calls && msg.tool_calls.length > 0) {
          entry.tool_calls = msg.tool_calls;
        }
        formatted.push(entry);
        break;
      }
      case "tool":
        if (msg.tool_call_id) {
          formatted.push({
            role: "tool",
            content: msg.content ?? "",
            tool_call_id: msg.tool_call_id
          });
        }
        break;
    }
  }
  return formatted;
}
291
// Adapter that streams chat completions from the OpenAI API and translates
// them into the SDK's provider-agnostic event stream.
var OpenAIAdapter = class {
  constructor(config) {
    this.provider = "openai";
    this.config = config;
    this.model = config.model || "gpt-4o";
  }
  // Lazily import and construct the OpenAI client so the dependency is only
  // loaded when this adapter is actually used.
  async getClient() {
    if (!this.client) {
      const { default: OpenAI } = await import('openai');
      this.client = new OpenAI({
        apiKey: this.config.apiKey,
        baseURL: this.config.baseUrl
      });
    }
    return this.client;
  }
  /**
   * Stream a completion as adapter events:
   *   message:start -> (message:delta | action:start/action:args)* -> message:end -> done
   * Failures are reported as a terminal { type: "error" } event rather than
   * thrown; an aborted signal ends the stream with { type: "done" }.
   */
  async *stream(request) {
    const client = await this.getClient();
    let messages;
    if (request.rawMessages && request.rawMessages.length > 0) {
      const processedMessages = request.rawMessages.map((msg) => {
        const hasAttachments = msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0;
        if (!hasAttachments) return msg;
        // Expand attachments into OpenAI multimodal content parts; only
        // image attachments are supported here, others are dropped.
        const content = [];
        if (msg.content) {
          content.push({ type: "text", text: msg.content });
        }
        for (const attachment of msg.attachments) {
          if (attachment.type !== "image") continue;
          let imageUrl;
          if (attachment.url) {
            imageUrl = attachment.url;
          } else if (attachment.data) {
            imageUrl = attachment.data.startsWith("data:")
              ? attachment.data
              : `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
          } else {
            continue;
          }
          content.push({
            type: "image_url",
            image_url: { url: imageUrl, detail: "auto" }
          });
        }
        return { ...msg, content, attachments: void 0 };
      });
      // Prepend the system prompt only when the caller did not already
      // include a system message.
      if (request.systemPrompt && !processedMessages.some((m) => m.role === "system")) {
        messages = [
          { role: "system", content: request.systemPrompt },
          ...processedMessages
        ];
      } else {
        messages = processedMessages;
      }
    } else {
      messages = formatMessagesForOpenAI(request.messages, request.systemPrompt);
    }
    const tools = request.actions?.length ? formatTools(request.actions) : void 0;
    const messageId = core.generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const stream = await client.chat.completions.create(
        {
          model: request.config?.model || this.model,
          messages,
          tools,
          temperature: request.config?.temperature ?? this.config.temperature,
          max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
          stream: true
        },
        // Fix: forward the abort signal so cancellation terminates the
        // underlying HTTP stream instead of only breaking the read loop.
        { signal: request.signal }
      );
      let currentToolCall = null;
      for await (const chunk of stream) {
        if (request.signal?.aborted) break;
        const delta = chunk.choices[0]?.delta;
        if (delta?.content) {
          yield { type: "message:delta", content: delta.content };
        }
        if (delta?.tool_calls) {
          for (const toolCall of delta.tool_calls) {
            if (toolCall.id) {
              // A new id marks the start of the next tool call; flush the
              // previous call's accumulated arguments first.
              if (currentToolCall) {
                yield {
                  type: "action:args",
                  id: currentToolCall.id,
                  args: currentToolCall.arguments
                };
              }
              currentToolCall = {
                id: toolCall.id,
                name: toolCall.function?.name || "",
                arguments: toolCall.function?.arguments || ""
              };
              yield {
                type: "action:start",
                id: currentToolCall.id,
                name: currentToolCall.name
              };
            } else if (currentToolCall && toolCall.function?.arguments) {
              currentToolCall.arguments += toolCall.function.arguments;
            }
          }
        }
        if (chunk.choices[0]?.finish_reason && currentToolCall) {
          yield {
            type: "action:args",
            id: currentToolCall.id,
            args: currentToolCall.arguments
          };
          // Fix: clear the flushed call so a trailing chunk cannot emit
          // its arguments a second time.
          currentToolCall = null;
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      // Fix: treat cancellation as a graceful end, matching the Groq and
      // Ollama adapters. The OpenAI SDK raises APIUserAbortError on abort.
      if (error.name === "AbortError" || error.name === "APIUserAbortError") {
        yield { type: "done" };
      } else {
        yield {
          type: "error",
          message: error instanceof Error ? error.message : "Unknown error",
          code: "OPENAI_ERROR"
        };
      }
    }
  }
};
424
/** Factory: build an OpenAIAdapter from the given configuration. */
function createOpenAIAdapter(config) {
  return new OpenAIAdapter(config);
}
427
// Adapter that streams messages from the Anthropic API (Claude) and
// translates them into the SDK's provider-agnostic event stream.
var AnthropicAdapter = class {
  constructor(config) {
    this.provider = "anthropic";
    this.config = config;
    this.model = config.model || "claude-3-5-sonnet-latest";
  }
  // Lazily import and construct the Anthropic client so the dependency is
  // only loaded when this adapter is actually used.
  async getClient() {
    if (!this.client) {
      const { default: Anthropic } = await import('@anthropic-ai/sdk');
      this.client = new Anthropic({
        apiKey: this.config.apiKey
      });
    }
    return this.client;
  }
  /**
   * Convert OpenAI-style messages to Anthropic format
   *
   * OpenAI format:
   * - { role: "assistant", content: "...", tool_calls: [...] }
   * - { role: "tool", tool_call_id: "...", content: "..." }
   *
   * Anthropic format:
   * - { role: "assistant", content: [{ type: "text", text: "..." }, { type: "tool_use", id: "...", name: "...", input: {...} }] }
   * - { role: "user", content: [{ type: "tool_result", tool_use_id: "...", content: "..." }] }
   */
  convertToAnthropicMessages(rawMessages) {
    const messages = [];
    const pendingToolResults = [];
    // Anthropic expects tool results as a user turn: buffer consecutive
    // "tool" messages and emit them as one user message of tool_result
    // blocks before the next assistant/user turn (and at end of input).
    const flushToolResults = () => {
      if (pendingToolResults.length === 0) return;
      messages.push({
        role: "user",
        content: pendingToolResults.map((tr) => ({
          type: "tool_result",
          tool_use_id: tr.tool_use_id,
          content: tr.content
        }))
      });
      pendingToolResults.length = 0;
    };
    // Strip a leading data-URL header ("data:...;base64,") if present,
    // leaving bare base64 as the APIs expect.
    const stripDataUrl = (data) => {
      if (data.startsWith("data:")) {
        const commaIndex = data.indexOf(",");
        if (commaIndex !== -1) return data.slice(commaIndex + 1);
      }
      return data;
    };
    for (const msg of rawMessages) {
      if (msg.role === "system") continue;
      if (msg.role === "assistant") {
        flushToolResults();
        const content = [];
        if (msg.content && typeof msg.content === "string" && msg.content.trim()) {
          content.push({ type: "text", text: msg.content });
        }
        const toolCalls = msg.tool_calls;
        if (toolCalls && toolCalls.length > 0) {
          for (const tc of toolCalls) {
            // Malformed tool arguments fall back to an empty object.
            let input = {};
            try {
              input = JSON.parse(tc.function.arguments);
            } catch {
            }
            content.push({
              type: "tool_use",
              id: tc.id,
              name: tc.function.name,
              input
            });
          }
        }
        if (content.length > 0) {
          messages.push({ role: "assistant", content });
        }
      } else if (msg.role === "tool") {
        pendingToolResults.push({
          tool_use_id: msg.tool_call_id,
          content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content)
        });
      } else if (msg.role === "user") {
        flushToolResults();
        if (msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0) {
          // Expand image and PDF attachments into content blocks.
          const content = [];
          if (msg.content && typeof msg.content === "string") {
            content.push({ type: "text", text: msg.content });
          }
          for (const attachment of msg.attachments) {
            if (attachment.type === "image") {
              if (attachment.url) {
                content.push({
                  type: "image",
                  source: { type: "url", url: attachment.url }
                });
              } else if (attachment.data) {
                content.push({
                  type: "image",
                  source: {
                    type: "base64",
                    media_type: attachment.mimeType || "image/png",
                    data: stripDataUrl(attachment.data)
                  }
                });
              }
            } else if (attachment.type === "file" && attachment.mimeType === "application/pdf") {
              if (attachment.url) {
                content.push({
                  type: "document",
                  source: { type: "url", url: attachment.url }
                });
              } else if (attachment.data) {
                content.push({
                  type: "document",
                  source: {
                    type: "base64",
                    media_type: "application/pdf",
                    data: stripDataUrl(attachment.data)
                  }
                });
              }
            }
          }
          messages.push({ role: "user", content });
        } else {
          messages.push({
            role: "user",
            content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content)
          });
        }
      }
    }
    flushToolResults();
    return messages;
  }
  /**
   * Build common request options for both streaming and non-streaming calls.
   * Prefers rawMessages (already OpenAI-shaped) over the internal message
   * list, and translates actions into Anthropic tool definitions
   * (flat { type, description, enum } parameter schemas).
   */
  buildRequestOptions(request) {
    const systemMessage = request.systemPrompt || "";
    let messages;
    if (request.rawMessages && request.rawMessages.length > 0) {
      messages = this.convertToAnthropicMessages(request.rawMessages);
    } else {
      const formatted = formatMessagesForAnthropic(request.messages);
      messages = formatted.messages;
    }
    const tools = request.actions?.map((action) => ({
      name: action.name,
      description: action.description,
      input_schema: {
        type: "object",
        properties: action.parameters ? Object.fromEntries(
          Object.entries(action.parameters).map(([key, param]) => [
            key,
            {
              type: param.type,
              description: param.description,
              enum: param.enum
            }
          ])
        ) : {},
        required: action.parameters ? Object.entries(action.parameters).filter(([, param]) => param.required).map(([key]) => key) : []
      }
    }));
    const options = {
      model: request.config?.model || this.model,
      max_tokens: request.config?.maxTokens || this.config.maxTokens || 4096,
      system: systemMessage,
      messages,
      tools: tools?.length ? tools : void 0
    };
    // Extended thinking is opt-in via the adapter config.
    if (this.config.thinking?.type === "enabled") {
      options.thinking = {
        type: "enabled",
        budget_tokens: this.config.thinking.budgetTokens || 1e4
      };
    }
    return { options, messages };
  }
  /**
   * Non-streaming completion (for debugging/comparison with original
   * studio-ai). Aggregates text, thinking, and tool_use blocks from the
   * response. SDK errors propagate to the caller.
   */
  async complete(request) {
    const client = await this.getClient();
    const { options } = this.buildRequestOptions(request);
    const nonStreamingOptions = {
      ...options,
      stream: false
    };
    // Fix: removed a no-op `try { ... } catch (error) { throw error; }`
    // wrapper that only obscured the control flow.
    const response = await client.messages.create(nonStreamingOptions);
    let content = "";
    let thinking = "";
    const toolCalls = [];
    for (const block of response.content) {
      if (block.type === "text") {
        content += block.text;
      } else if (block.type === "thinking") {
        thinking += block.thinking;
      } else if (block.type === "tool_use") {
        toolCalls.push({
          id: block.id,
          name: block.name,
          args: block.input
        });
      }
    }
    return {
      content,
      toolCalls,
      thinking: thinking || void 0,
      rawResponse: response
    };
  }
  /**
   * Stream a completion as adapter events:
   *   message:start -> (message:delta | thinking:* | action:*)* -> message:end -> done
   * Failures are reported as a terminal { type: "error" } event; an aborted
   * request ends the stream with { type: "done" }.
   */
  async *stream(request) {
    const client = await this.getClient();
    const { options } = this.buildRequestOptions(request);
    const messageId = core.generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const stream = await client.messages.stream(options);
      let currentToolUse = null;
      let isInThinkingBlock = false;
      for await (const event of stream) {
        if (request.signal?.aborted) break;
        switch (event.type) {
          case "content_block_start":
            if (event.content_block.type === "tool_use") {
              currentToolUse = {
                id: event.content_block.id,
                name: event.content_block.name,
                input: ""
              };
              yield {
                type: "action:start",
                id: currentToolUse.id,
                name: currentToolUse.name
              };
            } else if (event.content_block.type === "thinking") {
              isInThinkingBlock = true;
              yield { type: "thinking:start" };
            }
            break;
          case "content_block_delta":
            if (event.delta.type === "text_delta") {
              yield { type: "message:delta", content: event.delta.text };
            } else if (event.delta.type === "thinking_delta") {
              yield { type: "thinking:delta", content: event.delta.thinking };
            } else if (event.delta.type === "input_json_delta" && currentToolUse) {
              // Tool arguments stream as partial JSON; accumulate until the
              // block closes, then emit in one action:args event.
              currentToolUse.input += event.delta.partial_json;
            }
            break;
          case "content_block_stop":
            if (currentToolUse) {
              yield {
                type: "action:args",
                id: currentToolUse.id,
                args: currentToolUse.input
              };
              currentToolUse = null;
            }
            if (isInThinkingBlock) {
              yield { type: "thinking:end" };
              isInThinkingBlock = false;
            }
            break;
          case "message_stop":
            break;
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      // Fix: treat cancellation as a graceful end, consistent with the
      // Groq and Ollama adapters.
      if (error.name === "AbortError" || error.name === "APIUserAbortError") {
        yield { type: "done" };
      } else {
        yield {
          type: "error",
          message: error instanceof Error ? error.message : "Unknown error",
          code: "ANTHROPIC_ERROR"
        };
      }
    }
  }
};
743
/** Factory: build an AnthropicAdapter from the given configuration. */
function createAnthropicAdapter(config) {
  return new AnthropicAdapter(config);
}
746
// Adapter that streams chat completions from the Groq OpenAI-compatible
// endpoint using fetch and server-sent events (SSE).
var GroqAdapter = class {
  constructor(config) {
    this.provider = "groq";
    this.config = config;
    this.model = config.model || "llama-3.1-70b-versatile";
  }
  /**
   * Stream a completion as adapter events:
   *   message:start -> (message:delta | action:start/action:args)* -> message:end -> done
   * An aborted signal ends the stream with { type: "done" }; other failures
   * yield a terminal { type: "error" } event.
   */
  async *stream(request) {
    const messages = formatMessages(request.messages, request.systemPrompt);
    const tools = request.actions?.length ? formatTools(request.actions) : void 0;
    const messageId = core.generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const response = await fetch(
        "https://api.groq.com/openai/v1/chat/completions",
        {
          method: "POST",
          headers: {
            "Content-Type": "application/json",
            Authorization: `Bearer ${this.config.apiKey}`
          },
          body: JSON.stringify({
            model: request.config?.model || this.model,
            messages,
            tools,
            temperature: request.config?.temperature ?? this.config.temperature,
            max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
            stream: true
          }),
          signal: request.signal
        }
      );
      if (!response.ok) {
        throw new Error(`Groq API error: ${response.status}`);
      }
      if (!response.body) {
        throw new Error("No response body");
      }
      const reader = response.body.getReader();
      const decoder = new TextDecoder();
      let buffer = "";
      let currentToolCall = null;
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        // SSE frames are newline-delimited; keep any trailing partial line
        // in the buffer for the next network chunk.
        const lines = buffer.split("\n");
        buffer = lines.pop() || "";
        for (const line of lines) {
          if (!line.startsWith("data: ")) continue;
          const data = line.slice(6).trim();
          if (data === "[DONE]") continue;
          try {
            const chunk = JSON.parse(data);
            const delta = chunk.choices?.[0]?.delta;
            if (delta?.content) {
              yield { type: "message:delta", content: delta.content };
            }
            if (delta?.tool_calls) {
              for (const toolCall of delta.tool_calls) {
                if (toolCall.id) {
                  // A new id marks the start of the next tool call; flush
                  // the previous call's accumulated arguments first.
                  if (currentToolCall) {
                    yield {
                      type: "action:args",
                      id: currentToolCall.id,
                      args: currentToolCall.arguments
                    };
                  }
                  currentToolCall = {
                    id: toolCall.id,
                    name: toolCall.function?.name || "",
                    arguments: toolCall.function?.arguments || ""
                  };
                  yield {
                    type: "action:start",
                    id: currentToolCall.id,
                    name: currentToolCall.name
                  };
                } else if (currentToolCall && toolCall.function?.arguments) {
                  currentToolCall.arguments += toolCall.function.arguments;
                }
              }
            }
            if (chunk.choices?.[0]?.finish_reason && currentToolCall) {
              yield {
                type: "action:args",
                id: currentToolCall.id,
                args: currentToolCall.arguments
              };
              // Fix: clear the flushed call so a trailing chunk cannot
              // emit its arguments a second time.
              currentToolCall = null;
            }
          } catch {
            // Ignore malformed SSE payloads (best-effort streaming).
          }
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      if (error.name === "AbortError") {
        // Cancellation is a graceful end, not an error.
        yield { type: "done" };
      } else {
        yield {
          type: "error",
          message: error instanceof Error ? error.message : "Unknown error",
          code: "GROQ_ERROR"
        };
      }
    }
  }
};
854
/** Factory: build a GroqAdapter from the given configuration. */
function createGroqAdapter(config) {
  return new GroqAdapter(config);
}
857
// Adapter that streams chat responses from a local Ollama server via its
// newline-delimited JSON /api/chat endpoint. Tool calls are not supported.
var OllamaAdapter = class {
  constructor(config = {}) {
    this.provider = "ollama";
    this.config = config;
    this.model = config.model || "llama3";
    this.baseUrl = config.baseUrl || "http://localhost:11434";
  }
  /**
   * Stream a completion as adapter events:
   *   message:start -> message:delta* -> message:end -> done
   * An aborted signal ends the stream with { type: "done" }; other failures
   * yield a terminal { type: "error" } event.
   */
  async *stream(request) {
    const messages = formatMessages(request.messages, request.systemPrompt);
    const messageId = core.generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const response = await fetch(`${this.baseUrl}/api/chat`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json"
        },
        body: JSON.stringify({
          model: request.config?.model || this.model,
          messages,
          stream: true,
          options: {
            temperature: request.config?.temperature ?? this.config.temperature,
            num_predict: request.config?.maxTokens ?? this.config.maxTokens
          }
        }),
        signal: request.signal
      });
      if (!response.ok) {
        throw new Error(`Ollama API error: ${response.status}`);
      }
      if (!response.body) {
        throw new Error("No response body");
      }
      const reader = response.body.getReader();
      const decoder = new TextDecoder();
      let buffer = "";
      let finished = false;
      while (!finished) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        // Responses are newline-delimited JSON; keep any trailing partial
        // line in the buffer for the next network chunk.
        const lines = buffer.split("\n");
        buffer = lines.pop() || "";
        for (const line of lines) {
          if (!line.trim()) continue;
          try {
            const chunk = JSON.parse(line);
            if (chunk.message?.content) {
              yield { type: "message:delta", content: chunk.message.content };
            }
            if (chunk.done) {
              // Fix: the previous `break` only exited the inner line loop,
              // so the outer read loop kept polling. Stop reading entirely
              // once Ollama signals completion.
              finished = true;
              break;
            }
          } catch {
            // Ignore malformed/partial JSON lines (best-effort streaming).
          }
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      if (error.name === "AbortError") {
        // Cancellation is a graceful end, not an error.
        yield { type: "done" };
      } else {
        yield {
          type: "error",
          message: error instanceof Error ? error.message : "Unknown error",
          code: "OLLAMA_ERROR"
        };
      }
    }
  }
};
929
/** Factory: build an OllamaAdapter from the given configuration. */
function createOllamaAdapter(config) {
  return new OllamaAdapter(config);
}
932
// Convert an attachment into a Gemini inlineData part. Only inline base64
// data is supported: URL-only attachments are skipped with a warning, and
// unsupported attachment types (anything but image/audio/video) yield null.
function attachmentToGeminiPart(attachment) {
  if (!attachment.data) {
    console.warn(
      "Gemini adapter: URL-based attachments not supported, skipping"
    );
    return null;
  }
  if (attachment.type !== "image" && attachment.type !== "audio" && attachment.type !== "video") {
    return null;
  }
  let data = attachment.data;
  if (data.startsWith("data:")) {
    const commaIndex = data.indexOf(",");
    if (commaIndex !== -1) data = data.slice(commaIndex + 1);
  }
  const defaultMime =
    attachment.type === "image"
      ? "image/png"
      : attachment.type === "audio"
        ? "audio/mp3"
        : "video/mp4";
  return {
    inlineData: {
      mimeType: attachment.mimeType || defaultMime,
      data
    }
  };
}
971
// Convert an internal message into a Gemini Content object ({ role, parts }).
// Returns null for system messages (handled via systemInstruction) and for
// messages that produce no parts. Tool results become functionResponse
// parts on a user turn; assistant tool_calls become functionCall parts.
function messageToGeminiContent(msg) {
  if (msg.role === "system") return null;
  if (msg.role === "tool" && msg.tool_call_id) {
    // Non-JSON tool output is wrapped as { result: "..." }.
    let responseData;
    try {
      responseData = JSON.parse(msg.content || "{}");
    } catch {
      responseData = { result: msg.content || "" };
    }
    return {
      role: "user",
      parts: [
        {
          functionResponse: {
            name: msg.metadata?.toolName || "tool",
            response: responseData
          }
        }
      ]
    };
  }
  const parts = [];
  if (msg.content) {
    parts.push({ text: msg.content });
  }
  const attachments = msg.metadata?.attachments;
  if (Array.isArray(attachments)) {
    for (const attachment of attachments) {
      const part = attachmentToGeminiPart(attachment);
      if (part) parts.push(part);
    }
  }
  if (msg.role === "assistant" && msg.tool_calls && msg.tool_calls.length > 0) {
    for (const tc of msg.tool_calls) {
      // Malformed arguments fall back to an empty object.
      let args = {};
      try {
        args = JSON.parse(tc.function.arguments);
      } catch {
      }
      parts.push({ functionCall: { name: tc.function.name, args } });
    }
  }
  if (parts.length === 0) return null;
  return {
    role: msg.role === "assistant" ? "model" : "user",
    parts
  };
}
1023
// Convert action definitions into a Gemini tools declaration. Returns
// undefined when there are no actions. Parameter schemas are flattened to
// { type, description, enum } (nested schemas are not translated).
function formatToolsForGemini(actions) {
  if (!actions || actions.length === 0) return void 0;
  const functionDeclarations = actions.map((action) => {
    let parameters = void 0;
    if (action.parameters) {
      const properties = {};
      const required = [];
      for (const [key, param] of Object.entries(action.parameters)) {
        properties[key] = {
          type: param.type,
          description: param.description,
          enum: param.enum
        };
        if (param.required) required.push(key);
      }
      parameters = { type: "object", properties, required };
    }
    return {
      name: action.name,
      description: action.description,
      parameters
    };
  });
  return { functionDeclarations };
}
1046
/**
 * Adapter for Google Gemini models (via `@google/generative-ai`).
 *
 * Fix: in stream(), once the final `action:args` is flushed on
 * `candidate.finishReason`, `currentToolCall` is now cleared so a subsequent
 * chunk that also carries a finish reason cannot emit a duplicate
 * `action:args` event.
 */
var GoogleAdapter = class {
  constructor(config) {
    this.provider = "google";
    this.config = config;
    this.model = config.model || "gemini-2.0-flash";
  }
  /** Lazily imports the SDK and caches a configured client instance. */
  async getClient() {
    if (!this.client) {
      const { GoogleGenerativeAI } = await import('@google/generative-ai');
      this.client = new GoogleGenerativeAI(this.config.apiKey);
    }
    return this.client;
  }
  /**
   * Streams a chat completion, yielding SDK events
   * (message:start/delta/end, action:start/args, done, error).
   */
  async *stream(request) {
    const client = await this.getClient();
    const modelId = request.config?.model || this.model;
    const model = client.getGenerativeModel({
      model: modelId,
      safetySettings: this.config.safetySettings
    });
    let contents = [];
    let systemInstruction;
    if (request.rawMessages && request.rawMessages.length > 0) {
      for (const msg of request.rawMessages) {
        if (msg.role === "system") {
          // Concatenate any number of system messages into one instruction.
          systemInstruction = (systemInstruction || "") + (msg.content || "");
          continue;
        }
        const content = messageToGeminiContent(msg);
        if (content) {
          contents.push(content);
        }
      }
      // Raw messages may already carry a system message; only fall back.
      if (request.systemPrompt && !systemInstruction) {
        systemInstruction = request.systemPrompt;
      }
    } else {
      for (const msg of request.messages) {
        if (msg.role === "system") {
          systemInstruction = (systemInstruction || "") + (msg.content || "");
          continue;
        }
        const content = messageToGeminiContent(msg);
        if (content) {
          contents.push(content);
        }
      }
      // Explicit systemPrompt overrides collected system messages here.
      if (request.systemPrompt) {
        systemInstruction = request.systemPrompt;
      }
    }
    // Gemini requires the first content entry to have the "user" role.
    if (contents.length === 0 || contents[0].role !== "user") {
      contents = [{ role: "user", parts: [{ text: "" }] }, ...contents];
    }
    // Merge consecutive same-role messages; Gemini rejects adjacent duplicates.
    const mergedContents = [];
    for (const content of contents) {
      const last = mergedContents[mergedContents.length - 1];
      if (last && last.role === content.role) {
        last.parts.push(...content.parts);
      } else {
        mergedContents.push({ ...content, parts: [...content.parts] });
      }
    }
    const tools = formatToolsForGemini(request.actions);
    const messageId = core.generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const chat = model.startChat({
        history: mergedContents.slice(0, -1), // all but the last message
        systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : void 0,
        tools: tools ? [tools] : void 0,
        generationConfig: {
          temperature: request.config?.temperature ?? this.config.temperature,
          maxOutputTokens: request.config?.maxTokens ?? this.config.maxTokens
        }
      });
      const lastMessage = mergedContents[mergedContents.length - 1];
      const result = await chat.sendMessageStream(lastMessage.parts);
      let currentToolCall = null;
      for await (const chunk of result.stream) {
        if (request.signal?.aborted) {
          break;
        }
        const candidate = chunk.candidates?.[0];
        if (!candidate?.content?.parts) continue;
        for (const part of candidate.content.parts) {
          if ("text" in part && part.text) {
            yield { type: "message:delta", content: part.text };
          }
          if ("functionCall" in part && part.functionCall) {
            const fc = part.functionCall;
            const toolId = core.generateToolCallId();
            if (currentToolCall) {
              // Flush the previous call's args before starting a new one.
              yield {
                type: "action:args",
                id: currentToolCall.id,
                args: JSON.stringify(currentToolCall.args)
              };
            }
            currentToolCall = {
              id: toolId,
              name: fc.name,
              args: fc.args || {}
            };
            yield {
              type: "action:start",
              id: toolId,
              name: fc.name
            };
          }
        }
        if (candidate.finishReason && currentToolCall) {
          yield {
            type: "action:args",
            id: currentToolCall.id,
            args: JSON.stringify(currentToolCall.args)
          };
          // FIX: clear so a later finish-bearing chunk cannot re-emit args.
          currentToolCall = null;
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      yield {
        type: "error",
        message: error instanceof Error ? error.message : "Unknown error",
        code: "GOOGLE_ERROR"
      };
    }
  }
  /**
   * Non-streaming completion (optional, for debugging)
   */
  async complete(request) {
    const client = await this.getClient();
    const modelId = request.config?.model || this.model;
    const model = client.getGenerativeModel({
      model: modelId,
      safetySettings: this.config.safetySettings
    });
    let contents = [];
    let systemInstruction;
    for (const msg of request.messages) {
      if (msg.role === "system") {
        systemInstruction = (systemInstruction || "") + (msg.content || "");
        continue;
      }
      const content = messageToGeminiContent(msg);
      if (content) {
        contents.push(content);
      }
    }
    if (request.systemPrompt) {
      systemInstruction = request.systemPrompt;
    }
    if (contents.length === 0 || contents[0].role !== "user") {
      contents = [{ role: "user", parts: [{ text: "" }] }, ...contents];
    }
    const mergedContents = [];
    for (const content of contents) {
      const last = mergedContents[mergedContents.length - 1];
      if (last && last.role === content.role) {
        last.parts.push(...content.parts);
      } else {
        mergedContents.push({ ...content, parts: [...content.parts] });
      }
    }
    const tools = formatToolsForGemini(request.actions);
    const chat = model.startChat({
      history: mergedContents.slice(0, -1),
      systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : void 0,
      tools: tools ? [tools] : void 0,
      generationConfig: {
        temperature: request.config?.temperature ?? this.config.temperature,
        maxOutputTokens: request.config?.maxTokens ?? this.config.maxTokens
      }
    });
    const lastMessage = mergedContents[mergedContents.length - 1];
    const result = await chat.sendMessage(lastMessage.parts);
    const response = result.response;
    let textContent = "";
    const toolCalls = [];
    const candidate = response.candidates?.[0];
    if (candidate?.content?.parts) {
      for (const part of candidate.content.parts) {
        if ("text" in part && part.text) {
          textContent += part.text;
        }
        if ("functionCall" in part && part.functionCall) {
          toolCalls.push({
            id: core.generateToolCallId(),
            name: part.functionCall.name,
            args: part.functionCall.args || {}
          });
        }
      }
    }
    return {
      content: textContent,
      toolCalls,
      rawResponse: response
    };
  }
};
1252
/**
 * Factory helper for the Google Gemini adapter.
 * @param {object} config - GoogleAdapter configuration (apiKey, model, ...).
 * @returns {GoogleAdapter}
 */
function createGoogleAdapter(config) {
  return new GoogleAdapter(config);
}
1255
// Default xAI endpoint; the API is OpenAI wire-compatible.
var XAI_BASE_URL = "https://api.x.ai/v1";
/**
 * Adapter for xAI Grok models, driven through the OpenAI client pointed at
 * the xAI endpoint.
 *
 * Fix: in stream(), once the final `action:args` is flushed on
 * `finish_reason`, `currentToolCall` is now cleared so a subsequent chunk
 * that also reports a finish reason cannot emit a duplicate `action:args`.
 */
var XAIAdapter = class {
  constructor(config) {
    this.provider = "xai";
    this.config = config;
    this.model = config.model || "grok-2";
  }
  /** Lazily creates an OpenAI client configured for the xAI base URL. */
  async getClient() {
    if (!this.client) {
      const { default: OpenAI } = await import('openai');
      this.client = new OpenAI({
        apiKey: this.config.apiKey,
        baseURL: this.config.baseUrl || XAI_BASE_URL
      });
    }
    return this.client;
  }
  /** Streams a chat completion, yielding SDK events. */
  async *stream(request) {
    const client = await this.getClient();
    let messages;
    if (request.rawMessages && request.rawMessages.length > 0) {
      const processedMessages = request.rawMessages.map((msg) => {
        const hasAttachments = msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0;
        if (!hasAttachments) {
          return msg;
        }
        // Expand attachments into OpenAI-style multimodal content parts.
        const content = [];
        if (msg.content) {
          content.push({ type: "text", text: msg.content });
        }
        for (const attachment of msg.attachments) {
          if (attachment.type === "image") {
            let imageUrl = attachment.data;
            if (!imageUrl.startsWith("data:")) {
              imageUrl = `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
            }
            content.push({
              type: "image_url",
              image_url: { url: imageUrl, detail: "auto" }
            });
          }
        }
        return { ...msg, content, attachments: void 0 };
      });
      if (request.systemPrompt && !processedMessages.some((m) => m.role === "system")) {
        messages = [
          { role: "system", content: request.systemPrompt },
          ...processedMessages
        ];
      } else {
        messages = processedMessages;
      }
    } else {
      messages = formatMessagesForOpenAI(
        request.messages,
        request.systemPrompt
      );
    }
    const tools = request.actions?.length ? formatTools(request.actions) : void 0;
    const messageId = core.generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const stream = await client.chat.completions.create({
        model: request.config?.model || this.model,
        messages,
        tools,
        temperature: request.config?.temperature ?? this.config.temperature,
        max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
        stream: true
      });
      let currentToolCall = null;
      for await (const chunk of stream) {
        if (request.signal?.aborted) {
          break;
        }
        const delta = chunk.choices[0]?.delta;
        if (delta?.content) {
          yield { type: "message:delta", content: delta.content };
        }
        if (delta?.tool_calls) {
          for (const toolCall of delta.tool_calls) {
            if (toolCall.id) {
              // A new id marks the next call: flush the previous one first.
              if (currentToolCall) {
                yield {
                  type: "action:args",
                  id: currentToolCall.id,
                  args: currentToolCall.arguments
                };
              }
              currentToolCall = {
                id: toolCall.id,
                name: toolCall.function?.name || "",
                arguments: toolCall.function?.arguments || ""
              };
              yield {
                type: "action:start",
                id: currentToolCall.id,
                name: currentToolCall.name
              };
            } else if (currentToolCall && toolCall.function?.arguments) {
              // Continuation chunk: accumulate streamed argument text.
              currentToolCall.arguments += toolCall.function.arguments;
            }
          }
        }
        if (chunk.choices[0]?.finish_reason && currentToolCall) {
          yield {
            type: "action:args",
            id: currentToolCall.id,
            args: currentToolCall.arguments
          };
          // FIX: clear so a later finish-bearing chunk cannot re-emit args.
          currentToolCall = null;
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      yield {
        type: "error",
        message: error instanceof Error ? error.message : "Unknown error",
        code: "XAI_ERROR"
      };
    }
  }
  /**
   * Non-streaming completion (optional, for debugging)
   * NOTE(review): unlike stream(), rawMessages are forwarded without
   * attachment expansion — confirm whether that is intentional.
   */
  async complete(request) {
    const client = await this.getClient();
    let messages;
    if (request.rawMessages && request.rawMessages.length > 0) {
      messages = request.rawMessages;
      if (request.systemPrompt && !messages.some((m) => m.role === "system")) {
        messages = [
          { role: "system", content: request.systemPrompt },
          ...messages
        ];
      }
    } else {
      messages = formatMessagesForOpenAI(
        request.messages,
        request.systemPrompt
      );
    }
    const tools = request.actions?.length ? formatTools(request.actions) : void 0;
    const response = await client.chat.completions.create({
      model: request.config?.model || this.model,
      messages,
      tools,
      temperature: request.config?.temperature ?? this.config.temperature,
      max_tokens: request.config?.maxTokens ?? this.config.maxTokens
    });
    const message = response.choices[0]?.message;
    const toolCalls = (message?.tool_calls || []).map((tc) => ({
      id: tc.id,
      name: tc.function.name,
      args: JSON.parse(tc.function.arguments || "{}")
    }));
    return {
      content: message?.content || "",
      toolCalls,
      rawResponse: response
    };
  }
};
1429
/**
 * Factory helper for the xAI (Grok) adapter.
 * @param {object} config - XAIAdapter configuration (apiKey, model, baseUrl, ...).
 * @returns {XAIAdapter}
 */
function createXAIAdapter(config) {
  return new XAIAdapter(config);
}
1432
// Azure OpenAI api-version used when none is configured.
var DEFAULT_API_VERSION = "2024-08-01-preview";
/**
 * Builds the default Azure OpenAI endpoint URL for a resource/deployment pair.
 * NOTE(review): the third parameter is accepted but unused — callers pass the
 * api-version to the AzureOpenAI client separately; confirm before removing.
 */
function buildAzureEndpoint(resource, deployment, _apiVersion) {
  return `https://${resource}.openai.azure.com/openai/deployments/${deployment}`;
}
1436
/**
 * Adapter for Azure OpenAI deployments. Azure addresses models by deployment
 * name rather than model id.
 *
 * Fix: in stream(), once the final `action:args` is flushed on
 * `finish_reason`, `currentToolCall` is now cleared so a subsequent chunk
 * that also reports a finish reason cannot emit a duplicate `action:args`.
 */
var AzureAdapter = class {
  constructor(config) {
    this.provider = "azure";
    this.config = config;
    this.model = config.deploymentName;
  }
  /** Lazily creates an AzureOpenAI client for the configured deployment. */
  async getClient() {
    if (!this.client) {
      const { AzureOpenAI } = await import('openai');
      const apiVersion = this.config.apiVersion || DEFAULT_API_VERSION;
      const endpoint = this.config.baseUrl || buildAzureEndpoint(
        this.config.resourceName,
        this.config.deploymentName);
      this.client = new AzureOpenAI({
        apiKey: this.config.apiKey,
        endpoint,
        apiVersion,
        deployment: this.config.deploymentName
      });
    }
    return this.client;
  }
  /** Streams a chat completion, yielding SDK events. */
  async *stream(request) {
    const client = await this.getClient();
    let messages;
    if (request.rawMessages && request.rawMessages.length > 0) {
      const processedMessages = request.rawMessages.map((msg) => {
        const hasAttachments = msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0;
        if (!hasAttachments) {
          return msg;
        }
        // Expand attachments into OpenAI-style multimodal content parts.
        const content = [];
        if (msg.content) {
          content.push({ type: "text", text: msg.content });
        }
        for (const attachment of msg.attachments) {
          if (attachment.type === "image") {
            let imageUrl = attachment.data;
            if (!imageUrl.startsWith("data:")) {
              imageUrl = `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
            }
            content.push({
              type: "image_url",
              image_url: { url: imageUrl, detail: "auto" }
            });
          }
        }
        return { ...msg, content, attachments: void 0 };
      });
      if (request.systemPrompt && !processedMessages.some((m) => m.role === "system")) {
        messages = [
          { role: "system", content: request.systemPrompt },
          ...processedMessages
        ];
      } else {
        messages = processedMessages;
      }
    } else {
      messages = formatMessagesForOpenAI(
        request.messages,
        request.systemPrompt
      );
    }
    const tools = request.actions?.length ? formatTools(request.actions) : void 0;
    const messageId = core.generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const stream = await client.chat.completions.create({
        // Azure uses deployment name, not model name
        model: this.config.deploymentName,
        messages,
        tools,
        temperature: request.config?.temperature ?? this.config.temperature,
        max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
        stream: true
      });
      let currentToolCall = null;
      for await (const chunk of stream) {
        if (request.signal?.aborted) {
          break;
        }
        const delta = chunk.choices[0]?.delta;
        if (delta?.content) {
          yield { type: "message:delta", content: delta.content };
        }
        if (delta?.tool_calls) {
          for (const toolCall of delta.tool_calls) {
            if (toolCall.id) {
              // A new id marks the next call: flush the previous one first.
              if (currentToolCall) {
                yield {
                  type: "action:args",
                  id: currentToolCall.id,
                  args: currentToolCall.arguments
                };
              }
              currentToolCall = {
                id: toolCall.id,
                name: toolCall.function?.name || "",
                arguments: toolCall.function?.arguments || ""
              };
              yield {
                type: "action:start",
                id: currentToolCall.id,
                name: currentToolCall.name
              };
            } else if (currentToolCall && toolCall.function?.arguments) {
              // Continuation chunk: accumulate streamed argument text.
              currentToolCall.arguments += toolCall.function.arguments;
            }
          }
        }
        if (chunk.choices[0]?.finish_reason && currentToolCall) {
          yield {
            type: "action:args",
            id: currentToolCall.id,
            args: currentToolCall.arguments
          };
          // FIX: clear so a later finish-bearing chunk cannot re-emit args.
          currentToolCall = null;
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      yield {
        type: "error",
        message: error instanceof Error ? error.message : "Unknown error",
        code: "AZURE_ERROR"
      };
    }
  }
  /**
   * Non-streaming completion (optional, for debugging)
   * NOTE(review): unlike stream(), rawMessages are forwarded without
   * attachment expansion — confirm whether that is intentional.
   */
  async complete(request) {
    const client = await this.getClient();
    let messages;
    if (request.rawMessages && request.rawMessages.length > 0) {
      messages = request.rawMessages;
      if (request.systemPrompt && !messages.some((m) => m.role === "system")) {
        messages = [
          { role: "system", content: request.systemPrompt },
          ...messages
        ];
      }
    } else {
      messages = formatMessagesForOpenAI(
        request.messages,
        request.systemPrompt
      );
    }
    const tools = request.actions?.length ? formatTools(request.actions) : void 0;
    const response = await client.chat.completions.create({
      model: this.config.deploymentName,
      messages,
      tools,
      temperature: request.config?.temperature ?? this.config.temperature,
      max_tokens: request.config?.maxTokens ?? this.config.maxTokens
    });
    const message = response.choices[0]?.message;
    const toolCalls = (message?.tool_calls || []).map((tc) => ({
      id: tc.id,
      name: tc.function.name,
      args: JSON.parse(tc.function.arguments || "{}")
    }));
    return {
      content: message?.content || "",
      toolCalls,
      rawResponse: response
    };
  }
};
1616
/**
 * Factory helper for the Azure OpenAI adapter.
 * @param {object} config - AzureAdapter configuration (apiKey, resourceName, deploymentName, ...).
 * @returns {AzureAdapter}
 */
function createAzureAdapter(config) {
  return new AzureAdapter(config);
}
1619
+
1620
+ // src/server/streaming.ts
1621
/**
 * HTTP response headers for a Server-Sent Events stream.
 * @returns {Record<string, string>}
 */
function createSSEHeaders() {
  const headers = {};
  headers["Content-Type"] = "text/event-stream";
  headers["Cache-Control"] = "no-cache, no-transform";
  headers["Connection"] = "keep-alive";
  // Disable proxy (nginx) buffering so events flush immediately.
  headers["X-Accel-Buffering"] = "no";
  return headers;
}
1629
/**
 * Serializes one event as an SSE frame: `data: <json>` plus the blank line
 * that terminates the frame.
 * @param {object} event - Any JSON-serializable event payload.
 * @returns {string}
 */
function formatSSEData(event) {
  return `data: ${JSON.stringify(event)}\n\n`;
}
1634
/**
 * Wraps an async event generator in a byte ReadableStream of SSE frames.
 * Errors thrown by the generator are converted into an `error` event; the
 * stream always terminates with the `[DONE]` sentinel.
 * @param {AsyncIterable<object>} generator - Source of SDK events.
 * @returns {ReadableStream<Uint8Array>}
 */
function createEventStream(generator) {
  const encoder = new TextEncoder();
  return new ReadableStream({
    async start(controller) {
      try {
        for await (const event of generator) {
          controller.enqueue(encoder.encode(formatSSEData(event)));
        }
      } catch (error) {
        const errorEvent = {
          type: "error",
          message: error instanceof Error ? error.message : "Unknown error"
        };
        controller.enqueue(encoder.encode(formatSSEData(errorEvent)));
      } finally {
        // Always close with the SSE termination sentinel.
        controller.enqueue(encoder.encode("data: [DONE]\n\n"));
        controller.close();
      }
    }
  });
}
1656
/**
 * Builds a complete SSE HTTP Response around an async event generator.
 * @param {AsyncIterable<object>} generator - Source of SDK events.
 * @returns {Response}
 */
function createSSEResponse(generator) {
  const body = createEventStream(generator);
  return new Response(body, { headers: createSSEHeaders() });
}
1661
+
1662
+ // src/server/runtime.ts
1663
/**
 * Decides what a tool's result should look like when fed back to the model.
 * Precedence: explicit `_aiContent` on the result, then the response mode
 * (`none` / `brief` / `full`) resolved from the result, the tool definition,
 * or the "full" default. `aiContext` (from the result or the tool, possibly
 * a function of result+args) supplies a human-readable summary.
 */
function buildToolResultForAI(tool, result, args) {
  const typedResult = result;
  // Explicit multimodal AI content short-circuits everything else.
  if (typedResult?._aiContent && typedResult._aiContent.length > 0) {
    return typedResult._aiContent;
  }
  const responseMode = typedResult?._aiResponseMode ?? tool?.aiResponseMode ?? "full";
  let aiContext;
  if (typedResult?._aiContext) {
    aiContext = typedResult._aiContext;
  } else if (tool?.aiContext) {
    aiContext = typeof tool.aiContext === "function" ? tool.aiContext(typedResult, args) : tool.aiContext;
  }
  if (responseMode === "none") {
    return aiContext ?? "[Result displayed to user]";
  }
  if (responseMode === "brief") {
    return aiContext ?? `[Tool ${tool?.name ?? "unknown"} executed successfully]`;
  }
  // "full" (and any unknown mode): serialize the whole result.
  const fullData = JSON.stringify(result);
  return aiContext ? `${aiContext}\n\nFull data: ${fullData}` : fullData;
}
1688
/**
 * Converts tool-result content into OpenAI-style message content.
 * Strings pass through; arrays of parts are mapped so that `image` parts
 * become `image_url` data URIs and everything else becomes a text part.
 */
function serializeToolResultContent(content) {
  if (typeof content === "string") {
    return content;
  }
  return content.map((item) =>
    item.type === "image"
      ? {
          type: "image_url",
          image_url: { url: `data:${item.mediaType};base64,${item.data}` }
        }
      : { type: "text", text: item.text }
  );
}
1707
/**
 * Copies a request's headers into a plain object with lower-cased keys.
 * Returns an empty object when no request is provided.
 * @param {{ headers: { forEach(cb: (value: string, key: string) => void): void } } | null | undefined} request
 * @returns {Record<string, string>}
 */
function extractHeaders(request) {
  const normalized = {};
  if (!request) {
    return normalized;
  }
  request.headers.forEach((value, key) => {
    normalized[key.toLowerCase()] = value;
  });
  return normalized;
}
1715
/**
 * Assembles the context object passed to tool handlers: abort signal, thread
 * and tool-call ids, normalized request headers, a request summary (when an
 * HTTP request is available), and arbitrary caller-supplied data.
 */
function buildToolContext(toolCallId, signal, threadId, httpRequest, toolContextData) {
  const headers = extractHeaders(httpRequest);
  const requestInfo = httpRequest
    ? { method: httpRequest.method, url: httpRequest.url, headers }
    : void 0;
  return {
    signal,
    threadId,
    toolCallId,
    headers,
    request: requestInfo,
    data: toolContextData
  };
}
1730
+ var Runtime = class {
1731
+ constructor(config) {
1732
+ this.actions = /* @__PURE__ */ new Map();
1733
+ this.tools = /* @__PURE__ */ new Map();
1734
+ this.config = config;
1735
+ if ("provider" in config && config.provider) {
1736
+ this.adapter = config.provider.languageModel(config.model);
1737
+ } else if ("adapter" in config && config.adapter) {
1738
+ this.adapter = config.adapter;
1739
+ } else {
1740
+ this.adapter = this.createAdapter(config);
1741
+ }
1742
+ if (config.actions) {
1743
+ for (const action of config.actions) {
1744
+ this.actions.set(action.name, action);
1745
+ }
1746
+ }
1747
+ if (config.tools) {
1748
+ for (const tool of config.tools) {
1749
+ this.tools.set(tool.name, tool);
1750
+ }
1751
+ }
1752
+ }
1753
+ /**
1754
+ * Create LLM adapter based on config
1755
+ */
1756
+ createAdapter(config) {
1757
+ if (!("llm" in config) || !config.llm) {
1758
+ throw new Error(
1759
+ "LLM configuration is required when adapter is not provided"
1760
+ );
1761
+ }
1762
+ const { llm } = config;
1763
+ switch (llm.provider) {
1764
+ case "openai":
1765
+ return createOpenAIAdapter({
1766
+ apiKey: llm.apiKey,
1767
+ model: llm.model,
1768
+ baseUrl: llm.baseUrl,
1769
+ temperature: llm.temperature,
1770
+ maxTokens: llm.maxTokens
1771
+ });
1772
+ case "anthropic":
1773
+ return createAnthropicAdapter({
1774
+ apiKey: llm.apiKey,
1775
+ model: llm.model,
1776
+ temperature: llm.temperature,
1777
+ maxTokens: llm.maxTokens
1778
+ });
1779
+ case "groq":
1780
+ return createGroqAdapter({
1781
+ apiKey: llm.apiKey,
1782
+ model: llm.model,
1783
+ temperature: llm.temperature,
1784
+ maxTokens: llm.maxTokens
1785
+ });
1786
+ case "ollama":
1787
+ return createOllamaAdapter({
1788
+ model: llm.model,
1789
+ baseUrl: llm.baseUrl,
1790
+ temperature: llm.temperature,
1791
+ maxTokens: llm.maxTokens
1792
+ });
1793
+ default:
1794
+ return createOpenAIAdapter({
1795
+ apiKey: llm.apiKey,
1796
+ model: llm.model,
1797
+ baseUrl: llm.baseUrl,
1798
+ temperature: llm.temperature,
1799
+ maxTokens: llm.maxTokens
1800
+ });
1801
+ }
1802
+ }
1803
+ /**
1804
+ * Process a chat request and return streaming response
1805
+ */
1806
+ async *processChat(request, signal) {
1807
+ const messages = request.messages.map(
1808
+ (m, i) => core.createMessage({
1809
+ id: `msg_${i}`,
1810
+ role: m.role,
1811
+ content: m.content
1812
+ })
1813
+ );
1814
+ const allActions = [...this.actions.values()];
1815
+ if (request.actions) {
1816
+ for (const action of request.actions) {
1817
+ if (!this.actions.has(action.name)) {
1818
+ allActions.push({
1819
+ name: action.name,
1820
+ description: action.description,
1821
+ parameters: action.parameters,
1822
+ handler: async () => {
1823
+ return { handled: false };
1824
+ }
1825
+ });
1826
+ }
1827
+ }
1828
+ }
1829
+ const completionRequest = {
1830
+ messages,
1831
+ actions: allActions.length > 0 ? allActions : void 0,
1832
+ systemPrompt: request.systemPrompt || this.config.systemPrompt,
1833
+ config: request.config,
1834
+ signal
1835
+ };
1836
+ const stream = this.adapter.stream(completionRequest);
1837
+ for await (const event of stream) {
1838
+ if (event.type === "action:args") {
1839
+ const action = this.actions.get(event.id);
1840
+ if (action) {
1841
+ try {
1842
+ const args = JSON.parse(event.args);
1843
+ const result = await action.handler(args);
1844
+ yield {
1845
+ type: "action:end",
1846
+ id: event.id,
1847
+ result
1848
+ };
1849
+ } catch (error) {
1850
+ yield {
1851
+ type: "action:end",
1852
+ id: event.id,
1853
+ error: error instanceof Error ? error.message : "Action failed"
1854
+ };
1855
+ }
1856
+ } else {
1857
+ yield event;
1858
+ }
1859
+ } else {
1860
+ yield event;
1861
+ }
1862
+ }
1863
+ }
1864
+ /**
1865
+ * Handle HTTP request (for use with any framework)
1866
+ */
1867
+ async handleRequest(request) {
1868
+ try {
1869
+ const body = await request.json();
1870
+ if (this.config.debug) {
1871
+ console.log("[Copilot SDK] Request:", JSON.stringify(body, null, 2));
1872
+ }
1873
+ const signal = request.signal;
1874
+ const hasTools = body.tools && body.tools.length > 0 || this.tools.size > 0;
1875
+ const useAgentLoop = hasTools || this.config.agentLoop?.enabled;
1876
+ if (body.streaming === false) {
1877
+ return this.handleNonStreamingRequest(
1878
+ body,
1879
+ signal,
1880
+ useAgentLoop || false,
1881
+ request
1882
+ );
1883
+ }
1884
+ const generator = useAgentLoop ? this.processChatWithLoop(body, signal, void 0, void 0, request) : this.processChat(body, signal);
1885
+ return createSSEResponse(generator);
1886
+ } catch (error) {
1887
+ console.error("[Copilot SDK] Error:", error);
1888
+ return new Response(
1889
+ JSON.stringify({
1890
+ error: error instanceof Error ? error.message : "Unknown error"
1891
+ }),
1892
+ {
1893
+ status: 500,
1894
+ headers: { "Content-Type": "application/json" }
1895
+ }
1896
+ );
1897
+ }
1898
+ }
1899
+ /**
1900
+ * Handle non-streaming request - returns JSON instead of SSE
1901
+ */
1902
+ async handleNonStreamingRequest(body, signal, useAgentLoop, httpRequest) {
1903
+ try {
1904
+ const generator = useAgentLoop ? this.processChatWithLoop(
1905
+ body,
1906
+ signal,
1907
+ void 0,
1908
+ void 0,
1909
+ httpRequest
1910
+ ) : this.processChat(body, signal);
1911
+ const events = [];
1912
+ let content = "";
1913
+ const toolCalls = [];
1914
+ const toolResults = [];
1915
+ let messages;
1916
+ let requiresAction = false;
1917
+ let error;
1918
+ for await (const event of generator) {
1919
+ events.push(event);
1920
+ switch (event.type) {
1921
+ case "message:delta":
1922
+ content += event.content;
1923
+ break;
1924
+ case "action:start":
1925
+ toolCalls.push({ id: event.id, name: event.name, args: {} });
1926
+ break;
1927
+ case "action:args":
1928
+ const tc = toolCalls.find((t) => t.id === event.id);
1929
+ if (tc) {
1930
+ try {
1931
+ tc.args = JSON.parse(event.args || "{}");
1932
+ } catch {
1933
+ tc.args = {};
1934
+ }
1935
+ }
1936
+ break;
1937
+ case "action:end":
1938
+ toolResults.push({
1939
+ id: event.id,
1940
+ result: event.result || event.error
1941
+ });
1942
+ break;
1943
+ case "tool_calls":
1944
+ break;
1945
+ case "done":
1946
+ messages = event.messages;
1947
+ requiresAction = event.requiresAction || false;
1948
+ break;
1949
+ case "error":
1950
+ error = { message: event.message, code: event.code };
1951
+ break;
1952
+ }
1953
+ }
1954
+ const response = {
1955
+ success: !error,
1956
+ content,
1957
+ toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
1958
+ toolResults: toolResults.length > 0 ? toolResults : void 0,
1959
+ messages,
1960
+ requiresAction,
1961
+ error,
1962
+ // Include raw events for debugging
1963
+ _events: this.config.debug ? events : void 0
1964
+ };
1965
+ console.log("[Copilot SDK] Non-streaming response:", {
1966
+ contentLength: content.length,
1967
+ toolCalls: toolCalls.length,
1968
+ toolResults: toolResults.length,
1969
+ messagesCount: messages?.length,
1970
+ requiresAction,
1971
+ hasError: !!error
1972
+ });
1973
+ return new Response(JSON.stringify(response), {
1974
+ status: error ? 500 : 200,
1975
+ headers: {
1976
+ "Content-Type": "application/json",
1977
+ "Access-Control-Allow-Origin": "*"
1978
+ }
1979
+ });
1980
+ } catch (err) {
1981
+ console.error("[Copilot SDK] Non-streaming error:", err);
1982
+ return new Response(
1983
+ JSON.stringify({
1984
+ success: false,
1985
+ error: {
1986
+ message: err instanceof Error ? err.message : "Unknown error"
1987
+ }
1988
+ }),
1989
+ {
1990
+ status: 500,
1991
+ headers: { "Content-Type": "application/json" }
1992
+ }
1993
+ );
1994
+ }
1995
+ }
1996
+ /**
1997
+ * Get registered actions
1998
+ */
1999
+ getActions() {
2000
+ return [...this.actions.values()];
2001
+ }
2002
+ /**
2003
+ * Register a new action
2004
+ */
2005
+ registerAction(action) {
2006
+ this.actions.set(action.name, action);
2007
+ }
2008
+ /**
2009
+ * Unregister an action
2010
+ */
2011
+ unregisterAction(name) {
2012
+ this.actions.delete(name);
2013
+ }
2014
+ /**
2015
+ * Register a new tool
2016
+ */
2017
+ registerTool(tool) {
2018
+ this.tools.set(tool.name, tool);
2019
+ }
2020
+ /**
2021
+ * Unregister a tool
2022
+ */
2023
+ unregisterTool(name) {
2024
+ this.tools.delete(name);
2025
+ }
2026
+ /**
2027
+ * Get registered tools
2028
+ */
2029
+ getTools() {
2030
+ return [...this.tools.values()];
2031
+ }
2032
+ /**
2033
+ * Get the AI provider name from config
2034
+ */
2035
+ getProviderName() {
2036
+ if ("provider" in this.config && this.config.provider) {
2037
+ return this.config.provider.name;
2038
+ }
2039
+ if ("llm" in this.config && this.config.llm) {
2040
+ return this.config.llm.provider;
2041
+ }
2042
+ return "openai";
2043
+ }
2044
+ /**
2045
+ * Get the AI provider instance (if using provider config)
2046
+ */
2047
+ getProvider() {
2048
+ if ("provider" in this.config && this.config.provider) {
2049
+ return this.config.provider;
2050
+ }
2051
+ return null;
2052
+ }
2053
+ /**
2054
+ * Get the current model ID
2055
+ */
2056
+ getModel() {
2057
+ if ("provider" in this.config && this.config.provider) {
2058
+ return this.config.model;
2059
+ }
2060
+ if ("llm" in this.config && this.config.llm) {
2061
+ return this.config.llm.model || "unknown";
2062
+ }
2063
+ return this.adapter.model;
2064
+ }
2065
+ /**
2066
+ * Process a chat request with tool support (Vercel AI SDK pattern)
2067
+ *
2068
+ * This method:
2069
+ * 1. Streams response from adapter
2070
+ * 2. Detects tool calls from streaming events
2071
+ * 3. Server-side tools are executed immediately
2072
+ * 4. Client-side tool calls are yielded for client to execute
2073
+ * 5. Loop continues until no more tool calls or max iterations reached
2074
+ * 6. Returns all new messages in the done event for client to append
2075
+ */
2076
+ async *processChatWithLoop(request, signal, _accumulatedMessages, _isRecursive, _httpRequest) {
2077
+ const debug = this.config.debug || this.config.agentLoop?.debug;
2078
+ if (request.streaming === false) {
2079
+ if (debug) {
2080
+ console.log("[Copilot SDK] Using non-streaming mode");
2081
+ }
2082
+ for await (const event of this.processChatWithLoopNonStreaming(
2083
+ request,
2084
+ signal,
2085
+ _accumulatedMessages,
2086
+ _isRecursive,
2087
+ _httpRequest
2088
+ )) {
2089
+ yield event;
2090
+ }
2091
+ return;
2092
+ }
2093
+ const newMessages = _accumulatedMessages || [];
2094
+ this.config.agentLoop?.maxIterations || 20;
2095
+ const allTools = [...this.tools.values()];
2096
+ if (request.tools) {
2097
+ for (const tool of request.tools) {
2098
+ allTools.push({
2099
+ name: tool.name,
2100
+ description: tool.description,
2101
+ location: "client",
2102
+ inputSchema: tool.inputSchema
2103
+ });
2104
+ }
2105
+ }
2106
+ if (debug) {
2107
+ console.log(
2108
+ `[Copilot SDK] Processing chat with ${allTools.length} tools`
2109
+ );
2110
+ for (let i = 0; i < request.messages.length; i++) {
2111
+ const msg = request.messages[i];
2112
+ const hasAttachments = msg.attachments && msg.attachments.length > 0;
2113
+ if (hasAttachments) {
2114
+ console.log(
2115
+ `[Copilot SDK] Message ${i} (${msg.role}) has ${msg.attachments.length} attachments:`,
2116
+ msg.attachments.map((a) => ({
2117
+ type: a.type,
2118
+ mimeType: a.mimeType,
2119
+ dataLength: a.data?.length || 0
2120
+ }))
2121
+ );
2122
+ }
2123
+ }
2124
+ }
2125
+ const systemPrompt = request.systemPrompt || this.config.systemPrompt || "";
2126
+ let accumulatedText = "";
2127
+ const toolCalls = [];
2128
+ let currentToolCall = null;
2129
+ const completionRequest = {
2130
+ messages: [],
2131
+ // Not used when rawMessages is provided
2132
+ rawMessages: request.messages,
2133
+ actions: this.convertToolsToActions(allTools),
2134
+ systemPrompt,
2135
+ config: request.config,
2136
+ signal
2137
+ };
2138
+ const stream = this.adapter.stream(completionRequest);
2139
+ for await (const event of stream) {
2140
+ switch (event.type) {
2141
+ case "message:start":
2142
+ case "message:end":
2143
+ yield event;
2144
+ break;
2145
+ case "message:delta":
2146
+ accumulatedText += event.content;
2147
+ yield event;
2148
+ break;
2149
+ case "action:start":
2150
+ currentToolCall = { id: event.id, name: event.name, args: "" };
2151
+ if (debug) {
2152
+ console.log(`[Copilot SDK] Tool call started: ${event.name}`);
2153
+ }
2154
+ yield event;
2155
+ break;
2156
+ case "action:args":
2157
+ if (currentToolCall) {
2158
+ try {
2159
+ const parsedArgs = JSON.parse(event.args || "{}");
2160
+ if (debug) {
2161
+ console.log(
2162
+ `[Copilot SDK] Tool args for ${currentToolCall.name}:`,
2163
+ parsedArgs
2164
+ );
2165
+ }
2166
+ toolCalls.push({
2167
+ id: currentToolCall.id,
2168
+ name: currentToolCall.name,
2169
+ args: parsedArgs
2170
+ });
2171
+ } catch (e) {
2172
+ console.error(
2173
+ "[Copilot SDK] Failed to parse tool args:",
2174
+ event.args,
2175
+ e
2176
+ );
2177
+ toolCalls.push({
2178
+ id: currentToolCall.id,
2179
+ name: currentToolCall.name,
2180
+ args: {}
2181
+ });
2182
+ }
2183
+ currentToolCall = null;
2184
+ }
2185
+ yield event;
2186
+ break;
2187
+ case "error":
2188
+ yield event;
2189
+ return;
2190
+ // Exit on error
2191
+ case "done":
2192
+ break;
2193
+ default:
2194
+ yield event;
2195
+ }
2196
+ }
2197
+ if (toolCalls.length > 0) {
2198
+ if (debug) {
2199
+ console.log(
2200
+ `[Copilot SDK] Detected ${toolCalls.length} tool calls:`,
2201
+ toolCalls.map((t) => t.name)
2202
+ );
2203
+ }
2204
+ const serverToolCalls = [];
2205
+ const clientToolCalls = [];
2206
+ for (const tc of toolCalls) {
2207
+ const tool = allTools.find((t) => t.name === tc.name);
2208
+ if (tool?.location === "server" && tool.handler) {
2209
+ serverToolCalls.push(tc);
2210
+ } else {
2211
+ clientToolCalls.push(tc);
2212
+ }
2213
+ }
2214
+ const serverToolResults = [];
2215
+ const toolContextData = "toolContext" in this.config ? this.config.toolContext : void 0;
2216
+ for (const tc of serverToolCalls) {
2217
+ const tool = allTools.find((t) => t.name === tc.name);
2218
+ if (tool?.handler) {
2219
+ if (debug) {
2220
+ console.log(`[Copilot SDK] Executing server-side tool: ${tc.name}`);
2221
+ }
2222
+ const toolContext = buildToolContext(
2223
+ tc.id,
2224
+ signal,
2225
+ request.threadId,
2226
+ _httpRequest,
2227
+ toolContextData
2228
+ );
2229
+ try {
2230
+ const result = await tool.handler(tc.args, toolContext);
2231
+ serverToolResults.push({
2232
+ id: tc.id,
2233
+ name: tc.name,
2234
+ args: tc.args,
2235
+ result,
2236
+ tool
2237
+ });
2238
+ yield {
2239
+ type: "action:end",
2240
+ id: tc.id,
2241
+ result
2242
+ };
2243
+ } catch (error) {
2244
+ const errorResult = {
2245
+ success: false,
2246
+ error: error instanceof Error ? error.message : "Tool execution failed"
2247
+ };
2248
+ serverToolResults.push({
2249
+ id: tc.id,
2250
+ name: tc.name,
2251
+ args: tc.args,
2252
+ result: errorResult,
2253
+ tool
2254
+ });
2255
+ yield {
2256
+ type: "action:end",
2257
+ id: tc.id,
2258
+ error: error instanceof Error ? error.message : "Tool execution failed"
2259
+ };
2260
+ }
2261
+ }
2262
+ }
2263
+ if (serverToolResults.length > 0) {
2264
+ if (debug) {
2265
+ console.log(
2266
+ `[Copilot SDK] Server tools executed, continuing conversation...`
2267
+ );
2268
+ }
2269
+ const assistantWithToolCalls = {
2270
+ role: "assistant",
2271
+ content: accumulatedText || null,
2272
+ tool_calls: serverToolCalls.map((tc) => ({
2273
+ id: tc.id,
2274
+ type: "function",
2275
+ function: {
2276
+ name: tc.name,
2277
+ arguments: JSON.stringify(tc.args)
2278
+ }
2279
+ }))
2280
+ };
2281
+ const toolResultMessages = serverToolResults.map(
2282
+ (tr) => {
2283
+ const aiContent = buildToolResultForAI(tr.tool, tr.result, tr.args);
2284
+ const content = typeof aiContent === "string" ? aiContent : JSON.stringify(serializeToolResultContent(aiContent));
2285
+ return {
2286
+ role: "tool",
2287
+ content,
2288
+ tool_call_id: tr.id
2289
+ };
2290
+ }
2291
+ );
2292
+ newMessages.push(assistantWithToolCalls);
2293
+ newMessages.push(...toolResultMessages);
2294
+ const messagesWithResults = [
2295
+ ...request.messages,
2296
+ assistantWithToolCalls,
2297
+ ...toolResultMessages
2298
+ ];
2299
+ const nextRequest = {
2300
+ ...request,
2301
+ messages: messagesWithResults
2302
+ };
2303
+ for await (const event of this.processChatWithLoop(
2304
+ nextRequest,
2305
+ signal,
2306
+ newMessages,
2307
+ true,
2308
+ // Mark as recursive
2309
+ _httpRequest
2310
+ )) {
2311
+ yield event;
2312
+ }
2313
+ return;
2314
+ }
2315
+ if (clientToolCalls.length > 0) {
2316
+ const assistantMessage = {
2317
+ role: "assistant",
2318
+ content: accumulatedText || null,
2319
+ tool_calls: clientToolCalls.map((tc) => ({
2320
+ id: tc.id,
2321
+ type: "function",
2322
+ function: {
2323
+ name: tc.name,
2324
+ arguments: JSON.stringify(tc.args)
2325
+ }
2326
+ }))
2327
+ };
2328
+ newMessages.push(assistantMessage);
2329
+ yield {
2330
+ type: "tool_calls",
2331
+ toolCalls: clientToolCalls,
2332
+ assistantMessage
2333
+ };
2334
+ yield {
2335
+ type: "done",
2336
+ requiresAction: true,
2337
+ messages: newMessages
2338
+ };
2339
+ return;
2340
+ }
2341
+ }
2342
+ if (accumulatedText) {
2343
+ newMessages.push({
2344
+ role: "assistant",
2345
+ content: accumulatedText
2346
+ });
2347
+ }
2348
+ if (debug) {
2349
+ console.log(
2350
+ `[Copilot SDK] Stream complete, returning ${newMessages.length} new messages`
2351
+ );
2352
+ }
2353
+ yield {
2354
+ type: "done",
2355
+ messages: newMessages.length > 0 ? newMessages : void 0
2356
+ };
2357
+ }
2358
+ /**
2359
+ * Non-streaming agent loop implementation
2360
+ *
2361
+ * Uses adapter.complete() instead of stream() for:
2362
+ * - Better comparison with original studio-ai behavior
2363
+ * - Easier debugging (full response at once)
2364
+ * - More predictable retry behavior
2365
+ */
2366
+ async *processChatWithLoopNonStreaming(request, signal, _accumulatedMessages, _isRecursive, _httpRequest) {
2367
+ const newMessages = _accumulatedMessages || [];
2368
+ const debug = this.config.debug || this.config.agentLoop?.debug;
2369
+ const maxIterations = this.config.agentLoop?.maxIterations || 20;
2370
+ const allTools = [...this.tools.values()];
2371
+ if (request.tools) {
2372
+ for (const tool of request.tools) {
2373
+ allTools.push({
2374
+ name: tool.name,
2375
+ description: tool.description,
2376
+ location: "client",
2377
+ inputSchema: tool.inputSchema
2378
+ });
2379
+ }
2380
+ }
2381
+ const systemPrompt = request.systemPrompt || this.config.systemPrompt || "";
2382
+ let iteration = 0;
2383
+ let conversationMessages = request.messages;
2384
+ while (iteration < maxIterations) {
2385
+ iteration++;
2386
+ if (debug) {
2387
+ console.log(`[Copilot SDK] Iteration ${iteration}/${maxIterations}`);
2388
+ }
2389
+ if (signal?.aborted) {
2390
+ yield {
2391
+ type: "error",
2392
+ message: "Aborted",
2393
+ code: "ABORTED"
2394
+ };
2395
+ return;
2396
+ }
2397
+ if (!this.adapter.complete) {
2398
+ if (debug) {
2399
+ console.log(
2400
+ "[Copilot SDK] Adapter does not support non-streaming, falling back to streaming"
2401
+ );
2402
+ }
2403
+ const streamingRequest = { ...request, streaming: true };
2404
+ for await (const event of this.processChatWithLoop(
2405
+ streamingRequest,
2406
+ signal,
2407
+ _accumulatedMessages,
2408
+ _isRecursive,
2409
+ _httpRequest
2410
+ )) {
2411
+ yield event;
2412
+ }
2413
+ return;
2414
+ }
2415
+ const completionRequest = {
2416
+ messages: [],
2417
+ rawMessages: conversationMessages,
2418
+ actions: this.convertToolsToActions(allTools),
2419
+ systemPrompt,
2420
+ config: request.config,
2421
+ signal
2422
+ };
2423
+ try {
2424
+ const result = await this.adapter.complete(completionRequest);
2425
+ if (debug) {
2426
+ console.log(
2427
+ `[Copilot SDK] Got response: ${result.content.length} chars, ${result.toolCalls.length} tool calls`
2428
+ );
2429
+ }
2430
+ yield { type: "message:start", id: `msg_${Date.now()}` };
2431
+ if (result.content) {
2432
+ yield {
2433
+ type: "message:delta",
2434
+ content: result.content
2435
+ };
2436
+ }
2437
+ yield { type: "message:end" };
2438
+ if (result.toolCalls.length > 0) {
2439
+ const serverToolCalls = [];
2440
+ const clientToolCalls = [];
2441
+ for (const tc of result.toolCalls) {
2442
+ const tool = allTools.find((t) => t.name === tc.name);
2443
+ if (tool?.location === "server" && tool.handler) {
2444
+ serverToolCalls.push(tc);
2445
+ } else {
2446
+ clientToolCalls.push({
2447
+ id: tc.id,
2448
+ name: tc.name,
2449
+ args: tc.args
2450
+ });
2451
+ }
2452
+ }
2453
+ for (const tc of result.toolCalls) {
2454
+ yield {
2455
+ type: "action:start",
2456
+ id: tc.id,
2457
+ name: tc.name
2458
+ };
2459
+ yield {
2460
+ type: "action:args",
2461
+ id: tc.id,
2462
+ args: JSON.stringify(tc.args)
2463
+ };
2464
+ }
2465
+ const serverToolResults = [];
2466
+ const toolContextData = "toolContext" in this.config ? this.config.toolContext : void 0;
2467
+ for (const tc of serverToolCalls) {
2468
+ const tool = allTools.find((t) => t.name === tc.name);
2469
+ if (tool?.handler) {
2470
+ if (debug) {
2471
+ console.log(`[Copilot SDK] Executing tool: ${tc.name}`);
2472
+ }
2473
+ const toolContext = buildToolContext(
2474
+ tc.id,
2475
+ signal,
2476
+ request.threadId,
2477
+ _httpRequest,
2478
+ toolContextData
2479
+ );
2480
+ try {
2481
+ const toolResult = await tool.handler(tc.args, toolContext);
2482
+ serverToolResults.push({
2483
+ id: tc.id,
2484
+ name: tc.name,
2485
+ args: tc.args,
2486
+ result: toolResult,
2487
+ tool
2488
+ });
2489
+ yield {
2490
+ type: "action:end",
2491
+ id: tc.id,
2492
+ result: toolResult
2493
+ };
2494
+ } catch (error) {
2495
+ const errorResult = {
2496
+ success: false,
2497
+ error: error instanceof Error ? error.message : "Tool execution failed"
2498
+ };
2499
+ serverToolResults.push({
2500
+ id: tc.id,
2501
+ name: tc.name,
2502
+ args: tc.args,
2503
+ result: errorResult,
2504
+ tool
2505
+ });
2506
+ yield {
2507
+ type: "action:end",
2508
+ id: tc.id,
2509
+ error: error instanceof Error ? error.message : "Tool execution failed"
2510
+ };
2511
+ }
2512
+ }
2513
+ }
2514
+ if (serverToolResults.length > 0) {
2515
+ const assistantWithToolCalls = {
2516
+ role: "assistant",
2517
+ content: result.content || null,
2518
+ tool_calls: result.toolCalls.map((tc) => ({
2519
+ id: tc.id,
2520
+ type: "function",
2521
+ function: {
2522
+ name: tc.name,
2523
+ arguments: JSON.stringify(tc.args)
2524
+ }
2525
+ }))
2526
+ };
2527
+ const toolResultMessages = serverToolResults.map((tr) => {
2528
+ const aiContent = buildToolResultForAI(
2529
+ tr.tool,
2530
+ tr.result,
2531
+ tr.args
2532
+ );
2533
+ const content = typeof aiContent === "string" ? aiContent : JSON.stringify(serializeToolResultContent(aiContent));
2534
+ return {
2535
+ role: "tool",
2536
+ content,
2537
+ tool_call_id: tr.id
2538
+ };
2539
+ });
2540
+ newMessages.push(assistantWithToolCalls);
2541
+ newMessages.push(...toolResultMessages);
2542
+ conversationMessages = [
2543
+ ...conversationMessages,
2544
+ assistantWithToolCalls,
2545
+ ...toolResultMessages
2546
+ ];
2547
+ continue;
2548
+ }
2549
+ if (clientToolCalls.length > 0) {
2550
+ const assistantMessage = {
2551
+ role: "assistant",
2552
+ content: result.content || null,
2553
+ tool_calls: clientToolCalls.map((tc) => ({
2554
+ id: tc.id,
2555
+ type: "function",
2556
+ function: {
2557
+ name: tc.name,
2558
+ arguments: JSON.stringify(tc.args)
2559
+ }
2560
+ }))
2561
+ };
2562
+ newMessages.push(assistantMessage);
2563
+ yield {
2564
+ type: "tool_calls",
2565
+ toolCalls: clientToolCalls,
2566
+ assistantMessage
2567
+ };
2568
+ yield {
2569
+ type: "done",
2570
+ requiresAction: true,
2571
+ messages: newMessages
2572
+ };
2573
+ return;
2574
+ }
2575
+ }
2576
+ if (result.content) {
2577
+ newMessages.push({
2578
+ role: "assistant",
2579
+ content: result.content
2580
+ });
2581
+ }
2582
+ if (debug) {
2583
+ console.log(`[Copilot SDK] Complete after ${iteration} iterations`);
2584
+ }
2585
+ yield {
2586
+ type: "done",
2587
+ messages: newMessages.length > 0 ? newMessages : void 0
2588
+ };
2589
+ return;
2590
+ } catch (error) {
2591
+ yield {
2592
+ type: "error",
2593
+ message: error instanceof Error ? error.message : "Unknown error",
2594
+ code: "COMPLETION_ERROR"
2595
+ };
2596
+ return;
2597
+ }
2598
+ }
2599
+ if (debug) {
2600
+ console.log(`[Copilot SDK] Max iterations (${maxIterations}) reached`);
2601
+ }
2602
+ yield {
2603
+ type: "done",
2604
+ messages: newMessages.length > 0 ? newMessages : void 0
2605
+ };
2606
+ }
2607
+ /**
2608
+ * Convert tools to legacy action format (for adapter compatibility)
2609
+ */
2610
+ convertToolsToActions(tools) {
2611
+ return tools.map((tool) => ({
2612
+ name: tool.name,
2613
+ description: tool.description,
2614
+ parameters: this.convertInputSchemaToParameters(tool.inputSchema),
2615
+ handler: tool.handler || (async () => ({ handled: false }))
2616
+ }));
2617
+ }
2618
+ /**
2619
+ * Convert JSON Schema property to ActionParameter format recursively
2620
+ */
2621
+ convertSchemaProperty(prop) {
2622
+ const p = prop;
2623
+ const typeMap = {
2624
+ string: "string",
2625
+ number: "number",
2626
+ integer: "number",
2627
+ boolean: "boolean",
2628
+ object: "object",
2629
+ array: "array"
2630
+ };
2631
+ const result = {
2632
+ type: typeMap[p.type || "string"] || "string"
2633
+ };
2634
+ if (p.description) {
2635
+ result.description = p.description;
2636
+ }
2637
+ if (p.enum) {
2638
+ result.enum = p.enum;
2639
+ }
2640
+ if (p.type === "array" && p.items) {
2641
+ result.items = this.convertSchemaProperty(p.items);
2642
+ }
2643
+ if (p.type === "object" && p.properties) {
2644
+ result.properties = Object.fromEntries(
2645
+ Object.entries(p.properties).map(([key, val]) => [
2646
+ key,
2647
+ this.convertSchemaProperty(val)
2648
+ ])
2649
+ );
2650
+ }
2651
+ return result;
2652
+ }
2653
+ /**
2654
+ * Convert JSON Schema to legacy parameters format
2655
+ */
2656
+ convertInputSchemaToParameters(schema) {
2657
+ const parameters = {};
2658
+ for (const [name, prop] of Object.entries(schema.properties)) {
2659
+ const converted = this.convertSchemaProperty(prop);
2660
+ parameters[name] = {
2661
+ ...converted,
2662
+ required: schema.required?.includes(name)
2663
+ };
2664
+ }
2665
+ return parameters;
2666
+ }
2667
+ };
2668
+ function createRuntime(config) {
2669
+ return new Runtime(config);
2670
+ }
2671
+ function createHonoApp(runtime) {
2672
+ const app = new hono.Hono();
2673
+ app.use("*", cors.cors());
2674
+ app.get("/", (c) => {
2675
+ return c.json({ status: "ok", provider: "yourgpt-copilot" });
2676
+ });
2677
+ app.post("/", async (c) => {
2678
+ const request = c.req.raw;
2679
+ return runtime.handleRequest(request);
2680
+ });
2681
+ app.post("/chat", async (c) => {
2682
+ const request = c.req.raw;
2683
+ return runtime.handleRequest(request);
2684
+ });
2685
+ app.post("/chat/loop", async (c) => {
2686
+ try {
2687
+ const body = await c.req.json();
2688
+ const signal = c.req.raw.signal;
2689
+ const generator = runtime.processChatWithLoop(body, signal);
2690
+ return createSSEResponse(generator);
2691
+ } catch (error) {
2692
+ return c.json(
2693
+ { error: error instanceof Error ? error.message : "Unknown error" },
2694
+ 500
2695
+ );
2696
+ }
2697
+ });
2698
+ app.get("/actions", (c) => {
2699
+ const actions = runtime.getActions().map((a) => ({
2700
+ name: a.name,
2701
+ description: a.description,
2702
+ parameters: a.parameters
2703
+ }));
2704
+ return c.json({ actions });
2705
+ });
2706
+ app.get("/tools", (c) => {
2707
+ const tools = runtime.getTools().map((t) => ({
2708
+ name: t.name,
2709
+ description: t.description,
2710
+ location: t.location,
2711
+ inputSchema: t.inputSchema
2712
+ }));
2713
+ return c.json({ tools });
2714
+ });
2715
+ app.get("/capabilities", (c) => {
2716
+ const provider = runtime.getProvider();
2717
+ const model = runtime.getModel();
2718
+ if (provider) {
2719
+ const capabilities = provider.getCapabilities(model);
2720
+ return c.json({
2721
+ provider: provider.name,
2722
+ model,
2723
+ capabilities,
2724
+ supportedModels: provider.supportedModels
2725
+ });
2726
+ }
2727
+ return c.json({
2728
+ provider: "unknown",
2729
+ model,
2730
+ capabilities: {
2731
+ supportsVision: false,
2732
+ supportsTools: true,
2733
+ supportsThinking: false,
2734
+ supportsStreaming: true,
2735
+ supportsPDF: false,
2736
+ supportsAudio: false,
2737
+ supportsVideo: false,
2738
+ maxTokens: 8192,
2739
+ supportedImageTypes: [],
2740
+ supportsJsonMode: false,
2741
+ supportsSystemMessages: true
2742
+ },
2743
+ supportedModels: []
2744
+ });
2745
+ });
2746
+ return app;
2747
+ }
2748
+ function createNextHandler(config) {
2749
+ const runtime = createRuntime(config);
2750
+ return async function handler(request) {
2751
+ return runtime.handleRequest(request);
2752
+ };
2753
+ }
2754
+ function createExpressMiddleware(config) {
2755
+ const runtime = createRuntime(config);
2756
+ createHonoApp(runtime);
2757
+ return async (req, res) => {
2758
+ try {
2759
+ const url = new URL(req.url, "http://localhost");
2760
+ const request = new Request(url, {
2761
+ method: req.method,
2762
+ headers: req.headers,
2763
+ body: req.method !== "GET" ? JSON.stringify(req.body) : void 0
2764
+ });
2765
+ const response = await runtime.handleRequest(request);
2766
+ response.headers.forEach((value, key) => {
2767
+ res.setHeader(key, value);
2768
+ });
2769
+ if (response.body) {
2770
+ const reader = response.body.getReader();
2771
+ const decoder = new TextDecoder();
2772
+ while (true) {
2773
+ const { done, value } = await reader.read();
2774
+ if (done) break;
2775
+ res.write(decoder.decode(value));
2776
+ }
2777
+ }
2778
+ res.end();
2779
+ } catch (error) {
2780
+ res.status(500).json({
2781
+ error: error instanceof Error ? error.message : "Unknown error"
2782
+ });
2783
+ }
2784
+ };
2785
+ }
2786
+ function createNodeHandler(config) {
2787
+ const runtime = createRuntime(config);
2788
+ const app = createHonoApp(runtime);
2789
+ return app.fetch;
2790
+ }
2791
+
2792
+ // src/providers/registry.ts
2793
+ var providerFactories = /* @__PURE__ */ new Map();
2794
+ function registerProvider(name, factory) {
2795
+ providerFactories.set(name, factory);
2796
+ }
2797
+ function getProvider(name, config) {
2798
+ const factory = providerFactories.get(name);
2799
+ if (!factory) {
2800
+ return void 0;
2801
+ }
2802
+ return factory(config);
2803
+ }
2804
+ function hasProvider(name) {
2805
+ return providerFactories.has(name);
2806
+ }
2807
+ function listProviders() {
2808
+ return Array.from(providerFactories.keys());
2809
+ }
2810
+ function getAvailableProviders() {
2811
+ const result = [];
2812
+ for (const [name, factory] of providerFactories) {
2813
+ try {
2814
+ const provider = factory();
2815
+ result.push({
2816
+ name,
2817
+ models: provider.supportedModels
2818
+ });
2819
+ } catch {
2820
+ result.push({
2821
+ name,
2822
+ models: []
2823
+ });
2824
+ }
2825
+ }
2826
+ return result;
2827
+ }
2828
+ function getModelCapabilities(providerName, modelId) {
2829
+ const provider = getProvider(providerName);
2830
+ if (!provider) {
2831
+ return void 0;
2832
+ }
2833
+ return provider.getCapabilities(modelId);
2834
+ }
2835
+
2836
+ // src/providers/openai/index.ts
2837
+ var OPENAI_MODELS = {
2838
+ // GPT-4o series
2839
+ "gpt-4o": {
2840
+ vision: true,
2841
+ tools: true,
2842
+ audio: true,
2843
+ jsonMode: true,
2844
+ maxTokens: 128e3
2845
+ },
2846
+ "gpt-4o-mini": {
2847
+ vision: true,
2848
+ tools: true,
2849
+ audio: false,
2850
+ jsonMode: true,
2851
+ maxTokens: 128e3
2852
+ },
2853
+ "gpt-4o-2024-11-20": {
2854
+ vision: true,
2855
+ tools: true,
2856
+ audio: true,
2857
+ jsonMode: true,
2858
+ maxTokens: 128e3
2859
+ },
2860
+ "gpt-4o-2024-08-06": {
2861
+ vision: true,
2862
+ tools: true,
2863
+ audio: false,
2864
+ jsonMode: true,
2865
+ maxTokens: 128e3
2866
+ },
2867
+ // GPT-4 Turbo series
2868
+ "gpt-4-turbo": {
2869
+ vision: true,
2870
+ tools: true,
2871
+ audio: false,
2872
+ jsonMode: true,
2873
+ maxTokens: 128e3
2874
+ },
2875
+ "gpt-4-turbo-preview": {
2876
+ vision: false,
2877
+ tools: true,
2878
+ audio: false,
2879
+ jsonMode: true,
2880
+ maxTokens: 128e3
2881
+ },
2882
+ // GPT-4 series
2883
+ "gpt-4": {
2884
+ vision: false,
2885
+ tools: true,
2886
+ audio: false,
2887
+ jsonMode: false,
2888
+ maxTokens: 8192
2889
+ },
2890
+ "gpt-4-32k": {
2891
+ vision: false,
2892
+ tools: true,
2893
+ audio: false,
2894
+ jsonMode: false,
2895
+ maxTokens: 32768
2896
+ },
2897
+ // GPT-3.5 series
2898
+ "gpt-3.5-turbo": {
2899
+ vision: false,
2900
+ tools: true,
2901
+ audio: false,
2902
+ jsonMode: true,
2903
+ maxTokens: 16385
2904
+ },
2905
+ "gpt-3.5-turbo-16k": {
2906
+ vision: false,
2907
+ tools: true,
2908
+ audio: false,
2909
+ jsonMode: true,
2910
+ maxTokens: 16385
2911
+ },
2912
+ // O1 reasoning series
2913
+ o1: {
2914
+ vision: true,
2915
+ tools: false,
2916
+ // O1 doesn't support tools yet
2917
+ audio: false,
2918
+ jsonMode: false,
2919
+ maxTokens: 128e3
2920
+ },
2921
+ "o1-mini": {
2922
+ vision: true,
2923
+ tools: false,
2924
+ audio: false,
2925
+ jsonMode: false,
2926
+ maxTokens: 128e3
2927
+ },
2928
+ "o1-preview": {
2929
+ vision: true,
2930
+ tools: false,
2931
+ audio: false,
2932
+ jsonMode: false,
2933
+ maxTokens: 128e3
2934
+ },
2935
+ // O3 reasoning series
2936
+ "o3-mini": {
2937
+ vision: true,
2938
+ tools: false,
2939
+ audio: false,
2940
+ jsonMode: false,
2941
+ maxTokens: 128e3
2942
+ }
2943
+ };
2944
+ function createOpenAI(config = {}) {
2945
+ const apiKey = config.apiKey ?? process.env.OPENAI_API_KEY ?? "";
2946
+ return {
2947
+ name: "openai",
2948
+ supportedModels: Object.keys(OPENAI_MODELS),
2949
+ languageModel(modelId) {
2950
+ return createOpenAIAdapter({
2951
+ apiKey,
2952
+ model: modelId,
2953
+ baseUrl: config.baseUrl
2954
+ });
2955
+ },
2956
+ getCapabilities(modelId) {
2957
+ const model = OPENAI_MODELS[modelId] ?? OPENAI_MODELS["gpt-4o"];
2958
+ return {
2959
+ supportsVision: model.vision,
2960
+ supportsTools: model.tools,
2961
+ supportsThinking: false,
2962
+ // OpenAI doesn't have extended thinking
2963
+ supportsStreaming: true,
2964
+ supportsPDF: false,
2965
+ // OpenAI doesn't support PDFs directly
2966
+ supportsAudio: model.audio,
2967
+ supportsVideo: false,
2968
+ maxTokens: model.maxTokens,
2969
+ supportedImageTypes: model.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : [],
2970
+ supportedAudioTypes: model.audio ? ["audio/mp3", "audio/wav", "audio/webm"] : [],
2971
+ supportsJsonMode: model.jsonMode,
2972
+ supportsSystemMessages: true
2973
+ };
2974
+ }
2975
+ };
2976
+ }
2977
+
2978
+ // src/providers/anthropic/index.ts
2979
+ var ANTHROPIC_MODELS = {
2980
+ // Claude 4 series (latest)
2981
+ "claude-sonnet-4-20250514": {
2982
+ vision: true,
2983
+ tools: true,
2984
+ thinking: true,
2985
+ maxTokens: 64e3
2986
+ },
2987
+ "claude-opus-4-20250514": {
2988
+ vision: true,
2989
+ tools: true,
2990
+ thinking: true,
2991
+ maxTokens: 32e3
2992
+ },
2993
+ // Claude 3.5 series
2994
+ "claude-3-5-sonnet-latest": {
2995
+ vision: true,
2996
+ tools: true,
2997
+ thinking: true,
2998
+ maxTokens: 2e5
2999
+ },
3000
+ "claude-3-5-sonnet-20241022": {
3001
+ vision: true,
3002
+ tools: true,
3003
+ thinking: true,
3004
+ maxTokens: 2e5
3005
+ },
3006
+ "claude-3-5-haiku-latest": {
3007
+ vision: true,
3008
+ tools: true,
3009
+ thinking: false,
3010
+ maxTokens: 2e5
3011
+ },
3012
+ "claude-3-5-haiku-20241022": {
3013
+ vision: true,
3014
+ tools: true,
3015
+ thinking: false,
3016
+ maxTokens: 2e5
3017
+ },
3018
+ // Claude 3 series
3019
+ "claude-3-opus-latest": {
3020
+ vision: true,
3021
+ tools: true,
3022
+ thinking: true,
3023
+ maxTokens: 2e5
3024
+ },
3025
+ "claude-3-opus-20240229": {
3026
+ vision: true,
3027
+ tools: true,
3028
+ thinking: true,
3029
+ maxTokens: 2e5
3030
+ },
3031
+ "claude-3-sonnet-20240229": {
3032
+ vision: true,
3033
+ tools: true,
3034
+ thinking: false,
3035
+ maxTokens: 2e5
3036
+ },
3037
+ "claude-3-haiku-20240307": {
3038
+ vision: true,
3039
+ tools: true,
3040
+ thinking: false,
3041
+ maxTokens: 2e5
3042
+ }
3043
+ };
3044
+ function createAnthropic(config = {}) {
3045
+ const apiKey = config.apiKey ?? process.env.ANTHROPIC_API_KEY ?? "";
3046
+ return {
3047
+ name: "anthropic",
3048
+ supportedModels: Object.keys(ANTHROPIC_MODELS),
3049
+ languageModel(modelId) {
3050
+ return createAnthropicAdapter({
3051
+ apiKey,
3052
+ model: modelId,
3053
+ baseUrl: config.baseUrl,
3054
+ thinking: config.thinkingBudget ? { type: "enabled", budgetTokens: config.thinkingBudget } : void 0
3055
+ });
3056
+ },
3057
+ getCapabilities(modelId) {
3058
+ const model = ANTHROPIC_MODELS[modelId] ?? ANTHROPIC_MODELS["claude-3-5-sonnet-latest"];
3059
+ return {
3060
+ supportsVision: model.vision,
3061
+ supportsTools: model.tools,
3062
+ supportsThinking: model.thinking,
3063
+ supportsStreaming: true,
3064
+ supportsPDF: true,
3065
+ // Claude supports PDFs
3066
+ supportsAudio: false,
3067
+ supportsVideo: false,
3068
+ maxTokens: model.maxTokens,
3069
+ supportedImageTypes: [
3070
+ "image/png",
3071
+ "image/jpeg",
3072
+ "image/gif",
3073
+ "image/webp"
3074
+ ],
3075
+ supportsJsonMode: false,
3076
+ // Anthropic doesn't have JSON mode
3077
+ supportsSystemMessages: true
3078
+ };
3079
+ }
3080
+ };
3081
+ }
3082
+
3083
+ // src/providers/groq/index.ts
3084
+ var GROQ_MODELS = {
3085
+ // Llama 3.3 series
3086
+ "llama-3.3-70b-versatile": {
3087
+ vision: false,
3088
+ tools: true,
3089
+ maxTokens: 32768
3090
+ },
3091
+ "llama-3.3-70b-specdec": {
3092
+ vision: false,
3093
+ tools: true,
3094
+ maxTokens: 8192
3095
+ },
3096
+ // Llama 3.2 Vision series
3097
+ "llama-3.2-90b-vision-preview": {
3098
+ vision: true,
3099
+ tools: true,
3100
+ maxTokens: 8192
3101
+ },
3102
+ "llama-3.2-11b-vision-preview": {
3103
+ vision: true,
3104
+ tools: true,
3105
+ maxTokens: 8192
3106
+ },
3107
+ // Llama 3.1 series
3108
+ "llama-3.1-70b-versatile": {
3109
+ vision: false,
3110
+ tools: true,
3111
+ maxTokens: 32768
3112
+ },
3113
+ "llama-3.1-8b-instant": {
3114
+ vision: false,
3115
+ tools: true,
3116
+ maxTokens: 8192
3117
+ },
3118
+ // Mixtral series
3119
+ "mixtral-8x7b-32768": {
3120
+ vision: false,
3121
+ tools: true,
3122
+ maxTokens: 32768
3123
+ },
3124
+ // Gemma series
3125
+ "gemma2-9b-it": {
3126
+ vision: false,
3127
+ tools: false,
3128
+ maxTokens: 8192
3129
+ },
3130
+ // DeepSeek
3131
+ "deepseek-r1-distill-llama-70b": {
3132
+ vision: false,
3133
+ tools: true,
3134
+ maxTokens: 8192
3135
+ }
3136
+ };
3137
+ function createGroq(config = {}) {
3138
+ const apiKey = config.apiKey ?? process.env.GROQ_API_KEY ?? "";
3139
+ return {
3140
+ name: "groq",
3141
+ supportedModels: Object.keys(GROQ_MODELS),
3142
+ languageModel(modelId) {
3143
+ return createGroqAdapter({
3144
+ apiKey,
3145
+ model: modelId
3146
+ });
3147
+ },
3148
+ getCapabilities(modelId) {
3149
+ const model = GROQ_MODELS[modelId] ?? GROQ_MODELS["llama-3.3-70b-versatile"];
3150
+ return {
3151
+ supportsVision: model.vision,
3152
+ supportsTools: model.tools,
3153
+ supportsThinking: false,
3154
+ supportsStreaming: true,
3155
+ supportsPDF: false,
3156
+ supportsAudio: false,
3157
+ supportsVideo: false,
3158
+ maxTokens: model.maxTokens,
3159
+ supportedImageTypes: model.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : [],
3160
+ supportsJsonMode: true,
3161
+ supportsSystemMessages: true
3162
+ };
3163
+ }
3164
+ };
3165
+ }
3166
+
3167
+ // src/providers/ollama/index.ts
3168
+ var OLLAMA_MODELS = {
3169
+ // Llama series
3170
+ llama3: {
3171
+ vision: false,
3172
+ tools: true,
3173
+ maxTokens: 8192
3174
+ },
3175
+ "llama3:70b": {
3176
+ vision: false,
3177
+ tools: true,
3178
+ maxTokens: 8192
3179
+ },
3180
+ "llama3.2": {
3181
+ vision: false,
3182
+ tools: true,
3183
+ maxTokens: 8192
3184
+ },
3185
+ "llama3.2-vision": {
3186
+ vision: true,
3187
+ tools: true,
3188
+ maxTokens: 8192
3189
+ },
3190
+ // Mistral series
3191
+ mistral: {
3192
+ vision: false,
3193
+ tools: true,
3194
+ maxTokens: 8192
3195
+ },
3196
+ "mistral-nemo": {
3197
+ vision: false,
3198
+ tools: true,
3199
+ maxTokens: 128e3
3200
+ },
3201
+ mixtral: {
3202
+ vision: false,
3203
+ tools: true,
3204
+ maxTokens: 32768
3205
+ },
3206
+ // CodeLlama
3207
+ codellama: {
3208
+ vision: false,
3209
+ tools: false,
3210
+ maxTokens: 16384
3211
+ },
3212
+ // Phi series
3213
+ phi3: {
3214
+ vision: false,
3215
+ tools: true,
3216
+ maxTokens: 4096
3217
+ },
3218
+ "phi3:medium": {
3219
+ vision: false,
3220
+ tools: true,
3221
+ maxTokens: 4096
3222
+ },
3223
+ // Gemma series
3224
+ gemma2: {
3225
+ vision: false,
3226
+ tools: false,
3227
+ maxTokens: 8192
3228
+ },
3229
+ "gemma2:27b": {
3230
+ vision: false,
3231
+ tools: false,
3232
+ maxTokens: 8192
3233
+ },
3234
+ // Qwen series
3235
+ qwen2: {
3236
+ vision: false,
3237
+ tools: true,
3238
+ maxTokens: 32768
3239
+ },
3240
+ "qwen2.5-coder": {
3241
+ vision: false,
3242
+ tools: true,
3243
+ maxTokens: 32768
3244
+ },
3245
+ // LLaVA (vision)
3246
+ llava: {
3247
+ vision: true,
3248
+ tools: false,
3249
+ maxTokens: 4096
3250
+ },
3251
+ // DeepSeek
3252
+ deepseek: {
3253
+ vision: false,
3254
+ tools: true,
3255
+ maxTokens: 16384
3256
+ },
3257
+ "deepseek-coder": {
3258
+ vision: false,
3259
+ tools: false,
3260
+ maxTokens: 16384
3261
+ }
3262
+ };
3263
+ var DEFAULT_MODEL_CAPS = {
3264
+ vision: false,
3265
+ tools: false,
3266
+ maxTokens: 4096
3267
+ };
3268
+ function createOllama(config = {}) {
3269
+ const baseUrl = config.baseUrl ?? "http://localhost:11434";
3270
+ return {
3271
+ name: "ollama",
3272
+ supportedModels: Object.keys(OLLAMA_MODELS),
3273
+ languageModel(modelId) {
3274
+ return createOllamaAdapter({
3275
+ model: modelId,
3276
+ baseUrl
3277
+ });
3278
+ },
3279
+ getCapabilities(modelId) {
3280
+ const baseModelName = modelId.split(":")[0];
3281
+ const model = OLLAMA_MODELS[modelId] ?? OLLAMA_MODELS[baseModelName] ?? DEFAULT_MODEL_CAPS;
3282
+ return {
3283
+ supportsVision: model.vision,
3284
+ supportsTools: model.tools,
3285
+ supportsThinking: false,
3286
+ supportsStreaming: true,
3287
+ supportsPDF: false,
3288
+ supportsAudio: false,
3289
+ supportsVideo: false,
3290
+ maxTokens: model.maxTokens,
3291
+ supportedImageTypes: model.vision ? ["image/png", "image/jpeg", "image/gif"] : [],
3292
+ supportsJsonMode: false,
3293
+ supportsSystemMessages: true
3294
+ };
3295
+ }
3296
+ };
3297
+ }
3298
+
3299
+ // src/providers/google/index.ts
3300
// Capability matrix for Google Gemini models.
// maxTokens is the input context window; outputTokens is the maximum
// completion length. Key order matters: Object.keys() of this table is
// exposed as supportedModels.
var GOOGLE_MODELS = {
  // Gemini 2.0 series (latest)
  "gemini-2.0-flash": {
    vision: true,
    tools: true,
    audio: true,
    video: true,
    pdf: true,
    maxTokens: 1e6,
    outputTokens: 8192
  },
  "gemini-2.0-flash-lite": {
    vision: true,
    tools: true,
    audio: false,
    video: false,
    pdf: true,
    maxTokens: 1e6,
    outputTokens: 8192
  },
  // Gemini 2.5 series (experimental)
  "gemini-2.5-pro-preview-05-06": {
    vision: true,
    tools: true,
    audio: true,
    video: true,
    pdf: true,
    maxTokens: 1e6,
    outputTokens: 65536
  },
  "gemini-2.5-flash-preview-05-20": {
    vision: true,
    tools: true,
    audio: true,
    video: true,
    pdf: true,
    maxTokens: 1e6,
    outputTokens: 65536
  },
  // Gemini 1.5 series
  "gemini-1.5-pro": {
    vision: true,
    tools: true,
    audio: true,
    video: true,
    pdf: true,
    maxTokens: 2e6, // 2M-token context window
    outputTokens: 8192
  },
  "gemini-1.5-pro-latest": {
    vision: true,
    tools: true,
    audio: true,
    video: true,
    pdf: true,
    maxTokens: 2e6,
    outputTokens: 8192
  },
  "gemini-1.5-flash": {
    vision: true,
    tools: true,
    audio: true,
    video: true,
    pdf: true,
    maxTokens: 1e6,
    outputTokens: 8192
  },
  "gemini-1.5-flash-latest": {
    vision: true,
    tools: true,
    audio: true,
    video: true,
    pdf: true,
    maxTokens: 1e6,
    outputTokens: 8192
  },
  "gemini-1.5-flash-8b": {
    vision: true,
    tools: true,
    audio: false,
    video: false,
    pdf: true,
    maxTokens: 1e6,
    outputTokens: 8192
  },
  // Gemini 1.0 series (legacy)
  "gemini-1.0-pro": {
    vision: false,
    tools: true,
    audio: false,
    video: false,
    pdf: false,
    maxTokens: 30720,
    outputTokens: 2048
  }
};
3396
/**
 * Provider factory for Google Gemini.
 * Falls back to the GOOGLE_API_KEY environment variable when no key is
 * supplied in config.
 */
function createGoogle(config = {}) {
  const apiKey = config.apiKey ?? process.env.GOOGLE_API_KEY ?? "";
  return {
    name: "google",
    supportedModels: Object.keys(GOOGLE_MODELS),
    languageModel(modelId) {
      return createGoogleAdapter({
        apiKey,
        model: modelId,
        baseUrl: config.baseUrl,
        safetySettings: config.safetySettings
      });
    },
    getCapabilities(modelId) {
      // Unknown model IDs fall back to the gemini-2.0-flash entry.
      const caps = GOOGLE_MODELS[modelId] ?? GOOGLE_MODELS["gemini-2.0-flash"];
      const imageTypes = [
        "image/png",
        "image/jpeg",
        "image/gif",
        "image/webp",
        "image/heic",
        "image/heif"
      ];
      const audioTypes = [
        "audio/mp3",
        "audio/wav",
        "audio/aiff",
        "audio/aac",
        "audio/ogg",
        "audio/flac"
      ];
      const videoTypes = [
        "video/mp4",
        "video/mpeg",
        "video/mov",
        "video/avi",
        "video/webm",
        "video/mkv"
      ];
      return {
        supportsVision: caps.vision,
        supportsTools: caps.tools,
        // Gemini has no Claude-style extended thinking.
        supportsThinking: false,
        supportsStreaming: true,
        supportsPDF: caps.pdf,
        supportsAudio: caps.audio,
        supportsVideo: caps.video,
        maxTokens: caps.maxTokens,
        supportedImageTypes: caps.vision ? imageTypes : [],
        supportedAudioTypes: caps.audio ? audioTypes : [],
        supportedVideoTypes: caps.video ? videoTypes : [],
        // Gemini supports a JSON response mode.
        supportsJsonMode: true,
        supportsSystemMessages: true
      };
    }
  };
}
3452
+
3453
+ // src/providers/xai/index.ts
3454
// Capability matrix for xAI Grok models.
// maxTokens is the input context window; outputTokens is the maximum
// completion length. Key order matters: Object.keys() of this table is
// exposed as supportedModels.
var XAI_MODELS = {
  // Grok 2 series (latest)
  "grok-2": {
    vision: true,
    tools: true,
    maxTokens: 131072,
    outputTokens: 4096
  },
  "grok-2-latest": {
    vision: true,
    tools: true,
    maxTokens: 131072,
    outputTokens: 4096
  },
  "grok-2-mini": {
    vision: false,
    tools: true,
    maxTokens: 131072,
    outputTokens: 4096
  },
  "grok-2-mini-latest": {
    vision: false,
    tools: true,
    maxTokens: 131072,
    outputTokens: 4096
  },
  // Grok Vision
  "grok-2-vision": {
    vision: true,
    tools: true,
    maxTokens: 32768,
    outputTokens: 4096
  },
  "grok-2-vision-latest": {
    vision: true,
    tools: true,
    maxTokens: 32768,
    outputTokens: 4096
  },
  // Grok Beta (legacy)
  "grok-beta": {
    vision: false,
    tools: true,
    maxTokens: 131072,
    outputTokens: 4096
  },
  "grok-vision-beta": {
    vision: true,
    tools: true,
    maxTokens: 8192,
    outputTokens: 4096
  }
};
3507
/**
 * Provider factory for xAI Grok.
 * Falls back to the XAI_API_KEY environment variable when no key is supplied.
 */
function createXAI(config = {}) {
  const apiKey = config.apiKey ?? process.env.XAI_API_KEY ?? "";
  // Unknown model IDs fall back to the grok-2 entry.
  const lookupCaps = (modelId) => XAI_MODELS[modelId] ?? XAI_MODELS["grok-2"];
  return {
    name: "xai",
    supportedModels: Object.keys(XAI_MODELS),
    languageModel(modelId) {
      return createXAIAdapter({
        apiKey,
        model: modelId,
        baseUrl: config.baseUrl
      });
    },
    getCapabilities(modelId) {
      const caps = lookupCaps(modelId);
      return {
        supportsVision: caps.vision,
        supportsTools: caps.tools,
        supportsThinking: false,
        supportsStreaming: true,
        supportsPDF: false,
        supportsAudio: false,
        supportsVideo: false,
        maxTokens: caps.maxTokens,
        supportedImageTypes: caps.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : [],
        // xAI doesn't support JSON mode yet.
        supportsJsonMode: false,
        supportsSystemMessages: true
      };
    }
  };
}
3538
+
3539
+ // src/providers/azure/index.ts
3540
/**
 * Heuristically infer model capabilities from an Azure OpenAI deployment
 * name, since Azure addresses models by user-chosen deployment names rather
 * than canonical model IDs. Matching is case-insensitive and ordered from
 * most to least specific; unrecognized names get a conservative GPT-4-like
 * default.
 *
 * @param {string} deploymentName - the Azure deployment name to classify
 * @returns {{vision: boolean, tools: boolean, maxTokens: number}}
 */
function detectCapabilitiesFromDeployment(deploymentName) {
  const name = deploymentName.toLowerCase();
  const has = (...needles) => needles.some((needle) => name.includes(needle));
  const isGpt4 = has("gpt-4", "gpt4");
  // GPT-4o family: multimodal, 128K context.
  if (has("gpt-4o", "gpt4o")) {
    return { vision: true, tools: true, maxTokens: 128e3 };
  }
  // GPT-4 Turbo / GPT-4 Vision: multimodal, 128K context.
  if (isGpt4 && has("turbo", "vision")) {
    return { vision: true, tools: true, maxTokens: 128e3 };
  }
  // Base GPT-4: text-only, 8K context.
  if (isGpt4) {
    return { vision: false, tools: true, maxTokens: 8192 };
  }
  // GPT-3.5 family (Azure commonly names these "gpt-35-*").
  if (has("gpt-35", "gpt-3.5", "gpt35")) {
    return { vision: false, tools: true, maxTokens: 16385 };
  }
  // o1 reasoning models: vision but no tool calling.
  if (has("o1")) {
    return { vision: true, tools: false, maxTokens: 128e3 };
  }
  // Unknown deployment: assume a GPT-4-class text model.
  return { vision: false, tools: true, maxTokens: 8192 };
}
3559
/**
 * Provider factory for Azure OpenAI.
 * Key, resource, and default deployment fall back to the standard
 * AZURE_OPENAI_* environment variables. Capabilities are guessed from the
 * deployment name (Azure has no canonical model IDs).
 */
function createAzure(config) {
  const apiKey = config.apiKey ?? process.env.AZURE_OPENAI_API_KEY ?? "";
  const resourceName = config.resourceName ?? process.env.AZURE_OPENAI_RESOURCE ?? "";
  const defaultDeployment = config.deploymentName ?? process.env.AZURE_OPENAI_DEPLOYMENT ?? "";
  return {
    name: "azure",
    // Only the configured default deployment (if any) is known up front.
    supportedModels: defaultDeployment ? [defaultDeployment] : [],
    languageModel(deploymentName) {
      return createAzureAdapter({
        apiKey,
        resourceName,
        deploymentName: deploymentName || defaultDeployment,
        apiVersion: config.apiVersion,
        baseUrl: config.baseUrl
      });
    },
    getCapabilities(deploymentName) {
      const caps = detectCapabilitiesFromDeployment(deploymentName || defaultDeployment);
      return {
        supportsVision: caps.vision,
        supportsTools: caps.tools,
        supportsThinking: false,
        supportsStreaming: true,
        supportsPDF: false,
        supportsAudio: false,
        supportsVideo: false,
        maxTokens: caps.maxTokens,
        supportedImageTypes: caps.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : [],
        supportsJsonMode: true,
        supportsSystemMessages: true
      };
    }
  };
}
3596
+
3597
+ // src/providers/openai.ts
3598
// ---------------------------------------------------------------------------
// OpenAI Chat Completions formatter.
// Responses carry text and tool calls under choices[0].message, with
// finish_reason signalling how the turn ended; tool arguments are JSON
// strings.
// ---------------------------------------------------------------------------

/** Read choices[0].message from a raw completion response, if present. */
function getOpenAIMessage(response) {
  return response?.choices?.[0]?.message;
}

/** Read choices[0].finish_reason from a raw completion response, if present. */
function getOpenAIFinishReason(response) {
  return response?.choices?.[0]?.finish_reason;
}

/** Convert internal tool definitions to OpenAI "function" tool specs. */
function transformTools(tools) {
  const toFunctionSpec = (tool) => ({
    type: "function",
    function: {
      name: tool.name,
      description: tool.description,
      parameters: tool.inputSchema
    }
  });
  return tools.map(toFunctionSpec);
}

/**
 * Extract tool calls from a response. Arguments arrive as JSON strings;
 * unparseable arguments are logged and replaced with an empty object.
 */
function parseToolCalls(response) {
  const rawCalls = getOpenAIMessage(response)?.tool_calls || [];
  return rawCalls.map((call) => {
    let input = {};
    try {
      input = JSON.parse(call.function.arguments);
    } catch (e) {
      console.error(
        "Failed to parse tool arguments:",
        call.function.arguments,
        e
      );
    }
    return { id: call.id, name: call.function.name, input };
  });
}

/** Extract the assistant's text content ("" when absent). */
function extractTextContent(response) {
  return getOpenAIMessage(response)?.content || "";
}

/** Convert internal tool results to OpenAI "tool" role messages. */
function formatToolResults(results) {
  return results.map(({ toolCallId, content }) => ({
    role: "tool",
    tool_call_id: toolCallId,
    content
  }));
}

/** True when the model stopped to invoke tools. */
function isToolUseStop(response) {
  return getOpenAIFinishReason(response) === "tool_calls";
}

/** True when the model finished its turn normally. */
function isEndTurnStop(response) {
  return getOpenAIFinishReason(response) === "stop";
}

/** The raw finish reason, or "unknown" when missing. */
function getStopReason(response) {
  return getOpenAIFinishReason(response) || "unknown";
}

/**
 * Build the assistant message that echoes the model's tool calls back into
 * the conversation (arguments re-serialized to JSON strings).
 */
function buildAssistantToolMessage(toolCalls, textContent) {
  const tool_calls = toolCalls.map(({ id, name, input }) => ({
    id,
    type: "function",
    function: {
      name,
      arguments: JSON.stringify(input)
    }
  }));
  return {
    role: "assistant",
    content: textContent || null,
    tool_calls
  };
}

/** OpenAI expects one "tool" message per result (an array, not one message). */
function buildToolResultMessage(results) {
  return formatToolResults(results);
}

var openaiFormatter = {
  transformTools,
  parseToolCalls,
  formatToolResults,
  isToolUseStop,
  isEndTurnStop,
  getStopReason,
  extractTextContent,
  buildAssistantToolMessage,
  buildToolResultMessage
};
3687
+
3688
+ // src/providers/anthropic.ts
3689
// ---------------------------------------------------------------------------
// Anthropic Messages API formatter.
// Responses are arrays of typed content blocks ("text" / "tool_use"), with
// stop_reason signalling how the turn ended.
// ---------------------------------------------------------------------------

/** Normalize a response into its content-block array, or undefined. */
function getAnthropicBlocks(response) {
  const blocks = Array.isArray(response) ? response : response?.content;
  return Array.isArray(blocks) ? blocks : void 0;
}

/** Convert internal tool definitions to Anthropic tool specs. */
function transformTools2(tools) {
  return tools.map(({ name, description, inputSchema }) => ({
    name,
    description,
    input_schema: inputSchema
  }));
}

/** Extract tool calls from "tool_use" content blocks. */
function parseToolCalls2(response) {
  const blocks = getAnthropicBlocks(response);
  if (!blocks) {
    return [];
  }
  const calls = [];
  for (const block of blocks) {
    if (block?.type === "tool_use") {
      calls.push({ id: block.id, name: block.name, input: block.input || {} });
    }
  }
  return calls;
}

/** Join all "text" content blocks with newlines ("" when none). */
function extractTextContent2(response) {
  const blocks = getAnthropicBlocks(response);
  if (!blocks) {
    return "";
  }
  const texts = [];
  for (const block of blocks) {
    if (block?.type === "text") {
      texts.push(block.text || "");
    }
  }
  return texts.join("\n");
}

/** Convert internal tool results to "tool_result" content blocks. */
function formatToolResults2(results) {
  return results.map(({ toolCallId, content }) => ({
    type: "tool_result",
    tool_use_id: toolCallId,
    content
  }));
}

/** True when the model stopped to invoke tools. */
function isToolUseStop2(response) {
  return response?.stop_reason === "tool_use";
}

/** True when the model finished its turn normally. */
function isEndTurnStop2(response) {
  return ["end_turn", "stop"].includes(response?.stop_reason);
}

/** The raw stop reason, or "unknown" when missing. */
function getStopReason2(response) {
  return response?.stop_reason || "unknown";
}

/**
 * Build the assistant message echoing the model's tool calls (optional text
 * block first, then one "tool_use" block per call).
 */
function buildAssistantToolMessage2(toolCalls, textContent) {
  const content = textContent ? [{ type: "text", text: textContent }] : [];
  for (const { id, name, input } of toolCalls) {
    content.push({ type: "tool_use", id, name, input });
  }
  return { role: "assistant", content };
}

/** Anthropic expects tool results wrapped in a single "user" message. */
function buildToolResultMessage2(results) {
  return {
    role: "user",
    content: formatToolResults2(results)
  };
}

var anthropicFormatter = {
  transformTools: transformTools2,
  parseToolCalls: parseToolCalls2,
  formatToolResults: formatToolResults2,
  isToolUseStop: isToolUseStop2,
  isEndTurnStop: isEndTurnStop2,
  getStopReason: getStopReason2,
  extractTextContent: extractTextContent2,
  buildAssistantToolMessage: buildAssistantToolMessage2,
  buildToolResultMessage: buildToolResultMessage2
};
3766
+
3767
+ // src/providers/gemini.ts
3768
// ---------------------------------------------------------------------------
// Google Gemini formatter.
// Responses live under candidates[0].content.parts; tool invocations are
// functionCall parts, and the turn's end is signalled by finishReason.
// ---------------------------------------------------------------------------

/** Read candidates[0].content.parts from a raw response, if present. */
function getGeminiParts(response) {
  return response?.candidates?.[0]?.content?.parts;
}

/** Read candidates[0].finishReason from a raw response, if present. */
function getGeminiFinishReason(response) {
  return response?.candidates?.[0]?.finishReason;
}

/**
 * Parse a tool-result payload. Non-JSON strings are wrapped as
 * `{ result: <string> }` so Gemini always receives an object.
 */
function parseGeminiResultContent(content) {
  try {
    return JSON.parse(content);
  } catch {
    return { result: content };
  }
}

/** Convert internal tool definitions into one functionDeclarations group. */
function transformTools3(tools) {
  const functionDeclarations = tools.map(({ name, description, inputSchema }) => ({
    name,
    description,
    parameters: inputSchema
  }));
  return [{ functionDeclarations }];
}

/**
 * Extract tool calls from functionCall parts. Gemini returns no call IDs,
 * so a synthetic `gemini_<timestamp>_<index>` ID is generated per call.
 */
function parseToolCalls3(response) {
  const parts = getGeminiParts(response);
  if (!parts) return [];
  const calls = [];
  for (const part of parts) {
    const fc = part.functionCall;
    if (!fc) continue;
    calls.push({
      id: `gemini_${Date.now()}_${calls.length}`,
      name: fc.name,
      input: fc.args || {}
    });
  }
  return calls;
}

/** Join all text parts with newlines ("" when none). */
function extractTextContent3(response) {
  const parts = getGeminiParts(response);
  if (!parts) return "";
  const texts = [];
  for (const part of parts) {
    if (typeof part.text === "string") {
      texts.push(part.text);
    }
  }
  return texts.join("\n");
}

/**
 * Convert internal tool results to Gemini function responses.
 * NOTE(review): the synthetic IDs are `gemini_<timestamp>_<index>`, so the
 * "name" recovered here is really the call index, not a tool name — Gemini
 * payloads do not carry the tool name through the ID. Kept as-is for
 * compatibility.
 */
function formatToolResults3(results) {
  return results.map((r) => ({
    name: r.toolCallId.split("_").slice(2).join("_") || "unknown",
    response: parseGeminiResultContent(r.content)
  }));
}

/** Gemini signals tool use by the presence of functionCall parts. */
function isToolUseStop3(response) {
  const parts = getGeminiParts(response);
  if (!parts) return false;
  return parts.some((part) => part.functionCall !== void 0);
}

/** True when the model finished its turn normally. */
function isEndTurnStop3(response) {
  const reason = getGeminiFinishReason(response);
  return reason === "STOP" || reason === "END_TURN";
}

/** The raw finish reason, or "unknown" when missing. */
function getStopReason3(response) {
  return getGeminiFinishReason(response) || "unknown";
}

/**
 * Build the "model" role message echoing the tool calls (optional text part
 * first, then one functionCall part per call).
 */
function buildAssistantToolMessage3(toolCalls, textContent) {
  const parts = textContent ? [{ text: textContent }] : [];
  for (const tc of toolCalls) {
    parts.push({
      functionCall: {
        name: tc.name,
        args: tc.input
      }
    });
  }
  return { role: "model", parts };
}

/**
 * Wrap tool results in a single "user" message of functionResponse parts.
 * NOTE(review): Gemini expects the real tool name in functionResponse.name,
 * but only the synthetic call ID is available here; "tool" is a known
 * placeholder, preserved from the original implementation.
 */
function buildToolResultMessage3(results) {
  const parts = results.map((r) => ({
    functionResponse: {
      name: "tool",
      response: parseGeminiResultContent(r.content)
    }
  }));
  return { role: "user", parts };
}

var geminiFormatter = {
  transformTools: transformTools3,
  parseToolCalls: parseToolCalls3,
  formatToolResults: formatToolResults3,
  isToolUseStop: isToolUseStop3,
  isEndTurnStop: isEndTurnStop3,
  getStopReason: getStopReason3,
  extractTextContent: extractTextContent3,
  buildAssistantToolMessage: buildAssistantToolMessage3,
  buildToolResultMessage: buildToolResultMessage3
};
3891
+
3892
+ // src/providers/formatter-registry.ts
3893
// Registry mapping lowercase provider names to their formatters.
// Groq, Ollama, xAI, and Azure expose OpenAI-compatible APIs, so they all
// reuse the OpenAI formatter; "gemini" is an alias for "google".
var formatters = {
  openai: openaiFormatter,
  anthropic: anthropicFormatter,
  google: geminiFormatter,
  gemini: geminiFormatter,
  groq: openaiFormatter,
  ollama: openaiFormatter,
  xai: openaiFormatter,
  azure: openaiFormatter
};

/**
 * Look up a formatter by provider name (case-insensitive).
 * @throws {Error} when the provider is not registered.
 */
function getFormatter(provider) {
  const key = provider.toLowerCase();
  const formatter = formatters[key];
  if (formatter) {
    return formatter;
  }
  throw new Error(
    `Unsupported provider: ${provider}. Supported providers: ${Object.keys(formatters).join(", ")}`
  );
}

/** Whether a formatter is registered for this provider (case-insensitive). */
function isProviderSupported(provider) {
  const key = provider.toLowerCase();
  return key in formatters;
}

/** Names of all providers with a registered formatter. */
function getSupportedProviders() {
  return Object.keys(formatters);
}
3920
+
3921
+ // src/providers/index.ts
3922
+ registerProvider("openai", (config) => createOpenAI(config));
3923
+ registerProvider("anthropic", (config) => createAnthropic(config));
3924
+ registerProvider("groq", (config) => createGroq(config));
3925
+ registerProvider("ollama", (config) => createOllama(config));
3926
+ registerProvider("google", (config) => createGoogle(config));
3927
+ registerProvider("xai", (config) => createXAI(config));
3928
+ registerProvider("azure", (config) => createAzure(config));
3929
+
3930
+ // src/server/agent-loop.ts
3931
// Hard cap on LLM round-trips per request when config.maxIterations is unset.
var DEFAULT_MAX_ITERATIONS = 20;

/**
 * Core agent loop, exposed as an async generator of protocol events.
 *
 * Repeatedly calls the LLM with the running conversation. Whenever the model
 * stops to use tools, those tools are executed (server tools via their
 * handlers, client tools via a round-trip callback), the results are appended
 * to the conversation in the provider's expected shape, and the loop
 * continues — until the model ends its turn, an error occurs, the abort
 * signal fires, or maxIterations is reached.
 *
 * @param {object}   options
 * @param {Array}    options.messages  - chat history ({ role, content } items)
 * @param {Array}    options.tools     - tool definitions (location "server" | "client")
 * @param {string}   [options.systemPrompt]
 * @param {string}   options.provider  - formatter key (e.g. "openai", "anthropic")
 * @param {AbortSignal} [options.signal]
 * @param {object}   [options.config]  - { maxIterations?, debug? }
 * @param {Function} options.callLLM   - (conversation, providerTools) => Promise<raw response>
 * @param {Function} [options.executeServerTool]
 * @param {Function} [options.waitForClientToolResult]
 * @yields loop:iteration, message:*, action:*, tool:result, error,
 *         loop:complete, done
 */
async function* runAgentLoop(options) {
  const {
    messages,
    tools,
    systemPrompt,
    provider,
    signal,
    config,
    callLLM,
    executeServerTool,
    waitForClientToolResult
  } = options;
  const maxIterations = config?.maxIterations ?? DEFAULT_MAX_ITERATIONS;
  const debug = config?.debug ?? false;
  const formatter = getFormatter(provider);
  // Server tools are listed first so providers see a stable ordering.
  const serverTools = tools.filter((t) => t.location === "server");
  const clientTools = tools.filter((t) => t.location === "client");
  const allTools = [...serverTools, ...clientTools];
  const providerTools = formatter.transformTools(allTools);
  const conversation = buildConversation(messages, systemPrompt);
  let iteration = 0;
  if (debug) {
    console.log("[AgentLoop] Starting with", {
      messageCount: messages.length,
      toolCount: allTools.length,
      serverToolCount: serverTools.length,
      clientToolCount: clientTools.length,
      maxIterations
    });
  }
  while (iteration < maxIterations) {
    // Honor cancellation between LLM round-trips.
    if (signal?.aborted) {
      yield {
        type: "loop:complete",
        iterations: iteration,
        aborted: true
      };
      return;
    }
    iteration++;
    yield {
      type: "loop:iteration",
      iteration,
      maxIterations
    };
    if (debug) {
      console.log(`[AgentLoop] Iteration ${iteration}/${maxIterations}`);
    }
    try {
      const response = await callLLM(conversation, providerTools);
      const toolCalls = formatter.parseToolCalls(response);
      const textContent = formatter.extractTextContent(response);
      if (textContent) {
        // Surface assistant text as a (single-delta) message stream.
        const messageId = core.generateMessageId();
        yield { type: "message:start", id: messageId };
        yield { type: "message:delta", content: textContent };
        yield { type: "message:end" };
      }
      if (formatter.isToolUseStop(response) && toolCalls.length > 0) {
        if (debug) {
          console.log(
            "[AgentLoop] Tool calls:",
            toolCalls.map((tc) => tc.name)
          );
        }
        // BUG FIX: previously a generator function (`function* (event) {
        // yield event; }`) was passed as the emitter; calling it only created
        // a generator object that was never iterated, so every
        // action:start/action:args/action:end event was silently dropped.
        // Buffer the events in an array instead and yield them from this
        // generator once tool execution returns.
        const pendingEvents = [];
        const results = await executeToolCalls(
          toolCalls,
          tools,
          executeServerTool,
          waitForClientToolResult,
          (event) => {
            pendingEvents.push(event);
          },
          debug
        );
        for (const event of pendingEvents) {
          yield event;
        }
        for (const result of results) {
          const toolCall = toolCalls.find((tc) => tc.id === result.toolCallId);
          if (toolCall) {
            yield {
              type: "tool:result",
              id: result.toolCallId,
              name: toolCall.name,
              // result.content is always a JSON string produced by
              // executeToolCalls, so this parse cannot throw for our inputs.
              result: JSON.parse(result.content)
            };
          }
        }
        // Echo the tool calls and their results back into the conversation
        // in the provider's expected message shapes, then loop again.
        const assistantMessage = formatter.buildAssistantToolMessage(
          toolCalls,
          textContent
        );
        conversation.push(assistantMessage);
        const toolResultMessage = formatter.buildToolResultMessage(results);
        // OpenAI-style formatters return one message per result (an array);
        // Anthropic/Gemini return a single wrapping message.
        if (Array.isArray(toolResultMessage)) {
          conversation.push(...toolResultMessage);
        } else {
          conversation.push(toolResultMessage);
        }
        continue;
      }
      if (formatter.isEndTurnStop(response)) {
        if (debug) {
          console.log("[AgentLoop] End turn detected");
        }
        break;
      }
      // Neither tool use nor a normal end of turn: stop rather than loop
      // on an unrecognized stop reason.
      const stopReason = formatter.getStopReason(response);
      if (debug) {
        console.log("[AgentLoop] Unknown stop reason:", stopReason);
      }
      break;
    } catch (error) {
      if (debug) {
        console.error("[AgentLoop] Error:", error);
      }
      yield {
        type: "error",
        message: error instanceof Error ? error.message : "Unknown error",
        code: "AGENT_LOOP_ERROR"
      };
      break;
    }
  }
  yield {
    type: "loop:complete",
    iterations: iteration,
    maxIterationsReached: iteration >= maxIterations
  };
  yield { type: "done" };
}
4064
/**
 * Flatten chat history into provider-message form, optionally prefixed with a
 * system message. Only role/content are forwarded; any extra fields on the
 * incoming messages are dropped. A falsy systemPrompt adds no system message.
 */
function buildConversation(messages, systemPrompt) {
  const history = messages.map(({ role, content }) => ({ role, content }));
  return systemPrompt
    ? [{ role: "system", content: systemPrompt }, ...history]
    : history;
}
4080
/**
 * Execute a batch of tool calls sequentially and collect their results.
 *
 * For each call: unknown tools produce an error result without events; known
 * tools emit action:start and action:args, are dispatched to the matching
 * handler, and finish with action:end carrying either the result or the
 * error message. Handler exceptions are converted to failed results rather
 * than propagated.
 *
 * @param {Array}    toolCalls - [{ id, name, input }]
 * @param {Array}    tools     - tool definitions (location "server" | "client")
 * @param {Function} [executeServerTool]       - (name, input) => result
 * @param {Function} [waitForClientToolResult] - (id, name, input) => result
 * @param {Function} [emitEvent]               - sink for action:* events
 * @param {boolean}  debug
 * @returns {Promise<Array>} [{ toolCallId, content (JSON string), success, error }]
 */
async function executeToolCalls(toolCalls, tools, executeServerTool, waitForClientToolResult, emitEvent, debug) {
  const results = [];
  // Build a failed-result record whose content mirrors the error payload.
  const toErrorResult = (toolCallId, error) => ({
    toolCallId,
    content: JSON.stringify({ success: false, error }),
    success: false,
    error
  });
  for (const call of toolCalls) {
    const tool = tools.find((t) => t.name === call.name);
    if (!tool) {
      if (debug) {
        console.warn(`[AgentLoop] Unknown tool: ${call.name}`);
      }
      results.push(toErrorResult(call.id, `Unknown tool: ${call.name}`));
      continue;
    }
    emitEvent?.({
      type: "action:start",
      id: call.id,
      name: call.name
    });
    emitEvent?.({
      type: "action:args",
      id: call.id,
      args: JSON.stringify(call.input)
    });
    try {
      const response = await dispatchToolCall(
        tool,
        call,
        executeServerTool,
        waitForClientToolResult
      );
      emitEvent?.({
        type: "action:end",
        id: call.id,
        name: call.name,
        result: response
      });
      results.push({
        toolCallId: call.id,
        content: JSON.stringify(response),
        success: response.success,
        error: response.error
      });
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : "Tool execution failed";
      emitEvent?.({
        type: "action:end",
        id: call.id,
        name: call.name,
        error: errorMessage
      });
      results.push(toErrorResult(call.id, errorMessage));
    }
  }
  return results;
}

/**
 * Route a single tool call to its handler: server tools prefer their inline
 * handler, then the executeServerTool fallback; client tools go through the
 * waitForClientToolResult round-trip. Missing handlers yield a failed result.
 */
async function dispatchToolCall(tool, call, executeServerTool, waitForClientToolResult) {
  if (tool.location === "server") {
    if (tool.handler) {
      return tool.handler(call.input);
    }
    if (executeServerTool) {
      return executeServerTool(call.name, call.input);
    }
    return {
      success: false,
      error: `No handler for server tool: ${call.name}`
    };
  }
  if (waitForClientToolResult) {
    return waitForClientToolResult(call.id, call.name, call.input);
  }
  return {
    success: false,
    error: `No client tool handler for: ${call.name}`
  };
}
4169
+
4170
// Public API of @yourgpt/llm-sdk: provider adapters and factories, message/
// tool formatters, transport helpers (SSE, Hono/Express/Next/Node), the
// provider registry, and the agent loop.
exports.AnthropicAdapter = AnthropicAdapter;
exports.AzureAdapter = AzureAdapter;
exports.DEFAULT_MAX_ITERATIONS = DEFAULT_MAX_ITERATIONS;
exports.GoogleAdapter = GoogleAdapter;
exports.GroqAdapter = GroqAdapter;
exports.OllamaAdapter = OllamaAdapter;
exports.OpenAIAdapter = OpenAIAdapter;
exports.Runtime = Runtime;
exports.XAIAdapter = XAIAdapter;
exports.anthropicFormatter = anthropicFormatter;
exports.createAnthropic = createAnthropic;
exports.createAnthropicAdapter = createAnthropicAdapter;
exports.createAzure = createAzure;
exports.createAzureAdapter = createAzureAdapter;
exports.createEventStream = createEventStream;
exports.createExpressMiddleware = createExpressMiddleware;
exports.createGoogle = createGoogle;
exports.createGoogleAdapter = createGoogleAdapter;
exports.createGroq = createGroq;
exports.createGroqAdapter = createGroqAdapter;
exports.createHonoApp = createHonoApp;
exports.createNextHandler = createNextHandler;
exports.createNodeHandler = createNodeHandler;
exports.createOllama = createOllama;
exports.createOllamaAdapter = createOllamaAdapter;
exports.createOpenAI = createOpenAI;
exports.createOpenAIAdapter = createOpenAIAdapter;
exports.createRuntime = createRuntime;
exports.createSSEHeaders = createSSEHeaders;
exports.createSSEResponse = createSSEResponse;
exports.createXAI = createXAI;
exports.createXAIAdapter = createXAIAdapter;
exports.formatSSEData = formatSSEData;
exports.geminiFormatter = geminiFormatter;
exports.getAvailableProviders = getAvailableProviders;
exports.getFormatter = getFormatter;
exports.getModelCapabilities = getModelCapabilities;
exports.getProvider = getProvider;
exports.getSupportedProviders = getSupportedProviders;
exports.hasProvider = hasProvider;
exports.isProviderSupported = isProviderSupported;
exports.listProviders = listProviders;
exports.openaiFormatter = openaiFormatter;
exports.registerProvider = registerProvider;
exports.runAgentLoop = runAgentLoop;
// Deduplicated: the bundle emitted this sourceMappingURL directive twice;
// tooling only honors the final one, so a single directive is kept.
//# sourceMappingURL=index.js.map