@yourgpt/llm-sdk 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs ADDED
@@ -0,0 +1,4170 @@
1
+ import { generateMessageId, generateToolCallId, createMessage } from '@yourgpt/copilot-sdk/core';
2
+ import { Hono } from 'hono';
3
+ import { cors } from 'hono/cors';
4
+
5
+ // src/server/runtime.ts
6
+
7
+ // src/adapters/base.ts
8
/**
 * Normalize chat history into the plain OpenAI-style message shape used
 * by the simpler providers: an optional leading system message, then one
 * entry per input message with content coerced to a string.
 */
function formatMessages(messages, systemPrompt) {
  const result = systemPrompt
    ? [{ role: "system", content: systemPrompt }]
    : [];
  for (const msg of messages) {
    result.push({ role: msg.role, content: msg.content ?? "" });
  }
  return result;
}
21
/**
 * Recursively translate an action parameter definition into a JSON Schema
 * fragment: copies type/description/enum and descends into array `items`
 * and object `properties`.
 */
function parameterToJsonSchema(param) {
  const schema = { type: param.type };
  if (param.description) {
    schema.description = param.description;
  }
  if (param.enum) {
    schema.enum = param.enum;
  }
  if (param.type === "array" && param.items) {
    schema.items = parameterToJsonSchema(param.items);
  }
  if (param.type === "object" && param.properties) {
    const properties = {};
    for (const [key, prop] of Object.entries(param.properties)) {
      properties[key] = parameterToJsonSchema(prop);
    }
    schema.properties = properties;
  }
  return schema;
}
48
/**
 * Convert action definitions into OpenAI-style `tools` entries
 * (type "function" with a JSON Schema `parameters` object whose
 * `required` list is derived from each parameter's `required` flag).
 */
function formatTools(actions) {
  return actions.map((action) => {
    const properties = {};
    const required = [];
    if (action.parameters) {
      for (const [key, param] of Object.entries(action.parameters)) {
        properties[key] = parameterToJsonSchema(param);
        if (param.required) {
          required.push(key);
        }
      }
    }
    return {
      type: "function",
      function: {
        name: action.name,
        description: action.description,
        parameters: {
          type: "object",
          properties,
          required
        }
      }
    };
  });
}
67
/** True when the message metadata carries at least one image attachment. */
function hasImageAttachments(message) {
  const attachments = message.metadata?.attachments;
  if (attachments == null) return false;
  return attachments.some((a) => a.type === "image");
}
71
/**
 * True when the message carries an attachment Anthropic can render as a
 * content block: any image, or a file attachment that is a PDF.
 */
function hasMediaAttachments(message) {
  const attachments = message.metadata?.attachments;
  if (attachments == null) return false;
  return attachments.some(
    (a) => a.type === "image" || (a.type === "file" && a.mimeType === "application/pdf")
  );
}
77
/**
 * Convert an image attachment into an Anthropic `image` content block.
 * A URL source is preferred; otherwise inline base64 is used, with any
 * leading data-URL prefix stripped. Returns null for non-image
 * attachments or when no payload is available.
 */
function attachmentToAnthropicImage(attachment) {
  if (attachment.type !== "image") return null;
  if (attachment.url) {
    return {
      type: "image",
      source: { type: "url", url: attachment.url }
    };
  }
  if (!attachment.data) return null;
  // Strip a "data:<mime>;base64," prefix if present; keep raw base64 as-is.
  const commaIndex = attachment.data.startsWith("data:")
    ? attachment.data.indexOf(",")
    : -1;
  const base64Data =
    commaIndex === -1 ? attachment.data : attachment.data.slice(commaIndex + 1);
  return {
    type: "image",
    source: {
      type: "base64",
      media_type: attachment.mimeType || "image/png",
      data: base64Data
    }
  };
}
105
/**
 * Convert an image attachment into an OpenAI `image_url` content part.
 * Uses the URL when present; otherwise wraps inline base64 in a data URL
 * (unless it already is one). Returns null for non-images or when no
 * payload is available.
 */
function attachmentToOpenAIImage(attachment) {
  if (attachment.type !== "image") return null;
  let imageUrl = attachment.url;
  if (!imageUrl) {
    if (!attachment.data) return null;
    imageUrl = attachment.data.startsWith("data:")
      ? attachment.data
      : `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
  }
  return {
    type: "image_url",
    image_url: {
      url: imageUrl,
      detail: "auto"
    }
  };
}
123
/**
 * Convert a PDF file attachment into an Anthropic `document` content
 * block (URL source preferred, inline base64 otherwise, data-URL prefix
 * stripped). Returns null for anything that is not a PDF file attachment
 * or has no payload.
 */
function attachmentToAnthropicDocument(attachment) {
  const isPdf =
    attachment.type === "file" && attachment.mimeType === "application/pdf";
  if (!isPdf) return null;
  if (attachment.url) {
    return {
      type: "document",
      source: { type: "url", url: attachment.url }
    };
  }
  if (!attachment.data) return null;
  // Strip a "data:application/pdf;base64," prefix if present.
  const commaIndex = attachment.data.startsWith("data:")
    ? attachment.data.indexOf(",")
    : -1;
  const base64Data =
    commaIndex === -1 ? attachment.data : attachment.data.slice(commaIndex + 1);
  return {
    type: "document",
    source: {
      type: "base64",
      media_type: "application/pdf",
      data: base64Data
    }
  };
}
153
/**
 * Build the Anthropic content for a user message: a plain string when
 * there are no renderable attachments, otherwise an array of image /
 * document blocks followed by a trailing text block (if any text).
 */
function messageToAnthropicContent(message) {
  const content = message.content ?? "";
  if (!hasMediaAttachments(message)) {
    return content;
  }
  const blocks = [];
  const attachments = message.metadata?.attachments;
  if (attachments) {
    for (const attachment of attachments) {
      // Each attachment maps to at most one block: image takes priority,
      // then PDF document; anything else is skipped.
      const block =
        attachmentToAnthropicImage(attachment) ??
        attachmentToAnthropicDocument(attachment);
      if (block) {
        blocks.push(block);
      }
    }
  }
  if (content) {
    blocks.push({ type: "text", text: content });
  }
  return blocks;
}
178
/**
 * Build the OpenAI content for a user message: a plain string when there
 * are no image attachments, otherwise a multimodal array with the text
 * part first followed by one image_url part per image attachment.
 */
function messageToOpenAIContent(message) {
  const content = message.content ?? "";
  if (!hasImageAttachments(message)) {
    return content;
  }
  const blocks = content ? [{ type: "text", text: content }] : [];
  const attachments = message.metadata?.attachments;
  if (attachments) {
    for (const attachment of attachments) {
      const imageBlock = attachmentToOpenAIImage(attachment);
      if (imageBlock) {
        blocks.push(imageBlock);
      }
    }
  }
  return blocks;
}
198
/**
 * Convert internal chat history into Anthropic Messages API shape:
 * - system messages are dropped (Anthropic takes `system` separately);
 * - assistant text/tool_calls become text / tool_use content blocks;
 * - consecutive tool-result messages are batched into a single user turn
 *   of tool_result blocks, as the Anthropic API requires;
 * - user messages go through messageToAnthropicContent (attachments).
 *
 * Fixes:
 * - `systemPrompt` was previously accepted but ignored (`system` was
 *   always returned as ""); it is now propagated. The only in-file
 *   caller discards `.system`, so this is backward-compatible.
 * - `JSON.parse` of tool-call arguments is now guarded (falls back to
 *   `{}`), matching AnthropicAdapter.convertToAnthropicMessages.
 *
 * @returns {{system: string, messages: Array}} system string plus the
 *   converted message list.
 */
function formatMessagesForAnthropic(messages, systemPrompt) {
  const formatted = [];
  for (let i = 0; i < messages.length; i++) {
    const msg = messages[i];
    if (msg.role === "system") continue;
    if (msg.role === "assistant") {
      const content = [];
      if (msg.content) {
        content.push({ type: "text", text: msg.content });
      }
      if (msg.tool_calls && msg.tool_calls.length > 0) {
        for (const tc of msg.tool_calls) {
          let input = {};
          try {
            input = JSON.parse(tc.function.arguments);
          } catch {
            // Malformed or truncated tool arguments: send {} rather than throw.
          }
          content.push({
            type: "tool_use",
            id: tc.id,
            name: tc.function.name,
            input
          });
        }
      }
      formatted.push({
        role: "assistant",
        // Collapse a lone text block back to a plain string.
        content: content.length === 1 && content[0].type === "text" ? content[0].text : content
      });
    } else if (msg.role === "tool" && msg.tool_call_id) {
      const toolResults = [
        {
          type: "tool_result",
          tool_use_id: msg.tool_call_id,
          content: msg.content ?? ""
        }
      ];
      // Greedily absorb the following run of tool messages into this turn.
      while (i + 1 < messages.length && messages[i + 1].role === "tool") {
        i++;
        const nextTool = messages[i];
        if (nextTool.tool_call_id) {
          toolResults.push({
            type: "tool_result",
            tool_use_id: nextTool.tool_call_id,
            content: nextTool.content ?? ""
          });
        }
      }
      formatted.push({
        role: "user",
        content: toolResults
      });
    } else if (msg.role === "user") {
      formatted.push({
        role: "user",
        content: messageToAnthropicContent(msg)
      });
    }
  }
  return {
    system: systemPrompt ?? "",
    messages: formatted
  };
}
257
/**
 * Convert internal chat history into OpenAI chat-completions shape:
 * optional leading system message, user messages via
 * messageToOpenAIContent (multimodal), assistant messages carrying
 * tool_calls through, and tool messages with their tool_call_id.
 */
function formatMessagesForOpenAI(messages, systemPrompt) {
  const formatted = [];
  if (systemPrompt) {
    formatted.push({ role: "system", content: systemPrompt });
  }
  for (const msg of messages) {
    switch (msg.role) {
      case "system":
        formatted.push({ role: "system", content: msg.content ?? "" });
        break;
      case "user":
        formatted.push({ role: "user", content: messageToOpenAIContent(msg) });
        break;
      case "assistant": {
        const assistantMsg = {
          role: "assistant",
          content: msg.content
        };
        if (msg.tool_calls && msg.tool_calls.length > 0) {
          assistantMsg.tool_calls = msg.tool_calls;
        }
        formatted.push(assistantMsg);
        break;
      }
      case "tool":
        // Tool results without an id cannot be correlated; drop them.
        if (msg.tool_call_id) {
          formatted.push({
            role: "tool",
            content: msg.content ?? "",
            tool_call_id: msg.tool_call_id
          });
        }
        break;
    }
  }
  return formatted;
}
289
// OpenAI chat-completions adapter: lazily loads the `openai` SDK and
// translates its streaming deltas into the SDK's provider-agnostic
// events (message:start/delta/end, action:start/args, done, error).
var OpenAIAdapter = class {
  constructor(config) {
    this.provider = "openai";
    this.config = config;
    // Default model used when neither the per-request config nor the
    // adapter config names one.
    this.model = config.model || "gpt-4o";
  }
  // Lazily create and cache the OpenAI client. The dynamic import keeps
  // `openai` an optional dependency until this adapter is first used.
  async getClient() {
    if (!this.client) {
      const { default: OpenAI } = await import('openai');
      this.client = new OpenAI({
        apiKey: this.config.apiKey,
        baseURL: this.config.baseUrl
      });
    }
    return this.client;
  }
  /**
   * Stream a chat completion as provider-agnostic events.
   * Two input paths:
   * - request.rawMessages: passed through nearly as-is, but image
   *   attachments are converted to image_url parts and a system prompt is
   *   prepended only if the raw history has none;
   * - request.messages: converted via formatMessagesForOpenAI.
   * Tool-call deltas are accumulated per tool call and flushed as a single
   * action:args event when the next call starts or on finish_reason.
   */
  async *stream(request) {
    const client = await this.getClient();
    let messages;
    if (request.rawMessages && request.rawMessages.length > 0) {
      const processedMessages = request.rawMessages.map((msg) => {
        const hasAttachments = msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0;
        if (hasAttachments) {
          // Rebuild content as a multimodal array: text first, then images.
          const content = [];
          if (msg.content) {
            content.push({ type: "text", text: msg.content });
          }
          for (const attachment of msg.attachments) {
            if (attachment.type === "image") {
              let imageUrl;
              if (attachment.url) {
                imageUrl = attachment.url;
              } else if (attachment.data) {
                // Wrap bare base64 in a data URL; pass data URLs through.
                imageUrl = attachment.data.startsWith("data:") ? attachment.data : `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
              } else {
                continue;
              }
              content.push({
                type: "image_url",
                image_url: { url: imageUrl, detail: "auto" }
              });
            }
          }
          // Strip `attachments` so the payload matches the OpenAI schema.
          return { ...msg, content, attachments: void 0 };
        }
        return msg;
      });
      if (request.systemPrompt) {
        const hasSystem = processedMessages.some((m) => m.role === "system");
        if (!hasSystem) {
          messages = [
            { role: "system", content: request.systemPrompt },
            ...processedMessages
          ];
        } else {
          messages = processedMessages;
        }
      } else {
        messages = processedMessages;
      }
    } else {
      messages = formatMessagesForOpenAI(
        request.messages,
        request.systemPrompt
      );
    }
    const tools = request.actions?.length ? formatTools(request.actions) : void 0;
    const messageId = generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const stream = await client.chat.completions.create({
        model: request.config?.model || this.model,
        messages,
        tools,
        // Per-request settings win over adapter-level defaults.
        temperature: request.config?.temperature ?? this.config.temperature,
        max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
        stream: true
      });
      // Arguments for the in-flight tool call, accumulated across deltas.
      let currentToolCall = null;
      for await (const chunk of stream) {
        // Cooperative cancellation: checked between chunks only (the
        // signal is not passed to the SDK call itself).
        if (request.signal?.aborted) {
          break;
        }
        const delta = chunk.choices[0]?.delta;
        if (delta?.content) {
          yield { type: "message:delta", content: delta.content };
        }
        if (delta?.tool_calls) {
          for (const toolCall of delta.tool_calls) {
            if (toolCall.id) {
              // A new id means the previous call is complete: flush its args.
              if (currentToolCall) {
                yield {
                  type: "action:args",
                  id: currentToolCall.id,
                  args: currentToolCall.arguments
                };
              }
              currentToolCall = {
                id: toolCall.id,
                name: toolCall.function?.name || "",
                arguments: toolCall.function?.arguments || ""
              };
              yield {
                type: "action:start",
                id: currentToolCall.id,
                name: currentToolCall.name
              };
            } else if (currentToolCall && toolCall.function?.arguments) {
              currentToolCall.arguments += toolCall.function.arguments;
            }
          }
        }
        if (chunk.choices[0]?.finish_reason) {
          // NOTE(review): currentToolCall is not reset after this flush, so
          // a second finish_reason chunk would re-emit action:args — confirm
          // the API emits finish_reason only once per choice.
          if (currentToolCall) {
            yield {
              type: "action:args",
              id: currentToolCall.id,
              args: currentToolCall.arguments
            };
          }
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      yield {
        type: "error",
        message: error instanceof Error ? error.message : "Unknown error",
        code: "OPENAI_ERROR"
      };
    }
  }
};
422
/** Factory for the OpenAI adapter (mirrors the other create* helpers). */
function createOpenAIAdapter(config) {
  const adapter = new OpenAIAdapter(config);
  return adapter;
}
425
// Anthropic adapter: lazily loads @anthropic-ai/sdk and maps the Messages
// API (content blocks, tool_use/tool_result, extended thinking) onto the
// SDK's provider-agnostic stream events.
var AnthropicAdapter = class {
  constructor(config) {
    this.provider = "anthropic";
    this.config = config;
    // Default model when neither request nor adapter config names one.
    this.model = config.model || "claude-3-5-sonnet-latest";
  }
  // Lazily create and cache the Anthropic client (dynamic import keeps the
  // SDK an optional dependency until first use).
  async getClient() {
    if (!this.client) {
      const { default: Anthropic } = await import('@anthropic-ai/sdk');
      this.client = new Anthropic({
        apiKey: this.config.apiKey
      });
    }
    return this.client;
  }
  /**
   * Convert OpenAI-style messages to Anthropic format.
   *
   * OpenAI format:
   * - { role: "assistant", content: "...", tool_calls: [...] }
   * - { role: "tool", tool_call_id: "...", content: "..." }
   *
   * Anthropic format:
   * - { role: "assistant", content: [{ type: "text", text: "..." }, { type: "tool_use", id: "...", name: "...", input: {...} }] }
   * - { role: "user", content: [{ type: "tool_result", tool_use_id: "...", content: "..." }] }
   *
   * Consecutive tool results are buffered in `pendingToolResults` and
   * flushed as a single user turn before the next assistant/user message
   * (and once more at the end of the history).
   */
  convertToAnthropicMessages(rawMessages) {
    const messages = [];
    const pendingToolResults = [];
    for (const msg of rawMessages) {
      // System content is carried separately via the `system` option.
      if (msg.role === "system") continue;
      if (msg.role === "assistant") {
        // Flush buffered tool results before the assistant's turn.
        if (pendingToolResults.length > 0) {
          messages.push({
            role: "user",
            content: pendingToolResults.map((tr) => ({
              type: "tool_result",
              tool_use_id: tr.tool_use_id,
              content: tr.content
            }))
          });
          pendingToolResults.length = 0;
        }
        const content = [];
        // Skip whitespace-only assistant text; Anthropic rejects empty blocks.
        if (msg.content && typeof msg.content === "string" && msg.content.trim()) {
          content.push({ type: "text", text: msg.content });
        }
        const toolCalls = msg.tool_calls;
        if (toolCalls && toolCalls.length > 0) {
          for (const tc of toolCalls) {
            let input = {};
            try {
              input = JSON.parse(tc.function.arguments);
            } catch {
              // Malformed arguments: send an empty input rather than throw.
            }
            content.push({
              type: "tool_use",
              id: tc.id,
              name: tc.function.name,
              input
            });
          }
        }
        if (content.length > 0) {
          messages.push({ role: "assistant", content });
        }
      } else if (msg.role === "tool") {
        // NOTE(review): tool_call_id is not validated here (unlike
        // formatMessagesForAnthropic) — a missing id would produce a
        // tool_result with tool_use_id undefined; confirm upstream always
        // sets it.
        pendingToolResults.push({
          tool_use_id: msg.tool_call_id,
          content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content)
        });
      } else if (msg.role === "user") {
        // Flush buffered tool results before the user's turn.
        if (pendingToolResults.length > 0) {
          messages.push({
            role: "user",
            content: pendingToolResults.map((tr) => ({
              type: "tool_result",
              tool_use_id: tr.tool_use_id,
              content: tr.content
            }))
          });
          pendingToolResults.length = 0;
        }
        if (msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0) {
          // Multimodal user turn: text block first, then image/PDF blocks.
          const content = [];
          if (msg.content && typeof msg.content === "string") {
            content.push({ type: "text", text: msg.content });
          }
          for (const attachment of msg.attachments) {
            if (attachment.type === "image") {
              if (attachment.url) {
                content.push({
                  type: "image",
                  source: {
                    type: "url",
                    url: attachment.url
                  }
                });
              } else if (attachment.data) {
                // Strip a data-URL prefix; the API wants bare base64.
                let base64Data = attachment.data;
                if (base64Data.startsWith("data:")) {
                  const commaIndex = base64Data.indexOf(",");
                  if (commaIndex !== -1) {
                    base64Data = base64Data.slice(commaIndex + 1);
                  }
                }
                content.push({
                  type: "image",
                  source: {
                    type: "base64",
                    media_type: attachment.mimeType || "image/png",
                    data: base64Data
                  }
                });
              }
            } else if (attachment.type === "file" && attachment.mimeType === "application/pdf") {
              if (attachment.url) {
                content.push({
                  type: "document",
                  source: {
                    type: "url",
                    url: attachment.url
                  }
                });
              } else if (attachment.data) {
                let base64Data = attachment.data;
                if (base64Data.startsWith("data:")) {
                  const commaIndex = base64Data.indexOf(",");
                  if (commaIndex !== -1) {
                    base64Data = base64Data.slice(commaIndex + 1);
                  }
                }
                content.push({
                  type: "document",
                  source: {
                    type: "base64",
                    media_type: "application/pdf",
                    data: base64Data
                  }
                });
              }
            }
          }
          messages.push({ role: "user", content });
        } else {
          messages.push({
            role: "user",
            content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content)
          });
        }
      }
    }
    // Flush any tool results left at the end of the history.
    if (pendingToolResults.length > 0) {
      messages.push({
        role: "user",
        content: pendingToolResults.map((tr) => ({
          type: "tool_result",
          tool_use_id: tr.tool_use_id,
          content: tr.content
        }))
      });
    }
    return messages;
  }
  /**
   * Build common request options for both streaming and non-streaming.
   * Picks rawMessages (pass-through conversion) over messages, maps action
   * definitions to Anthropic `tools` with a flat input_schema, and enables
   * extended thinking when configured.
   */
  buildRequestOptions(request) {
    const systemMessage = request.systemPrompt || "";
    let messages;
    if (request.rawMessages && request.rawMessages.length > 0) {
      messages = this.convertToAnthropicMessages(request.rawMessages);
    } else {
      // Only the converted messages are used; `system` comes from
      // request.systemPrompt above.
      const formatted = formatMessagesForAnthropic(request.messages);
      messages = formatted.messages;
    }
    const tools = request.actions?.map((action) => ({
      name: action.name,
      description: action.description,
      input_schema: {
        type: "object",
        // NOTE(review): only type/description/enum are copied per parameter;
        // nested array `items` / object `properties` are not expanded here
        // (formatTools does) — confirm whether nested schemas are needed.
        properties: action.parameters ? Object.fromEntries(
          Object.entries(action.parameters).map(([key, param]) => [
            key,
            {
              type: param.type,
              description: param.description,
              enum: param.enum
            }
          ])
        ) : {},
        required: action.parameters ? Object.entries(action.parameters).filter(([, param]) => param.required).map(([key]) => key) : []
      }
    }));
    const options = {
      model: request.config?.model || this.model,
      // Anthropic requires max_tokens; fall back to 4096.
      max_tokens: request.config?.maxTokens || this.config.maxTokens || 4096,
      system: systemMessage,
      messages,
      tools: tools?.length ? tools : void 0
    };
    if (this.config.thinking?.type === "enabled") {
      options.thinking = {
        type: "enabled",
        budget_tokens: this.config.thinking.budgetTokens || 1e4
      };
    }
    return { options, messages };
  }
  /**
   * Non-streaming completion (for debugging/comparison with original studio-ai).
   * Flattens the response content blocks into { content, toolCalls,
   * thinking, rawResponse }.
   */
  async complete(request) {
    const client = await this.getClient();
    const { options } = this.buildRequestOptions(request);
    const nonStreamingOptions = {
      ...options,
      stream: false
    };
    try {
      const response = await client.messages.create(nonStreamingOptions);
      let content = "";
      let thinking = "";
      const toolCalls = [];
      for (const block of response.content) {
        if (block.type === "text") {
          content += block.text;
        } else if (block.type === "thinking") {
          thinking += block.thinking;
        } else if (block.type === "tool_use") {
          toolCalls.push({
            id: block.id,
            name: block.name,
            args: block.input
          });
        }
      }
      return {
        content,
        toolCalls,
        thinking: thinking || void 0,
        rawResponse: response
      };
    } catch (error) {
      // Rethrown unchanged; kept as an explicit hook for future logging.
      throw error;
    }
  }
  /**
   * Stream a completion as provider-agnostic events. tool_use input JSON
   * arrives fragmented (input_json_delta) and is accumulated until the
   * content block closes; thinking blocks are bracketed by
   * thinking:start/end events.
   */
  async *stream(request) {
    const client = await this.getClient();
    const { options } = this.buildRequestOptions(request);
    const messageId = generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const stream = await client.messages.stream(options);
      let currentToolUse = null;
      let isInThinkingBlock = false;
      for await (const event of stream) {
        // Cooperative cancellation between events.
        if (request.signal?.aborted) {
          break;
        }
        switch (event.type) {
          case "content_block_start":
            if (event.content_block.type === "tool_use") {
              currentToolUse = {
                id: event.content_block.id,
                name: event.content_block.name,
                input: ""
              };
              yield {
                type: "action:start",
                id: currentToolUse.id,
                name: currentToolUse.name
              };
            } else if (event.content_block.type === "thinking") {
              isInThinkingBlock = true;
              yield { type: "thinking:start" };
            }
            break;
          case "content_block_delta":
            if (event.delta.type === "text_delta") {
              yield { type: "message:delta", content: event.delta.text };
            } else if (event.delta.type === "thinking_delta") {
              yield { type: "thinking:delta", content: event.delta.thinking };
            } else if (event.delta.type === "input_json_delta" && currentToolUse) {
              // Partial JSON fragments; flushed on content_block_stop.
              currentToolUse.input += event.delta.partial_json;
            }
            break;
          case "content_block_stop":
            if (currentToolUse) {
              yield {
                type: "action:args",
                id: currentToolUse.id,
                args: currentToolUse.input
              };
              currentToolUse = null;
            }
            if (isInThinkingBlock) {
              yield { type: "thinking:end" };
              isInThinkingBlock = false;
            }
            break;
          case "message_stop":
            break;
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      yield {
        type: "error",
        message: error instanceof Error ? error.message : "Unknown error",
        code: "ANTHROPIC_ERROR"
      };
    }
  }
};
741
/** Factory for the Anthropic adapter (mirrors the other create* helpers). */
function createAnthropicAdapter(config) {
  const adapter = new AnthropicAdapter(config);
  return adapter;
}
744
// Groq adapter: calls Groq's OpenAI-compatible chat-completions endpoint
// directly over fetch (no SDK) and hand-parses the SSE stream.
var GroqAdapter = class {
  constructor(config) {
    this.provider = "groq";
    this.config = config;
    // Default model when neither request nor adapter config names one.
    this.model = config.model || "llama-3.1-70b-versatile";
  }
  /**
   * Stream a chat completion as provider-agnostic events
   * (message:start/delta/end, action:start/args, done, error).
   * request.signal is passed to fetch, so aborting cancels the HTTP
   * request; AbortError is reported as a normal `done`.
   */
  async *stream(request) {
    const messages = formatMessages(request.messages, request.systemPrompt);
    const tools = request.actions?.length ? formatTools(request.actions) : void 0;
    const messageId = generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const response = await fetch(
        "https://api.groq.com/openai/v1/chat/completions",
        {
          method: "POST",
          headers: {
            "Content-Type": "application/json",
            Authorization: `Bearer ${this.config.apiKey}`
          },
          body: JSON.stringify({
            model: request.config?.model || this.model,
            messages,
            tools,
            // Per-request settings win over adapter-level defaults.
            temperature: request.config?.temperature ?? this.config.temperature,
            max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
            stream: true
          }),
          signal: request.signal
        }
      );
      if (!response.ok) {
        throw new Error(`Groq API error: ${response.status}`);
      }
      if (!response.body) {
        throw new Error("No response body");
      }
      // Manual SSE parsing: accumulate decoded bytes, split on newlines,
      // and keep the trailing partial line in `buffer` for the next read.
      const reader = response.body.getReader();
      const decoder = new TextDecoder();
      let buffer = "";
      // Arguments for the in-flight tool call, accumulated across deltas.
      let currentToolCall = null;
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        buffer = lines.pop() || "";
        for (const line of lines) {
          if (!line.startsWith("data: ")) continue;
          const data = line.slice(6).trim();
          if (data === "[DONE]") continue;
          try {
            const chunk = JSON.parse(data);
            const delta = chunk.choices?.[0]?.delta;
            if (delta?.content) {
              yield { type: "message:delta", content: delta.content };
            }
            if (delta?.tool_calls) {
              for (const toolCall of delta.tool_calls) {
                if (toolCall.id) {
                  // A new id means the previous tool call is complete:
                  // flush its accumulated arguments first.
                  if (currentToolCall) {
                    yield {
                      type: "action:args",
                      id: currentToolCall.id,
                      args: currentToolCall.arguments
                    };
                  }
                  currentToolCall = {
                    id: toolCall.id,
                    name: toolCall.function?.name || "",
                    arguments: toolCall.function?.arguments || ""
                  };
                  yield {
                    type: "action:start",
                    id: currentToolCall.id,
                    name: currentToolCall.name
                  };
                } else if (currentToolCall && toolCall.function?.arguments) {
                  currentToolCall.arguments += toolCall.function.arguments;
                }
              }
            }
            // NOTE(review): currentToolCall is not cleared after this
            // flush, so a second finish_reason chunk would re-emit
            // action:args — confirm the API sends finish_reason once.
            if (chunk.choices?.[0]?.finish_reason && currentToolCall) {
              yield {
                type: "action:args",
                id: currentToolCall.id,
                args: currentToolCall.arguments
              };
            }
          } catch {
            // Ignore malformed/partial SSE data lines (best-effort stream).
          }
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      if (error.name === "AbortError") {
        // Caller-initiated cancellation is a normal completion.
        yield { type: "done" };
      } else {
        yield {
          type: "error",
          message: error instanceof Error ? error.message : "Unknown error",
          code: "GROQ_ERROR"
        };
      }
    }
  }
};
852
/** Factory for the Groq adapter (mirrors the other create* helpers). */
function createGroqAdapter(config) {
  const adapter = new GroqAdapter(config);
  return adapter;
}
855
// Ollama adapter: calls a local Ollama server's /api/chat endpoint over
// fetch and parses its NDJSON (one JSON object per line) stream.
// No tool-call support — only text deltas are emitted.
var OllamaAdapter = class {
  constructor(config = {}) {
    this.provider = "ollama";
    this.config = config;
    // Defaults for a stock local install.
    this.model = config.model || "llama3";
    this.baseUrl = config.baseUrl || "http://localhost:11434";
  }
  /**
   * Stream a chat completion as provider-agnostic events
   * (message:start/delta/end, done, error). request.signal is passed to
   * fetch; AbortError is reported as a normal `done`.
   */
  async *stream(request) {
    const messages = formatMessages(request.messages, request.systemPrompt);
    const messageId = generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const response = await fetch(`${this.baseUrl}/api/chat`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json"
        },
        body: JSON.stringify({
          model: request.config?.model || this.model,
          messages,
          stream: true,
          options: {
            // Per-request settings win over adapter-level defaults.
            temperature: request.config?.temperature ?? this.config.temperature,
            num_predict: request.config?.maxTokens ?? this.config.maxTokens
          }
        }),
        signal: request.signal
      });
      if (!response.ok) {
        throw new Error(`Ollama API error: ${response.status}`);
      }
      if (!response.body) {
        throw new Error("No response body");
      }
      // NDJSON parsing: accumulate decoded bytes, split on newlines, and
      // keep the trailing partial line in `buffer` for the next read.
      const reader = response.body.getReader();
      const decoder = new TextDecoder();
      let buffer = "";
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        buffer = lines.pop() || "";
        for (const line of lines) {
          if (!line.trim()) continue;
          try {
            const chunk = JSON.parse(line);
            if (chunk.message?.content) {
              yield { type: "message:delta", content: chunk.message.content };
            }
            if (chunk.done) {
              // NOTE(review): this only breaks the per-line loop, not the
              // outer read loop — the stream keeps being read until the
              // reader reports done. Harmless if the server closes after
              // the final chunk, but worth confirming.
              break;
            }
          } catch {
            // Ignore malformed/partial JSON lines (best-effort stream).
          }
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      if (error.name === "AbortError") {
        // Caller-initiated cancellation is a normal completion.
        yield { type: "done" };
      } else {
        yield {
          type: "error",
          message: error instanceof Error ? error.message : "Unknown error",
          code: "OLLAMA_ERROR"
        };
      }
    }
  }
};
927
/** Factory for the Ollama adapter (mirrors the other create* helpers). */
function createOllamaAdapter(config) {
  const adapter = new OllamaAdapter(config);
  return adapter;
}
930
/**
 * Convert an image/audio/video attachment into a Gemini inlineData part.
 * Only inline base64 payloads are supported (URL-only attachments are
 * skipped with a warning); a data-URL prefix is stripped. Returns null
 * for unsupported attachment types or missing data.
 */
function attachmentToGeminiPart(attachment) {
  if (!attachment.data) {
    console.warn(
      "Gemini adapter: URL-based attachments not supported, skipping"
    );
    return null;
  }
  // Fallback MIME type per supported attachment kind.
  const fallbackMime =
    attachment.type === "image" ? "image/png"
    : attachment.type === "audio" ? "audio/mp3"
    : attachment.type === "video" ? "video/mp4"
    : null;
  if (!fallbackMime) return null;
  let data = attachment.data;
  if (data.startsWith("data:")) {
    const commaIndex = data.indexOf(",");
    if (commaIndex !== -1) {
      data = data.slice(commaIndex + 1);
    }
  }
  return {
    inlineData: {
      mimeType: attachment.mimeType || fallbackMime,
      data
    }
  };
}
969
/**
 * Convert one chat message into a Gemini Content entry.
 * - system messages: null (handled via systemInstruction by the caller);
 * - tool results: a user-role functionResponse part (JSON content parsed,
 *   non-JSON wrapped as { result });
 * - user/assistant: text part, then attachment parts, then one
 *   functionCall part per assistant tool call.
 * Returns null when no parts are produced.
 */
function messageToGeminiContent(msg) {
  if (msg.role === "system") return null;
  if (msg.role === "tool" && msg.tool_call_id) {
    let responseData;
    try {
      responseData = JSON.parse(msg.content || "{}");
    } catch {
      responseData = { result: msg.content || "" };
    }
    return {
      role: "user",
      parts: [
        {
          functionResponse: {
            name: msg.metadata?.toolName || "tool",
            response: responseData
          }
        }
      ]
    };
  }
  const parts = [];
  if (msg.content) {
    parts.push({ text: msg.content });
  }
  const attachments = msg.metadata?.attachments;
  if (Array.isArray(attachments)) {
    for (const attachment of attachments) {
      const part = attachmentToGeminiPart(attachment);
      if (part) {
        parts.push(part);
      }
    }
  }
  if (msg.role === "assistant" && msg.tool_calls && msg.tool_calls.length > 0) {
    for (const tc of msg.tool_calls) {
      let args = {};
      try {
        args = JSON.parse(tc.function.arguments);
      } catch {
        // Malformed arguments: send an empty args object rather than throw.
      }
      parts.push({
        functionCall: {
          name: tc.function.name,
          args
        }
      });
    }
  }
  if (parts.length === 0) return null;
  return {
    role: msg.role === "assistant" ? "model" : "user",
    parts
  };
}
1021
/**
 * Convert action definitions into a Gemini `tools` entry with one
 * functionDeclaration per action. Returns undefined when there are no
 * actions; actions without parameters get `parameters: undefined`.
 */
function formatToolsForGemini(actions) {
  if (!actions || actions.length === 0) return void 0;
  const functionDeclarations = actions.map((action) => {
    let parameters;
    if (action.parameters) {
      const properties = {};
      const required = [];
      for (const [key, param] of Object.entries(action.parameters)) {
        properties[key] = {
          type: param.type,
          description: param.description,
          enum: param.enum
        };
        if (param.required) {
          required.push(key);
        }
      }
      parameters = { type: "object", properties, required };
    }
    return {
      name: action.name,
      description: action.description,
      parameters
    };
  });
  return { functionDeclarations };
}
1044
// Adapter for Google Gemini models. The @google/generative-ai SDK is
// imported lazily inside getClient() so it stays an optional dependency.
var GoogleAdapter = class {
  constructor(config) {
    this.provider = "google";
    this.config = config;
    // Fallback model id when the config does not name one.
    this.model = config.model || "gemini-2.0-flash";
  }
  // Lazily create and cache the GoogleGenerativeAI client instance.
  async getClient() {
    if (!this.client) {
      const { GoogleGenerativeAI } = await import('@google/generative-ai');
      this.client = new GoogleGenerativeAI(this.config.apiKey);
    }
    return this.client;
  }
  /**
   * Stream a completion from Gemini, translating SDK chunks into the
   * runtime's event protocol (message:start/delta/end, action:start/args,
   * done, error).
   */
  async *stream(request) {
    const client = await this.getClient();
    const modelId = request.config?.model || this.model;
    const model = client.getGenerativeModel({
      model: modelId,
      safetySettings: this.config.safetySettings
    });
    let contents = [];
    let systemInstruction;
    // Build Gemini "contents" from rawMessages when present, otherwise from
    // the normalized messages. System messages are concatenated into a
    // single systemInstruction instead of being sent as turns.
    if (request.rawMessages && request.rawMessages.length > 0) {
      for (const msg of request.rawMessages) {
        if (msg.role === "system") {
          systemInstruction = (systemInstruction || "") + (msg.content || "");
          continue;
        }
        const content = messageToGeminiContent(msg);
        if (content) {
          contents.push(content);
        }
      }
      // Inline system messages win; systemPrompt is only a fallback here.
      if (request.systemPrompt && !systemInstruction) {
        systemInstruction = request.systemPrompt;
      }
    } else {
      for (const msg of request.messages) {
        if (msg.role === "system") {
          systemInstruction = (systemInstruction || "") + (msg.content || "");
          continue;
        }
        const content = messageToGeminiContent(msg);
        if (content) {
          contents.push(content);
        }
      }
      // In this branch systemPrompt overrides any inline system messages.
      if (request.systemPrompt) {
        systemInstruction = request.systemPrompt;
      }
    }
    // Ensure the conversation opens with a "user" turn by injecting an
    // empty one when needed (Gemini rejects model-first histories).
    if (contents.length === 0 || contents[0].role !== "user") {
      contents = [{ role: "user", parts: [{ text: "" }] }, ...contents];
    }
    // Merge adjacent same-role turns by concatenating their parts.
    const mergedContents = [];
    for (const content of contents) {
      const last = mergedContents[mergedContents.length - 1];
      if (last && last.role === content.role) {
        last.parts.push(...content.parts);
      } else {
        mergedContents.push({ ...content, parts: [...content.parts] });
      }
    }
    const tools = formatToolsForGemini(request.actions);
    const messageId = generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const chat = model.startChat({
        history: mergedContents.slice(0, -1),
        // All but the last message
        systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : void 0,
        tools: tools ? [tools] : void 0,
        generationConfig: {
          // Per-request config overrides adapter-level config.
          temperature: request.config?.temperature ?? this.config.temperature,
          maxOutputTokens: request.config?.maxTokens ?? this.config.maxTokens
        }
      });
      const lastMessage = mergedContents[mergedContents.length - 1];
      const result = await chat.sendMessageStream(lastMessage.parts);
      // Gemini delivers complete functionCall objects per part; buffer one at
      // a time and flush its args when the next call (or finish) arrives.
      let currentToolCall = null;
      for await (const chunk of result.stream) {
        if (request.signal?.aborted) {
          break;
        }
        const candidate = chunk.candidates?.[0];
        if (!candidate?.content?.parts) continue;
        for (const part of candidate.content.parts) {
          if ("text" in part && part.text) {
            yield { type: "message:delta", content: part.text };
          }
          if ("functionCall" in part && part.functionCall) {
            const fc = part.functionCall;
            const toolId = generateToolCallId();
            // Flush the previously buffered call before starting a new one.
            if (currentToolCall) {
              yield {
                type: "action:args",
                id: currentToolCall.id,
                args: JSON.stringify(currentToolCall.args)
              };
            }
            currentToolCall = {
              id: toolId,
              name: fc.name,
              args: fc.args || {}
            };
            yield {
              type: "action:start",
              id: toolId,
              name: fc.name
            };
          }
        }
        if (candidate.finishReason) {
          // NOTE(review): currentToolCall is flushed here but never reset to
          // null, so a duplicate action:args would be emitted if more chunks
          // arrived after a finishReason — confirm the SDK never does that.
          if (currentToolCall) {
            yield {
              type: "action:args",
              id: currentToolCall.id,
              args: JSON.stringify(currentToolCall.args)
            };
          }
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      yield {
        type: "error",
        message: error instanceof Error ? error.message : "Unknown error",
        code: "GOOGLE_ERROR"
      };
    }
  }
  /**
   * Non-streaming completion (optional, for debugging)
   *
   * Same message preparation as stream() (minus the rawMessages branch),
   * waits for the full response and returns { content, toolCalls, rawResponse }.
   */
  async complete(request) {
    const client = await this.getClient();
    const modelId = request.config?.model || this.model;
    const model = client.getGenerativeModel({
      model: modelId,
      safetySettings: this.config.safetySettings
    });
    let contents = [];
    let systemInstruction;
    for (const msg of request.messages) {
      if (msg.role === "system") {
        systemInstruction = (systemInstruction || "") + (msg.content || "");
        continue;
      }
      const content = messageToGeminiContent(msg);
      if (content) {
        contents.push(content);
      }
    }
    if (request.systemPrompt) {
      systemInstruction = request.systemPrompt;
    }
    // Same user-first and role-merging normalization as stream().
    if (contents.length === 0 || contents[0].role !== "user") {
      contents = [{ role: "user", parts: [{ text: "" }] }, ...contents];
    }
    const mergedContents = [];
    for (const content of contents) {
      const last = mergedContents[mergedContents.length - 1];
      if (last && last.role === content.role) {
        last.parts.push(...content.parts);
      } else {
        mergedContents.push({ ...content, parts: [...content.parts] });
      }
    }
    const tools = formatToolsForGemini(request.actions);
    const chat = model.startChat({
      history: mergedContents.slice(0, -1),
      systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : void 0,
      tools: tools ? [tools] : void 0,
      generationConfig: {
        temperature: request.config?.temperature ?? this.config.temperature,
        maxOutputTokens: request.config?.maxTokens ?? this.config.maxTokens
      }
    });
    const lastMessage = mergedContents[mergedContents.length - 1];
    const result = await chat.sendMessage(lastMessage.parts);
    const response = result.response;
    let textContent = "";
    const toolCalls = [];
    const candidate = response.candidates?.[0];
    if (candidate?.content?.parts) {
      for (const part of candidate.content.parts) {
        if ("text" in part && part.text) {
          textContent += part.text;
        }
        if ("functionCall" in part && part.functionCall) {
          toolCalls.push({
            id: generateToolCallId(),
            name: part.functionCall.name,
            args: part.functionCall.args || {}
          });
        }
      }
    }
    return {
      content: textContent,
      toolCalls,
      rawResponse: response
    };
  }
};
1250
/** Factory: build a Google Gemini adapter from the given config. */
function createGoogleAdapter(config) {
  const adapter = new GoogleAdapter(config);
  return adapter;
}
1253
// Default xAI (Grok) API endpoint — OpenAI-compatible; overridable via config.baseUrl.
var XAI_BASE_URL = "https://api.x.ai/v1";
// Adapter for xAI (Grok) models. xAI exposes an OpenAI-compatible API, so
// this reuses the lazily imported OpenAI client pointed at the xAI base URL.
var XAIAdapter = class {
  constructor(config) {
    this.provider = "xai";
    this.config = config;
    // Fallback model id when the config does not name one.
    this.model = config.model || "grok-2";
  }
  // Lazily create and cache an OpenAI client targeting the xAI endpoint.
  async getClient() {
    if (!this.client) {
      const { default: OpenAI } = await import('openai');
      this.client = new OpenAI({
        apiKey: this.config.apiKey,
        baseURL: this.config.baseUrl || XAI_BASE_URL
      });
    }
    return this.client;
  }
  /**
   * Stream a chat completion, translating OpenAI-style deltas into the
   * runtime's event protocol (message:start/delta/end, action:start/args,
   * done, error).
   */
  async *stream(request) {
    const client = await this.getClient();
    let messages;
    if (request.rawMessages && request.rawMessages.length > 0) {
      // Expand image attachments into OpenAI multi-part content
      // (text part + image_url parts).
      const processedMessages = request.rawMessages.map((msg) => {
        const hasAttachments = msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0;
        if (hasAttachments) {
          const content = [];
          if (msg.content) {
            content.push({ type: "text", text: msg.content });
          }
          for (const attachment of msg.attachments) {
            if (attachment.type === "image") {
              let imageUrl = attachment.data;
              // Wrap bare base64 payloads in a data: URL.
              if (!imageUrl.startsWith("data:")) {
                imageUrl = `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
              }
              content.push({
                type: "image_url",
                image_url: { url: imageUrl, detail: "auto" }
              });
            }
          }
          return { ...msg, content, attachments: void 0 };
        }
        return msg;
      });
      // Prepend systemPrompt only when no system message is already present.
      if (request.systemPrompt) {
        const hasSystem = processedMessages.some((m) => m.role === "system");
        if (!hasSystem) {
          messages = [
            { role: "system", content: request.systemPrompt },
            ...processedMessages
          ];
        } else {
          messages = processedMessages;
        }
      } else {
        messages = processedMessages;
      }
    } else {
      messages = formatMessagesForOpenAI(
        request.messages,
        request.systemPrompt
      );
    }
    const tools = request.actions?.length ? formatTools(request.actions) : void 0;
    const messageId = generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const stream = await client.chat.completions.create({
        model: request.config?.model || this.model,
        messages,
        tools,
        // Per-request config overrides adapter-level config.
        temperature: request.config?.temperature ?? this.config.temperature,
        max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
        stream: true
      });
      // Tool-call arguments arrive as string fragments; accumulate per call
      // and flush when a new call starts or the stream finishes.
      let currentToolCall = null;
      for await (const chunk of stream) {
        if (request.signal?.aborted) {
          break;
        }
        const delta = chunk.choices[0]?.delta;
        if (delta?.content) {
          yield { type: "message:delta", content: delta.content };
        }
        if (delta?.tool_calls) {
          for (const toolCall of delta.tool_calls) {
            if (toolCall.id) {
              // A new id marks the start of the next tool call; flush the
              // previous one first.
              if (currentToolCall) {
                yield {
                  type: "action:args",
                  id: currentToolCall.id,
                  args: currentToolCall.arguments
                };
              }
              currentToolCall = {
                id: toolCall.id,
                name: toolCall.function?.name || "",
                arguments: toolCall.function?.arguments || ""
              };
              yield {
                type: "action:start",
                id: currentToolCall.id,
                name: currentToolCall.name
              };
            } else if (currentToolCall && toolCall.function?.arguments) {
              currentToolCall.arguments += toolCall.function.arguments;
            }
          }
        }
        if (chunk.choices[0]?.finish_reason) {
          // NOTE(review): currentToolCall is not reset after this flush; a
          // duplicate action:args would be emitted if chunks followed the
          // finish_reason — confirm upstream chunk ordering.
          if (currentToolCall) {
            yield {
              type: "action:args",
              id: currentToolCall.id,
              args: currentToolCall.arguments
            };
          }
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      yield {
        type: "error",
        message: error instanceof Error ? error.message : "Unknown error",
        code: "XAI_ERROR"
      };
    }
  }
  /**
   * Non-streaming completion (optional, for debugging)
   *
   * Note: unlike stream(), this path does not expand attachments.
   */
  async complete(request) {
    const client = await this.getClient();
    let messages;
    if (request.rawMessages && request.rawMessages.length > 0) {
      messages = request.rawMessages;
      if (request.systemPrompt) {
        const hasSystem = messages.some((m) => m.role === "system");
        if (!hasSystem) {
          messages = [
            { role: "system", content: request.systemPrompt },
            ...messages
          ];
        }
      }
    } else {
      messages = formatMessagesForOpenAI(
        request.messages,
        request.systemPrompt
      );
    }
    const tools = request.actions?.length ? formatTools(request.actions) : void 0;
    const response = await client.chat.completions.create({
      model: request.config?.model || this.model,
      messages,
      tools,
      temperature: request.config?.temperature ?? this.config.temperature,
      max_tokens: request.config?.maxTokens ?? this.config.maxTokens
    });
    const choice = response.choices[0];
    const message = choice?.message;
    const toolCalls = (message?.tool_calls || []).map((tc) => ({
      id: tc.id,
      name: tc.function.name,
      args: JSON.parse(tc.function.arguments || "{}")
    }));
    return {
      content: message?.content || "",
      toolCalls,
      rawResponse: response
    };
  }
};
1427
/** Factory: build an xAI (Grok) adapter from the given config. */
function createXAIAdapter(config) {
  const adapter = new XAIAdapter(config);
  return adapter;
}
1430
// Default Azure OpenAI REST API version, used when config.apiVersion is unset.
var DEFAULT_API_VERSION = "2024-08-01-preview";
/**
 * Build the Azure OpenAI endpoint URL for a deployment.
 *
 * The API version is intentionally NOT part of this URL — it is passed
 * separately to the AzureOpenAI client. The previous signature declared an
 * unused third `apiVersion` parameter; dropping a trailing unused parameter
 * is backward-compatible in JavaScript (extra arguments are ignored).
 *
 * @param {string} resourceName  Azure OpenAI resource name.
 * @param {string} deploymentName  Model deployment name.
 * @returns {string} Base endpoint URL for the deployment.
 */
function buildAzureEndpoint(resourceName, deploymentName) {
  return `https://${resourceName}.openai.azure.com/openai/deployments/${deploymentName}`;
}
1434
// Adapter for Azure OpenAI. Azure addresses models by *deployment name*
// rather than model id; the openai package's AzureOpenAI client is imported
// lazily so it stays an optional dependency.
var AzureAdapter = class {
  constructor(config) {
    this.provider = "azure";
    this.config = config;
    // On Azure the "model" is the deployment name.
    this.model = config.deploymentName;
  }
  // Lazily create and cache an AzureOpenAI client for the configured
  // resource/deployment, defaulting the API version when unset.
  async getClient() {
    if (!this.client) {
      const { AzureOpenAI } = await import('openai');
      const apiVersion = this.config.apiVersion || DEFAULT_API_VERSION;
      const endpoint = this.config.baseUrl || buildAzureEndpoint(
        this.config.resourceName,
        this.config.deploymentName);
      this.client = new AzureOpenAI({
        apiKey: this.config.apiKey,
        endpoint,
        apiVersion,
        deployment: this.config.deploymentName
      });
    }
    return this.client;
  }
  /**
   * Stream a chat completion, translating OpenAI-style deltas into the
   * runtime's event protocol (message:start/delta/end, action:start/args,
   * done, error).
   */
  async *stream(request) {
    const client = await this.getClient();
    let messages;
    if (request.rawMessages && request.rawMessages.length > 0) {
      // Expand image attachments into OpenAI multi-part content
      // (text part + image_url parts).
      const processedMessages = request.rawMessages.map((msg) => {
        const hasAttachments = msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0;
        if (hasAttachments) {
          const content = [];
          if (msg.content) {
            content.push({ type: "text", text: msg.content });
          }
          for (const attachment of msg.attachments) {
            if (attachment.type === "image") {
              let imageUrl = attachment.data;
              // Wrap bare base64 payloads in a data: URL.
              if (!imageUrl.startsWith("data:")) {
                imageUrl = `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
              }
              content.push({
                type: "image_url",
                image_url: { url: imageUrl, detail: "auto" }
              });
            }
          }
          return { ...msg, content, attachments: void 0 };
        }
        return msg;
      });
      // Prepend systemPrompt only when no system message is already present.
      if (request.systemPrompt) {
        const hasSystem = processedMessages.some((m) => m.role === "system");
        if (!hasSystem) {
          messages = [
            { role: "system", content: request.systemPrompt },
            ...processedMessages
          ];
        } else {
          messages = processedMessages;
        }
      } else {
        messages = processedMessages;
      }
    } else {
      messages = formatMessagesForOpenAI(
        request.messages,
        request.systemPrompt
      );
    }
    const tools = request.actions?.length ? formatTools(request.actions) : void 0;
    const messageId = generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const stream = await client.chat.completions.create({
        // Azure uses deployment name, not model name
        model: this.config.deploymentName,
        messages,
        tools,
        // Per-request config overrides adapter-level config.
        temperature: request.config?.temperature ?? this.config.temperature,
        max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
        stream: true
      });
      // Tool-call arguments arrive as string fragments; accumulate per call
      // and flush when a new call starts or the stream finishes.
      let currentToolCall = null;
      for await (const chunk of stream) {
        if (request.signal?.aborted) {
          break;
        }
        const delta = chunk.choices[0]?.delta;
        if (delta?.content) {
          yield { type: "message:delta", content: delta.content };
        }
        if (delta?.tool_calls) {
          for (const toolCall of delta.tool_calls) {
            if (toolCall.id) {
              // A new id marks the start of the next tool call; flush the
              // previous one first.
              if (currentToolCall) {
                yield {
                  type: "action:args",
                  id: currentToolCall.id,
                  args: currentToolCall.arguments
                };
              }
              currentToolCall = {
                id: toolCall.id,
                name: toolCall.function?.name || "",
                arguments: toolCall.function?.arguments || ""
              };
              yield {
                type: "action:start",
                id: currentToolCall.id,
                name: currentToolCall.name
              };
            } else if (currentToolCall && toolCall.function?.arguments) {
              currentToolCall.arguments += toolCall.function.arguments;
            }
          }
        }
        if (chunk.choices[0]?.finish_reason) {
          // NOTE(review): currentToolCall is not reset after this flush; a
          // duplicate action:args would be emitted if chunks followed the
          // finish_reason — confirm upstream chunk ordering.
          if (currentToolCall) {
            yield {
              type: "action:args",
              id: currentToolCall.id,
              args: currentToolCall.arguments
            };
          }
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      yield {
        type: "error",
        message: error instanceof Error ? error.message : "Unknown error",
        code: "AZURE_ERROR"
      };
    }
  }
  /**
   * Non-streaming completion (optional, for debugging)
   *
   * Note: unlike stream(), this path does not expand attachments.
   */
  async complete(request) {
    const client = await this.getClient();
    let messages;
    if (request.rawMessages && request.rawMessages.length > 0) {
      messages = request.rawMessages;
      if (request.systemPrompt) {
        const hasSystem = messages.some((m) => m.role === "system");
        if (!hasSystem) {
          messages = [
            { role: "system", content: request.systemPrompt },
            ...messages
          ];
        }
      }
    } else {
      messages = formatMessagesForOpenAI(
        request.messages,
        request.systemPrompt
      );
    }
    const tools = request.actions?.length ? formatTools(request.actions) : void 0;
    const response = await client.chat.completions.create({
      model: this.config.deploymentName,
      messages,
      tools,
      temperature: request.config?.temperature ?? this.config.temperature,
      max_tokens: request.config?.maxTokens ?? this.config.maxTokens
    });
    const choice = response.choices[0];
    const message = choice?.message;
    const toolCalls = (message?.tool_calls || []).map((tc) => ({
      id: tc.id,
      name: tc.function.name,
      args: JSON.parse(tc.function.arguments || "{}")
    }));
    return {
      content: message?.content || "",
      toolCalls,
      rawResponse: response
    };
  }
};
1614
/** Factory: build an Azure OpenAI adapter from the given config. */
function createAzureAdapter(config) {
  const adapter = new AzureAdapter(config);
  return adapter;
}
1617
+
1618
+ // src/server/streaming.ts
1619
/**
 * HTTP headers for a Server-Sent Events response.
 * `no-transform` keeps intermediaries from re-encoding the stream and
 * `X-Accel-Buffering: no` disables proxy response buffering.
 */
function createSSEHeaders() {
  const sseHeaders = {
    "Content-Type": "text/event-stream",
    "Cache-Control": "no-cache, no-transform",
    Connection: "keep-alive",
    "X-Accel-Buffering": "no"
  };
  return sseHeaders;
}
1627
/** Serialize one event as an SSE frame: `data: <json>` plus a blank line. */
function formatSSEData(event) {
  const payload = JSON.stringify(event);
  return `data: ${payload}\n\n`;
}
1632
/**
 * Wrap an async generator of events in a ReadableStream of UTF-8 encoded
 * SSE frames. A generator error is forwarded as a single `error` event;
 * the stream always ends with a `data: [DONE]` sentinel before closing.
 */
function createEventStream(generator) {
  const encoder = new TextEncoder();
  return new ReadableStream({
    async start(controller) {
      const push = (event) => {
        controller.enqueue(encoder.encode(formatSSEData(event)));
      };
      try {
        for await (const event of generator) {
          push(event);
        }
      } catch (error) {
        push({
          type: "error",
          message: error instanceof Error ? error.message : "Unknown error"
        });
      } finally {
        controller.enqueue(encoder.encode("data: [DONE]\n\n"));
        controller.close();
      }
    }
  });
}
1654
/** Build an HTTP Response that streams the generator's events as SSE. */
function createSSEResponse(generator) {
  const body = createEventStream(generator);
  return new Response(body, { headers: createSSEHeaders() });
}
1659
+
1660
+ // src/server/runtime.ts
1661
/**
 * Decide what a tool's result should look like when fed back to the model.
 *
 * Precedence: a non-empty `_aiContent` on the result is returned verbatim;
 * otherwise context comes from the result's `_aiContext`, then the tool's
 * `aiContext` (value or function of (result, args)). The response mode
 * ("none" | "brief" | "full") is taken from the result, then the tool,
 * defaulting to "full".
 */
function buildToolResultForAI(tool, result, args) {
  const typedResult = result;
  const responseMode = typedResult?._aiResponseMode ?? tool?.aiResponseMode ?? "full";
  // Explicit AI-facing content wins outright.
  if (typedResult?._aiContent && typedResult._aiContent.length > 0) {
    return typedResult._aiContent;
  }
  let aiContext;
  if (typedResult?._aiContext) {
    aiContext = typedResult._aiContext;
  } else if (tool?.aiContext) {
    aiContext = typeof tool.aiContext === "function" ? tool.aiContext(typedResult, args) : tool.aiContext;
  }
  if (responseMode === "none") {
    return aiContext ?? "[Result displayed to user]";
  }
  if (responseMode === "brief") {
    return aiContext ?? `[Tool ${tool?.name ?? "unknown"} executed successfully]`;
  }
  // "full" (and any unrecognized mode): send the serialized result,
  // prefixed by the context when one exists.
  const fullData = JSON.stringify(result);
  return aiContext ? `${aiContext}\n\nFull data: ${fullData}` : fullData;
}
1686
/**
 * Serialize tool-result content for an OpenAI-style `tool` message.
 * Strings pass through untouched; arrays of parts are mapped so image
 * parts become `image_url` entries (base64 data: URLs) and everything
 * else becomes a text part.
 */
function serializeToolResultContent(content) {
  if (typeof content === "string") {
    return content;
  }
  return content.map((item) => {
    if (item.type !== "image") {
      return { type: "text", text: item.text };
    }
    const url = `data:${item.mediaType};base64,${item.data}`;
    return { type: "image_url", image_url: { url } };
  });
}
1705
/**
 * Collect a request's headers into a plain object with lower-cased keys.
 * Returns an empty object when no request is provided.
 */
function extractHeaders(request) {
  if (!request) {
    return {};
  }
  const collected = {};
  request.headers.forEach((headerValue, headerName) => {
    collected[headerName.toLowerCase()] = headerValue;
  });
  return collected;
}
1713
/**
 * Assemble the context object handed to a tool handler: abort signal,
 * thread/tool-call identifiers, lower-cased headers, a small summary of the
 * originating HTTP request (when present), and arbitrary per-call data.
 */
function buildToolContext(toolCallId, signal, threadId, httpRequest, toolContextData) {
  const headers = extractHeaders(httpRequest);
  const requestInfo = httpRequest
    ? { method: httpRequest.method, url: httpRequest.url, headers }
    : void 0;
  return {
    signal,
    threadId,
    toolCallId,
    headers,
    request: requestInfo,
    data: toolContextData
  };
}
1728
+ var Runtime = class {
1729
+ constructor(config) {
1730
+ this.actions = /* @__PURE__ */ new Map();
1731
+ this.tools = /* @__PURE__ */ new Map();
1732
+ this.config = config;
1733
+ if ("provider" in config && config.provider) {
1734
+ this.adapter = config.provider.languageModel(config.model);
1735
+ } else if ("adapter" in config && config.adapter) {
1736
+ this.adapter = config.adapter;
1737
+ } else {
1738
+ this.adapter = this.createAdapter(config);
1739
+ }
1740
+ if (config.actions) {
1741
+ for (const action of config.actions) {
1742
+ this.actions.set(action.name, action);
1743
+ }
1744
+ }
1745
+ if (config.tools) {
1746
+ for (const tool of config.tools) {
1747
+ this.tools.set(tool.name, tool);
1748
+ }
1749
+ }
1750
+ }
1751
+ /**
1752
+ * Create LLM adapter based on config
1753
+ */
1754
+ createAdapter(config) {
1755
+ if (!("llm" in config) || !config.llm) {
1756
+ throw new Error(
1757
+ "LLM configuration is required when adapter is not provided"
1758
+ );
1759
+ }
1760
+ const { llm } = config;
1761
+ switch (llm.provider) {
1762
+ case "openai":
1763
+ return createOpenAIAdapter({
1764
+ apiKey: llm.apiKey,
1765
+ model: llm.model,
1766
+ baseUrl: llm.baseUrl,
1767
+ temperature: llm.temperature,
1768
+ maxTokens: llm.maxTokens
1769
+ });
1770
+ case "anthropic":
1771
+ return createAnthropicAdapter({
1772
+ apiKey: llm.apiKey,
1773
+ model: llm.model,
1774
+ temperature: llm.temperature,
1775
+ maxTokens: llm.maxTokens
1776
+ });
1777
+ case "groq":
1778
+ return createGroqAdapter({
1779
+ apiKey: llm.apiKey,
1780
+ model: llm.model,
1781
+ temperature: llm.temperature,
1782
+ maxTokens: llm.maxTokens
1783
+ });
1784
+ case "ollama":
1785
+ return createOllamaAdapter({
1786
+ model: llm.model,
1787
+ baseUrl: llm.baseUrl,
1788
+ temperature: llm.temperature,
1789
+ maxTokens: llm.maxTokens
1790
+ });
1791
+ default:
1792
+ return createOpenAIAdapter({
1793
+ apiKey: llm.apiKey,
1794
+ model: llm.model,
1795
+ baseUrl: llm.baseUrl,
1796
+ temperature: llm.temperature,
1797
+ maxTokens: llm.maxTokens
1798
+ });
1799
+ }
1800
+ }
1801
+ /**
1802
+ * Process a chat request and return streaming response
1803
+ */
1804
+ async *processChat(request, signal) {
1805
+ const messages = request.messages.map(
1806
+ (m, i) => createMessage({
1807
+ id: `msg_${i}`,
1808
+ role: m.role,
1809
+ content: m.content
1810
+ })
1811
+ );
1812
+ const allActions = [...this.actions.values()];
1813
+ if (request.actions) {
1814
+ for (const action of request.actions) {
1815
+ if (!this.actions.has(action.name)) {
1816
+ allActions.push({
1817
+ name: action.name,
1818
+ description: action.description,
1819
+ parameters: action.parameters,
1820
+ handler: async () => {
1821
+ return { handled: false };
1822
+ }
1823
+ });
1824
+ }
1825
+ }
1826
+ }
1827
+ const completionRequest = {
1828
+ messages,
1829
+ actions: allActions.length > 0 ? allActions : void 0,
1830
+ systemPrompt: request.systemPrompt || this.config.systemPrompt,
1831
+ config: request.config,
1832
+ signal
1833
+ };
1834
+ const stream = this.adapter.stream(completionRequest);
1835
+ for await (const event of stream) {
1836
+ if (event.type === "action:args") {
1837
+ const action = this.actions.get(event.id);
1838
+ if (action) {
1839
+ try {
1840
+ const args = JSON.parse(event.args);
1841
+ const result = await action.handler(args);
1842
+ yield {
1843
+ type: "action:end",
1844
+ id: event.id,
1845
+ result
1846
+ };
1847
+ } catch (error) {
1848
+ yield {
1849
+ type: "action:end",
1850
+ id: event.id,
1851
+ error: error instanceof Error ? error.message : "Action failed"
1852
+ };
1853
+ }
1854
+ } else {
1855
+ yield event;
1856
+ }
1857
+ } else {
1858
+ yield event;
1859
+ }
1860
+ }
1861
+ }
1862
+ /**
1863
+ * Handle HTTP request (for use with any framework)
1864
+ */
1865
+ async handleRequest(request) {
1866
+ try {
1867
+ const body = await request.json();
1868
+ if (this.config.debug) {
1869
+ console.log("[Copilot SDK] Request:", JSON.stringify(body, null, 2));
1870
+ }
1871
+ const signal = request.signal;
1872
+ const hasTools = body.tools && body.tools.length > 0 || this.tools.size > 0;
1873
+ const useAgentLoop = hasTools || this.config.agentLoop?.enabled;
1874
+ if (body.streaming === false) {
1875
+ return this.handleNonStreamingRequest(
1876
+ body,
1877
+ signal,
1878
+ useAgentLoop || false,
1879
+ request
1880
+ );
1881
+ }
1882
+ const generator = useAgentLoop ? this.processChatWithLoop(body, signal, void 0, void 0, request) : this.processChat(body, signal);
1883
+ return createSSEResponse(generator);
1884
+ } catch (error) {
1885
+ console.error("[Copilot SDK] Error:", error);
1886
+ return new Response(
1887
+ JSON.stringify({
1888
+ error: error instanceof Error ? error.message : "Unknown error"
1889
+ }),
1890
+ {
1891
+ status: 500,
1892
+ headers: { "Content-Type": "application/json" }
1893
+ }
1894
+ );
1895
+ }
1896
+ }
1897
+ /**
1898
+ * Handle non-streaming request - returns JSON instead of SSE
1899
+ */
1900
+ async handleNonStreamingRequest(body, signal, useAgentLoop, httpRequest) {
1901
+ try {
1902
+ const generator = useAgentLoop ? this.processChatWithLoop(
1903
+ body,
1904
+ signal,
1905
+ void 0,
1906
+ void 0,
1907
+ httpRequest
1908
+ ) : this.processChat(body, signal);
1909
+ const events = [];
1910
+ let content = "";
1911
+ const toolCalls = [];
1912
+ const toolResults = [];
1913
+ let messages;
1914
+ let requiresAction = false;
1915
+ let error;
1916
+ for await (const event of generator) {
1917
+ events.push(event);
1918
+ switch (event.type) {
1919
+ case "message:delta":
1920
+ content += event.content;
1921
+ break;
1922
+ case "action:start":
1923
+ toolCalls.push({ id: event.id, name: event.name, args: {} });
1924
+ break;
1925
+ case "action:args":
1926
+ const tc = toolCalls.find((t) => t.id === event.id);
1927
+ if (tc) {
1928
+ try {
1929
+ tc.args = JSON.parse(event.args || "{}");
1930
+ } catch {
1931
+ tc.args = {};
1932
+ }
1933
+ }
1934
+ break;
1935
+ case "action:end":
1936
+ toolResults.push({
1937
+ id: event.id,
1938
+ result: event.result || event.error
1939
+ });
1940
+ break;
1941
+ case "tool_calls":
1942
+ break;
1943
+ case "done":
1944
+ messages = event.messages;
1945
+ requiresAction = event.requiresAction || false;
1946
+ break;
1947
+ case "error":
1948
+ error = { message: event.message, code: event.code };
1949
+ break;
1950
+ }
1951
+ }
1952
+ const response = {
1953
+ success: !error,
1954
+ content,
1955
+ toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
1956
+ toolResults: toolResults.length > 0 ? toolResults : void 0,
1957
+ messages,
1958
+ requiresAction,
1959
+ error,
1960
+ // Include raw events for debugging
1961
+ _events: this.config.debug ? events : void 0
1962
+ };
1963
+ console.log("[Copilot SDK] Non-streaming response:", {
1964
+ contentLength: content.length,
1965
+ toolCalls: toolCalls.length,
1966
+ toolResults: toolResults.length,
1967
+ messagesCount: messages?.length,
1968
+ requiresAction,
1969
+ hasError: !!error
1970
+ });
1971
+ return new Response(JSON.stringify(response), {
1972
+ status: error ? 500 : 200,
1973
+ headers: {
1974
+ "Content-Type": "application/json",
1975
+ "Access-Control-Allow-Origin": "*"
1976
+ }
1977
+ });
1978
+ } catch (err) {
1979
+ console.error("[Copilot SDK] Non-streaming error:", err);
1980
+ return new Response(
1981
+ JSON.stringify({
1982
+ success: false,
1983
+ error: {
1984
+ message: err instanceof Error ? err.message : "Unknown error"
1985
+ }
1986
+ }),
1987
+ {
1988
+ status: 500,
1989
+ headers: { "Content-Type": "application/json" }
1990
+ }
1991
+ );
1992
+ }
1993
+ }
1994
+ /**
1995
+ * Get registered actions
1996
+ */
1997
+ getActions() {
1998
+ return [...this.actions.values()];
1999
+ }
2000
+ /**
2001
+ * Register a new action
2002
+ */
2003
+ registerAction(action) {
2004
+ this.actions.set(action.name, action);
2005
+ }
2006
+ /**
2007
+ * Unregister an action
2008
+ */
2009
+ unregisterAction(name) {
2010
+ this.actions.delete(name);
2011
+ }
2012
+ /**
2013
+ * Register a new tool
2014
+ */
2015
+ registerTool(tool) {
2016
+ this.tools.set(tool.name, tool);
2017
+ }
2018
+ /**
2019
+ * Unregister a tool
2020
+ */
2021
+ unregisterTool(name) {
2022
+ this.tools.delete(name);
2023
+ }
2024
+ /**
2025
+ * Get registered tools
2026
+ */
2027
+ getTools() {
2028
+ return [...this.tools.values()];
2029
+ }
2030
+ /**
2031
+ * Get the AI provider name from config
2032
+ */
2033
+ getProviderName() {
2034
+ if ("provider" in this.config && this.config.provider) {
2035
+ return this.config.provider.name;
2036
+ }
2037
+ if ("llm" in this.config && this.config.llm) {
2038
+ return this.config.llm.provider;
2039
+ }
2040
+ return "openai";
2041
+ }
2042
+ /**
2043
+ * Get the AI provider instance (if using provider config)
2044
+ */
2045
+ getProvider() {
2046
+ if ("provider" in this.config && this.config.provider) {
2047
+ return this.config.provider;
2048
+ }
2049
+ return null;
2050
+ }
2051
+ /**
2052
+ * Get the current model ID
2053
+ */
2054
+ getModel() {
2055
+ if ("provider" in this.config && this.config.provider) {
2056
+ return this.config.model;
2057
+ }
2058
+ if ("llm" in this.config && this.config.llm) {
2059
+ return this.config.llm.model || "unknown";
2060
+ }
2061
+ return this.adapter.model;
2062
+ }
2063
+ /**
2064
+ * Process a chat request with tool support (Vercel AI SDK pattern)
2065
+ *
2066
+ * This method:
2067
+ * 1. Streams response from adapter
2068
+ * 2. Detects tool calls from streaming events
2069
+ * 3. Server-side tools are executed immediately
2070
+ * 4. Client-side tool calls are yielded for client to execute
2071
+ * 5. Loop continues until no more tool calls or max iterations reached
2072
+ * 6. Returns all new messages in the done event for client to append
2073
+ */
2074
+ async *processChatWithLoop(request, signal, _accumulatedMessages, _isRecursive, _httpRequest) {
2075
+ const debug = this.config.debug || this.config.agentLoop?.debug;
2076
+ if (request.streaming === false) {
2077
+ if (debug) {
2078
+ console.log("[Copilot SDK] Using non-streaming mode");
2079
+ }
2080
+ for await (const event of this.processChatWithLoopNonStreaming(
2081
+ request,
2082
+ signal,
2083
+ _accumulatedMessages,
2084
+ _isRecursive,
2085
+ _httpRequest
2086
+ )) {
2087
+ yield event;
2088
+ }
2089
+ return;
2090
+ }
2091
+ const newMessages = _accumulatedMessages || [];
2092
+ this.config.agentLoop?.maxIterations || 20;
2093
+ const allTools = [...this.tools.values()];
2094
+ if (request.tools) {
2095
+ for (const tool of request.tools) {
2096
+ allTools.push({
2097
+ name: tool.name,
2098
+ description: tool.description,
2099
+ location: "client",
2100
+ inputSchema: tool.inputSchema
2101
+ });
2102
+ }
2103
+ }
2104
+ if (debug) {
2105
+ console.log(
2106
+ `[Copilot SDK] Processing chat with ${allTools.length} tools`
2107
+ );
2108
+ for (let i = 0; i < request.messages.length; i++) {
2109
+ const msg = request.messages[i];
2110
+ const hasAttachments = msg.attachments && msg.attachments.length > 0;
2111
+ if (hasAttachments) {
2112
+ console.log(
2113
+ `[Copilot SDK] Message ${i} (${msg.role}) has ${msg.attachments.length} attachments:`,
2114
+ msg.attachments.map((a) => ({
2115
+ type: a.type,
2116
+ mimeType: a.mimeType,
2117
+ dataLength: a.data?.length || 0
2118
+ }))
2119
+ );
2120
+ }
2121
+ }
2122
+ }
2123
+ const systemPrompt = request.systemPrompt || this.config.systemPrompt || "";
2124
+ let accumulatedText = "";
2125
+ const toolCalls = [];
2126
+ let currentToolCall = null;
2127
+ const completionRequest = {
2128
+ messages: [],
2129
+ // Not used when rawMessages is provided
2130
+ rawMessages: request.messages,
2131
+ actions: this.convertToolsToActions(allTools),
2132
+ systemPrompt,
2133
+ config: request.config,
2134
+ signal
2135
+ };
2136
+ const stream = this.adapter.stream(completionRequest);
2137
+ for await (const event of stream) {
2138
+ switch (event.type) {
2139
+ case "message:start":
2140
+ case "message:end":
2141
+ yield event;
2142
+ break;
2143
+ case "message:delta":
2144
+ accumulatedText += event.content;
2145
+ yield event;
2146
+ break;
2147
+ case "action:start":
2148
+ currentToolCall = { id: event.id, name: event.name, args: "" };
2149
+ if (debug) {
2150
+ console.log(`[Copilot SDK] Tool call started: ${event.name}`);
2151
+ }
2152
+ yield event;
2153
+ break;
2154
+ case "action:args":
2155
+ if (currentToolCall) {
2156
+ try {
2157
+ const parsedArgs = JSON.parse(event.args || "{}");
2158
+ if (debug) {
2159
+ console.log(
2160
+ `[Copilot SDK] Tool args for ${currentToolCall.name}:`,
2161
+ parsedArgs
2162
+ );
2163
+ }
2164
+ toolCalls.push({
2165
+ id: currentToolCall.id,
2166
+ name: currentToolCall.name,
2167
+ args: parsedArgs
2168
+ });
2169
+ } catch (e) {
2170
+ console.error(
2171
+ "[Copilot SDK] Failed to parse tool args:",
2172
+ event.args,
2173
+ e
2174
+ );
2175
+ toolCalls.push({
2176
+ id: currentToolCall.id,
2177
+ name: currentToolCall.name,
2178
+ args: {}
2179
+ });
2180
+ }
2181
+ currentToolCall = null;
2182
+ }
2183
+ yield event;
2184
+ break;
2185
+ case "error":
2186
+ yield event;
2187
+ return;
2188
+ // Exit on error
2189
+ case "done":
2190
+ break;
2191
+ default:
2192
+ yield event;
2193
+ }
2194
+ }
2195
+ if (toolCalls.length > 0) {
2196
+ if (debug) {
2197
+ console.log(
2198
+ `[Copilot SDK] Detected ${toolCalls.length} tool calls:`,
2199
+ toolCalls.map((t) => t.name)
2200
+ );
2201
+ }
2202
+ const serverToolCalls = [];
2203
+ const clientToolCalls = [];
2204
+ for (const tc of toolCalls) {
2205
+ const tool = allTools.find((t) => t.name === tc.name);
2206
+ if (tool?.location === "server" && tool.handler) {
2207
+ serverToolCalls.push(tc);
2208
+ } else {
2209
+ clientToolCalls.push(tc);
2210
+ }
2211
+ }
2212
+ const serverToolResults = [];
2213
+ const toolContextData = "toolContext" in this.config ? this.config.toolContext : void 0;
2214
+ for (const tc of serverToolCalls) {
2215
+ const tool = allTools.find((t) => t.name === tc.name);
2216
+ if (tool?.handler) {
2217
+ if (debug) {
2218
+ console.log(`[Copilot SDK] Executing server-side tool: ${tc.name}`);
2219
+ }
2220
+ const toolContext = buildToolContext(
2221
+ tc.id,
2222
+ signal,
2223
+ request.threadId,
2224
+ _httpRequest,
2225
+ toolContextData
2226
+ );
2227
+ try {
2228
+ const result = await tool.handler(tc.args, toolContext);
2229
+ serverToolResults.push({
2230
+ id: tc.id,
2231
+ name: tc.name,
2232
+ args: tc.args,
2233
+ result,
2234
+ tool
2235
+ });
2236
+ yield {
2237
+ type: "action:end",
2238
+ id: tc.id,
2239
+ result
2240
+ };
2241
+ } catch (error) {
2242
+ const errorResult = {
2243
+ success: false,
2244
+ error: error instanceof Error ? error.message : "Tool execution failed"
2245
+ };
2246
+ serverToolResults.push({
2247
+ id: tc.id,
2248
+ name: tc.name,
2249
+ args: tc.args,
2250
+ result: errorResult,
2251
+ tool
2252
+ });
2253
+ yield {
2254
+ type: "action:end",
2255
+ id: tc.id,
2256
+ error: error instanceof Error ? error.message : "Tool execution failed"
2257
+ };
2258
+ }
2259
+ }
2260
+ }
2261
+ if (serverToolResults.length > 0) {
2262
+ if (debug) {
2263
+ console.log(
2264
+ `[Copilot SDK] Server tools executed, continuing conversation...`
2265
+ );
2266
+ }
2267
+ const assistantWithToolCalls = {
2268
+ role: "assistant",
2269
+ content: accumulatedText || null,
2270
+ tool_calls: serverToolCalls.map((tc) => ({
2271
+ id: tc.id,
2272
+ type: "function",
2273
+ function: {
2274
+ name: tc.name,
2275
+ arguments: JSON.stringify(tc.args)
2276
+ }
2277
+ }))
2278
+ };
2279
+ const toolResultMessages = serverToolResults.map(
2280
+ (tr) => {
2281
+ const aiContent = buildToolResultForAI(tr.tool, tr.result, tr.args);
2282
+ const content = typeof aiContent === "string" ? aiContent : JSON.stringify(serializeToolResultContent(aiContent));
2283
+ return {
2284
+ role: "tool",
2285
+ content,
2286
+ tool_call_id: tr.id
2287
+ };
2288
+ }
2289
+ );
2290
+ newMessages.push(assistantWithToolCalls);
2291
+ newMessages.push(...toolResultMessages);
2292
+ const messagesWithResults = [
2293
+ ...request.messages,
2294
+ assistantWithToolCalls,
2295
+ ...toolResultMessages
2296
+ ];
2297
+ const nextRequest = {
2298
+ ...request,
2299
+ messages: messagesWithResults
2300
+ };
2301
+ for await (const event of this.processChatWithLoop(
2302
+ nextRequest,
2303
+ signal,
2304
+ newMessages,
2305
+ true,
2306
+ // Mark as recursive
2307
+ _httpRequest
2308
+ )) {
2309
+ yield event;
2310
+ }
2311
+ return;
2312
+ }
2313
+ if (clientToolCalls.length > 0) {
2314
+ const assistantMessage = {
2315
+ role: "assistant",
2316
+ content: accumulatedText || null,
2317
+ tool_calls: clientToolCalls.map((tc) => ({
2318
+ id: tc.id,
2319
+ type: "function",
2320
+ function: {
2321
+ name: tc.name,
2322
+ arguments: JSON.stringify(tc.args)
2323
+ }
2324
+ }))
2325
+ };
2326
+ newMessages.push(assistantMessage);
2327
+ yield {
2328
+ type: "tool_calls",
2329
+ toolCalls: clientToolCalls,
2330
+ assistantMessage
2331
+ };
2332
+ yield {
2333
+ type: "done",
2334
+ requiresAction: true,
2335
+ messages: newMessages
2336
+ };
2337
+ return;
2338
+ }
2339
+ }
2340
+ if (accumulatedText) {
2341
+ newMessages.push({
2342
+ role: "assistant",
2343
+ content: accumulatedText
2344
+ });
2345
+ }
2346
+ if (debug) {
2347
+ console.log(
2348
+ `[Copilot SDK] Stream complete, returning ${newMessages.length} new messages`
2349
+ );
2350
+ }
2351
+ yield {
2352
+ type: "done",
2353
+ messages: newMessages.length > 0 ? newMessages : void 0
2354
+ };
2355
+ }
2356
+ /**
2357
+ * Non-streaming agent loop implementation
2358
+ *
2359
+ * Uses adapter.complete() instead of stream() for:
2360
+ * - Better comparison with original studio-ai behavior
2361
+ * - Easier debugging (full response at once)
2362
+ * - More predictable retry behavior
2363
+ */
2364
  async *processChatWithLoopNonStreaming(request, signal, _accumulatedMessages, _isRecursive, _httpRequest) {
    // Collects every message created during the loop (assistant turns, tool
    // results) so the final "done" event carries them back to the caller.
    const newMessages = _accumulatedMessages || [];
    const debug = this.config.debug || this.config.agentLoop?.debug;
    // Hard cap on model round-trips; defaults to 20 when not configured.
    const maxIterations = this.config.agentLoop?.maxIterations || 20;
    // Server-registered tools plus any tools declared by the client request.
    const allTools = [...this.tools.values()];
    if (request.tools) {
      for (const tool of request.tools) {
        allTools.push({
          name: tool.name,
          description: tool.description,
          location: "client",
          inputSchema: tool.inputSchema
        });
      }
    }
    const systemPrompt = request.systemPrompt || this.config.systemPrompt || "";
    let iteration = 0;
    // Conversation grows with assistant/tool messages on each loop round.
    let conversationMessages = request.messages;
    while (iteration < maxIterations) {
      iteration++;
      if (debug) {
        console.log(`[Copilot SDK] Iteration ${iteration}/${maxIterations}`);
      }
      // Honor caller cancellation between rounds.
      if (signal?.aborted) {
        yield {
          type: "error",
          message: "Aborted",
          code: "ABORTED"
        };
        return;
      }
      // Adapters without complete() fall back to the streaming loop.
      if (!this.adapter.complete) {
        if (debug) {
          console.log(
            "[Copilot SDK] Adapter does not support non-streaming, falling back to streaming"
          );
        }
        const streamingRequest = { ...request, streaming: true };
        for await (const event of this.processChatWithLoop(
          streamingRequest,
          signal,
          _accumulatedMessages,
          _isRecursive,
          _httpRequest
        )) {
          yield event;
        }
        return;
      }
      const completionRequest = {
        messages: [],
        // rawMessages takes precedence over messages in the adapters.
        rawMessages: conversationMessages,
        actions: this.convertToolsToActions(allTools),
        systemPrompt,
        config: request.config,
        signal
      };
      try {
        const result = await this.adapter.complete(completionRequest);
        if (debug) {
          console.log(
            `[Copilot SDK] Got response: ${result.content.length} chars, ${result.toolCalls.length} tool calls`
          );
        }
        // Replay the full response as synthetic streaming events so clients
        // observe the same event shape in both modes.
        yield { type: "message:start", id: `msg_${Date.now()}` };
        if (result.content) {
          yield {
            type: "message:delta",
            content: result.content
          };
        }
        yield { type: "message:end" };
        if (result.toolCalls.length > 0) {
          // Partition: server tools (with handlers) execute here; everything
          // else is returned to the client to execute.
          const serverToolCalls = [];
          const clientToolCalls = [];
          for (const tc of result.toolCalls) {
            const tool = allTools.find((t) => t.name === tc.name);
            if (tool?.location === "server" && tool.handler) {
              serverToolCalls.push(tc);
            } else {
              clientToolCalls.push({
                id: tc.id,
                name: tc.name,
                args: tc.args
              });
            }
          }
          // Emit synthetic action:start/args for every tool call (both kinds).
          for (const tc of result.toolCalls) {
            yield {
              type: "action:start",
              id: tc.id,
              name: tc.name
            };
            yield {
              type: "action:args",
              id: tc.id,
              args: JSON.stringify(tc.args)
            };
          }
          const serverToolResults = [];
          const toolContextData = "toolContext" in this.config ? this.config.toolContext : void 0;
          for (const tc of serverToolCalls) {
            const tool = allTools.find((t) => t.name === tc.name);
            if (tool?.handler) {
              if (debug) {
                console.log(`[Copilot SDK] Executing tool: ${tc.name}`);
              }
              const toolContext = buildToolContext(
                tc.id,
                signal,
                request.threadId,
                _httpRequest,
                toolContextData
              );
              try {
                const toolResult = await tool.handler(tc.args, toolContext);
                serverToolResults.push({
                  id: tc.id,
                  name: tc.name,
                  args: tc.args,
                  result: toolResult,
                  tool
                });
                yield {
                  type: "action:end",
                  id: tc.id,
                  result: toolResult
                };
              } catch (error) {
                // Tool failure is recorded as a result so the model can react;
                // it does not abort the loop.
                const errorResult = {
                  success: false,
                  error: error instanceof Error ? error.message : "Tool execution failed"
                };
                serverToolResults.push({
                  id: tc.id,
                  name: tc.name,
                  args: tc.args,
                  result: errorResult,
                  tool
                });
                yield {
                  type: "action:end",
                  id: tc.id,
                  error: error instanceof Error ? error.message : "Tool execution failed"
                };
              }
            }
          }
          if (serverToolResults.length > 0) {
            // Append the assistant tool_calls turn plus one tool message per
            // result (OpenAI wire format), then continue to the next round.
            const assistantWithToolCalls = {
              role: "assistant",
              content: result.content || null,
              tool_calls: result.toolCalls.map((tc) => ({
                id: tc.id,
                type: "function",
                function: {
                  name: tc.name,
                  arguments: JSON.stringify(tc.args)
                }
              }))
            };
            const toolResultMessages = serverToolResults.map((tr) => {
              const aiContent = buildToolResultForAI(
                tr.tool,
                tr.result,
                tr.args
              );
              const content = typeof aiContent === "string" ? aiContent : JSON.stringify(serializeToolResultContent(aiContent));
              return {
                role: "tool",
                content,
                tool_call_id: tr.id
              };
            });
            newMessages.push(assistantWithToolCalls);
            newMessages.push(...toolResultMessages);
            conversationMessages = [
              ...conversationMessages,
              assistantWithToolCalls,
              ...toolResultMessages
            ];
            continue;
          }
          if (clientToolCalls.length > 0) {
            // Hand execution over to the client; requiresAction tells it to
            // resume the loop with the tool results.
            const assistantMessage = {
              role: "assistant",
              content: result.content || null,
              tool_calls: clientToolCalls.map((tc) => ({
                id: tc.id,
                type: "function",
                function: {
                  name: tc.name,
                  arguments: JSON.stringify(tc.args)
                }
              }))
            };
            newMessages.push(assistantMessage);
            yield {
              type: "tool_calls",
              toolCalls: clientToolCalls,
              assistantMessage
            };
            yield {
              type: "done",
              requiresAction: true,
              messages: newMessages
            };
            return;
          }
        }
        // No tool calls: record the plain assistant reply and finish.
        if (result.content) {
          newMessages.push({
            role: "assistant",
            content: result.content
          });
        }
        if (debug) {
          console.log(`[Copilot SDK] Complete after ${iteration} iterations`);
        }
        yield {
          type: "done",
          messages: newMessages.length > 0 ? newMessages : void 0
        };
        return;
      } catch (error) {
        yield {
          type: "error",
          message: error instanceof Error ? error.message : "Unknown error",
          code: "COMPLETION_ERROR"
        };
        return;
      }
    }
    // Loop exhausted without a final answer: return what we accumulated.
    if (debug) {
      console.log(`[Copilot SDK] Max iterations (${maxIterations}) reached`);
    }
    yield {
      type: "done",
      messages: newMessages.length > 0 ? newMessages : void 0
    };
  }
2605
+ /**
2606
+ * Convert tools to legacy action format (for adapter compatibility)
2607
+ */
2608
+ convertToolsToActions(tools) {
2609
+ return tools.map((tool) => ({
2610
+ name: tool.name,
2611
+ description: tool.description,
2612
+ parameters: this.convertInputSchemaToParameters(tool.inputSchema),
2613
+ handler: tool.handler || (async () => ({ handled: false }))
2614
+ }));
2615
+ }
2616
+ /**
2617
+ * Convert JSON Schema property to ActionParameter format recursively
2618
+ */
2619
+ convertSchemaProperty(prop) {
2620
+ const p = prop;
2621
+ const typeMap = {
2622
+ string: "string",
2623
+ number: "number",
2624
+ integer: "number",
2625
+ boolean: "boolean",
2626
+ object: "object",
2627
+ array: "array"
2628
+ };
2629
+ const result = {
2630
+ type: typeMap[p.type || "string"] || "string"
2631
+ };
2632
+ if (p.description) {
2633
+ result.description = p.description;
2634
+ }
2635
+ if (p.enum) {
2636
+ result.enum = p.enum;
2637
+ }
2638
+ if (p.type === "array" && p.items) {
2639
+ result.items = this.convertSchemaProperty(p.items);
2640
+ }
2641
+ if (p.type === "object" && p.properties) {
2642
+ result.properties = Object.fromEntries(
2643
+ Object.entries(p.properties).map(([key, val]) => [
2644
+ key,
2645
+ this.convertSchemaProperty(val)
2646
+ ])
2647
+ );
2648
+ }
2649
+ return result;
2650
+ }
2651
+ /**
2652
+ * Convert JSON Schema to legacy parameters format
2653
+ */
2654
+ convertInputSchemaToParameters(schema) {
2655
+ const parameters = {};
2656
+ for (const [name, prop] of Object.entries(schema.properties)) {
2657
+ const converted = this.convertSchemaProperty(prop);
2658
+ parameters[name] = {
2659
+ ...converted,
2660
+ required: schema.required?.includes(name)
2661
+ };
2662
+ }
2663
+ return parameters;
2664
+ }
2665
+ };
2666
/**
 * Factory: instantiate a Runtime from the given configuration.
 * @param {object} config - Runtime configuration (adapter/llm, tools, etc.).
 * @returns {Runtime} a new Runtime instance.
 */
function createRuntime(config) {
  const runtime = new Runtime(config);
  return runtime;
}
2669
/**
 * Build a Hono app exposing the runtime over HTTP:
 *  GET  /              health check
 *  POST /, /chat       raw request pass-through to runtime.handleRequest
 *  POST /chat/loop     SSE agent loop
 *  GET  /actions, /tools, /capabilities   introspection endpoints
 */
function createHonoApp(runtime) {
  const app = new Hono();
  app.use("*", cors());
  // POST / and /chat both forward the raw Request to the runtime.
  const forwardToRuntime = async (c) => runtime.handleRequest(c.req.raw);
  app.get("/", (c) => c.json({ status: "ok", provider: "yourgpt-copilot" }));
  app.post("/", forwardToRuntime);
  app.post("/chat", forwardToRuntime);
  app.post("/chat/loop", async (c) => {
    try {
      const body = await c.req.json();
      const generator = runtime.processChatWithLoop(body, c.req.raw.signal);
      return createSSEResponse(generator);
    } catch (error) {
      const message = error instanceof Error ? error.message : "Unknown error";
      return c.json({ error: message }, 500);
    }
  });
  app.get("/actions", (c) =>
    c.json({
      actions: runtime.getActions().map(({ name, description, parameters }) => ({
        name,
        description,
        parameters
      }))
    })
  );
  app.get("/tools", (c) =>
    c.json({
      tools: runtime.getTools().map(({ name, description, location, inputSchema }) => ({
        name,
        description,
        location,
        inputSchema
      }))
    })
  );
  app.get("/capabilities", (c) => {
    const provider = runtime.getProvider();
    const model = runtime.getModel();
    if (provider) {
      return c.json({
        provider: provider.name,
        model,
        capabilities: provider.getCapabilities(model),
        supportedModels: provider.supportedModels
      });
    }
    // Conservative defaults when no provider is registered.
    return c.json({
      provider: "unknown",
      model,
      capabilities: {
        supportsVision: false,
        supportsTools: true,
        supportsThinking: false,
        supportsStreaming: true,
        supportsPDF: false,
        supportsAudio: false,
        supportsVideo: false,
        maxTokens: 8192,
        supportedImageTypes: [],
        supportsJsonMode: false,
        supportsSystemMessages: true
      },
      supportedModels: []
    });
  });
  return app;
}
2746
/**
 * Create a Next.js route handler backed by a freshly created Runtime.
 * The returned async function maps a Request straight to the runtime.
 */
function createNextHandler(config) {
  const runtime = createRuntime(config);
  async function handler(request) {
    return runtime.handleRequest(request);
  }
  return handler;
}
2752
/**
 * Create Express-compatible middleware backed by a Runtime.
 *
 * Bridges the Node req/res pair to the fetch Request/Response API:
 * builds a Request from the Express request, lets the runtime handle it,
 * then copies headers and streams the body back through res.
 *
 * Fixes vs. previous version:
 *  - removed a dead `createHonoApp(runtime)` call whose result was discarded
 *    (it built and threw away an entire Hono app on every middleware creation);
 *  - the error path now checks `res.headersSent` before sending a JSON 500,
 *    since headers/body may already have been partially streamed.
 */
function createExpressMiddleware(config) {
  const runtime = createRuntime(config);
  return async (req, res) => {
    try {
      // req.url is path-only in Express; give URL a dummy base.
      const url = new URL(req.url, "http://localhost");
      const request = new Request(url, {
        method: req.method,
        headers: req.headers,
        // Express has already parsed the body; re-serialize it for fetch.
        body: req.method !== "GET" ? JSON.stringify(req.body) : void 0
      });
      const response = await runtime.handleRequest(request);
      response.headers.forEach((value, key) => {
        res.setHeader(key, value);
      });
      if (response.body) {
        // Stream the fetch Response body through the Node response.
        const reader = response.body.getReader();
        const decoder = new TextDecoder();
        while (true) {
          const { done, value } = await reader.read();
          if (done) break;
          res.write(decoder.decode(value));
        }
      }
      res.end();
    } catch (error) {
      const message = error instanceof Error ? error.message : "Unknown error";
      if (!res.headersSent) {
        res.status(500).json({ error: message });
      } else {
        // Too late for a JSON error response; just terminate the stream.
        res.end();
      }
    }
  };
}
2784
/**
 * Create a fetch-style Node handler: builds a Runtime, wraps it in the
 * standard Hono app, and returns the app's fetch entry point.
 */
function createNodeHandler(config) {
  const honoApp = createHonoApp(createRuntime(config));
  return honoApp.fetch;
}
2789
+
2790
+ // src/providers/registry.ts
2791
// src/providers/registry.ts
// Global name -> factory registry for LLM providers.
var providerFactories = /* @__PURE__ */ new Map();
/** Register a provider factory under a name (overwrites any existing one). */
function registerProvider(name, factory) {
  providerFactories.set(name, factory);
}
/** Instantiate a registered provider, or undefined if the name is unknown. */
function getProvider(name, config) {
  const factory = providerFactories.get(name);
  return factory ? factory(config) : void 0;
}
/** Whether a provider factory is registered under the given name. */
function hasProvider(name) {
  return providerFactories.has(name);
}
/** Names of all registered providers, in registration order. */
function listProviders() {
  return Array.from(providerFactories.keys());
}
/**
 * List every registered provider with its supported models. A factory that
 * throws (e.g. missing API key at construction) yields an empty model list.
 */
function getAvailableProviders() {
  return Array.from(providerFactories, ([name, factory]) => {
    try {
      return { name, models: factory().supportedModels };
    } catch {
      return { name, models: [] };
    }
  });
}
/** Capabilities of a model under a provider, or undefined if unregistered. */
function getModelCapabilities(providerName, modelId) {
  return getProvider(providerName)?.getCapabilities(modelId);
}
2833
+
2834
+ // src/providers/openai/index.ts
2835
// src/providers/openai/index.ts
// Per-model capability table: vision/tools/audio/jsonMode flags and context size.
var OPENAI_MODELS = {
  // GPT-4o series
  "gpt-4o": { vision: true, tools: true, audio: true, jsonMode: true, maxTokens: 128e3 },
  "gpt-4o-mini": { vision: true, tools: true, audio: false, jsonMode: true, maxTokens: 128e3 },
  "gpt-4o-2024-11-20": { vision: true, tools: true, audio: true, jsonMode: true, maxTokens: 128e3 },
  "gpt-4o-2024-08-06": { vision: true, tools: true, audio: false, jsonMode: true, maxTokens: 128e3 },
  // GPT-4 Turbo series
  "gpt-4-turbo": { vision: true, tools: true, audio: false, jsonMode: true, maxTokens: 128e3 },
  "gpt-4-turbo-preview": { vision: false, tools: true, audio: false, jsonMode: true, maxTokens: 128e3 },
  // GPT-4 series
  "gpt-4": { vision: false, tools: true, audio: false, jsonMode: false, maxTokens: 8192 },
  "gpt-4-32k": { vision: false, tools: true, audio: false, jsonMode: false, maxTokens: 32768 },
  // GPT-3.5 series
  "gpt-3.5-turbo": { vision: false, tools: true, audio: false, jsonMode: true, maxTokens: 16385 },
  "gpt-3.5-turbo-16k": { vision: false, tools: true, audio: false, jsonMode: true, maxTokens: 16385 },
  // O1 reasoning series (no tool support yet)
  o1: { vision: true, tools: false, audio: false, jsonMode: false, maxTokens: 128e3 },
  "o1-mini": { vision: true, tools: false, audio: false, jsonMode: false, maxTokens: 128e3 },
  "o1-preview": { vision: true, tools: false, audio: false, jsonMode: false, maxTokens: 128e3 },
  // O3 reasoning series
  "o3-mini": { vision: true, tools: false, audio: false, jsonMode: false, maxTokens: 128e3 }
};
/**
 * Create the OpenAI provider. API key resolution order: explicit config,
 * then the OPENAI_API_KEY environment variable, then empty string.
 */
function createOpenAI(config = {}) {
  const apiKey = config.apiKey ?? process.env.OPENAI_API_KEY ?? "";
  return {
    name: "openai",
    supportedModels: Object.keys(OPENAI_MODELS),
    languageModel(modelId) {
      return createOpenAIAdapter({ apiKey, model: modelId, baseUrl: config.baseUrl });
    },
    getCapabilities(modelId) {
      // Unknown models fall back to gpt-4o's capability profile.
      const caps = OPENAI_MODELS[modelId] ?? OPENAI_MODELS["gpt-4o"];
      const imageTypes = caps.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : [];
      const audioTypes = caps.audio ? ["audio/mp3", "audio/wav", "audio/webm"] : [];
      return {
        supportsVision: caps.vision,
        supportsTools: caps.tools,
        supportsThinking: false,
        // OpenAI doesn't have extended thinking
        supportsStreaming: true,
        supportsPDF: false,
        // OpenAI doesn't support PDFs directly
        supportsAudio: caps.audio,
        supportsVideo: false,
        maxTokens: caps.maxTokens,
        supportedImageTypes: imageTypes,
        supportedAudioTypes: audioTypes,
        supportsJsonMode: caps.jsonMode,
        supportsSystemMessages: true
      };
    }
  };
}
2975
+
2976
+ // src/providers/anthropic/index.ts
2977
// src/providers/anthropic/index.ts
// Per-model capability table: vision/tools/extended-thinking flags and output budget.
var ANTHROPIC_MODELS = {
  // Claude 4 series (latest)
  "claude-sonnet-4-20250514": { vision: true, tools: true, thinking: true, maxTokens: 64e3 },
  "claude-opus-4-20250514": { vision: true, tools: true, thinking: true, maxTokens: 32e3 },
  // Claude 3.5 series
  "claude-3-5-sonnet-latest": { vision: true, tools: true, thinking: true, maxTokens: 2e5 },
  "claude-3-5-sonnet-20241022": { vision: true, tools: true, thinking: true, maxTokens: 2e5 },
  "claude-3-5-haiku-latest": { vision: true, tools: true, thinking: false, maxTokens: 2e5 },
  "claude-3-5-haiku-20241022": { vision: true, tools: true, thinking: false, maxTokens: 2e5 },
  // Claude 3 series
  "claude-3-opus-latest": { vision: true, tools: true, thinking: true, maxTokens: 2e5 },
  "claude-3-opus-20240229": { vision: true, tools: true, thinking: true, maxTokens: 2e5 },
  "claude-3-sonnet-20240229": { vision: true, tools: true, thinking: false, maxTokens: 2e5 },
  "claude-3-haiku-20240307": { vision: true, tools: true, thinking: false, maxTokens: 2e5 }
};
/**
 * Create the Anthropic provider. API key resolution order: explicit config,
 * then the ANTHROPIC_API_KEY environment variable, then empty string.
 * An optional thinkingBudget enables extended thinking on the adapter.
 */
function createAnthropic(config = {}) {
  const apiKey = config.apiKey ?? process.env.ANTHROPIC_API_KEY ?? "";
  return {
    name: "anthropic",
    supportedModels: Object.keys(ANTHROPIC_MODELS),
    languageModel(modelId) {
      const thinking = config.thinkingBudget
        ? { type: "enabled", budgetTokens: config.thinkingBudget }
        : void 0;
      return createAnthropicAdapter({
        apiKey,
        model: modelId,
        baseUrl: config.baseUrl,
        thinking
      });
    },
    getCapabilities(modelId) {
      // Unknown models fall back to claude-3-5-sonnet-latest's profile.
      const caps = ANTHROPIC_MODELS[modelId] ?? ANTHROPIC_MODELS["claude-3-5-sonnet-latest"];
      return {
        supportsVision: caps.vision,
        supportsTools: caps.tools,
        supportsThinking: caps.thinking,
        supportsStreaming: true,
        supportsPDF: true,
        // Claude supports PDFs
        supportsAudio: false,
        supportsVideo: false,
        maxTokens: caps.maxTokens,
        supportedImageTypes: ["image/png", "image/jpeg", "image/gif", "image/webp"],
        supportsJsonMode: false,
        // Anthropic doesn't have JSON mode
        supportsSystemMessages: true
      };
    }
  };
}
3080
+
3081
+ // src/providers/groq/index.ts
3082
// src/providers/groq/index.ts
// Per-model capability table: vision/tools flags and context size.
var GROQ_MODELS = {
  // Llama 3.3 series
  "llama-3.3-70b-versatile": { vision: false, tools: true, maxTokens: 32768 },
  "llama-3.3-70b-specdec": { vision: false, tools: true, maxTokens: 8192 },
  // Llama 3.2 Vision series
  "llama-3.2-90b-vision-preview": { vision: true, tools: true, maxTokens: 8192 },
  "llama-3.2-11b-vision-preview": { vision: true, tools: true, maxTokens: 8192 },
  // Llama 3.1 series
  "llama-3.1-70b-versatile": { vision: false, tools: true, maxTokens: 32768 },
  "llama-3.1-8b-instant": { vision: false, tools: true, maxTokens: 8192 },
  // Mixtral series
  "mixtral-8x7b-32768": { vision: false, tools: true, maxTokens: 32768 },
  // Gemma series
  "gemma2-9b-it": { vision: false, tools: false, maxTokens: 8192 },
  // DeepSeek
  "deepseek-r1-distill-llama-70b": { vision: false, tools: true, maxTokens: 8192 }
};
/**
 * Create the Groq provider. API key resolution order: explicit config,
 * then the GROQ_API_KEY environment variable, then empty string.
 */
function createGroq(config = {}) {
  const apiKey = config.apiKey ?? process.env.GROQ_API_KEY ?? "";
  return {
    name: "groq",
    supportedModels: Object.keys(GROQ_MODELS),
    languageModel(modelId) {
      return createGroqAdapter({ apiKey, model: modelId });
    },
    getCapabilities(modelId) {
      // Unknown models fall back to llama-3.3-70b-versatile's profile.
      const caps = GROQ_MODELS[modelId] ?? GROQ_MODELS["llama-3.3-70b-versatile"];
      return {
        supportsVision: caps.vision,
        supportsTools: caps.tools,
        supportsThinking: false,
        supportsStreaming: true,
        supportsPDF: false,
        supportsAudio: false,
        supportsVideo: false,
        maxTokens: caps.maxTokens,
        supportedImageTypes: caps.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : [],
        supportsJsonMode: true,
        supportsSystemMessages: true
      };
    }
  };
}
3164
+
3165
+ // src/providers/ollama/index.ts
3166
// src/providers/ollama/index.ts
// Per-model capability table for locally-served Ollama models.
var OLLAMA_MODELS = {
  // Llama series
  llama3: { vision: false, tools: true, maxTokens: 8192 },
  "llama3:70b": { vision: false, tools: true, maxTokens: 8192 },
  "llama3.2": { vision: false, tools: true, maxTokens: 8192 },
  "llama3.2-vision": { vision: true, tools: true, maxTokens: 8192 },
  // Mistral series
  mistral: { vision: false, tools: true, maxTokens: 8192 },
  "mistral-nemo": { vision: false, tools: true, maxTokens: 128e3 },
  mixtral: { vision: false, tools: true, maxTokens: 32768 },
  // CodeLlama
  codellama: { vision: false, tools: false, maxTokens: 16384 },
  // Phi series
  phi3: { vision: false, tools: true, maxTokens: 4096 },
  "phi3:medium": { vision: false, tools: true, maxTokens: 4096 },
  // Gemma series
  gemma2: { vision: false, tools: false, maxTokens: 8192 },
  "gemma2:27b": { vision: false, tools: false, maxTokens: 8192 },
  // Qwen series
  qwen2: { vision: false, tools: true, maxTokens: 32768 },
  "qwen2.5-coder": { vision: false, tools: true, maxTokens: 32768 },
  // LLaVA (vision)
  llava: { vision: true, tools: false, maxTokens: 4096 },
  // DeepSeek
  deepseek: { vision: false, tools: true, maxTokens: 16384 },
  "deepseek-coder": { vision: false, tools: false, maxTokens: 16384 }
};
// Conservative profile for models absent from the table above.
var DEFAULT_MODEL_CAPS = { vision: false, tools: false, maxTokens: 4096 };
/**
 * Create the Ollama provider. Defaults to the standard local server URL.
 * Capability lookup tries the exact model id (including ":tag"), then the
 * base name before the colon, then DEFAULT_MODEL_CAPS.
 */
function createOllama(config = {}) {
  const baseUrl = config.baseUrl ?? "http://localhost:11434";
  return {
    name: "ollama",
    supportedModels: Object.keys(OLLAMA_MODELS),
    languageModel(modelId) {
      return createOllamaAdapter({ model: modelId, baseUrl });
    },
    getCapabilities(modelId) {
      const baseModelName = modelId.split(":")[0];
      const caps = OLLAMA_MODELS[modelId] ?? OLLAMA_MODELS[baseModelName] ?? DEFAULT_MODEL_CAPS;
      return {
        supportsVision: caps.vision,
        supportsTools: caps.tools,
        supportsThinking: false,
        supportsStreaming: true,
        supportsPDF: false,
        supportsAudio: false,
        supportsVideo: false,
        maxTokens: caps.maxTokens,
        supportedImageTypes: caps.vision ? ["image/png", "image/jpeg", "image/gif"] : [],
        supportsJsonMode: false,
        supportsSystemMessages: true
      };
    }
  };
}
3296
+
3297
+ // src/providers/google/index.ts
3298
// src/providers/google/index.ts
// Per-model capability table: modality flags, input context (maxTokens)
// and output budget (outputTokens).
var GOOGLE_MODELS = {
  // Gemini 2.0 series (latest)
  "gemini-2.0-flash": { vision: true, tools: true, audio: true, video: true, pdf: true, maxTokens: 1e6, outputTokens: 8192 },
  "gemini-2.0-flash-lite": { vision: true, tools: true, audio: false, video: false, pdf: true, maxTokens: 1e6, outputTokens: 8192 },
  // Gemini 2.5 series (experimental)
  "gemini-2.5-pro-preview-05-06": { vision: true, tools: true, audio: true, video: true, pdf: true, maxTokens: 1e6, outputTokens: 65536 },
  "gemini-2.5-flash-preview-05-20": { vision: true, tools: true, audio: true, video: true, pdf: true, maxTokens: 1e6, outputTokens: 65536 },
  // Gemini 1.5 series
  "gemini-1.5-pro": { vision: true, tools: true, audio: true, video: true, pdf: true, maxTokens: 2e6, outputTokens: 8192 },
  "gemini-1.5-pro-latest": { vision: true, tools: true, audio: true, video: true, pdf: true, maxTokens: 2e6, outputTokens: 8192 },
  "gemini-1.5-flash": { vision: true, tools: true, audio: true, video: true, pdf: true, maxTokens: 1e6, outputTokens: 8192 },
  "gemini-1.5-flash-latest": { vision: true, tools: true, audio: true, video: true, pdf: true, maxTokens: 1e6, outputTokens: 8192 },
  "gemini-1.5-flash-8b": { vision: true, tools: true, audio: false, video: false, pdf: true, maxTokens: 1e6, outputTokens: 8192 },
  // Gemini 1.0 series (legacy)
  "gemini-1.0-pro": { vision: false, tools: true, audio: false, video: false, pdf: false, maxTokens: 30720, outputTokens: 2048 }
};
3394
/**
 * Creates the Google Gemini provider.
 *
 * @param {object} [config] - Optional `apiKey` (falls back to the
 *   GOOGLE_API_KEY environment variable), `baseUrl`, `safetySettings`.
 * @returns {object} Provider with `name`, `supportedModels`,
 *   `languageModel(modelId)` and `getCapabilities(modelId)`.
 */
function createGoogle(config = {}) {
  const apiKey = config.apiKey ?? process.env.GOOGLE_API_KEY ?? "";
  return {
    name: "google",
    supportedModels: Object.keys(GOOGLE_MODELS),
    languageModel(modelId) {
      return createGoogleAdapter({
        apiKey,
        model: modelId,
        baseUrl: config.baseUrl,
        safetySettings: config.safetySettings
      });
    },
    getCapabilities(modelId) {
      // Unknown ids fall back to the gemini-2.0-flash capability profile.
      const caps = GOOGLE_MODELS[modelId] ?? GOOGLE_MODELS["gemini-2.0-flash"];
      const imageTypes = caps.vision
        ? ["image/png", "image/jpeg", "image/gif", "image/webp", "image/heic", "image/heif"]
        : [];
      const audioTypes = caps.audio
        ? ["audio/mp3", "audio/wav", "audio/aiff", "audio/aac", "audio/ogg", "audio/flac"]
        : [];
      const videoTypes = caps.video
        ? ["video/mp4", "video/mpeg", "video/mov", "video/avi", "video/webm", "video/mkv"]
        : [];
      return {
        supportsVision: caps.vision,
        supportsTools: caps.tools,
        supportsThinking: false,
        // Gemini doesn't have extended thinking like Claude
        supportsStreaming: true,
        supportsPDF: caps.pdf,
        supportsAudio: caps.audio,
        supportsVideo: caps.video,
        maxTokens: caps.maxTokens,
        supportedImageTypes: imageTypes,
        supportedAudioTypes: audioTypes,
        supportedVideoTypes: videoTypes,
        supportsJsonMode: true,
        // Gemini supports JSON mode
        supportsSystemMessages: true
      };
    }
  };
}
3450
+
3451
// src/providers/xai/index.ts
// Static capability table for xAI Grok models, keyed by model id.
// Fields: vision/tools flags, maxTokens (appears to be the context window —
// confirm against xAI's published limits) and outputTokens (max generation).
var XAI_MODELS = {
  // Grok 2 series (latest)
  "grok-2": {
    vision: true,
    tools: true,
    maxTokens: 131072,
    outputTokens: 4096
  },
  "grok-2-latest": {
    vision: true,
    tools: true,
    maxTokens: 131072,
    outputTokens: 4096
  },
  "grok-2-mini": {
    vision: false,
    tools: true,
    maxTokens: 131072,
    outputTokens: 4096
  },
  "grok-2-mini-latest": {
    vision: false,
    tools: true,
    maxTokens: 131072,
    outputTokens: 4096
  },
  // Grok Vision
  "grok-2-vision": {
    vision: true,
    tools: true,
    maxTokens: 32768,
    outputTokens: 4096
  },
  "grok-2-vision-latest": {
    vision: true,
    tools: true,
    maxTokens: 32768,
    outputTokens: 4096
  },
  // Grok Beta (legacy)
  "grok-beta": {
    vision: false,
    tools: true,
    maxTokens: 131072,
    outputTokens: 4096
  },
  "grok-vision-beta": {
    vision: true,
    tools: true,
    maxTokens: 8192,
    outputTokens: 4096
  }
};
3505
/**
 * Creates the xAI (Grok) provider.
 *
 * @param {object} [config] - Optional `apiKey` (falls back to the
 *   XAI_API_KEY environment variable) and `baseUrl`.
 * @returns {object} Provider with `name`, `supportedModels`,
 *   `languageModel(modelId)` and `getCapabilities(modelId)`.
 */
function createXAI(config = {}) {
  const apiKey = config.apiKey ?? process.env.XAI_API_KEY ?? "";
  return {
    name: "xai",
    supportedModels: Object.keys(XAI_MODELS),
    languageModel(modelId) {
      return createXAIAdapter({
        apiKey,
        model: modelId,
        baseUrl: config.baseUrl
      });
    },
    getCapabilities(modelId) {
      // Unknown ids fall back to the grok-2 capability profile.
      const caps = XAI_MODELS[modelId] ?? XAI_MODELS["grok-2"];
      const imageTypes = caps.vision
        ? ["image/png", "image/jpeg", "image/gif", "image/webp"]
        : [];
      return {
        supportsVision: caps.vision,
        supportsTools: caps.tools,
        supportsThinking: false,
        supportsStreaming: true,
        supportsPDF: false,
        supportsAudio: false,
        supportsVideo: false,
        maxTokens: caps.maxTokens,
        supportedImageTypes: imageTypes,
        supportsJsonMode: false,
        // xAI doesn't support JSON mode yet
        supportsSystemMessages: true
      };
    }
  };
}
3536
+
3537
// src/providers/azure/index.ts
/**
 * Infers model capabilities from an Azure OpenAI deployment name. Azure
 * deployments are user-named and carry no capability metadata, so this is
 * a best-effort substring heuristic on the lowercased name.
 *
 * @param {string} deploymentName - The Azure deployment name.
 * @returns {{vision: boolean, tools: boolean, maxTokens: number}}
 */
function detectCapabilitiesFromDeployment(deploymentName) {
  const name = deploymentName.toLowerCase();
  const looksLikeGpt4o = name.includes("gpt-4o") || name.includes("gpt4o");
  const looksLikeGpt4 = name.includes("gpt-4") || name.includes("gpt4");
  if (looksLikeGpt4o) {
    return { vision: true, tools: true, maxTokens: 128e3 };
  }
  // GPT-4 Turbo / Vision variants get the large multimodal profile.
  if (looksLikeGpt4 && (name.includes("turbo") || name.includes("vision"))) {
    return { vision: true, tools: true, maxTokens: 128e3 };
  }
  if (looksLikeGpt4) {
    return { vision: false, tools: true, maxTokens: 8192 };
  }
  const looksLikeGpt35 =
    name.includes("gpt-35") || name.includes("gpt-3.5") || name.includes("gpt35");
  if (looksLikeGpt35) {
    return { vision: false, tools: true, maxTokens: 16385 };
  }
  // o1-series: vision but no tool calling.
  if (name.includes("o1")) {
    return { vision: true, tools: false, maxTokens: 128e3 };
  }
  // Unknown deployment: assume a GPT-4-class text model.
  return { vision: false, tools: true, maxTokens: 8192 };
}
3557
/**
 * Creates the Azure OpenAI provider. Unlike other providers, models are
 * addressed by user-chosen deployment name, so capabilities are inferred
 * heuristically from that name via detectCapabilitiesFromDeployment.
 *
 * @param {object} config - `apiKey`, `resourceName`, `deploymentName`
 *   (each falling back to AZURE_OPENAI_* environment variables), plus
 *   optional `apiVersion` and `baseUrl`.
 * @returns {object} Provider with `name`, `supportedModels`,
 *   `languageModel(deploymentName)` and `getCapabilities(deploymentName)`.
 */
function createAzure(config) {
  const apiKey = config.apiKey ?? process.env.AZURE_OPENAI_API_KEY ?? "";
  const resourceName = config.resourceName ?? process.env.AZURE_OPENAI_RESOURCE ?? "";
  const defaultDeployment = config.deploymentName ?? process.env.AZURE_OPENAI_DEPLOYMENT ?? "";
  // Only the configured default deployment is advertised, if any.
  const supportedModels = defaultDeployment ? [defaultDeployment] : [];
  return {
    name: "azure",
    supportedModels,
    languageModel(deploymentName) {
      const target = deploymentName || defaultDeployment;
      return createAzureAdapter({
        apiKey,
        resourceName,
        deploymentName: target,
        apiVersion: config.apiVersion,
        baseUrl: config.baseUrl
      });
    },
    getCapabilities(deploymentName) {
      const caps = detectCapabilitiesFromDeployment(deploymentName || defaultDeployment);
      const imageTypes = caps.vision
        ? ["image/png", "image/jpeg", "image/gif", "image/webp"]
        : [];
      return {
        supportsVision: caps.vision,
        supportsTools: caps.tools,
        supportsThinking: false,
        supportsStreaming: true,
        supportsPDF: false,
        supportsAudio: false,
        supportsVideo: false,
        maxTokens: caps.maxTokens,
        supportedImageTypes: imageTypes,
        supportsJsonMode: true,
        supportsSystemMessages: true
      };
    }
  };
}
3594
+
3595
// src/providers/openai.ts
/**
 * Converts SDK tool definitions into OpenAI "function" tool declarations.
 *
 * @param {Array<{name: string, description: string, inputSchema: object}>} tools
 * @returns {Array<object>} OpenAI-format tool list.
 */
function transformTools(tools) {
  return tools.map(({ name, description, inputSchema }) => ({
    type: "function",
    function: {
      name,
      description,
      parameters: inputSchema
    }
  }));
}
3606
/**
 * Extracts tool calls from an OpenAI chat-completion response.
 * Unparseable argument JSON is logged and replaced with an empty object
 * rather than thrown, so one bad call doesn't abort the batch.
 *
 * @param {object} response - Raw OpenAI response.
 * @returns {Array<{id: string, name: string, input: object}>}
 */
function parseToolCalls(response) {
  const rawCalls = response?.choices?.[0]?.message?.tool_calls || [];
  const parsed = [];
  for (const rawCall of rawCalls) {
    let args = {};
    try {
      args = JSON.parse(rawCall.function.arguments);
    } catch (parseError) {
      console.error(
        "Failed to parse tool arguments:",
        rawCall.function.arguments,
        parseError
      );
    }
    parsed.push({
      id: rawCall.id,
      name: rawCall.function.name,
      input: args
    });
  }
  return parsed;
}
3629
/**
 * Returns the assistant text of the first choice in an OpenAI response,
 * or "" when there is none.
 */
function extractTextContent(response) {
  const message = response?.choices?.[0]?.message;
  return message?.content || "";
}
3635
/**
 * Formats tool execution results as OpenAI "tool" role messages.
 *
 * @param {Array<{toolCallId: string, content: string}>} results
 * @returns {Array<object>} One tool message per result.
 */
function formatToolResults(results) {
  const messages = [];
  for (const { toolCallId, content } of results) {
    messages.push({
      role: "tool",
      tool_call_id: toolCallId,
      content
    });
  }
  return messages;
}
3642
// True when the OpenAI response stopped to request tool calls.
function isToolUseStop(response) {
  const firstChoice = response?.choices?.[0];
  return firstChoice?.finish_reason === "tool_calls";
}
3647
// True when the OpenAI response ended its turn normally.
function isEndTurnStop(response) {
  const firstChoice = response?.choices?.[0];
  return firstChoice?.finish_reason === "stop";
}
3652
// Returns the OpenAI finish_reason of the first choice, or "unknown".
function getStopReason(response) {
  const firstChoice = response?.choices?.[0];
  return firstChoice?.finish_reason || "unknown";
}
3657
/**
 * Builds the assistant message that echoes requested tool calls back into
 * an OpenAI conversation (content is null when there was no text).
 *
 * @param {Array<{id: string, name: string, input: object}>} toolCalls
 * @param {string} textContent - Optional assistant text alongside the calls.
 * @returns {object} OpenAI assistant message with `tool_calls`.
 */
function buildAssistantToolMessage(toolCalls, textContent) {
  const tool_calls = [];
  for (const call of toolCalls) {
    tool_calls.push({
      id: call.id,
      type: "function",
      function: {
        name: call.name,
        arguments: JSON.stringify(call.input)
      }
    });
  }
  return {
    role: "assistant",
    content: textContent || null,
    tool_calls
  };
}
3671
/**
 * Builds OpenAI tool-result messages: one "tool" role message per result,
 * keyed by tool_call_id (same output as formatToolResults, inlined here).
 */
function buildToolResultMessage(results) {
  const messages = [];
  for (const { toolCallId, content } of results) {
    messages.push({ role: "tool", tool_call_id: toolCallId, content });
  }
  return messages;
}
3674
// Provider formatter for OpenAI and OpenAI-compatible chat APIs (also
// reused for groq/ollama/xai/azure — see formatter registry below).
// Bundles the tool-transform, response-parsing and message-building
// functions defined above into the common formatter interface.
var openaiFormatter = {
  transformTools,
  parseToolCalls,
  formatToolResults,
  isToolUseStop,
  isEndTurnStop,
  getStopReason,
  extractTextContent,
  buildAssistantToolMessage,
  buildToolResultMessage
};
3685
+
3686
// src/providers/anthropic.ts
/**
 * Converts SDK tool definitions into Anthropic tool declarations
 * (Anthropic uses `input_schema` rather than nested `function.parameters`).
 */
function transformTools2(tools) {
  return tools.map(({ name, description, inputSchema }) => ({
    name,
    description,
    input_schema: inputSchema
  }));
}
3694
/**
 * Extracts tool_use blocks from an Anthropic response. Accepts either a
 * full response object (reads `.content`) or a bare content-block array.
 *
 * @returns {Array<{id: string, name: string, input: object}>}
 */
function parseToolCalls2(response) {
  const blocks = Array.isArray(response) ? response : response?.content;
  if (!Array.isArray(blocks)) {
    return [];
  }
  const calls = [];
  for (const block of blocks) {
    if (block?.type !== "tool_use") continue;
    calls.push({
      id: block.id,
      name: block.name,
      input: block.input || {}
    });
  }
  return calls;
}
3705
/**
 * Joins all text blocks of an Anthropic response with newlines. Accepts a
 * full response object or a bare content-block array; returns "" otherwise.
 */
function extractTextContent2(response) {
  const blocks = Array.isArray(response) ? response : response?.content;
  if (!Array.isArray(blocks)) {
    return "";
  }
  const texts = [];
  for (const block of blocks) {
    if (block?.type === "text") {
      texts.push(block.text || "");
    }
  }
  return texts.join("\n");
}
3712
/**
 * Formats tool execution results as Anthropic tool_result content blocks.
 */
function formatToolResults2(results) {
  return results.map(({ toolCallId, content }) => ({
    type: "tool_result",
    tool_use_id: toolCallId,
    content
  }));
}
3719
// True when the Anthropic response stopped to request tool use.
function isToolUseStop2(response) {
  return response?.stop_reason === "tool_use";
}
3723
// True when the Anthropic response ended its turn normally
// ("end_turn", or "stop" from compatible backends).
function isEndTurnStop2(response) {
  return ["end_turn", "stop"].includes(response?.stop_reason);
}
3728
// Returns the Anthropic stop_reason, or "unknown" when absent.
function getStopReason2(response) {
  const reason = response?.stop_reason;
  return reason || "unknown";
}
3732
/**
 * Builds the Anthropic assistant message that echoes requested tool calls
 * back into the conversation: an optional leading text block followed by
 * one tool_use block per call.
 */
function buildAssistantToolMessage2(toolCalls, textContent) {
  const blocks = textContent ? [{ type: "text", text: textContent }] : [];
  for (const call of toolCalls) {
    blocks.push({
      type: "tool_use",
      id: call.id,
      name: call.name,
      input: call.input
    });
  }
  return { role: "assistant", content: blocks };
}
3747
/**
 * Builds the Anthropic user message carrying tool results: tool_result
 * content blocks wrapped in a single "user" role message.
 */
function buildToolResultMessage2(results) {
  const content = results.map(({ toolCallId, content: body }) => ({
    type: "tool_result",
    tool_use_id: toolCallId,
    content: body
  }));
  return { role: "user", content };
}
3753
// Provider formatter for the Anthropic Messages API, exposing the
// tool-transform, response-parsing and message-building functions above
// under the common formatter interface.
var anthropicFormatter = {
  transformTools: transformTools2,
  parseToolCalls: parseToolCalls2,
  formatToolResults: formatToolResults2,
  isToolUseStop: isToolUseStop2,
  isEndTurnStop: isEndTurnStop2,
  getStopReason: getStopReason2,
  extractTextContent: extractTextContent2,
  buildAssistantToolMessage: buildAssistantToolMessage2,
  buildToolResultMessage: buildToolResultMessage2
};
3764
+
3765
// src/providers/gemini.ts
/**
 * Converts SDK tool definitions into Gemini's tool format: a single-element
 * array holding one `functionDeclarations` group.
 */
function transformTools3(tools) {
  const functionDeclarations = tools.map(({ name, description, inputSchema }) => ({
    name,
    description,
    parameters: inputSchema
  }));
  return [{ functionDeclarations }];
}
3777
/**
 * Extracts functionCall parts from the first Gemini candidate. Gemini does
 * not assign call ids, so a synthetic `gemini_<timestamp>_<index>` id is
 * generated for each call.
 *
 * @returns {Array<{id: string, name: string, input: object}>}
 */
function parseToolCalls3(response) {
  const parts = response?.candidates?.[0]?.content?.parts;
  if (!parts) return [];
  const calls = [];
  for (const part of parts) {
    const call = part.functionCall;
    if (!call) continue;
    calls.push({
      id: `gemini_${Date.now()}_${calls.length}`,
      // Gemini doesn't provide IDs
      name: call.name,
      input: call.args || {}
    });
  }
  return calls;
}
3797
/**
 * Joins the text parts of the first Gemini candidate with newlines;
 * returns "" when the response has no parts.
 */
function extractTextContent3(response) {
  const parts = response?.candidates?.[0]?.content?.parts;
  if (!parts) return "";
  const texts = [];
  for (const part of parts) {
    if (typeof part.text === "string") {
      texts.push(part.text);
    }
  }
  return texts.join("\n");
}
3805
// Formats tool results as Gemini functionResponse payloads.
// FIXME(review): toolCallId has the shape `gemini_<timestamp>_<index>`
// (see parseToolCalls3), so split("_").slice(2) yields the numeric index,
// NOT a tool name. Gemini expects the function's name here; callers likely
// need to supply a toolCallId -> tool-name mapping. Confirm against the
// Gemini function-calling API before relying on this field.
function formatToolResults3(results) {
  return results.map((r) => {
    let response;
    try {
      // Pass structured JSON output through unchanged when possible.
      response = JSON.parse(r.content);
    } catch {
      // Non-JSON output is wrapped so the payload is still an object.
      response = { result: r.content };
    }
    return {
      name: r.toolCallId.split("_").slice(2).join("_") || "unknown",
      // Extract name from ID
      response
    };
  });
}
3820
/**
 * True when the first Gemini candidate contains at least one functionCall
 * part (Gemini signals tool use via parts, not a finish reason).
 */
function isToolUseStop3(response) {
  const parts = response?.candidates?.[0]?.content?.parts;
  if (!parts) return false;
  for (const part of parts) {
    if (part.functionCall !== void 0) {
      return true;
    }
  }
  return false;
}
3828
// True when the first Gemini candidate finished normally
// ("STOP", or "END_TURN" from compatible backends).
function isEndTurnStop3(response) {
  const finishReason = response?.candidates?.[0]?.finishReason;
  return ["STOP", "END_TURN"].includes(finishReason);
}
3834
// Returns the first Gemini candidate's finishReason, or "unknown".
function getStopReason3(response) {
  const firstCandidate = response?.candidates?.[0];
  return firstCandidate?.finishReason || "unknown";
}
3839
/**
 * Builds the Gemini "model" message that echoes requested function calls
 * back into the conversation: an optional leading text part followed by
 * one functionCall part per call (synthetic ids are not sent to Gemini).
 */
function buildAssistantToolMessage3(toolCalls, textContent) {
  const parts = textContent ? [{ text: textContent }] : [];
  for (const call of toolCalls) {
    parts.push({
      functionCall: {
        name: call.name,
        args: call.input
      }
    });
  }
  return { role: "model", parts };
}
3857
/**
 * Builds the Gemini "user" message carrying tool results as
 * functionResponse parts. JSON content is passed through as an object;
 * anything else is wrapped as { result: <string> }.
 */
function buildToolResultMessage3(results) {
  const parts = [];
  for (const { content } of results) {
    let response;
    try {
      response = JSON.parse(content);
    } catch {
      response = { result: content };
    }
    parts.push({
      functionResponse: {
        name: "tool",
        // This should be the actual tool name
        response
      }
    });
  }
  return { role: "user", parts };
}
3878
// Provider formatter for the Google Gemini generateContent API, exposing
// the tool-transform, response-parsing and message-building functions
// above under the common formatter interface.
var geminiFormatter = {
  transformTools: transformTools3,
  parseToolCalls: parseToolCalls3,
  formatToolResults: formatToolResults3,
  isToolUseStop: isToolUseStop3,
  isEndTurnStop: isEndTurnStop3,
  getStopReason: getStopReason3,
  extractTextContent: extractTextContent3,
  buildAssistantToolMessage: buildAssistantToolMessage3,
  buildToolResultMessage: buildToolResultMessage3
};
3889
+
3890
// src/providers/formatter-registry.ts
// Registry mapping lowercase provider names to their response formatter.
// Providers with OpenAI-compatible chat APIs share openaiFormatter.
var formatters = {
  openai: openaiFormatter,
  anthropic: anthropicFormatter,
  google: geminiFormatter,
  gemini: geminiFormatter,
  // Alias
  // OpenAI-compatible providers use openaiFormatter
  groq: openaiFormatter,
  ollama: openaiFormatter,
  xai: openaiFormatter,
  azure: openaiFormatter
};
3903
/**
 * Looks up the formatter for a provider (case-insensitive).
 *
 * @param {string} provider - Provider name, e.g. "openai".
 * @returns {object} The matching formatter.
 * @throws {Error} When the provider has no registered formatter.
 */
function getFormatter(provider) {
  const match = formatters[provider.toLowerCase()];
  if (match) {
    return match;
  }
  throw new Error(
    `Unsupported provider: ${provider}. Supported providers: ${Object.keys(formatters).join(", ")}`
  );
}
3912
// Case-insensitive membership check against the formatter registry.
// (Deliberately uses `in`, matching getFormatter's lookup semantics.)
function isProviderSupported(provider) {
  const key = provider.toLowerCase();
  return key in formatters;
}
3915
// Lists every provider name registered in the formatter registry.
function getSupportedProviders() {
  const names = Object.keys(formatters);
  return names;
}
3918
+
3919
+ // src/providers/index.ts
3920
+ registerProvider("openai", (config) => createOpenAI(config));
3921
+ registerProvider("anthropic", (config) => createAnthropic(config));
3922
+ registerProvider("groq", (config) => createGroq(config));
3923
+ registerProvider("ollama", (config) => createOllama(config));
3924
+ registerProvider("google", (config) => createGoogle(config));
3925
+ registerProvider("xai", (config) => createXAI(config));
3926
+ registerProvider("azure", (config) => createAzure(config));
3927
+
3928
// src/server/agent-loop.ts
// Hard cap on LLM round-trips per agent run, preventing runaway tool loops.
var DEFAULT_MAX_ITERATIONS = 20;
3930
/**
 * Runs the LLM <-> tool agent loop as an async generator of stream events.
 *
 * Each iteration: call the LLM, emit any text content as message events,
 * execute requested tool calls (server- or client-side), append the
 * results to the conversation, and repeat until the model ends its turn,
 * an error occurs, the signal aborts, or maxIterations is reached.
 *
 * Fixes vs. previous revision:
 * - The emitEvent argument passed to executeToolCalls was a generator
 *   function; calling it only created a suspended generator, so every
 *   action:start/action:args/action:end event was silently dropped. Events
 *   are now collected through a plain callback and yielded from here
 *   (after the batch completes, since executeToolCalls is not a generator).
 * - The tool:result payload no longer assumes result.content is valid JSON.
 *
 * @param {object} options - messages, tools, systemPrompt, provider,
 *   signal, config ({maxIterations, debug}), callLLM, executeServerTool,
 *   waitForClientToolResult.
 * @yields Stream events: loop:iteration, message:*, action:*, tool:result,
 *   error, loop:complete, done.
 */
async function* runAgentLoop(options) {
  const {
    messages,
    tools,
    systemPrompt,
    provider,
    signal,
    config,
    callLLM,
    executeServerTool,
    waitForClientToolResult
  } = options;
  const maxIterations = config?.maxIterations ?? DEFAULT_MAX_ITERATIONS;
  const debug = config?.debug ?? false;
  const formatter = getFormatter(provider);
  const serverTools = tools.filter((t) => t.location === "server");
  const clientTools = tools.filter((t) => t.location === "client");
  const allTools = [...serverTools, ...clientTools];
  const providerTools = formatter.transformTools(allTools);
  const conversation = buildConversation(
    messages,
    systemPrompt
  );
  let iteration = 0;
  if (debug) {
    console.log("[AgentLoop] Starting with", {
      messageCount: messages.length,
      toolCount: allTools.length,
      serverToolCount: serverTools.length,
      clientToolCount: clientTools.length,
      maxIterations
    });
  }
  while (iteration < maxIterations) {
    // Honor cancellation between LLM round-trips.
    if (signal?.aborted) {
      yield {
        type: "loop:complete",
        iterations: iteration,
        aborted: true
      };
      return;
    }
    iteration++;
    yield {
      type: "loop:iteration",
      iteration,
      maxIterations
    };
    if (debug) {
      console.log(`[AgentLoop] Iteration ${iteration}/${maxIterations}`);
    }
    try {
      const response = await callLLM(conversation, providerTools);
      const toolCalls = formatter.parseToolCalls(response);
      const textContent = formatter.extractTextContent(response);
      if (textContent) {
        const messageId = generateMessageId();
        yield { type: "message:start", id: messageId };
        yield { type: "message:delta", content: textContent };
        yield { type: "message:end" };
      }
      if (formatter.isToolUseStop(response) && toolCalls.length > 0) {
        if (debug) {
          console.log(
            "[AgentLoop] Tool calls:",
            toolCalls.map((tc) => tc.name)
          );
        }
        // Collect tool lifecycle events via a plain callback so they can be
        // re-yielded from this generator. (A generator function here would
        // never run its body — emitEvent?.(e) would just create and discard
        // a suspended generator.)
        const pendingEvents = [];
        const results = await executeToolCalls(
          toolCalls,
          tools,
          executeServerTool,
          waitForClientToolResult,
          (event) => {
            pendingEvents.push(event);
          },
          debug
        );
        for (const event of pendingEvents) {
          yield event;
        }
        for (const result of results) {
          const toolCall = toolCalls.find((tc) => tc.id === result.toolCallId);
          if (toolCall) {
            // result.content is produced by JSON.stringify inside
            // executeToolCalls, but guard anyway so a malformed payload
            // surfaces as raw text instead of crashing the whole loop.
            let parsedResult;
            try {
              parsedResult = JSON.parse(result.content);
            } catch {
              parsedResult = result.content;
            }
            yield {
              type: "tool:result",
              id: result.toolCallId,
              name: toolCall.name,
              result: parsedResult
            };
          }
        }
        // Record the assistant's tool request and the tool outputs so the
        // next LLM call sees the full exchange.
        const assistantMessage = formatter.buildAssistantToolMessage(
          toolCalls,
          textContent
        );
        conversation.push(assistantMessage);
        const toolResultMessage = formatter.buildToolResultMessage(results);
        if (Array.isArray(toolResultMessage)) {
          conversation.push(...toolResultMessage);
        } else {
          conversation.push(toolResultMessage);
        }
        continue;
      }
      if (formatter.isEndTurnStop(response)) {
        if (debug) {
          console.log("[AgentLoop] End turn detected");
        }
        break;
      }
      // Unexpected stop reason (length, content filter, ...): stop looping.
      const stopReason = formatter.getStopReason(response);
      if (debug) {
        console.log("[AgentLoop] Unknown stop reason:", stopReason);
      }
      break;
    } catch (error) {
      if (debug) {
        console.error("[AgentLoop] Error:", error);
      }
      yield {
        type: "error",
        message: error instanceof Error ? error.message : "Unknown error",
        code: "AGENT_LOOP_ERROR"
      };
      break;
    }
  }
  yield {
    type: "loop:complete",
    iterations: iteration,
    maxIterationsReached: iteration >= maxIterations
  };
  yield { type: "done" };
}
4062
/**
 * Builds the initial provider conversation: an optional leading system
 * message followed by the chat history as {role, content} pairs.
 */
function buildConversation(messages, systemPrompt) {
  const conversation = systemPrompt
    ? [{ role: "system", content: systemPrompt }]
    : [];
  for (const { role, content } of messages) {
    conversation.push({ role, content });
  }
  return conversation;
}
4078
// Executes a batch of tool calls sequentially and returns one result per
// call. Server tools run via their own `handler` or the runtime-provided
// executeServerTool fallback; client tools block on waitForClientToolResult.
// Lifecycle events (action:start / action:args / action:end) are reported
// through the optional emitEvent callback. Failures never throw out of this
// function — every outcome becomes a { toolCallId, content, success, error }
// result whose `content` is always a JSON string.
async function executeToolCalls(toolCalls, tools, executeServerTool, waitForClientToolResult, emitEvent, debug) {
  const results = [];
  for (const toolCall of toolCalls) {
    const tool = tools.find((t) => t.name === toolCall.name);
    if (!tool) {
      // The model asked for a tool we don't know; report it as a failed
      // result so the model can recover, rather than throwing.
      if (debug) {
        console.warn(`[AgentLoop] Unknown tool: ${toolCall.name}`);
      }
      results.push({
        toolCallId: toolCall.id,
        content: JSON.stringify({
          success: false,
          error: `Unknown tool: ${toolCall.name}`
        }),
        success: false,
        error: `Unknown tool: ${toolCall.name}`
      });
      continue;
    }
    // Announce the call and its arguments before executing.
    emitEvent?.({
      type: "action:start",
      id: toolCall.id,
      name: toolCall.name
    });
    emitEvent?.({
      type: "action:args",
      id: toolCall.id,
      args: JSON.stringify(toolCall.input)
    });
    try {
      let response;
      if (tool.location === "server") {
        // Prefer the tool's own handler; fall back to the runtime hook.
        if (tool.handler) {
          response = await tool.handler(toolCall.input);
        } else if (executeServerTool) {
          response = await executeServerTool(toolCall.name, toolCall.input);
        } else {
          response = {
            success: false,
            error: `No handler for server tool: ${toolCall.name}`
          };
        }
      } else {
        // Client tool: block until the client reports a result.
        if (waitForClientToolResult) {
          response = await waitForClientToolResult(
            toolCall.id,
            toolCall.name,
            toolCall.input
          );
        } else {
          response = {
            success: false,
            error: `No client tool handler for: ${toolCall.name}`
          };
        }
      }
      emitEvent?.({
        type: "action:end",
        id: toolCall.id,
        name: toolCall.name,
        result: response
      });
      // `response` is assumed to carry { success, error? } — the fallback
      // branches above guarantee it, handlers are expected to match.
      results.push({
        toolCallId: toolCall.id,
        content: JSON.stringify(response),
        success: response.success,
        error: response.error
      });
    } catch (error) {
      // A throwing handler becomes a failed result; the loop continues
      // with the remaining tool calls.
      const errorMessage = error instanceof Error ? error.message : "Tool execution failed";
      emitEvent?.({
        type: "action:end",
        id: toolCall.id,
        name: toolCall.name,
        error: errorMessage
      });
      results.push({
        toolCallId: toolCall.id,
        content: JSON.stringify({
          success: false,
          error: errorMessage
        }),
        success: false,
        error: errorMessage
      });
    }
  }
  return results;
}
4167
+
4168
+ export { AnthropicAdapter, AzureAdapter, DEFAULT_MAX_ITERATIONS, GoogleAdapter, GroqAdapter, OllamaAdapter, OpenAIAdapter, Runtime, XAIAdapter, anthropicFormatter, createAnthropic, createAnthropicAdapter, createAzure, createAzureAdapter, createEventStream, createExpressMiddleware, createGoogle, createGoogleAdapter, createGroq, createGroqAdapter, createHonoApp, createNextHandler, createNodeHandler, createOllama, createOllamaAdapter, createOpenAI, createOpenAIAdapter, createRuntime, createSSEHeaders, createSSEResponse, createXAI, createXAIAdapter, formatSSEData, geminiFormatter, getAvailableProviders, getFormatter, getModelCapabilities, getProvider, getSupportedProviders, hasProvider, isProviderSupported, listProviders, openaiFormatter, registerProvider, runAgentLoop };
4169
+ //# sourceMappingURL=index.mjs.map
4170
+ //# sourceMappingURL=index.mjs.map