@yourgpt/llm-sdk 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1616 @@
1
+ import { generateMessageId, generateToolCallId } from '@yourgpt/copilot-sdk/core';
2
+
3
// src/adapters/base.ts

/**
 * Normalize chat messages into the generic { role, content } wire shape,
 * optionally prepending a system prompt.
 *
 * @param {Array<{role: string, content?: string}>} messages - conversation history
 * @param {string} [systemPrompt] - system prompt to prepend when non-empty
 * @returns {Array<{role: string, content: string}>}
 */
function formatMessages(messages, systemPrompt) {
  const systemEntries = systemPrompt
    ? [{ role: "system", content: systemPrompt }]
    : [];
  // Missing content is normalized to "" so providers never receive null.
  const history = messages.map((msg) => ({
    role: msg.role,
    content: msg.content ?? ""
  }));
  return [...systemEntries, ...history];
}
17
/**
 * Recursively convert an action parameter definition into a JSON Schema
 * fragment. Scalars map directly; `array` recurses into `items` and
 * `object` recurses into each entry of `properties`.
 *
 * @param {object} param - { type, description?, enum?, items?, properties? }
 * @returns {object} JSON Schema fragment
 */
function parameterToJsonSchema(param) {
  const schema = { type: param.type };
  if (param.description) schema.description = param.description;
  if (param.enum) schema.enum = param.enum;
  // Arrays describe their element schema via `items`.
  if (param.type === "array" && param.items) {
    schema.items = parameterToJsonSchema(param.items);
  }
  // Objects recurse into each named property.
  if (param.type === "object" && param.properties) {
    schema.properties = {};
    for (const [name, child] of Object.entries(param.properties)) {
      schema.properties[name] = parameterToJsonSchema(child);
    }
  }
  return schema;
}
44
/**
 * Convert action definitions into OpenAI-style `tools` entries:
 * one `{ type: "function", function: {...} }` descriptor per action,
 * with parameters expressed as a JSON Schema object.
 *
 * @param {Array} actions - actions with name, description, and optional parameters map
 * @returns {Array} OpenAI function-tool descriptors
 */
function formatTools(actions) {
  return actions.map((action) => {
    const properties = {};
    const required = [];
    if (action.parameters) {
      for (const [key, param] of Object.entries(action.parameters)) {
        properties[key] = parameterToJsonSchema(param);
        if (param.required) required.push(key);
      }
    }
    return {
      type: "function",
      function: {
        name: action.name,
        description: action.description,
        parameters: { type: "object", properties, required }
      }
    };
  });
}
63
/**
 * Whether the message carries at least one image attachment
 * (read from message.metadata.attachments).
 */
function hasImageAttachments(message) {
  const attachments = message.metadata?.attachments ?? [];
  return attachments.some((a) => a.type === "image");
}
67
/**
 * Whether the message carries media that the Anthropic path can ingest:
 * any image attachment, or a PDF file attachment.
 */
function hasMediaAttachments(message) {
  const attachments = message.metadata?.attachments ?? [];
  return attachments.some(
    (a) =>
      a.type === "image" ||
      (a.type === "file" && a.mimeType === "application/pdf")
  );
}
73
/**
 * Map an attachment to an Anthropic image content block.
 * URL attachments become a `source.type: "url"` block; inline data becomes
 * base64 (any `data:` URI header is stripped first). Returns null for
 * non-images or attachments with neither url nor data.
 */
function attachmentToAnthropicImage(attachment) {
  if (attachment.type !== "image") return null;
  if (attachment.url) {
    return { type: "image", source: { type: "url", url: attachment.url } };
  }
  if (!attachment.data) return null;
  let raw = attachment.data;
  // Strip a data-URI header ("data:<mime>;base64,") if present.
  if (raw.startsWith("data:")) {
    const comma = raw.indexOf(",");
    if (comma !== -1) raw = raw.slice(comma + 1);
  }
  return {
    type: "image",
    source: {
      type: "base64",
      media_type: attachment.mimeType || "image/png",
      data: raw
    }
  };
}
101
/**
 * Map an attachment to an OpenAI `image_url` content part.
 * URLs pass through untouched; inline data is wrapped into a data URI
 * when it is not one already. Returns null for non-images or attachments
 * with neither url nor data.
 */
function attachmentToOpenAIImage(attachment) {
  if (attachment.type !== "image") return null;
  const { url, data, mimeType } = attachment;
  let imageUrl;
  if (url) {
    imageUrl = url;
  } else if (data) {
    imageUrl = data.startsWith("data:")
      ? data
      : `data:${mimeType || "image/png"};base64,${data}`;
  } else {
    return null;
  }
  return { type: "image_url", image_url: { url: imageUrl, detail: "auto" } };
}
119
/**
 * Map a PDF file attachment to an Anthropic document content block.
 * URL attachments become a `source.type: "url"` block; inline data becomes
 * base64 (any `data:` URI header is stripped). Returns null for anything
 * that is not a PDF file attachment, or that has neither url nor data.
 */
function attachmentToAnthropicDocument(attachment) {
  const isPdf =
    attachment.type === "file" && attachment.mimeType === "application/pdf";
  if (!isPdf) return null;
  if (attachment.url) {
    return { type: "document", source: { type: "url", url: attachment.url } };
  }
  if (!attachment.data) return null;
  let payload = attachment.data;
  // Strip a data-URI header ("data:application/pdf;base64,") if present.
  if (payload.startsWith("data:")) {
    const comma = payload.indexOf(",");
    if (comma !== -1) payload = payload.slice(comma + 1);
  }
  return {
    type: "document",
    source: { type: "base64", media_type: "application/pdf", data: payload }
  };
}
149
/**
 * Build Anthropic message content for a user message.
 * Returns the plain content string when there is no supported media;
 * otherwise an array of image/document blocks followed by an optional
 * trailing text block.
 */
function messageToAnthropicContent(message) {
  const text = message.content ?? "";
  if (!hasMediaAttachments(message)) return text;
  const blocks = [];
  for (const attachment of message.metadata?.attachments ?? []) {
    // Try image first, then PDF document; helpers return null otherwise.
    const block =
      attachmentToAnthropicImage(attachment) ??
      attachmentToAnthropicDocument(attachment);
    if (block) blocks.push(block);
  }
  if (text) blocks.push({ type: "text", text });
  return blocks;
}
174
/**
 * Build OpenAI message content for a user message.
 * Returns the plain content string when there are no image attachments;
 * otherwise an array with an optional leading text part followed by
 * image_url parts.
 */
function messageToOpenAIContent(message) {
  const text = message.content ?? "";
  if (!hasImageAttachments(message)) return text;
  const parts = [];
  if (text) parts.push({ type: "text", text });
  for (const attachment of message.metadata?.attachments ?? []) {
    const part = attachmentToOpenAIImage(attachment);
    if (part) parts.push(part);
  }
  return parts;
}
194
/**
 * Convert OpenAI-style messages into Anthropic's { system, messages } shape.
 *
 * - system messages are dropped here (the system prompt travels separately)
 * - assistant tool_calls become `tool_use` content blocks
 * - consecutive "tool" messages are coalesced into a single user message of
 *   `tool_result` blocks (Anthropic expects tool results in a user turn)
 * - user messages are expanded via messageToAnthropicContent (media support)
 *
 * @param {Array} messages - OpenAI-style chat messages
 * @param {string} [systemPrompt] - value for the returned `system` field
 * @returns {{system: string, messages: Array}}
 */
function formatMessagesForAnthropic(messages, systemPrompt) {
  const formatted = [];
  for (let i = 0; i < messages.length; i++) {
    const msg = messages[i];
    if (msg.role === "system") continue;
    if (msg.role === "assistant") {
      const content = [];
      if (msg.content) {
        content.push({ type: "text", text: msg.content });
      }
      if (msg.tool_calls && msg.tool_calls.length > 0) {
        for (const tc of msg.tool_calls) {
          // FIX: guard the parse — malformed tool-call arguments previously
          // threw and aborted formatting; AnthropicAdapter.convertToAnthropicMessages
          // treats the same failure as an empty input object.
          let input = {};
          try {
            input = JSON.parse(tc.function.arguments);
          } catch {
            // fall back to {} on malformed arguments
          }
          content.push({
            type: "tool_use",
            id: tc.id,
            name: tc.function.name,
            input
          });
        }
      }
      formatted.push({
        role: "assistant",
        // A lone text block collapses back to a plain string.
        content: content.length === 1 && content[0].type === "text" ? content[0].text : content
      });
    } else if (msg.role === "tool" && msg.tool_call_id) {
      const toolResults = [
        {
          type: "tool_result",
          tool_use_id: msg.tool_call_id,
          content: msg.content ?? ""
        }
      ];
      // Fold any immediately-following tool messages into the same user turn.
      while (i + 1 < messages.length && messages[i + 1].role === "tool") {
        i++;
        const nextTool = messages[i];
        if (nextTool.tool_call_id) {
          toolResults.push({
            type: "tool_result",
            tool_use_id: nextTool.tool_call_id,
            content: nextTool.content ?? ""
          });
        }
      }
      formatted.push({
        role: "user",
        content: toolResults
      });
    } else if (msg.role === "user") {
      formatted.push({
        role: "user",
        content: messageToAnthropicContent(msg)
      });
    }
  }
  return {
    system: systemPrompt || "",
    messages: formatted
  };
}
253
/**
 * Normalize internal messages into OpenAI chat-completion format,
 * optionally prepending a system prompt. Assistant messages keep their
 * tool_calls when present; tool messages keep their tool_call_id (tool
 * messages without one are dropped). Unknown roles are skipped.
 */
function formatMessagesForOpenAI(messages, systemPrompt) {
  const out = [];
  if (systemPrompt) {
    out.push({ role: "system", content: systemPrompt });
  }
  for (const msg of messages) {
    switch (msg.role) {
      case "system":
        out.push({ role: "system", content: msg.content ?? "" });
        break;
      case "user":
        out.push({ role: "user", content: messageToOpenAIContent(msg) });
        break;
      case "assistant": {
        const entry = { role: "assistant", content: msg.content };
        if (msg.tool_calls && msg.tool_calls.length > 0) {
          entry.tool_calls = msg.tool_calls;
        }
        out.push(entry);
        break;
      }
      case "tool":
        if (msg.tool_call_id) {
          out.push({
            role: "tool",
            content: msg.content ?? "",
            tool_call_id: msg.tool_call_id
          });
        }
        break;
    }
  }
  return out;
}
285
/**
 * Streaming adapter for the OpenAI Chat Completions API.
 *
 * The `openai` SDK is imported lazily on first use, so the dependency is only
 * loaded when this adapter is actually selected. `stream()` emits adapter
 * events: message:start → (message:delta | action:start | action:args)* →
 * message:end → done, with failures reported as a single "error" event.
 */
var OpenAIAdapter = class {
  constructor(config) {
    this.provider = "openai";
    this.config = config;
    // Default model when neither the adapter config nor the request sets one.
    this.model = config.model || "gpt-4o";
  }
  // Lazily construct and cache the OpenAI client (dynamic import).
  async getClient() {
    if (!this.client) {
      const { default: OpenAI } = await import('openai');
      this.client = new OpenAI({
        apiKey: this.config.apiKey,
        baseURL: this.config.baseUrl
      });
    }
    return this.client;
  }
  /**
   * Stream a chat completion as adapter events.
   *
   * When `request.rawMessages` is provided it is used as-is (after inlining
   * image attachments into multimodal content parts and prepending the system
   * prompt if absent); otherwise `request.messages` is normalized via
   * formatMessagesForOpenAI.
   */
  async *stream(request) {
    const client = await this.getClient();
    let messages;
    if (request.rawMessages && request.rawMessages.length > 0) {
      // Raw path: expand per-message image attachments into OpenAI
      // multimodal content parts; non-image attachments are dropped here.
      const processedMessages = request.rawMessages.map((msg) => {
        const hasAttachments = msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0;
        if (hasAttachments) {
          const content = [];
          if (msg.content) {
            content.push({ type: "text", text: msg.content });
          }
          for (const attachment of msg.attachments) {
            if (attachment.type === "image") {
              let imageUrl;
              if (attachment.url) {
                imageUrl = attachment.url;
              } else if (attachment.data) {
                // Wrap bare base64 into a data URI; pass data URIs through.
                imageUrl = attachment.data.startsWith("data:") ? attachment.data : `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
              } else {
                continue;
              }
              content.push({
                type: "image_url",
                image_url: { url: imageUrl, detail: "auto" }
              });
            }
          }
          // Replace string content with parts; clear attachments so the
          // provider never sees the non-standard field.
          return { ...msg, content, attachments: void 0 };
        }
        return msg;
      });
      if (request.systemPrompt) {
        // Only prepend the system prompt when the caller did not already
        // include a system message.
        const hasSystem = processedMessages.some((m) => m.role === "system");
        if (!hasSystem) {
          messages = [
            { role: "system", content: request.systemPrompt },
            ...processedMessages
          ];
        } else {
          messages = processedMessages;
        }
      } else {
        messages = processedMessages;
      }
    } else {
      messages = formatMessagesForOpenAI(
        request.messages,
        request.systemPrompt
      );
    }
    const tools = request.actions?.length ? formatTools(request.actions) : void 0;
    const messageId = generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      // Per-request config overrides the adapter-level config.
      const stream = await client.chat.completions.create({
        model: request.config?.model || this.model,
        messages,
        tools,
        temperature: request.config?.temperature ?? this.config.temperature,
        max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
        stream: true
      });
      // OpenAI streams tool calls as fragments: the first fragment carries
      // an `id` and name, later fragments append argument text. Accumulate
      // into currentToolCall and flush action:args when a new call starts
      // or the choice finishes.
      let currentToolCall = null;
      for await (const chunk of stream) {
        if (request.signal?.aborted) {
          break;
        }
        const delta = chunk.choices[0]?.delta;
        if (delta?.content) {
          yield { type: "message:delta", content: delta.content };
        }
        if (delta?.tool_calls) {
          for (const toolCall of delta.tool_calls) {
            if (toolCall.id) {
              // New tool call begins: flush the previous one first.
              if (currentToolCall) {
                yield {
                  type: "action:args",
                  id: currentToolCall.id,
                  args: currentToolCall.arguments
                };
              }
              currentToolCall = {
                id: toolCall.id,
                name: toolCall.function?.name || "",
                arguments: toolCall.function?.arguments || ""
              };
              yield {
                type: "action:start",
                id: currentToolCall.id,
                name: currentToolCall.name
              };
            } else if (currentToolCall && toolCall.function?.arguments) {
              // Continuation fragment: append argument text.
              currentToolCall.arguments += toolCall.function.arguments;
            }
          }
        }
        if (chunk.choices[0]?.finish_reason) {
          // End of the choice: flush the trailing tool call, if any.
          if (currentToolCall) {
            yield {
              type: "action:args",
              id: currentToolCall.id,
              args: currentToolCall.arguments
            };
          }
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      // Errors are surfaced as an event rather than thrown to the consumer.
      yield {
        type: "error",
        message: error instanceof Error ? error.message : "Unknown error",
        code: "OPENAI_ERROR"
      };
    }
  }
};
418
/**
 * Factory for an OpenAIAdapter.
 * @param {object} config - adapter configuration (apiKey, model, baseUrl, ...)
 * @returns {OpenAIAdapter}
 */
function createOpenAIAdapter(config) {
  const adapter = new OpenAIAdapter(config);
  return adapter;
}
421
/**
 * Adapter for the Anthropic Messages API, with both streaming and
 * non-streaming entry points. The `@anthropic-ai/sdk` package is imported
 * lazily on first use. Streaming emits adapter events including
 * thinking:start/delta/end when extended thinking is enabled.
 */
var AnthropicAdapter = class {
  constructor(config) {
    this.provider = "anthropic";
    this.config = config;
    // Default model when neither the adapter config nor the request sets one.
    this.model = config.model || "claude-3-5-sonnet-latest";
  }
  // Lazily construct and cache the Anthropic client (dynamic import).
  async getClient() {
    if (!this.client) {
      const { default: Anthropic } = await import('@anthropic-ai/sdk');
      this.client = new Anthropic({
        apiKey: this.config.apiKey
      });
    }
    return this.client;
  }
  /**
   * Convert OpenAI-style messages to Anthropic format
   *
   * OpenAI format:
   * - { role: "assistant", content: "...", tool_calls: [...] }
   * - { role: "tool", tool_call_id: "...", content: "..." }
   *
   * Anthropic format:
   * - { role: "assistant", content: [{ type: "text", text: "..." }, { type: "tool_use", id: "...", name: "...", input: {...} }] }
   * - { role: "user", content: [{ type: "tool_result", tool_use_id: "...", content: "..." }] }
   */
  convertToAnthropicMessages(rawMessages) {
    const messages = [];
    // Tool results are buffered until the next assistant/user turn, because
    // Anthropic requires them grouped into a single user message.
    const pendingToolResults = [];
    for (const msg of rawMessages) {
      // System content is passed via the top-level `system` option instead.
      if (msg.role === "system") continue;
      if (msg.role === "assistant") {
        // Flush buffered tool results before the assistant turn.
        if (pendingToolResults.length > 0) {
          messages.push({
            role: "user",
            content: pendingToolResults.map((tr) => ({
              type: "tool_result",
              tool_use_id: tr.tool_use_id,
              content: tr.content
            }))
          });
          pendingToolResults.length = 0;
        }
        const content = [];
        // Skip whitespace-only assistant text (Anthropic rejects empty text blocks).
        if (msg.content && typeof msg.content === "string" && msg.content.trim()) {
          content.push({ type: "text", text: msg.content });
        }
        const toolCalls = msg.tool_calls;
        if (toolCalls && toolCalls.length > 0) {
          for (const tc of toolCalls) {
            let input = {};
            try {
              input = JSON.parse(tc.function.arguments);
            } catch {
              // Malformed arguments: send an empty input object.
            }
            content.push({
              type: "tool_use",
              id: tc.id,
              name: tc.function.name,
              input
            });
          }
        }
        // Assistant turns with no text and no tool calls are dropped.
        if (content.length > 0) {
          messages.push({ role: "assistant", content });
        }
      } else if (msg.role === "tool") {
        // Buffer; flushed as a grouped user message later.
        pendingToolResults.push({
          tool_use_id: msg.tool_call_id,
          content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content)
        });
      } else if (msg.role === "user") {
        // Flush buffered tool results before the user turn.
        if (pendingToolResults.length > 0) {
          messages.push({
            role: "user",
            content: pendingToolResults.map((tr) => ({
              type: "tool_result",
              tool_use_id: tr.tool_use_id,
              content: tr.content
            }))
          });
          pendingToolResults.length = 0;
        }
        if (msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0) {
          // Multimodal user turn: text first, then image/document blocks.
          const content = [];
          if (msg.content && typeof msg.content === "string") {
            content.push({ type: "text", text: msg.content });
          }
          for (const attachment of msg.attachments) {
            if (attachment.type === "image") {
              if (attachment.url) {
                content.push({
                  type: "image",
                  source: {
                    type: "url",
                    url: attachment.url
                  }
                });
              } else if (attachment.data) {
                let base64Data = attachment.data;
                // Strip a data-URI header if present.
                if (base64Data.startsWith("data:")) {
                  const commaIndex = base64Data.indexOf(",");
                  if (commaIndex !== -1) {
                    base64Data = base64Data.slice(commaIndex + 1);
                  }
                }
                content.push({
                  type: "image",
                  source: {
                    type: "base64",
                    media_type: attachment.mimeType || "image/png",
                    data: base64Data
                  }
                });
              }
            } else if (attachment.type === "file" && attachment.mimeType === "application/pdf") {
              if (attachment.url) {
                content.push({
                  type: "document",
                  source: {
                    type: "url",
                    url: attachment.url
                  }
                });
              } else if (attachment.data) {
                let base64Data = attachment.data;
                // Strip a data-URI header if present.
                if (base64Data.startsWith("data:")) {
                  const commaIndex = base64Data.indexOf(",");
                  if (commaIndex !== -1) {
                    base64Data = base64Data.slice(commaIndex + 1);
                  }
                }
                content.push({
                  type: "document",
                  source: {
                    type: "base64",
                    media_type: "application/pdf",
                    data: base64Data
                  }
                });
              }
            }
          }
          messages.push({ role: "user", content });
        } else {
          // Plain user turn; non-string content is serialized as JSON.
          messages.push({
            role: "user",
            content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content)
          });
        }
      }
    }
    // Trailing tool results (no user/assistant turn after them) still get flushed.
    if (pendingToolResults.length > 0) {
      messages.push({
        role: "user",
        content: pendingToolResults.map((tr) => ({
          type: "tool_result",
          tool_use_id: tr.tool_use_id,
          content: tr.content
        }))
      });
    }
    return messages;
  }
  /**
   * Build common request options for both streaming and non-streaming
   */
  buildRequestOptions(request) {
    const systemMessage = request.systemPrompt || "";
    let messages;
    if (request.rawMessages && request.rawMessages.length > 0) {
      messages = this.convertToAnthropicMessages(request.rawMessages);
    } else {
      // systemPrompt is passed separately via `system`, so none is given here.
      const formatted = formatMessagesForAnthropic(request.messages, void 0);
      messages = formatted.messages;
    }
    // Anthropic tools use `input_schema`; only flat parameter shapes
    // (type/description/enum) are mapped here, unlike formatTools which
    // recurses into nested schemas.
    const tools = request.actions?.map((action) => ({
      name: action.name,
      description: action.description,
      input_schema: {
        type: "object",
        properties: action.parameters ? Object.fromEntries(
          Object.entries(action.parameters).map(([key, param]) => [
            key,
            {
              type: param.type,
              description: param.description,
              enum: param.enum
            }
          ])
        ) : {},
        required: action.parameters ? Object.entries(action.parameters).filter(([, param]) => param.required).map(([key]) => key) : []
      }
    }));
    const options = {
      model: request.config?.model || this.model,
      // max_tokens is mandatory for the Messages API; 4096 is the fallback.
      max_tokens: request.config?.maxTokens || this.config.maxTokens || 4096,
      system: systemMessage,
      messages,
      tools: tools?.length ? tools : void 0
    };
    // Extended thinking is opt-in via adapter config.
    if (this.config.thinking?.type === "enabled") {
      options.thinking = {
        type: "enabled",
        budget_tokens: this.config.thinking.budgetTokens || 1e4
      };
    }
    return { options, messages };
  }
  /**
   * Non-streaming completion (for debugging/comparison with original studio-ai)
   *
   * Returns { content, toolCalls, thinking?, rawResponse } assembled from the
   * response content blocks. Errors from the SDK propagate to the caller.
   */
  async complete(request) {
    const client = await this.getClient();
    const { options } = this.buildRequestOptions(request);
    const nonStreamingOptions = {
      ...options,
      stream: false
    };
    try {
      const response = await client.messages.create(nonStreamingOptions);
      let content = "";
      let thinking = "";
      const toolCalls = [];
      // Fold the block list into flat text/thinking strings + tool calls.
      for (const block of response.content) {
        if (block.type === "text") {
          content += block.text;
        } else if (block.type === "thinking") {
          thinking += block.thinking;
        } else if (block.type === "tool_use") {
          toolCalls.push({
            id: block.id,
            name: block.name,
            args: block.input
          });
        }
      }
      return {
        content,
        toolCalls,
        // Omit `thinking` entirely when no thinking blocks were returned.
        thinking: thinking || void 0,
        rawResponse: response
      };
    } catch (error) {
      // NOTE(review): no-op passthrough; kept for parity with the original.
      throw error;
    }
  }
  /**
   * Stream a completion as adapter events:
   * message:start → (message:delta | thinking:* | action:start/args)* →
   * message:end → done. Errors become a single "error" event.
   */
  async *stream(request) {
    const client = await this.getClient();
    const { options } = this.buildRequestOptions(request);
    const messageId = generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const stream = await client.messages.stream(options);
      // Tool-use input arrives as partial JSON strings; accumulate until the
      // content block closes, then emit a single action:args event.
      let currentToolUse = null;
      let isInThinkingBlock = false;
      for await (const event of stream) {
        if (request.signal?.aborted) {
          break;
        }
        switch (event.type) {
          case "content_block_start":
            if (event.content_block.type === "tool_use") {
              currentToolUse = {
                id: event.content_block.id,
                name: event.content_block.name,
                input: ""
              };
              yield {
                type: "action:start",
                id: currentToolUse.id,
                name: currentToolUse.name
              };
            } else if (event.content_block.type === "thinking") {
              isInThinkingBlock = true;
              yield { type: "thinking:start" };
            }
            break;
          case "content_block_delta":
            if (event.delta.type === "text_delta") {
              yield { type: "message:delta", content: event.delta.text };
            } else if (event.delta.type === "thinking_delta") {
              yield { type: "thinking:delta", content: event.delta.thinking };
            } else if (event.delta.type === "input_json_delta" && currentToolUse) {
              currentToolUse.input += event.delta.partial_json;
            }
            break;
          case "content_block_stop":
            // Close out whichever block was open: tool use and/or thinking.
            if (currentToolUse) {
              yield {
                type: "action:args",
                id: currentToolUse.id,
                args: currentToolUse.input
              };
              currentToolUse = null;
            }
            if (isInThinkingBlock) {
              yield { type: "thinking:end" };
              isInThinkingBlock = false;
            }
            break;
          case "message_stop":
            // End-of-message handled by the loop exiting; nothing to emit here.
            break;
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      yield {
        type: "error",
        message: error instanceof Error ? error.message : "Unknown error",
        code: "ANTHROPIC_ERROR"
      };
    }
  }
};
737
/**
 * Factory for an AnthropicAdapter.
 * @param {object} config - adapter configuration (apiKey, model, thinking, ...)
 * @returns {AnthropicAdapter}
 */
function createAnthropicAdapter(config) {
  const adapter = new AnthropicAdapter(config);
  return adapter;
}
740
/**
 * Streaming adapter for Groq's OpenAI-compatible chat completions endpoint.
 * Uses plain `fetch` + manual SSE parsing instead of an SDK, so no extra
 * dependency is required. Emits the same adapter event stream as the other
 * adapters; an AbortError terminates the stream with a clean "done".
 */
var GroqAdapter = class {
  constructor(config) {
    this.provider = "groq";
    this.config = config;
    // Default model when neither the adapter config nor the request sets one.
    this.model = config.model || "llama-3.1-70b-versatile";
  }
  /**
   * Stream a completion as adapter events:
   * message:start → (message:delta | action:start | action:args)* →
   * message:end → done. HTTP/network failures become one "error" event.
   */
  async *stream(request) {
    const messages = formatMessages(request.messages, request.systemPrompt);
    const tools = request.actions?.length ? formatTools(request.actions) : void 0;
    const messageId = generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const response = await fetch(
        "https://api.groq.com/openai/v1/chat/completions",
        {
          method: "POST",
          headers: {
            "Content-Type": "application/json",
            Authorization: `Bearer ${this.config.apiKey}`
          },
          body: JSON.stringify({
            model: request.config?.model || this.model,
            messages,
            tools,
            temperature: request.config?.temperature ?? this.config.temperature,
            max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
            stream: true
          }),
          // Abort is delegated to fetch; cancellation surfaces as AbortError.
          signal: request.signal
        }
      );
      if (!response.ok) {
        throw new Error(`Groq API error: ${response.status}`);
      }
      if (!response.body) {
        throw new Error("No response body");
      }
      // Manual SSE parsing: split the byte stream on newlines, keeping the
      // trailing partial line in `buffer` until the next read completes it.
      const reader = response.body.getReader();
      const decoder = new TextDecoder();
      let buffer = "";
      // Same fragment-accumulation scheme as OpenAIAdapter: first fragment
      // of a tool call carries `id`; later fragments append argument text.
      let currentToolCall = null;
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        buffer = lines.pop() || "";
        for (const line of lines) {
          // SSE payload lines are prefixed "data: "; everything else is ignored.
          if (!line.startsWith("data: ")) continue;
          const data = line.slice(6).trim();
          if (data === "[DONE]") continue;
          try {
            const chunk = JSON.parse(data);
            const delta = chunk.choices?.[0]?.delta;
            if (delta?.content) {
              yield { type: "message:delta", content: delta.content };
            }
            if (delta?.tool_calls) {
              for (const toolCall of delta.tool_calls) {
                if (toolCall.id) {
                  // New tool call: flush the previous one first.
                  if (currentToolCall) {
                    yield {
                      type: "action:args",
                      id: currentToolCall.id,
                      args: currentToolCall.arguments
                    };
                  }
                  currentToolCall = {
                    id: toolCall.id,
                    name: toolCall.function?.name || "",
                    arguments: toolCall.function?.arguments || ""
                  };
                  yield {
                    type: "action:start",
                    id: currentToolCall.id,
                    name: currentToolCall.name
                  };
                } else if (currentToolCall && toolCall.function?.arguments) {
                  currentToolCall.arguments += toolCall.function.arguments;
                }
              }
            }
            // End of choice: flush the trailing tool call.
            // NOTE(review): currentToolCall is not reset here, so a second
            // finish_reason chunk would re-emit the same args — confirm the
            // API never sends one.
            if (chunk.choices?.[0]?.finish_reason && currentToolCall) {
              yield {
                type: "action:args",
                id: currentToolCall.id,
                args: currentToolCall.arguments
              };
            }
          } catch {
            // Malformed SSE line — skip it and keep streaming.
          }
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      if (error.name === "AbortError") {
        // Caller-initiated cancellation: end cleanly, no error event.
        yield { type: "done" };
      } else {
        yield {
          type: "error",
          message: error instanceof Error ? error.message : "Unknown error",
          code: "GROQ_ERROR"
        };
      }
    }
  }
};
848
/**
 * Factory for a GroqAdapter.
 * @param {object} config - adapter configuration (apiKey, model, ...)
 * @returns {GroqAdapter}
 */
function createGroqAdapter(config) {
  const adapter = new GroqAdapter(config);
  return adapter;
}
851
/**
 * Streaming adapter for a local Ollama server (/api/chat, NDJSON stream).
 * Uses plain `fetch`; no tools/function calling is sent by this adapter.
 * An AbortError terminates the stream with a clean "done" event.
 */
var OllamaAdapter = class {
  constructor(config = {}) {
    this.provider = "ollama";
    this.config = config;
    // Defaults for a typical local install.
    this.model = config.model || "llama3";
    this.baseUrl = config.baseUrl || "http://localhost:11434";
  }
  /**
   * Stream a completion as adapter events:
   * message:start → message:delta* → message:end → done.
   * HTTP/network failures become a single "error" event.
   */
  async *stream(request) {
    const messages = formatMessages(request.messages, request.systemPrompt);
    const messageId = generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const response = await fetch(`${this.baseUrl}/api/chat`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json"
        },
        body: JSON.stringify({
          model: request.config?.model || this.model,
          messages,
          stream: true,
          // Ollama nests sampling parameters under `options`.
          options: {
            temperature: request.config?.temperature ?? this.config.temperature,
            num_predict: request.config?.maxTokens ?? this.config.maxTokens
          }
        }),
        signal: request.signal
      });
      if (!response.ok) {
        throw new Error(`Ollama API error: ${response.status}`);
      }
      if (!response.body) {
        throw new Error("No response body");
      }
      // Ollama streams newline-delimited JSON objects; keep the trailing
      // partial line in `buffer` until the next read completes it.
      const reader = response.body.getReader();
      const decoder = new TextDecoder();
      let buffer = "";
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        buffer = lines.pop() || "";
        for (const line of lines) {
          if (!line.trim()) continue;
          try {
            const chunk = JSON.parse(line);
            if (chunk.message?.content) {
              yield { type: "message:delta", content: chunk.message.content };
            }
            if (chunk.done) {
              // NOTE(review): this only breaks the per-line loop; the outer
              // read loop continues until the server closes the stream.
              break;
            }
          } catch {
            // Malformed NDJSON line — skip it and keep streaming.
          }
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      if (error.name === "AbortError") {
        // Caller-initiated cancellation: end cleanly, no error event.
        yield { type: "done" };
      } else {
        yield {
          type: "error",
          message: error instanceof Error ? error.message : "Unknown error",
          code: "OLLAMA_ERROR"
        };
      }
    }
  }
};
923
/**
 * Factory for an OllamaAdapter.
 * @param {object} [config] - adapter configuration (baseUrl, model, ...)
 * @returns {OllamaAdapter}
 */
function createOllamaAdapter(config) {
  const adapter = new OllamaAdapter(config);
  return adapter;
}
926
/**
 * Map an attachment to a Gemini `inlineData` part.
 * Only inline (base64) data is supported: attachments without `data` are
 * skipped with a warning. Images, audio, and video are accepted; any other
 * attachment type yields null.
 */
function attachmentToGeminiPart(attachment) {
  if (!attachment.data) {
    console.warn(
      "Gemini adapter: URL-based attachments not supported, skipping"
    );
    return null;
  }
  // Fallback MIME type per supported attachment kind.
  const defaultMimeTypes = {
    image: "image/png",
    audio: "audio/mp3",
    video: "video/mp4"
  };
  const fallbackMime = defaultMimeTypes[attachment.type];
  if (!fallbackMime) return null;
  let base64Data = attachment.data;
  // Strip a data-URI header ("data:<mime>;base64,") if present.
  if (base64Data.startsWith("data:")) {
    const commaIndex = base64Data.indexOf(",");
    if (commaIndex !== -1) {
      base64Data = base64Data.slice(commaIndex + 1);
    }
  }
  return {
    inlineData: {
      mimeType: attachment.mimeType || fallbackMime,
      data: base64Data
    }
  };
}
965
/**
 * Convert one chat message into a Gemini content object
 * ({ role: "user" | "model", parts: [...] }), or null when the message
 * yields no parts. System messages return null (handled by the caller via
 * systemInstruction).
 */
function messageToGeminiContent(msg) {
  if (msg.role === "system") return null;
  // Tool results become a functionResponse part inside a user turn.
  if (msg.role === "tool" && msg.tool_call_id) {
    let responseData;
    try {
      responseData = JSON.parse(msg.content || "{}");
    } catch {
      // Non-JSON tool output is wrapped so Gemini still gets an object.
      responseData = { result: msg.content || "" };
    }
    return {
      role: "user",
      parts: [
        {
          functionResponse: {
            // Gemini matches responses to calls by function name; the generic
            // message shape may not carry it, so fall back to "tool".
            name: msg.metadata?.toolName || "tool",
            response: responseData
          }
        }
      ]
    };
  }
  const parts = [];
  if (msg.content) {
    parts.push({ text: msg.content });
  }
  const attachments = msg.metadata?.attachments;
  if (Array.isArray(attachments)) {
    for (const attachment of attachments) {
      const part = attachmentToGeminiPart(attachment);
      if (part) parts.push(part);
    }
  }
  // Assistant tool calls become functionCall parts.
  if (msg.role === "assistant") {
    for (const tc of msg.tool_calls ?? []) {
      let args = {};
      try {
        args = JSON.parse(tc.function.arguments);
      } catch {
        // Malformed arguments: send an empty args object.
      }
      parts.push({ functionCall: { name: tc.function.name, args } });
    }
  }
  if (parts.length === 0) return null;
  return {
    role: msg.role === "assistant" ? "model" : "user",
    parts
  };
}
1017
/**
 * Convert action definitions into a Gemini tools object with
 * `functionDeclarations`. Returns undefined when there are no actions.
 * Only flat parameter shapes (type/description/enum) are mapped here.
 */
function formatToolsForGemini(actions) {
  if (!actions || actions.length === 0) return void 0;
  const toDeclaration = (action) => {
    let parameters = void 0;
    if (action.parameters) {
      const properties = {};
      const required = [];
      for (const [key, param] of Object.entries(action.parameters)) {
        properties[key] = {
          type: param.type,
          description: param.description,
          enum: param.enum
        };
        if (param.required) required.push(key);
      }
      parameters = { type: "object", properties, required };
    }
    return {
      name: action.name,
      description: action.description,
      parameters
    };
  };
  return { functionDeclarations: actions.map(toDeclaration) };
}
1040
/**
 * LLM adapter for Google Gemini models, backed by `@google/generative-ai`.
 *
 * Exposes the same surface as the sibling adapters in this file:
 * - `stream(request)`: async generator yielding protocol events
 *   ("message:start", "message:delta", "action:start"/"action:args",
 *   "message:end", "done", "error").
 * - `complete(request)`: non-streaming completion returning
 *   `{ content, toolCalls, rawResponse }`.
 */
var GoogleAdapter = class {
  /**
   * @param {object} config - Adapter configuration: `apiKey` (required),
   *   optional `model`, `temperature`, `maxTokens`, `safetySettings`.
   */
  constructor(config) {
    this.provider = "google";
    this.config = config;
    // Default model when none is configured.
    this.model = config.model || "gemini-2.0-flash";
  }
  /**
   * Lazily create and cache the GoogleGenerativeAI client.
   * The SDK is loaded via dynamic import so it is only pulled in
   * when this adapter is actually used.
   */
  async getClient() {
    if (!this.client) {
      const { GoogleGenerativeAI } = await import('@google/generative-ai');
      this.client = new GoogleGenerativeAI(this.config.apiKey);
    }
    return this.client;
  }
  /**
   * Stream a chat completion as protocol events.
   *
   * Message normalization before calling Gemini:
   * 1. "system" messages are concatenated into `systemInstruction`
   *    (Gemini takes the system prompt separately, not in the history).
   * 2. Remaining messages are converted via `messageToGeminiContent`.
   * 3. An empty "user" turn is prepended if the history does not start
   *    with a user message (Gemini requires a leading user turn).
   * 4. Consecutive same-role turns are merged (Gemini rejects
   *    back-to-back turns with the same role).
   *
   * @param {object} request - { messages | rawMessages, systemPrompt,
   *   actions, config, signal }.
   */
  async *stream(request) {
    const client = await this.getClient();
    const modelId = request.config?.model || this.model;
    const model = client.getGenerativeModel({
      model: modelId,
      safetySettings: this.config.safetySettings
    });
    let contents = [];
    let systemInstruction;
    if (request.rawMessages && request.rawMessages.length > 0) {
      for (const msg of request.rawMessages) {
        if (msg.role === "system") {
          // Accumulate all system messages into one instruction string.
          systemInstruction = (systemInstruction || "") + (msg.content || "");
          continue;
        }
        const content = messageToGeminiContent(msg);
        if (content) {
          contents.push(content);
        }
      }
      // rawMessages branch: systemPrompt is only a fallback when the raw
      // history contained no system message of its own.
      if (request.systemPrompt && !systemInstruction) {
        systemInstruction = request.systemPrompt;
      }
    } else {
      for (const msg of request.messages) {
        if (msg.role === "system") {
          systemInstruction = (systemInstruction || "") + (msg.content || "");
          continue;
        }
        const content = messageToGeminiContent(msg);
        if (content) {
          contents.push(content);
        }
      }
      // messages branch: an explicit systemPrompt always wins over any
      // system messages found in the history (unlike the branch above).
      if (request.systemPrompt) {
        systemInstruction = request.systemPrompt;
      }
    }
    // Gemini requires the conversation to start with a user turn.
    if (contents.length === 0 || contents[0].role !== "user") {
      contents = [{ role: "user", parts: [{ text: "" }] }, ...contents];
    }
    // Merge consecutive same-role turns; copies `parts` so the merge
    // never mutates the caller's message objects.
    const mergedContents = [];
    for (const content of contents) {
      const last = mergedContents[mergedContents.length - 1];
      if (last && last.role === content.role) {
        last.parts.push(...content.parts);
      } else {
        mergedContents.push({ ...content, parts: [...content.parts] });
      }
    }
    const tools = formatToolsForGemini(request.actions);
    const messageId = generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const chat = model.startChat({
        history: mergedContents.slice(0, -1),
        // All but the last message
        systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : void 0,
        tools: tools ? [tools] : void 0,
        generationConfig: {
          // Per-request config overrides adapter-level config.
          temperature: request.config?.temperature ?? this.config.temperature,
          maxOutputTokens: request.config?.maxTokens ?? this.config.maxTokens
        }
      });
      // The final turn is sent as the new message; earlier turns went
      // into `history` above.
      const lastMessage = mergedContents[mergedContents.length - 1];
      const result = await chat.sendMessageStream(lastMessage.parts);
      let currentToolCall = null;
      for await (const chunk of result.stream) {
        // Cooperative cancellation: stop consuming on abort.
        if (request.signal?.aborted) {
          break;
        }
        const candidate = chunk.candidates?.[0];
        if (!candidate?.content?.parts) continue;
        for (const part of candidate.content.parts) {
          if ("text" in part && part.text) {
            yield { type: "message:delta", content: part.text };
          }
          if ("functionCall" in part && part.functionCall) {
            const fc = part.functionCall;
            const toolId = generateToolCallId();
            // A new function call flushes the args of the previous one
            // (Gemini delivers each functionCall's args complete, not
            // incrementally).
            if (currentToolCall) {
              yield {
                type: "action:args",
                id: currentToolCall.id,
                args: JSON.stringify(currentToolCall.args)
              };
            }
            currentToolCall = {
              id: toolId,
              name: fc.name,
              args: fc.args || {}
            };
            yield {
              type: "action:start",
              id: toolId,
              name: fc.name
            };
          }
        }
        if (candidate.finishReason) {
          // Flush the trailing tool call once the candidate finishes.
          // NOTE(review): currentToolCall is not reset to null here, so if
          // another chunk arrived after a finishReason its args could be
          // emitted twice — confirm downstream consumers tolerate this.
          if (currentToolCall) {
            yield {
              type: "action:args",
              id: currentToolCall.id,
              args: JSON.stringify(currentToolCall.args)
            };
          }
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      // Surface failures as a protocol event rather than throwing out of
      // the generator.
      yield {
        type: "error",
        message: error instanceof Error ? error.message : "Unknown error",
        code: "GOOGLE_ERROR"
      };
    }
  }
  /**
   * Non-streaming completion (optional, for debugging).
   *
   * Applies the same normalization as `stream()` (system extraction,
   * leading user turn, same-role merge) but only reads `request.messages`
   * — `rawMessages` is not consulted here.
   *
   * @returns {Promise<{content: string, toolCalls: Array, rawResponse: object}>}
   */
  async complete(request) {
    const client = await this.getClient();
    const modelId = request.config?.model || this.model;
    const model = client.getGenerativeModel({
      model: modelId,
      safetySettings: this.config.safetySettings
    });
    let contents = [];
    let systemInstruction;
    for (const msg of request.messages) {
      if (msg.role === "system") {
        systemInstruction = (systemInstruction || "") + (msg.content || "");
        continue;
      }
      const content = messageToGeminiContent(msg);
      if (content) {
        contents.push(content);
      }
    }
    if (request.systemPrompt) {
      systemInstruction = request.systemPrompt;
    }
    // Gemini requires a leading user turn.
    if (contents.length === 0 || contents[0].role !== "user") {
      contents = [{ role: "user", parts: [{ text: "" }] }, ...contents];
    }
    // Merge consecutive same-role turns (copying parts arrays).
    const mergedContents = [];
    for (const content of contents) {
      const last = mergedContents[mergedContents.length - 1];
      if (last && last.role === content.role) {
        last.parts.push(...content.parts);
      } else {
        mergedContents.push({ ...content, parts: [...content.parts] });
      }
    }
    const tools = formatToolsForGemini(request.actions);
    const chat = model.startChat({
      history: mergedContents.slice(0, -1),
      systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : void 0,
      tools: tools ? [tools] : void 0,
      generationConfig: {
        temperature: request.config?.temperature ?? this.config.temperature,
        maxOutputTokens: request.config?.maxTokens ?? this.config.maxTokens
      }
    });
    const lastMessage = mergedContents[mergedContents.length - 1];
    const result = await chat.sendMessage(lastMessage.parts);
    const response = result.response;
    let textContent = "";
    const toolCalls = [];
    const candidate = response.candidates?.[0];
    if (candidate?.content?.parts) {
      for (const part of candidate.content.parts) {
        if ("text" in part && part.text) {
          textContent += part.text;
        }
        if ("functionCall" in part && part.functionCall) {
          toolCalls.push({
            id: generateToolCallId(),
            name: part.functionCall.name,
            args: part.functionCall.args || {}
          });
        }
      }
    }
    return {
      content: textContent,
      toolCalls,
      rawResponse: response
    };
  }
};
1246
/**
 * Convenience factory for {@link GoogleAdapter}.
 *
 * @param {object} config - Google adapter configuration (apiKey, model, …).
 * @returns {GoogleAdapter} A freshly constructed adapter instance.
 */
function createGoogleAdapter(config) {
  const adapter = new GoogleAdapter(config);
  return adapter;
}
1249
// Default base URL for the xAI API (OpenAI-compatible endpoint); can be
// overridden per-adapter via `config.baseUrl`.
var XAI_BASE_URL = "https://api.x.ai/v1";
/**
 * LLM adapter for xAI (Grok) models.
 *
 * xAI exposes an OpenAI-compatible API, so this adapter reuses the
 * `openai` SDK pointed at {@link XAI_BASE_URL} and shares the OpenAI
 * message/tool formatting helpers.
 */
var XAIAdapter = class {
  /**
   * @param {object} config - Adapter configuration: `apiKey` (required),
   *   optional `model`, `baseUrl`, `temperature`, `maxTokens`.
   */
  constructor(config) {
    this.provider = "xai";
    this.config = config;
    // Default model when none is configured.
    this.model = config.model || "grok-2";
  }
  /**
   * Lazily create and cache the OpenAI SDK client configured for xAI.
   * Dynamic import keeps the `openai` dependency out of the startup path.
   */
  async getClient() {
    if (!this.client) {
      const { default: OpenAI } = await import('openai');
      this.client = new OpenAI({
        apiKey: this.config.apiKey,
        baseURL: this.config.baseUrl || XAI_BASE_URL
      });
    }
    return this.client;
  }
  /**
   * Stream a chat completion as protocol events
   * ("message:start", "message:delta", "action:*", "done", "error").
   *
   * @param {object} request - { messages | rawMessages, systemPrompt,
   *   actions, config, signal }.
   */
  async *stream(request) {
    const client = await this.getClient();
    let messages;
    if (request.rawMessages && request.rawMessages.length > 0) {
      // Convert image attachments into OpenAI multi-part content
      // (text part + image_url parts); other messages pass through.
      const processedMessages = request.rawMessages.map((msg) => {
        const hasAttachments = msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0;
        if (hasAttachments) {
          const content = [];
          if (msg.content) {
            content.push({ type: "text", text: msg.content });
          }
          for (const attachment of msg.attachments) {
            if (attachment.type === "image") {
              let imageUrl = attachment.data;
              // Wrap bare base64 payloads in a data: URL.
              if (!imageUrl.startsWith("data:")) {
                imageUrl = `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
              }
              content.push({
                type: "image_url",
                image_url: { url: imageUrl, detail: "auto" }
              });
            }
          }
          // Attachments are consumed into `content`; drop the original field.
          return { ...msg, content, attachments: void 0 };
        }
        return msg;
      });
      // Prepend systemPrompt only if the raw history has no system message.
      if (request.systemPrompt) {
        const hasSystem = processedMessages.some((m) => m.role === "system");
        if (!hasSystem) {
          messages = [
            { role: "system", content: request.systemPrompt },
            ...processedMessages
          ];
        } else {
          messages = processedMessages;
        }
      } else {
        messages = processedMessages;
      }
    } else {
      messages = formatMessagesForOpenAI(
        request.messages,
        request.systemPrompt
      );
    }
    const tools = request.actions?.length ? formatTools(request.actions) : void 0;
    const messageId = generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const stream = await client.chat.completions.create({
        model: request.config?.model || this.model,
        messages,
        tools,
        // Per-request config overrides adapter-level config.
        temperature: request.config?.temperature ?? this.config.temperature,
        max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
        stream: true
      });
      let currentToolCall = null;
      for await (const chunk of stream) {
        // Cooperative cancellation: stop consuming on abort.
        if (request.signal?.aborted) {
          break;
        }
        const delta = chunk.choices[0]?.delta;
        if (delta?.content) {
          yield { type: "message:delta", content: delta.content };
        }
        if (delta?.tool_calls) {
          for (const toolCall of delta.tool_calls) {
            if (toolCall.id) {
              // A delta carrying an id starts a NEW tool call; flush the
              // accumulated arguments of the previous one first.
              if (currentToolCall) {
                yield {
                  type: "action:args",
                  id: currentToolCall.id,
                  args: currentToolCall.arguments
                };
              }
              currentToolCall = {
                id: toolCall.id,
                name: toolCall.function?.name || "",
                arguments: toolCall.function?.arguments || ""
              };
              yield {
                type: "action:start",
                id: currentToolCall.id,
                name: currentToolCall.name
              };
            } else if (currentToolCall && toolCall.function?.arguments) {
              // id-less deltas carry argument fragments for the call in
              // progress; concatenate them.
              currentToolCall.arguments += toolCall.function.arguments;
            }
          }
        }
        if (chunk.choices[0]?.finish_reason) {
          // Flush the trailing tool call at end of generation.
          // NOTE(review): currentToolCall is not reset to null here, so a
          // second finish_reason chunk would re-emit the same args —
          // confirm downstream consumers tolerate this.
          if (currentToolCall) {
            yield {
              type: "action:args",
              id: currentToolCall.id,
              args: currentToolCall.arguments
            };
          }
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      // Surface failures as a protocol event rather than throwing.
      yield {
        type: "error",
        message: error instanceof Error ? error.message : "Unknown error",
        code: "XAI_ERROR"
      };
    }
  }
  /**
   * Non-streaming completion (optional, for debugging).
   *
   * Unlike `stream()`, rawMessages are forwarded as-is here (no attachment
   * conversion); systemPrompt is still prepended when missing.
   *
   * @returns {Promise<{content: string, toolCalls: Array, rawResponse: object}>}
   */
  async complete(request) {
    const client = await this.getClient();
    let messages;
    if (request.rawMessages && request.rawMessages.length > 0) {
      messages = request.rawMessages;
      if (request.systemPrompt) {
        const hasSystem = messages.some((m) => m.role === "system");
        if (!hasSystem) {
          messages = [
            { role: "system", content: request.systemPrompt },
            ...messages
          ];
        }
      }
    } else {
      messages = formatMessagesForOpenAI(
        request.messages,
        request.systemPrompt
      );
    }
    const tools = request.actions?.length ? formatTools(request.actions) : void 0;
    const response = await client.chat.completions.create({
      model: request.config?.model || this.model,
      messages,
      tools,
      temperature: request.config?.temperature ?? this.config.temperature,
      max_tokens: request.config?.maxTokens ?? this.config.maxTokens
    });
    const choice = response.choices[0];
    const message = choice?.message;
    // Tool-call arguments arrive as a JSON string; parse them eagerly.
    const toolCalls = (message?.tool_calls || []).map((tc) => ({
      id: tc.id,
      name: tc.function.name,
      args: JSON.parse(tc.function.arguments || "{}")
    }));
    return {
      content: message?.content || "",
      toolCalls,
      rawResponse: response
    };
  }
};
1423
/**
 * Convenience factory for {@link XAIAdapter}.
 *
 * @param {object} config - xAI adapter configuration (apiKey, model, …).
 * @returns {XAIAdapter} A freshly constructed adapter instance.
 */
function createXAIAdapter(config) {
  const adapter = new XAIAdapter(config);
  return adapter;
}
1426
// Default Azure OpenAI REST API version used when the adapter config does
// not supply `apiVersion`.
var DEFAULT_API_VERSION = "2024-08-01-preview";
/**
 * Build the default Azure OpenAI endpoint URL for a resource/deployment pair.
 *
 * Fix: the previous signature declared a third `apiVersion` parameter that
 * was never used in the body (the API version is passed to the AzureOpenAI
 * client separately in `getClient()`), so it has been removed. JavaScript
 * ignores extra arguments, so any caller still passing three is unaffected.
 *
 * @param {string} resourceName - Azure OpenAI resource name (URL subdomain).
 * @param {string} deploymentName - Deployment name appended to the path.
 * @returns {string} Fully-qualified deployment base URL.
 */
function buildAzureEndpoint(resourceName, deploymentName) {
  return `https://${resourceName}.openai.azure.com/openai/deployments/${deploymentName}`;
}
1430
/**
 * LLM adapter for Azure OpenAI deployments.
 *
 * Uses the `AzureOpenAI` client from the `openai` SDK. Azure addresses
 * models by deployment name (not model name), so `config.deploymentName`
 * is used wherever the other adapters use a model id.
 */
var AzureAdapter = class {
  /**
   * @param {object} config - Adapter configuration: `apiKey`,
   *   `resourceName`, `deploymentName` (required), optional `baseUrl`,
   *   `apiVersion`, `temperature`, `maxTokens`.
   */
  constructor(config) {
    this.provider = "azure";
    this.config = config;
    // Azure's "model" is the deployment name.
    this.model = config.deploymentName;
  }
  /**
   * Lazily create and cache the AzureOpenAI client.
   * `baseUrl` overrides the endpoint derived from the resource name.
   */
  async getClient() {
    if (!this.client) {
      const { AzureOpenAI } = await import('openai');
      const apiVersion = this.config.apiVersion || DEFAULT_API_VERSION;
      const endpoint = this.config.baseUrl || buildAzureEndpoint(
        this.config.resourceName,
        this.config.deploymentName);
      this.client = new AzureOpenAI({
        apiKey: this.config.apiKey,
        endpoint,
        apiVersion,
        deployment: this.config.deploymentName
      });
    }
    return this.client;
  }
  /**
   * Stream a chat completion as protocol events
   * ("message:start", "message:delta", "action:*", "done", "error").
   *
   * @param {object} request - { messages | rawMessages, systemPrompt,
   *   actions, config, signal }.
   */
  async *stream(request) {
    const client = await this.getClient();
    let messages;
    if (request.rawMessages && request.rawMessages.length > 0) {
      // Convert image attachments into OpenAI multi-part content
      // (text part + image_url parts); other messages pass through.
      const processedMessages = request.rawMessages.map((msg) => {
        const hasAttachments = msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0;
        if (hasAttachments) {
          const content = [];
          if (msg.content) {
            content.push({ type: "text", text: msg.content });
          }
          for (const attachment of msg.attachments) {
            if (attachment.type === "image") {
              let imageUrl = attachment.data;
              // Wrap bare base64 payloads in a data: URL.
              if (!imageUrl.startsWith("data:")) {
                imageUrl = `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
              }
              content.push({
                type: "image_url",
                image_url: { url: imageUrl, detail: "auto" }
              });
            }
          }
          // Attachments are consumed into `content`; drop the original field.
          return { ...msg, content, attachments: void 0 };
        }
        return msg;
      });
      // Prepend systemPrompt only if the raw history has no system message.
      if (request.systemPrompt) {
        const hasSystem = processedMessages.some((m) => m.role === "system");
        if (!hasSystem) {
          messages = [
            { role: "system", content: request.systemPrompt },
            ...processedMessages
          ];
        } else {
          messages = processedMessages;
        }
      } else {
        messages = processedMessages;
      }
    } else {
      messages = formatMessagesForOpenAI(
        request.messages,
        request.systemPrompt
      );
    }
    const tools = request.actions?.length ? formatTools(request.actions) : void 0;
    const messageId = generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const stream = await client.chat.completions.create({
        // Azure uses deployment name, not model name
        model: this.config.deploymentName,
        messages,
        tools,
        // Per-request config overrides adapter-level config.
        temperature: request.config?.temperature ?? this.config.temperature,
        max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
        stream: true
      });
      let currentToolCall = null;
      for await (const chunk of stream) {
        // Cooperative cancellation: stop consuming on abort.
        if (request.signal?.aborted) {
          break;
        }
        const delta = chunk.choices[0]?.delta;
        if (delta?.content) {
          yield { type: "message:delta", content: delta.content };
        }
        if (delta?.tool_calls) {
          for (const toolCall of delta.tool_calls) {
            if (toolCall.id) {
              // A delta carrying an id starts a NEW tool call; flush the
              // accumulated arguments of the previous one first.
              if (currentToolCall) {
                yield {
                  type: "action:args",
                  id: currentToolCall.id,
                  args: currentToolCall.arguments
                };
              }
              currentToolCall = {
                id: toolCall.id,
                name: toolCall.function?.name || "",
                arguments: toolCall.function?.arguments || ""
              };
              yield {
                type: "action:start",
                id: currentToolCall.id,
                name: currentToolCall.name
              };
            } else if (currentToolCall && toolCall.function?.arguments) {
              // id-less deltas carry argument fragments for the call in
              // progress; concatenate them.
              currentToolCall.arguments += toolCall.function.arguments;
            }
          }
        }
        if (chunk.choices[0]?.finish_reason) {
          // Flush the trailing tool call at end of generation.
          // NOTE(review): currentToolCall is not reset to null here, so a
          // second finish_reason chunk would re-emit the same args —
          // confirm downstream consumers tolerate this.
          if (currentToolCall) {
            yield {
              type: "action:args",
              id: currentToolCall.id,
              args: currentToolCall.arguments
            };
          }
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      // Surface failures as a protocol event rather than throwing.
      yield {
        type: "error",
        message: error instanceof Error ? error.message : "Unknown error",
        code: "AZURE_ERROR"
      };
    }
  }
  /**
   * Non-streaming completion (optional, for debugging).
   *
   * Unlike `stream()`, rawMessages are forwarded as-is here (no attachment
   * conversion); systemPrompt is still prepended when missing.
   *
   * @returns {Promise<{content: string, toolCalls: Array, rawResponse: object}>}
   */
  async complete(request) {
    const client = await this.getClient();
    let messages;
    if (request.rawMessages && request.rawMessages.length > 0) {
      messages = request.rawMessages;
      if (request.systemPrompt) {
        const hasSystem = messages.some((m) => m.role === "system");
        if (!hasSystem) {
          messages = [
            { role: "system", content: request.systemPrompt },
            ...messages
          ];
        }
      }
    } else {
      messages = formatMessagesForOpenAI(
        request.messages,
        request.systemPrompt
      );
    }
    const tools = request.actions?.length ? formatTools(request.actions) : void 0;
    const response = await client.chat.completions.create({
      model: this.config.deploymentName,
      messages,
      tools,
      temperature: request.config?.temperature ?? this.config.temperature,
      max_tokens: request.config?.maxTokens ?? this.config.maxTokens
    });
    const choice = response.choices[0];
    const message = choice?.message;
    // Tool-call arguments arrive as a JSON string; parse them eagerly.
    const toolCalls = (message?.tool_calls || []).map((tc) => ({
      id: tc.id,
      name: tc.function.name,
      args: JSON.parse(tc.function.arguments || "{}")
    }));
    return {
      content: message?.content || "",
      toolCalls,
      rawResponse: response
    };
  }
};
1610
/**
 * Convenience factory for {@link AzureAdapter}.
 *
 * @param {object} config - Azure adapter configuration (apiKey,
 *   resourceName, deploymentName, …).
 * @returns {AzureAdapter} A freshly constructed adapter instance.
 */
function createAzureAdapter(config) {
  const adapter = new AzureAdapter(config);
  return adapter;
}
1613
+
1614
+ export { AnthropicAdapter, AzureAdapter, GoogleAdapter, GroqAdapter, OllamaAdapter, OpenAIAdapter, XAIAdapter, attachmentToAnthropicDocument, attachmentToAnthropicImage, attachmentToOpenAIImage, createAnthropicAdapter, createAzureAdapter, createGoogleAdapter, createGroqAdapter, createOllamaAdapter, createOpenAIAdapter, createXAIAdapter, formatMessages, formatMessagesForAnthropic, formatMessagesForOpenAI, formatTools, hasImageAttachments, hasMediaAttachments, messageToAnthropicContent, messageToOpenAIContent };
1615
//# sourceMappingURL=index.mjs.map