@yourgpt/llm-sdk 0.1.0

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1642 @@
1
+ 'use strict';
2
+
3
+ var core = require('@yourgpt/copilot-sdk/core');
4
+
5
+ // src/adapters/base.ts
6
+ function formatMessages(messages, systemPrompt) {
7
+ const formatted = [];
8
+ if (systemPrompt) {
9
+ formatted.push({ role: "system", content: systemPrompt });
10
+ }
11
+ for (const msg of messages) {
12
+ formatted.push({
13
+ role: msg.role,
14
+ content: msg.content ?? ""
15
+ });
16
+ }
17
+ return formatted;
18
+ }
19
+ function parameterToJsonSchema(param) {
20
+ const schema = {
21
+ type: param.type
22
+ };
23
+ if (param.description) {
24
+ schema.description = param.description;
25
+ }
26
+ if (param.enum) {
27
+ schema.enum = param.enum;
28
+ }
29
+ if (param.type === "array" && param.items) {
30
+ schema.items = parameterToJsonSchema(
31
+ param.items
32
+ );
33
+ }
34
+ if (param.type === "object" && param.properties) {
35
+ schema.properties = Object.fromEntries(
36
+ Object.entries(param.properties).map(([key, prop]) => [
37
+ key,
38
+ parameterToJsonSchema(
39
+ prop
40
+ )
41
+ ])
42
+ );
43
+ }
44
+ return schema;
45
+ }
46
+ function formatTools(actions) {
47
+ return actions.map((action) => ({
48
+ type: "function",
49
+ function: {
50
+ name: action.name,
51
+ description: action.description,
52
+ parameters: {
53
+ type: "object",
54
+ properties: action.parameters ? Object.fromEntries(
55
+ Object.entries(action.parameters).map(([key, param]) => [
56
+ key,
57
+ parameterToJsonSchema(param)
58
+ ])
59
+ ) : {},
60
+ required: action.parameters ? Object.entries(action.parameters).filter(([, param]) => param.required).map(([key]) => key) : []
61
+ }
62
+ }
63
+ }));
64
+ }
65
+ function hasImageAttachments(message) {
66
+ const attachments = message.metadata?.attachments;
67
+ return attachments?.some((a) => a.type === "image") ?? false;
68
+ }
69
+ function hasMediaAttachments(message) {
70
+ const attachments = message.metadata?.attachments;
71
+ return attachments?.some(
72
+ (a) => a.type === "image" || (a.type === "file" && a.mimeType === "application/pdf")
73
+ ) ?? false;
74
+ }
75
+ function attachmentToAnthropicImage(attachment) {
76
+ if (attachment.type !== "image") return null;
77
+ if (attachment.url) {
78
+ return {
79
+ type: "image",
80
+ source: {
81
+ type: "url",
82
+ url: attachment.url
83
+ }
84
+ };
85
+ }
86
+ if (!attachment.data) return null;
87
+ let base64Data = attachment.data;
88
+ if (base64Data.startsWith("data:")) {
89
+ const commaIndex = base64Data.indexOf(",");
90
+ if (commaIndex !== -1) {
91
+ base64Data = base64Data.slice(commaIndex + 1);
92
+ }
93
+ }
94
+ return {
95
+ type: "image",
96
+ source: {
97
+ type: "base64",
98
+ media_type: attachment.mimeType || "image/png",
99
+ data: base64Data
100
+ }
101
+ };
102
+ }
103
+ function attachmentToOpenAIImage(attachment) {
104
+ if (attachment.type !== "image") return null;
105
+ let imageUrl;
106
+ if (attachment.url) {
107
+ imageUrl = attachment.url;
108
+ } else if (attachment.data) {
109
+ imageUrl = attachment.data.startsWith("data:") ? attachment.data : `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
110
+ } else {
111
+ return null;
112
+ }
113
+ return {
114
+ type: "image_url",
115
+ image_url: {
116
+ url: imageUrl,
117
+ detail: "auto"
118
+ }
119
+ };
120
+ }
121
+ function attachmentToAnthropicDocument(attachment) {
122
+ if (attachment.type !== "file" || attachment.mimeType !== "application/pdf") {
123
+ return null;
124
+ }
125
+ if (attachment.url) {
126
+ return {
127
+ type: "document",
128
+ source: {
129
+ type: "url",
130
+ url: attachment.url
131
+ }
132
+ };
133
+ }
134
+ if (!attachment.data) return null;
135
+ let base64Data = attachment.data;
136
+ if (base64Data.startsWith("data:")) {
137
+ const commaIndex = base64Data.indexOf(",");
138
+ if (commaIndex !== -1) {
139
+ base64Data = base64Data.slice(commaIndex + 1);
140
+ }
141
+ }
142
+ return {
143
+ type: "document",
144
+ source: {
145
+ type: "base64",
146
+ media_type: "application/pdf",
147
+ data: base64Data
148
+ }
149
+ };
150
+ }
151
+ function messageToAnthropicContent(message) {
152
+ const attachments = message.metadata?.attachments;
153
+ const content = message.content ?? "";
154
+ if (!hasMediaAttachments(message)) {
155
+ return content;
156
+ }
157
+ const blocks = [];
158
+ if (attachments) {
159
+ for (const attachment of attachments) {
160
+ const imageBlock = attachmentToAnthropicImage(attachment);
161
+ if (imageBlock) {
162
+ blocks.push(imageBlock);
163
+ continue;
164
+ }
165
+ const docBlock = attachmentToAnthropicDocument(attachment);
166
+ if (docBlock) {
167
+ blocks.push(docBlock);
168
+ }
169
+ }
170
+ }
171
+ if (content) {
172
+ blocks.push({ type: "text", text: content });
173
+ }
174
+ return blocks;
175
+ }
176
+ function messageToOpenAIContent(message) {
177
+ const attachments = message.metadata?.attachments;
178
+ const content = message.content ?? "";
179
+ if (!hasImageAttachments(message)) {
180
+ return content;
181
+ }
182
+ const blocks = [];
183
+ if (content) {
184
+ blocks.push({ type: "text", text: content });
185
+ }
186
+ if (attachments) {
187
+ for (const attachment of attachments) {
188
+ const imageBlock = attachmentToOpenAIImage(attachment);
189
+ if (imageBlock) {
190
+ blocks.push(imageBlock);
191
+ }
192
+ }
193
+ }
194
+ return blocks;
195
+ }
196
+ function formatMessagesForAnthropic(messages, systemPrompt) {
197
+ const formatted = [];
198
+ for (let i = 0; i < messages.length; i++) {
199
+ const msg = messages[i];
200
+ if (msg.role === "system") continue;
201
+ if (msg.role === "assistant") {
202
+ const content = [];
203
+ if (msg.content) {
204
+ content.push({ type: "text", text: msg.content });
205
+ }
206
+ if (msg.tool_calls && msg.tool_calls.length > 0) {
207
+ for (const tc of msg.tool_calls) {
208
+ content.push({
209
+ type: "tool_use",
210
+ id: tc.id,
211
+ name: tc.function.name,
212
+ input: JSON.parse(tc.function.arguments)
213
+ });
214
+ }
215
+ }
216
+ formatted.push({
217
+ role: "assistant",
218
+ content: content.length === 1 && content[0].type === "text" ? content[0].text : content
219
+ });
220
+ } else if (msg.role === "tool" && msg.tool_call_id) {
221
+ const toolResults = [
222
+ {
223
+ type: "tool_result",
224
+ tool_use_id: msg.tool_call_id,
225
+ content: msg.content ?? ""
226
+ }
227
+ ];
228
+ while (i + 1 < messages.length && messages[i + 1].role === "tool") {
229
+ i++;
230
+ const nextTool = messages[i];
231
+ if (nextTool.tool_call_id) {
232
+ toolResults.push({
233
+ type: "tool_result",
234
+ tool_use_id: nextTool.tool_call_id,
235
+ content: nextTool.content ?? ""
236
+ });
237
+ }
238
+ }
239
+ formatted.push({
240
+ role: "user",
241
+ content: toolResults
242
+ });
243
+ } else if (msg.role === "user") {
244
+ formatted.push({
245
+ role: "user",
246
+ content: messageToAnthropicContent(msg)
247
+ });
248
+ }
249
+ }
250
+ return {
251
+ system: systemPrompt || "",
252
+ messages: formatted
253
+ };
254
+ }
255
+ function formatMessagesForOpenAI(messages, systemPrompt) {
256
+ const formatted = [];
257
+ if (systemPrompt) {
258
+ formatted.push({ role: "system", content: systemPrompt });
259
+ }
260
+ for (const msg of messages) {
261
+ if (msg.role === "system") {
262
+ formatted.push({ role: "system", content: msg.content ?? "" });
263
+ } else if (msg.role === "user") {
264
+ formatted.push({
265
+ role: "user",
266
+ content: messageToOpenAIContent(msg)
267
+ });
268
+ } else if (msg.role === "assistant") {
269
+ const assistantMsg = {
270
+ role: "assistant",
271
+ content: msg.content
272
+ };
273
+ if (msg.tool_calls && msg.tool_calls.length > 0) {
274
+ assistantMsg.tool_calls = msg.tool_calls;
275
+ }
276
+ formatted.push(assistantMsg);
277
+ } else if (msg.role === "tool" && msg.tool_call_id) {
278
+ formatted.push({
279
+ role: "tool",
280
+ content: msg.content ?? "",
281
+ tool_call_id: msg.tool_call_id
282
+ });
283
+ }
284
+ }
285
+ return formatted;
286
+ }
287
+ var OpenAIAdapter = class {
288
+ constructor(config) {
289
+ this.provider = "openai";
290
+ this.config = config;
291
+ this.model = config.model || "gpt-4o";
292
+ }
293
+ async getClient() {
294
+ if (!this.client) {
295
+ const { default: OpenAI } = await import('openai');
296
+ this.client = new OpenAI({
297
+ apiKey: this.config.apiKey,
298
+ baseURL: this.config.baseUrl
299
+ });
300
+ }
301
+ return this.client;
302
+ }
303
+ async *stream(request) {
304
+ const client = await this.getClient();
305
+ let messages;
306
+ if (request.rawMessages && request.rawMessages.length > 0) {
307
+ const processedMessages = request.rawMessages.map((msg) => {
308
+ const hasAttachments = msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0;
309
+ if (hasAttachments) {
310
+ const content = [];
311
+ if (msg.content) {
312
+ content.push({ type: "text", text: msg.content });
313
+ }
314
+ for (const attachment of msg.attachments) {
315
+ if (attachment.type === "image") {
316
+ let imageUrl;
317
+ if (attachment.url) {
318
+ imageUrl = attachment.url;
319
+ } else if (attachment.data) {
320
+ imageUrl = attachment.data.startsWith("data:") ? attachment.data : `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
321
+ } else {
322
+ continue;
323
+ }
324
+ content.push({
325
+ type: "image_url",
326
+ image_url: { url: imageUrl, detail: "auto" }
327
+ });
328
+ }
329
+ }
330
+ return { ...msg, content, attachments: void 0 };
331
+ }
332
+ return msg;
333
+ });
334
+ if (request.systemPrompt) {
335
+ const hasSystem = processedMessages.some((m) => m.role === "system");
336
+ if (!hasSystem) {
337
+ messages = [
338
+ { role: "system", content: request.systemPrompt },
339
+ ...processedMessages
340
+ ];
341
+ } else {
342
+ messages = processedMessages;
343
+ }
344
+ } else {
345
+ messages = processedMessages;
346
+ }
347
+ } else {
348
+ messages = formatMessagesForOpenAI(
349
+ request.messages,
350
+ request.systemPrompt
351
+ );
352
+ }
353
+ const tools = request.actions?.length ? formatTools(request.actions) : void 0;
354
+ const messageId = core.generateMessageId();
355
+ yield { type: "message:start", id: messageId };
356
+ try {
357
+ const stream = await client.chat.completions.create({
358
+ model: request.config?.model || this.model,
359
+ messages,
360
+ tools,
361
+ temperature: request.config?.temperature ?? this.config.temperature,
362
+ max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
363
+ stream: true
364
+ });
365
+ let currentToolCall = null;
366
+ for await (const chunk of stream) {
367
+ if (request.signal?.aborted) {
368
+ break;
369
+ }
370
+ const delta = chunk.choices[0]?.delta;
371
+ if (delta?.content) {
372
+ yield { type: "message:delta", content: delta.content };
373
+ }
374
+ if (delta?.tool_calls) {
375
+ for (const toolCall of delta.tool_calls) {
376
+ if (toolCall.id) {
377
+ if (currentToolCall) {
378
+ yield {
379
+ type: "action:args",
380
+ id: currentToolCall.id,
381
+ args: currentToolCall.arguments
382
+ };
383
+ }
384
+ currentToolCall = {
385
+ id: toolCall.id,
386
+ name: toolCall.function?.name || "",
387
+ arguments: toolCall.function?.arguments || ""
388
+ };
389
+ yield {
390
+ type: "action:start",
391
+ id: currentToolCall.id,
392
+ name: currentToolCall.name
393
+ };
394
+ } else if (currentToolCall && toolCall.function?.arguments) {
395
+ currentToolCall.arguments += toolCall.function.arguments;
396
+ }
397
+ }
398
+ }
399
+ if (chunk.choices[0]?.finish_reason) {
400
+ if (currentToolCall) {
401
+ yield {
402
+ type: "action:args",
403
+ id: currentToolCall.id,
404
+ args: currentToolCall.arguments
405
+ };
406
+ }
407
+ }
408
+ }
409
+ yield { type: "message:end" };
410
+ yield { type: "done" };
411
+ } catch (error) {
412
+ yield {
413
+ type: "error",
414
+ message: error instanceof Error ? error.message : "Unknown error",
415
+ code: "OPENAI_ERROR"
416
+ };
417
+ }
418
+ }
419
+ };
420
+ function createOpenAIAdapter(config) {
421
+ return new OpenAIAdapter(config);
422
+ }
423
+ var AnthropicAdapter = class {
424
+ constructor(config) {
425
+ this.provider = "anthropic";
426
+ this.config = config;
427
+ this.model = config.model || "claude-3-5-sonnet-latest";
428
+ }
429
+ async getClient() {
430
+ if (!this.client) {
431
+ const { default: Anthropic } = await import('@anthropic-ai/sdk');
432
+ this.client = new Anthropic({
433
+ apiKey: this.config.apiKey
434
+ });
435
+ }
436
+ return this.client;
437
+ }
438
+ /**
439
+ * Convert OpenAI-style messages to Anthropic format
440
+ *
441
+ * OpenAI format:
442
+ * - { role: "assistant", content: "...", tool_calls: [...] }
443
+ * - { role: "tool", tool_call_id: "...", content: "..." }
444
+ *
445
+ * Anthropic format:
446
+ * - { role: "assistant", content: [{ type: "text", text: "..." }, { type: "tool_use", id: "...", name: "...", input: {...} }] }
447
+ * - { role: "user", content: [{ type: "tool_result", tool_use_id: "...", content: "..." }] }
448
+ */
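+ //
+ // Illustrative example of the mapping (hypothetical values, not taken from
+ // the published source). Given these OpenAI-style messages:
+ //   { role: "assistant", content: "", tool_calls: [{ id: "call_1",
+ //     function: { name: "get_weather", arguments: "{\"city\":\"Paris\"}" } }] }
+ //   { role: "tool", tool_call_id: "call_1", content: "{\"temp\":21}" }
+ // convertToAnthropicMessages would produce:
+ //   { role: "assistant", content: [{ type: "tool_use", id: "call_1",
+ //     name: "get_weather", input: { city: "Paris" } }] }
+ //   { role: "user", content: [{ type: "tool_result", tool_use_id: "call_1",
+ //     content: "{\"temp\":21}" }] }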
449
+ convertToAnthropicMessages(rawMessages) {
450
+ const messages = [];
451
+ const pendingToolResults = [];
452
+ for (const msg of rawMessages) {
453
+ if (msg.role === "system") continue;
454
+ if (msg.role === "assistant") {
455
+ if (pendingToolResults.length > 0) {
456
+ messages.push({
457
+ role: "user",
458
+ content: pendingToolResults.map((tr) => ({
459
+ type: "tool_result",
460
+ tool_use_id: tr.tool_use_id,
461
+ content: tr.content
462
+ }))
463
+ });
464
+ pendingToolResults.length = 0;
465
+ }
466
+ const content = [];
467
+ if (msg.content && typeof msg.content === "string" && msg.content.trim()) {
468
+ content.push({ type: "text", text: msg.content });
469
+ }
470
+ const toolCalls = msg.tool_calls;
471
+ if (toolCalls && toolCalls.length > 0) {
472
+ for (const tc of toolCalls) {
473
+ let input = {};
474
+ try {
475
+ input = JSON.parse(tc.function.arguments);
476
+ } catch {
477
+ }
478
+ content.push({
479
+ type: "tool_use",
480
+ id: tc.id,
481
+ name: tc.function.name,
482
+ input
483
+ });
484
+ }
485
+ }
486
+ if (content.length > 0) {
487
+ messages.push({ role: "assistant", content });
488
+ }
489
+ } else if (msg.role === "tool") {
490
+ pendingToolResults.push({
491
+ tool_use_id: msg.tool_call_id,
492
+ content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content)
493
+ });
494
+ } else if (msg.role === "user") {
495
+ if (pendingToolResults.length > 0) {
496
+ messages.push({
497
+ role: "user",
498
+ content: pendingToolResults.map((tr) => ({
499
+ type: "tool_result",
500
+ tool_use_id: tr.tool_use_id,
501
+ content: tr.content
502
+ }))
503
+ });
504
+ pendingToolResults.length = 0;
505
+ }
506
+ if (msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0) {
507
+ const content = [];
508
+ if (msg.content && typeof msg.content === "string") {
509
+ content.push({ type: "text", text: msg.content });
510
+ }
511
+ for (const attachment of msg.attachments) {
512
+ if (attachment.type === "image") {
513
+ if (attachment.url) {
514
+ content.push({
515
+ type: "image",
516
+ source: {
517
+ type: "url",
518
+ url: attachment.url
519
+ }
520
+ });
521
+ } else if (attachment.data) {
522
+ let base64Data = attachment.data;
523
+ if (base64Data.startsWith("data:")) {
524
+ const commaIndex = base64Data.indexOf(",");
525
+ if (commaIndex !== -1) {
526
+ base64Data = base64Data.slice(commaIndex + 1);
527
+ }
528
+ }
529
+ content.push({
530
+ type: "image",
531
+ source: {
532
+ type: "base64",
533
+ media_type: attachment.mimeType || "image/png",
534
+ data: base64Data
535
+ }
536
+ });
537
+ }
538
+ } else if (attachment.type === "file" && attachment.mimeType === "application/pdf") {
539
+ if (attachment.url) {
540
+ content.push({
541
+ type: "document",
542
+ source: {
543
+ type: "url",
544
+ url: attachment.url
545
+ }
546
+ });
547
+ } else if (attachment.data) {
548
+ let base64Data = attachment.data;
549
+ if (base64Data.startsWith("data:")) {
550
+ const commaIndex = base64Data.indexOf(",");
551
+ if (commaIndex !== -1) {
552
+ base64Data = base64Data.slice(commaIndex + 1);
553
+ }
554
+ }
555
+ content.push({
556
+ type: "document",
557
+ source: {
558
+ type: "base64",
559
+ media_type: "application/pdf",
560
+ data: base64Data
561
+ }
562
+ });
563
+ }
564
+ }
565
+ }
566
+ messages.push({ role: "user", content });
567
+ } else {
568
+ messages.push({
569
+ role: "user",
570
+ content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content)
571
+ });
572
+ }
573
+ }
574
+ }
575
+ if (pendingToolResults.length > 0) {
576
+ messages.push({
577
+ role: "user",
578
+ content: pendingToolResults.map((tr) => ({
579
+ type: "tool_result",
580
+ tool_use_id: tr.tool_use_id,
581
+ content: tr.content
582
+ }))
583
+ });
584
+ }
585
+ return messages;
586
+ }
587
+ /**
588
+ * Build common request options for both streaming and non-streaming
589
+ */
590
+ buildRequestOptions(request) {
591
+ const systemMessage = request.systemPrompt || "";
592
+ let messages;
593
+ if (request.rawMessages && request.rawMessages.length > 0) {
594
+ messages = this.convertToAnthropicMessages(request.rawMessages);
595
+ } else {
596
+ const formatted = formatMessagesForAnthropic(request.messages, void 0);
597
+ messages = formatted.messages;
598
+ }
599
+ const tools = request.actions?.map((action) => ({
600
+ name: action.name,
601
+ description: action.description,
602
+ input_schema: {
603
+ type: "object",
604
+ properties: action.parameters ? Object.fromEntries(
605
+ Object.entries(action.parameters).map(([key, param]) => [
606
+ key,
607
+ {
608
+ type: param.type,
609
+ description: param.description,
610
+ enum: param.enum
611
+ }
612
+ ])
613
+ ) : {},
614
+ required: action.parameters ? Object.entries(action.parameters).filter(([, param]) => param.required).map(([key]) => key) : []
615
+ }
616
+ }));
617
+ const options = {
618
+ model: request.config?.model || this.model,
619
+ max_tokens: request.config?.maxTokens || this.config.maxTokens || 4096,
620
+ system: systemMessage,
621
+ messages,
622
+ tools: tools?.length ? tools : void 0
623
+ };
624
+ if (this.config.thinking?.type === "enabled") {
625
+ options.thinking = {
626
+ type: "enabled",
627
+ budget_tokens: this.config.thinking.budgetTokens || 1e4
628
+ };
629
+ }
630
+ return { options, messages };
631
+ }
632
+ /**
633
+ * Non-streaming completion (for debugging/comparison with original studio-ai)
634
+ */
635
+ async complete(request) {
636
+ const client = await this.getClient();
637
+ const { options } = this.buildRequestOptions(request);
638
+ const nonStreamingOptions = {
639
+ ...options,
640
+ stream: false
641
+ };
642
+ try {
643
+ const response = await client.messages.create(nonStreamingOptions);
644
+ let content = "";
645
+ let thinking = "";
646
+ const toolCalls = [];
647
+ for (const block of response.content) {
648
+ if (block.type === "text") {
649
+ content += block.text;
650
+ } else if (block.type === "thinking") {
651
+ thinking += block.thinking;
652
+ } else if (block.type === "tool_use") {
653
+ toolCalls.push({
654
+ id: block.id,
655
+ name: block.name,
656
+ args: block.input
657
+ });
658
+ }
659
+ }
660
+ return {
661
+ content,
662
+ toolCalls,
663
+ thinking: thinking || void 0,
664
+ rawResponse: response
665
+ };
666
+ } catch (error) {
667
+ throw error;
668
+ }
669
+ }
670
+ async *stream(request) {
671
+ const client = await this.getClient();
672
+ const { options } = this.buildRequestOptions(request);
673
+ const messageId = core.generateMessageId();
674
+ yield { type: "message:start", id: messageId };
675
+ try {
676
+ const stream = await client.messages.stream(options);
677
+ let currentToolUse = null;
678
+ let isInThinkingBlock = false;
679
+ for await (const event of stream) {
680
+ if (request.signal?.aborted) {
681
+ break;
682
+ }
683
+ switch (event.type) {
684
+ case "content_block_start":
685
+ if (event.content_block.type === "tool_use") {
686
+ currentToolUse = {
687
+ id: event.content_block.id,
688
+ name: event.content_block.name,
689
+ input: ""
690
+ };
691
+ yield {
692
+ type: "action:start",
693
+ id: currentToolUse.id,
694
+ name: currentToolUse.name
695
+ };
696
+ } else if (event.content_block.type === "thinking") {
697
+ isInThinkingBlock = true;
698
+ yield { type: "thinking:start" };
699
+ }
700
+ break;
701
+ case "content_block_delta":
702
+ if (event.delta.type === "text_delta") {
703
+ yield { type: "message:delta", content: event.delta.text };
704
+ } else if (event.delta.type === "thinking_delta") {
705
+ yield { type: "thinking:delta", content: event.delta.thinking };
706
+ } else if (event.delta.type === "input_json_delta" && currentToolUse) {
707
+ currentToolUse.input += event.delta.partial_json;
708
+ }
709
+ break;
710
+ case "content_block_stop":
711
+ if (currentToolUse) {
712
+ yield {
713
+ type: "action:args",
714
+ id: currentToolUse.id,
715
+ args: currentToolUse.input
716
+ };
717
+ currentToolUse = null;
718
+ }
719
+ if (isInThinkingBlock) {
720
+ yield { type: "thinking:end" };
721
+ isInThinkingBlock = false;
722
+ }
723
+ break;
724
+ case "message_stop":
725
+ break;
726
+ }
727
+ }
728
+ yield { type: "message:end" };
729
+ yield { type: "done" };
730
+ } catch (error) {
731
+ yield {
732
+ type: "error",
733
+ message: error instanceof Error ? error.message : "Unknown error",
734
+ code: "ANTHROPIC_ERROR"
735
+ };
736
+ }
737
+ }
738
+ };
739
+ function createAnthropicAdapter(config) {
740
+ return new AnthropicAdapter(config);
741
+ }
742
+ var GroqAdapter = class {
743
+ constructor(config) {
744
+ this.provider = "groq";
745
+ this.config = config;
746
+ this.model = config.model || "llama-3.1-70b-versatile";
747
+ }
748
+ async *stream(request) {
749
+ const messages = formatMessages(request.messages, request.systemPrompt);
750
+ const tools = request.actions?.length ? formatTools(request.actions) : void 0;
751
+ const messageId = core.generateMessageId();
752
+ yield { type: "message:start", id: messageId };
753
+ try {
754
+ const response = await fetch(
755
+ "https://api.groq.com/openai/v1/chat/completions",
756
+ {
757
+ method: "POST",
758
+ headers: {
759
+ "Content-Type": "application/json",
760
+ Authorization: `Bearer ${this.config.apiKey}`
761
+ },
762
+ body: JSON.stringify({
763
+ model: request.config?.model || this.model,
764
+ messages,
765
+ tools,
766
+ temperature: request.config?.temperature ?? this.config.temperature,
767
+ max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
768
+ stream: true
769
+ }),
770
+ signal: request.signal
771
+ }
772
+ );
773
+ if (!response.ok) {
774
+ throw new Error(`Groq API error: ${response.status}`);
775
+ }
776
+ if (!response.body) {
777
+ throw new Error("No response body");
778
+ }
779
+ const reader = response.body.getReader();
780
+ const decoder = new TextDecoder();
781
+ let buffer = "";
782
+ let currentToolCall = null;
783
+ while (true) {
784
+ const { done, value } = await reader.read();
785
+ if (done) break;
786
+ buffer += decoder.decode(value, { stream: true });
787
+ const lines = buffer.split("\n");
788
+ buffer = lines.pop() || "";
789
+ for (const line of lines) {
790
+ if (!line.startsWith("data: ")) continue;
791
+ const data = line.slice(6).trim();
792
+ if (data === "[DONE]") continue;
793
+ try {
794
+ const chunk = JSON.parse(data);
795
+ const delta = chunk.choices?.[0]?.delta;
796
+ if (delta?.content) {
797
+ yield { type: "message:delta", content: delta.content };
798
+ }
799
+ if (delta?.tool_calls) {
800
+ for (const toolCall of delta.tool_calls) {
801
+ if (toolCall.id) {
802
+ if (currentToolCall) {
803
+ yield {
804
+ type: "action:args",
805
+ id: currentToolCall.id,
806
+ args: currentToolCall.arguments
807
+ };
808
+ }
809
+ currentToolCall = {
810
+ id: toolCall.id,
811
+ name: toolCall.function?.name || "",
812
+ arguments: toolCall.function?.arguments || ""
813
+ };
814
+ yield {
815
+ type: "action:start",
816
+ id: currentToolCall.id,
817
+ name: currentToolCall.name
818
+ };
819
+ } else if (currentToolCall && toolCall.function?.arguments) {
820
+ currentToolCall.arguments += toolCall.function.arguments;
821
+ }
822
+ }
823
+ }
824
+ if (chunk.choices?.[0]?.finish_reason && currentToolCall) {
825
+ yield {
826
+ type: "action:args",
827
+ id: currentToolCall.id,
828
+ args: currentToolCall.arguments
829
+ };
830
+ }
831
+ } catch {
832
+ }
833
+ }
834
+ }
835
+ yield { type: "message:end" };
836
+ yield { type: "done" };
837
+ } catch (error) {
838
+ if (error.name === "AbortError") {
839
+ yield { type: "done" };
840
+ } else {
841
+ yield {
842
+ type: "error",
843
+ message: error instanceof Error ? error.message : "Unknown error",
844
+ code: "GROQ_ERROR"
845
+ };
846
+ }
847
+ }
848
+ }
849
+ };
850
+ function createGroqAdapter(config) {
851
+ return new GroqAdapter(config);
852
+ }
853
+ var OllamaAdapter = class {
854
+ constructor(config = {}) {
855
+ this.provider = "ollama";
856
+ this.config = config;
857
+ this.model = config.model || "llama3";
858
+ this.baseUrl = config.baseUrl || "http://localhost:11434";
859
+ }
860
+ async *stream(request) {
861
+ const messages = formatMessages(request.messages, request.systemPrompt);
862
+ const messageId = core.generateMessageId();
863
+ yield { type: "message:start", id: messageId };
864
+ try {
865
+ const response = await fetch(`${this.baseUrl}/api/chat`, {
866
+ method: "POST",
867
+ headers: {
868
+ "Content-Type": "application/json"
869
+ },
870
+ body: JSON.stringify({
871
+ model: request.config?.model || this.model,
872
+ messages,
873
+ stream: true,
874
+ options: {
875
+ temperature: request.config?.temperature ?? this.config.temperature,
876
+ num_predict: request.config?.maxTokens ?? this.config.maxTokens
877
+ }
878
+ }),
879
+ signal: request.signal
880
+ });
881
+ if (!response.ok) {
882
+ throw new Error(`Ollama API error: ${response.status}`);
883
+ }
884
+ if (!response.body) {
885
+ throw new Error("No response body");
886
+ }
887
+ const reader = response.body.getReader();
888
+ const decoder = new TextDecoder();
889
+ let buffer = "";
890
+ while (true) {
891
+ const { done, value } = await reader.read();
892
+ if (done) break;
893
+ buffer += decoder.decode(value, { stream: true });
894
+ const lines = buffer.split("\n");
895
+ buffer = lines.pop() || "";
896
+ for (const line of lines) {
897
+ if (!line.trim()) continue;
898
+ try {
899
+ const chunk = JSON.parse(line);
900
+ if (chunk.message?.content) {
901
+ yield { type: "message:delta", content: chunk.message.content };
902
+ }
903
+ if (chunk.done) {
904
+ break;
905
+ }
906
+ } catch {
907
+ }
908
+ }
909
+ }
910
+ yield { type: "message:end" };
911
+ yield { type: "done" };
912
+ } catch (error) {
913
+ if (error.name === "AbortError") {
914
+ yield { type: "done" };
915
+ } else {
916
+ yield {
917
+ type: "error",
918
+ message: error instanceof Error ? error.message : "Unknown error",
919
+ code: "OLLAMA_ERROR"
920
+ };
921
+ }
922
+ }
923
+ }
924
+ };
925
+ function createOllamaAdapter(config) {
926
+ return new OllamaAdapter(config);
927
+ }
928
+ function attachmentToGeminiPart(attachment) {
929
+ if (!attachment.data) {
930
+ console.warn(
931
+ "Gemini adapter: URL-based attachments not supported, skipping"
932
+ );
933
+ return null;
934
+ }
935
+ if (attachment.type === "image") {
936
+ let base64Data = attachment.data;
937
+ if (base64Data.startsWith("data:")) {
938
+ const commaIndex = base64Data.indexOf(",");
939
+ if (commaIndex !== -1) {
940
+ base64Data = base64Data.slice(commaIndex + 1);
941
+ }
942
+ }
943
+ return {
944
+ inlineData: {
945
+ mimeType: attachment.mimeType || "image/png",
946
+ data: base64Data
947
+ }
948
+ };
949
+ }
950
+ if (attachment.type === "audio" || attachment.type === "video") {
951
+ let base64Data = attachment.data;
952
+ if (base64Data.startsWith("data:")) {
953
+ const commaIndex = base64Data.indexOf(",");
954
+ if (commaIndex !== -1) {
955
+ base64Data = base64Data.slice(commaIndex + 1);
956
+ }
957
+ }
958
+ return {
959
+ inlineData: {
960
+ mimeType: attachment.mimeType || (attachment.type === "audio" ? "audio/mp3" : "video/mp4"),
961
+ data: base64Data
962
+ }
963
+ };
964
+ }
965
+ return null;
966
+ }
967
+ function messageToGeminiContent(msg) {
968
+ if (msg.role === "system") return null;
969
+ const parts = [];
970
+ if (msg.role === "tool" && msg.tool_call_id) {
971
+ let responseData;
972
+ try {
973
+ responseData = JSON.parse(msg.content || "{}");
974
+ } catch {
975
+ responseData = { result: msg.content || "" };
976
+ }
977
+ const toolName = msg.metadata?.toolName || "tool";
978
+ parts.push({
979
+ functionResponse: {
980
+ name: toolName,
981
+ response: responseData
982
+ }
983
+ });
984
+ return { role: "user", parts };
985
+ }
986
+ if (msg.content) {
987
+ parts.push({ text: msg.content });
988
+ }
989
+ const attachments = msg.metadata?.attachments;
990
+ if (attachments && Array.isArray(attachments)) {
991
+ for (const attachment of attachments) {
992
+ const part = attachmentToGeminiPart(attachment);
993
+ if (part) {
994
+ parts.push(part);
995
+ }
996
+ }
997
+ }
998
+ if (msg.role === "assistant" && msg.tool_calls && msg.tool_calls.length > 0) {
999
+ for (const tc of msg.tool_calls) {
1000
+ let args = {};
1001
+ try {
1002
+ args = JSON.parse(tc.function.arguments);
1003
+ } catch {
1004
+ }
1005
+ parts.push({
1006
+ functionCall: {
1007
+ name: tc.function.name,
1008
+ args
1009
+ }
1010
+ });
1011
+ }
1012
+ }
1013
+ if (parts.length === 0) return null;
1014
+ return {
1015
+ role: msg.role === "assistant" ? "model" : "user",
1016
+ parts
1017
+ };
1018
+ }
1019
+ function formatToolsForGemini(actions) {
1020
+ if (!actions || actions.length === 0) return void 0;
1021
+ return {
1022
+ functionDeclarations: actions.map((action) => ({
1023
+ name: action.name,
1024
+ description: action.description,
1025
+ parameters: action.parameters ? {
1026
+ type: "object",
1027
+ properties: Object.fromEntries(
1028
+ Object.entries(action.parameters).map(([key, param]) => [
1029
+ key,
1030
+ {
1031
+ type: param.type,
1032
+ description: param.description,
1033
+ enum: param.enum
1034
+ }
1035
+ ])
1036
+ ),
1037
+ required: Object.entries(action.parameters).filter(([, param]) => param.required).map(([key]) => key)
1038
+ } : void 0
1039
+ }))
1040
+ };
1041
+ }
1042
+ var GoogleAdapter = class {
1043
+ constructor(config) {
1044
+ this.provider = "google";
1045
+ this.config = config;
1046
+ this.model = config.model || "gemini-2.0-flash";
1047
+ }
1048
+ async getClient() {
1049
+ if (!this.client) {
1050
+ const { GoogleGenerativeAI } = await import('@google/generative-ai');
1051
+ this.client = new GoogleGenerativeAI(this.config.apiKey);
1052
+ }
1053
+ return this.client;
1054
+ }
1055
+ async *stream(request) {
1056
+ const client = await this.getClient();
1057
+ const modelId = request.config?.model || this.model;
1058
+ const model = client.getGenerativeModel({
1059
+ model: modelId,
1060
+ safetySettings: this.config.safetySettings
1061
+ });
1062
+ let contents = [];
1063
+ let systemInstruction;
1064
+ if (request.rawMessages && request.rawMessages.length > 0) {
1065
+ for (const msg of request.rawMessages) {
1066
+ if (msg.role === "system") {
1067
+ systemInstruction = (systemInstruction || "") + (msg.content || "");
1068
+ continue;
1069
+ }
1070
+ const content = messageToGeminiContent(msg);
1071
+ if (content) {
1072
+ contents.push(content);
1073
+ }
1074
+ }
1075
+ if (request.systemPrompt && !systemInstruction) {
1076
+ systemInstruction = request.systemPrompt;
1077
+ }
1078
+ } else {
1079
+ for (const msg of request.messages) {
1080
+ if (msg.role === "system") {
1081
+ systemInstruction = (systemInstruction || "") + (msg.content || "");
1082
+ continue;
1083
+ }
1084
+ const content = messageToGeminiContent(msg);
1085
+ if (content) {
1086
+ contents.push(content);
1087
+ }
1088
+ }
1089
+ if (request.systemPrompt) {
1090
+ systemInstruction = request.systemPrompt;
1091
+ }
1092
+ }
1093
+ if (contents.length === 0 || contents[0].role !== "user") {
1094
+ contents = [{ role: "user", parts: [{ text: "" }] }, ...contents];
1095
+ }
1096
+ const mergedContents = [];
1097
+ for (const content of contents) {
1098
+ const last = mergedContents[mergedContents.length - 1];
1099
+ if (last && last.role === content.role) {
1100
+ last.parts.push(...content.parts);
1101
+ } else {
1102
+ mergedContents.push({ ...content, parts: [...content.parts] });
1103
+ }
1104
+ }
1105
+ const tools = formatToolsForGemini(request.actions);
1106
+ const messageId = core.generateMessageId();
1107
+ yield { type: "message:start", id: messageId };
1108
+ try {
1109
+ const chat = model.startChat({
1110
+ history: mergedContents.slice(0, -1),
1111
+ // All but the last message
1112
+ systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : void 0,
1113
+ tools: tools ? [tools] : void 0,
1114
+ generationConfig: {
1115
+ temperature: request.config?.temperature ?? this.config.temperature,
1116
+ maxOutputTokens: request.config?.maxTokens ?? this.config.maxTokens
1117
+ }
1118
+ });
1119
+ const lastMessage = mergedContents[mergedContents.length - 1];
1120
+ const result = await chat.sendMessageStream(lastMessage.parts);
1121
+ let currentToolCall = null;
1122
+ for await (const chunk of result.stream) {
1123
+ if (request.signal?.aborted) {
1124
+ break;
1125
+ }
1126
+ const candidate = chunk.candidates?.[0];
1127
+ if (!candidate?.content?.parts) continue;
1128
+ for (const part of candidate.content.parts) {
1129
+ if ("text" in part && part.text) {
1130
+ yield { type: "message:delta", content: part.text };
1131
+ }
1132
+ if ("functionCall" in part && part.functionCall) {
1133
+ const fc = part.functionCall;
1134
+ const toolId = core.generateToolCallId();
1135
+ if (currentToolCall) {
1136
+ yield {
1137
+ type: "action:args",
1138
+ id: currentToolCall.id,
1139
+ args: JSON.stringify(currentToolCall.args)
1140
+ };
1141
+ }
1142
+ currentToolCall = {
1143
+ id: toolId,
1144
+ name: fc.name,
1145
+ args: fc.args || {}
1146
+ };
1147
+ yield {
1148
+ type: "action:start",
1149
+ id: toolId,
1150
+ name: fc.name
1151
+ };
1152
+ }
1153
+ }
1154
+ if (candidate.finishReason) {
1155
+ if (currentToolCall) {
1156
+ yield {
1157
+ type: "action:args",
1158
+ id: currentToolCall.id,
1159
+ args: JSON.stringify(currentToolCall.args)
1160
+ };
1161
+ }
1162
+ }
1163
+ }
1164
+ yield { type: "message:end" };
1165
+ yield { type: "done" };
1166
+ } catch (error) {
1167
+ yield {
1168
+ type: "error",
1169
+ message: error instanceof Error ? error.message : "Unknown error",
1170
+ code: "GOOGLE_ERROR"
1171
+ };
1172
+ }
1173
+ }
1174
+ /**
1175
+ * Non-streaming completion (optional, for debugging)
1176
+ */
1177
+ async complete(request) {
1178
+ const client = await this.getClient();
1179
+ const modelId = request.config?.model || this.model;
1180
+ const model = client.getGenerativeModel({
1181
+ model: modelId,
1182
+ safetySettings: this.config.safetySettings
1183
+ });
1184
+ let contents = [];
1185
+ let systemInstruction;
1186
+ for (const msg of request.messages) {
1187
+ if (msg.role === "system") {
1188
+ systemInstruction = (systemInstruction || "") + (msg.content || "");
1189
+ continue;
1190
+ }
1191
+ const content = messageToGeminiContent(msg);
1192
+ if (content) {
1193
+ contents.push(content);
1194
+ }
1195
+ }
1196
+ if (request.systemPrompt) {
1197
+ systemInstruction = request.systemPrompt;
1198
+ }
1199
+ if (contents.length === 0 || contents[0].role !== "user") {
1200
+ contents = [{ role: "user", parts: [{ text: "" }] }, ...contents];
1201
+ }
1202
+ const mergedContents = [];
1203
+ for (const content of contents) {
1204
+ const last = mergedContents[mergedContents.length - 1];
1205
+ if (last && last.role === content.role) {
1206
+ last.parts.push(...content.parts);
1207
+ } else {
1208
+ mergedContents.push({ ...content, parts: [...content.parts] });
1209
+ }
1210
+ }
1211
+ const tools = formatToolsForGemini(request.actions);
1212
+ const chat = model.startChat({
1213
+ history: mergedContents.slice(0, -1),
1214
+ systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : void 0,
1215
+ tools: tools ? [tools] : void 0,
1216
+ generationConfig: {
1217
+ temperature: request.config?.temperature ?? this.config.temperature,
1218
+ maxOutputTokens: request.config?.maxTokens ?? this.config.maxTokens
1219
+ }
1220
+ });
1221
+ const lastMessage = mergedContents[mergedContents.length - 1];
1222
+ const result = await chat.sendMessage(lastMessage.parts);
1223
+ const response = result.response;
1224
+ let textContent = "";
1225
+ const toolCalls = [];
1226
+ const candidate = response.candidates?.[0];
1227
+ if (candidate?.content?.parts) {
1228
+ for (const part of candidate.content.parts) {
1229
+ if ("text" in part && part.text) {
1230
+ textContent += part.text;
1231
+ }
1232
+ if ("functionCall" in part && part.functionCall) {
1233
+ toolCalls.push({
1234
+ id: core.generateToolCallId(),
1235
+ name: part.functionCall.name,
1236
+ args: part.functionCall.args || {}
1237
+ });
1238
+ }
1239
+ }
1240
+ }
1241
+ return {
1242
+ content: textContent,
1243
+ toolCalls,
1244
+ rawResponse: response
1245
+ };
1246
+ }
1247
+ };
1248
+ function createGoogleAdapter(config) {
1249
+ return new GoogleAdapter(config);
1250
+ }
1251
+ var XAI_BASE_URL = "https://api.x.ai/v1";
1252
+ var XAIAdapter = class {
1253
+ constructor(config) {
1254
+ this.provider = "xai";
1255
+ this.config = config;
1256
+ this.model = config.model || "grok-2";
1257
+ }
1258
+ async getClient() {
1259
+ if (!this.client) {
1260
+ const { default: OpenAI } = await import('openai');
1261
+ this.client = new OpenAI({
1262
+ apiKey: this.config.apiKey,
1263
+ baseURL: this.config.baseUrl || XAI_BASE_URL
1264
+ });
1265
+ }
1266
+ return this.client;
1267
+ }
1268
+ async *stream(request) {
1269
+ const client = await this.getClient();
1270
+ let messages;
1271
+ if (request.rawMessages && request.rawMessages.length > 0) {
1272
+ const processedMessages = request.rawMessages.map((msg) => {
1273
+ const hasAttachments = msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0;
1274
+ if (hasAttachments) {
1275
+ const content = [];
1276
+ if (msg.content) {
1277
+ content.push({ type: "text", text: msg.content });
1278
+ }
1279
+ for (const attachment of msg.attachments) {
1280
+ if (attachment.type === "image" && (attachment.url || attachment.data)) {
1281
+ let imageUrl = attachment.url || attachment.data;
1282
+ if (!attachment.url && !imageUrl.startsWith("data:")) {
1283
+ imageUrl = `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
1284
+ }
1285
+ content.push({
1286
+ type: "image_url",
1287
+ image_url: { url: imageUrl, detail: "auto" }
1288
+ });
1289
+ }
1290
+ }
1291
+ return { ...msg, content, attachments: void 0 };
1292
+ }
1293
+ return msg;
1294
+ });
1295
+ if (request.systemPrompt) {
1296
+ const hasSystem = processedMessages.some((m) => m.role === "system");
1297
+ if (!hasSystem) {
1298
+ messages = [
1299
+ { role: "system", content: request.systemPrompt },
1300
+ ...processedMessages
1301
+ ];
1302
+ } else {
1303
+ messages = processedMessages;
1304
+ }
1305
+ } else {
1306
+ messages = processedMessages;
1307
+ }
1308
+ } else {
1309
+ messages = formatMessagesForOpenAI(
1310
+ request.messages,
1311
+ request.systemPrompt
1312
+ );
1313
+ }
1314
+ const tools = request.actions?.length ? formatTools(request.actions) : void 0;
1315
+ const messageId = core.generateMessageId();
1316
+ yield { type: "message:start", id: messageId };
1317
+ try {
1318
+ const stream = await client.chat.completions.create({
1319
+ model: request.config?.model || this.model,
1320
+ messages,
1321
+ tools,
1322
+ temperature: request.config?.temperature ?? this.config.temperature,
1323
+ max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
1324
+ stream: true
1325
+ });
1326
+ let currentToolCall = null;
1327
+ for await (const chunk of stream) {
1328
+ if (request.signal?.aborted) {
1329
+ break;
1330
+ }
1331
+ const delta = chunk.choices[0]?.delta;
1332
+ if (delta?.content) {
1333
+ yield { type: "message:delta", content: delta.content };
1334
+ }
1335
+ if (delta?.tool_calls) {
1336
+ for (const toolCall of delta.tool_calls) {
1337
+ if (toolCall.id) {
1338
+ if (currentToolCall) {
1339
+ yield {
1340
+ type: "action:args",
1341
+ id: currentToolCall.id,
1342
+ args: currentToolCall.arguments
1343
+ };
1344
+ }
1345
+ currentToolCall = {
1346
+ id: toolCall.id,
1347
+ name: toolCall.function?.name || "",
1348
+ arguments: toolCall.function?.arguments || ""
1349
+ };
1350
+ yield {
1351
+ type: "action:start",
1352
+ id: currentToolCall.id,
1353
+ name: currentToolCall.name
1354
+ };
1355
+ } else if (currentToolCall && toolCall.function?.arguments) {
1356
+ currentToolCall.arguments += toolCall.function.arguments;
1357
+ }
1358
+ }
1359
+ }
1360
+ if (chunk.choices[0]?.finish_reason) {
1361
+ if (currentToolCall) {
1362
+ yield {
1363
+ type: "action:args",
1364
+ id: currentToolCall.id,
1365
+ args: currentToolCall.arguments
1366
+ };
1367
+ }
1368
+ }
1369
+ }
1370
+ yield { type: "message:end" };
1371
+ yield { type: "done" };
1372
+ } catch (error) {
1373
+ yield {
1374
+ type: "error",
1375
+ message: error instanceof Error ? error.message : "Unknown error",
1376
+ code: "XAI_ERROR"
1377
+ };
1378
+ }
1379
+ }
1380
+ /**
1381
+ * Non-streaming completion (optional, for debugging)
1382
+ */
1383
+ async complete(request) {
1384
+ const client = await this.getClient();
1385
+ let messages;
1386
+ if (request.rawMessages && request.rawMessages.length > 0) {
1387
+ messages = request.rawMessages;
1388
+ if (request.systemPrompt) {
1389
+ const hasSystem = messages.some((m) => m.role === "system");
1390
+ if (!hasSystem) {
1391
+ messages = [
1392
+ { role: "system", content: request.systemPrompt },
1393
+ ...messages
1394
+ ];
1395
+ }
1396
+ }
1397
+ } else {
1398
+ messages = formatMessagesForOpenAI(
1399
+ request.messages,
1400
+ request.systemPrompt
1401
+ );
1402
+ }
1403
+ const tools = request.actions?.length ? formatTools(request.actions) : void 0;
1404
+ const response = await client.chat.completions.create({
1405
+ model: request.config?.model || this.model,
1406
+ messages,
1407
+ tools,
1408
+ temperature: request.config?.temperature ?? this.config.temperature,
1409
+ max_tokens: request.config?.maxTokens ?? this.config.maxTokens
1410
+ });
1411
+ const choice = response.choices[0];
1412
+ const message = choice?.message;
1413
+ const toolCalls = (message?.tool_calls || []).map((tc) => ({
1414
+ id: tc.id,
1415
+ name: tc.function.name,
1416
+ args: JSON.parse(tc.function.arguments || "{}")
1417
+ }));
1418
+ return {
1419
+ content: message?.content || "",
1420
+ toolCalls,
1421
+ rawResponse: response
1422
+ };
1423
+ }
1424
+ };
1425
+ function createXAIAdapter(config) {
1426
+ return new XAIAdapter(config);
1427
+ }
1428
+ var DEFAULT_API_VERSION = "2024-08-01-preview";
1429
+ function buildAzureEndpoint(resourceName, deploymentName, apiVersion) {
1430
+ return `https://${resourceName}.openai.azure.com/openai/deployments/${deploymentName}`;
1431
+ }
1432
+ var AzureAdapter = class {
1433
+ constructor(config) {
1434
+ this.provider = "azure";
1435
+ this.config = config;
1436
+ this.model = config.deploymentName;
1437
+ }
1438
+ async getClient() {
1439
+ if (!this.client) {
1440
+ const { AzureOpenAI } = await import('openai');
1441
+ const apiVersion = this.config.apiVersion || DEFAULT_API_VERSION;
1442
+ const endpoint = this.config.baseUrl || buildAzureEndpoint(
1443
+ this.config.resourceName,
1444
+ this.config.deploymentName);
1445
+ this.client = new AzureOpenAI({
1446
+ apiKey: this.config.apiKey,
1447
+ endpoint,
1448
+ apiVersion,
1449
+ deployment: this.config.deploymentName
1450
+ });
1451
+ }
1452
+ return this.client;
1453
+ }
1454
+ async *stream(request) {
1455
+ const client = await this.getClient();
1456
+ let messages;
1457
+ if (request.rawMessages && request.rawMessages.length > 0) {
1458
+ const processedMessages = request.rawMessages.map((msg) => {
1459
+ const hasAttachments = msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0;
1460
+ if (hasAttachments) {
1461
+ const content = [];
1462
+ if (msg.content) {
1463
+ content.push({ type: "text", text: msg.content });
1464
+ }
1465
+ for (const attachment of msg.attachments) {
1466
+ if (attachment.type === "image" && (attachment.url || attachment.data)) {
1467
+ let imageUrl = attachment.url || attachment.data;
1468
+ if (!attachment.url && !imageUrl.startsWith("data:")) {
1469
+ imageUrl = `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
1470
+ }
1471
+ content.push({
1472
+ type: "image_url",
1473
+ image_url: { url: imageUrl, detail: "auto" }
1474
+ });
1475
+ }
1476
+ }
1477
+ return { ...msg, content, attachments: void 0 };
1478
+ }
1479
+ return msg;
1480
+ });
1481
+ if (request.systemPrompt) {
1482
+ const hasSystem = processedMessages.some((m) => m.role === "system");
1483
+ if (!hasSystem) {
1484
+ messages = [
1485
+ { role: "system", content: request.systemPrompt },
1486
+ ...processedMessages
1487
+ ];
1488
+ } else {
1489
+ messages = processedMessages;
1490
+ }
1491
+ } else {
1492
+ messages = processedMessages;
1493
+ }
1494
+ } else {
1495
+ messages = formatMessagesForOpenAI(
1496
+ request.messages,
1497
+ request.systemPrompt
1498
+ );
1499
+ }
1500
+ const tools = request.actions?.length ? formatTools(request.actions) : void 0;
1501
+ const messageId = core.generateMessageId();
1502
+ yield { type: "message:start", id: messageId };
1503
+ try {
1504
+ const stream = await client.chat.completions.create({
1505
+ // Azure uses deployment name, not model name
1506
+ model: this.config.deploymentName,
1507
+ messages,
1508
+ tools,
1509
+ temperature: request.config?.temperature ?? this.config.temperature,
1510
+ max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
1511
+ stream: true
1512
+ });
1513
+ let currentToolCall = null;
1514
+ for await (const chunk of stream) {
1515
+ if (request.signal?.aborted) {
1516
+ break;
1517
+ }
1518
+ const delta = chunk.choices[0]?.delta;
1519
+ if (delta?.content) {
1520
+ yield { type: "message:delta", content: delta.content };
1521
+ }
1522
+ if (delta?.tool_calls) {
1523
+ for (const toolCall of delta.tool_calls) {
1524
+ if (toolCall.id) {
1525
+ if (currentToolCall) {
1526
+ yield {
1527
+ type: "action:args",
1528
+ id: currentToolCall.id,
1529
+ args: currentToolCall.arguments
1530
+ };
1531
+ }
1532
+ currentToolCall = {
1533
+ id: toolCall.id,
1534
+ name: toolCall.function?.name || "",
1535
+ arguments: toolCall.function?.arguments || ""
1536
+ };
1537
+ yield {
1538
+ type: "action:start",
1539
+ id: currentToolCall.id,
1540
+ name: currentToolCall.name
1541
+ };
1542
+ } else if (currentToolCall && toolCall.function?.arguments) {
1543
+ currentToolCall.arguments += toolCall.function.arguments;
1544
+ }
1545
+ }
1546
+ }
1547
+ if (chunk.choices[0]?.finish_reason) {
1548
+ if (currentToolCall) {
1549
+ yield {
1550
+ type: "action:args",
1551
+ id: currentToolCall.id,
1552
+ args: currentToolCall.arguments
1553
+ };
1554
+ }
1555
+ }
1556
+ }
1557
+ yield { type: "message:end" };
1558
+ yield { type: "done" };
1559
+ } catch (error) {
1560
+ yield {
1561
+ type: "error",
1562
+ message: error instanceof Error ? error.message : "Unknown error",
1563
+ code: "AZURE_ERROR"
1564
+ };
1565
+ }
1566
+ }
1567
+ /**
1568
+ * Non-streaming completion (optional, for debugging)
1569
+ */
1570
+ async complete(request) {
1571
+ const client = await this.getClient();
1572
+ let messages;
1573
+ if (request.rawMessages && request.rawMessages.length > 0) {
1574
+ messages = request.rawMessages;
1575
+ if (request.systemPrompt) {
1576
+ const hasSystem = messages.some((m) => m.role === "system");
1577
+ if (!hasSystem) {
1578
+ messages = [
1579
+ { role: "system", content: request.systemPrompt },
1580
+ ...messages
1581
+ ];
1582
+ }
1583
+ }
1584
+ } else {
1585
+ messages = formatMessagesForOpenAI(
1586
+ request.messages,
1587
+ request.systemPrompt
1588
+ );
1589
+ }
1590
+ const tools = request.actions?.length ? formatTools(request.actions) : void 0;
1591
+ const response = await client.chat.completions.create({
1592
+ model: this.config.deploymentName,
1593
+ messages,
1594
+ tools,
1595
+ temperature: request.config?.temperature ?? this.config.temperature,
1596
+ max_tokens: request.config?.maxTokens ?? this.config.maxTokens
1597
+ });
1598
+ const choice = response.choices[0];
1599
+ const message = choice?.message;
1600
+ const toolCalls = (message?.tool_calls || []).map((tc) => ({
1601
+ id: tc.id,
1602
+ name: tc.function.name,
1603
+ args: JSON.parse(tc.function.arguments || "{}")
1604
+ }));
1605
+ return {
1606
+ content: message?.content || "",
1607
+ toolCalls,
1608
+ rawResponse: response
1609
+ };
1610
+ }
1611
+ };
1612
+ function createAzureAdapter(config) {
1613
+ return new AzureAdapter(config);
1614
+ }
1615
+
1616
+ exports.AnthropicAdapter = AnthropicAdapter;
1617
+ exports.AzureAdapter = AzureAdapter;
1618
+ exports.GoogleAdapter = GoogleAdapter;
1619
+ exports.GroqAdapter = GroqAdapter;
1620
+ exports.OllamaAdapter = OllamaAdapter;
1621
+ exports.OpenAIAdapter = OpenAIAdapter;
1622
+ exports.XAIAdapter = XAIAdapter;
1623
+ exports.attachmentToAnthropicDocument = attachmentToAnthropicDocument;
1624
+ exports.attachmentToAnthropicImage = attachmentToAnthropicImage;
1625
+ exports.attachmentToOpenAIImage = attachmentToOpenAIImage;
1626
+ exports.createAnthropicAdapter = createAnthropicAdapter;
1627
+ exports.createAzureAdapter = createAzureAdapter;
1628
+ exports.createGoogleAdapter = createGoogleAdapter;
1629
+ exports.createGroqAdapter = createGroqAdapter;
1630
+ exports.createOllamaAdapter = createOllamaAdapter;
1631
+ exports.createOpenAIAdapter = createOpenAIAdapter;
1632
+ exports.createXAIAdapter = createXAIAdapter;
1633
+ exports.formatMessages = formatMessages;
1634
+ exports.formatMessagesForAnthropic = formatMessagesForAnthropic;
1635
+ exports.formatMessagesForOpenAI = formatMessagesForOpenAI;
1636
+ exports.formatTools = formatTools;
1637
+ exports.hasImageAttachments = hasImageAttachments;
1638
+ exports.hasMediaAttachments = hasMediaAttachments;
1639
+ exports.messageToAnthropicContent = messageToAnthropicContent;
1640
+ exports.messageToOpenAIContent = messageToOpenAIContent;
1641
+ //# sourceMappingURL=index.js.map
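
Below the diff, for orientation: a minimal sketch of how the exported factory functions and the adapters' stream event protocol (message:start, message:delta, action:start, action:args, message:end, done, error) might be consumed. It assumes the package's main entry exposes the exports listed above; the model name and environment variable are placeholders, not part of the published code.

'use strict';

const { createOpenAIAdapter } = require('@yourgpt/llm-sdk');

async function main() {
  // Placeholder configuration; substitute a real key and model.
  const adapter = createOpenAIAdapter({
    apiKey: process.env.OPENAI_API_KEY,
    model: 'gpt-4o'
  });

  // stream() is an async generator that yields protocol events.
  for await (const event of adapter.stream({
    messages: [{ role: 'user', content: 'Hello!' }],
    systemPrompt: 'You are a helpful assistant.'
  })) {
    if (event.type === 'message:delta') process.stdout.write(event.content);
    if (event.type === 'error') console.error(event.message);
    if (event.type === 'done') break;
  }
}

main();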