@cloudbase/agent-adapter-llm 1.0.1-alpha.30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,705 @@
1
"use strict";
// esbuild-generated CommonJS interop helpers (do not edit by hand).
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Defines a lazy, enumerable getter on `target` for every key in `all`.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copies own properties of `from` onto `to` as getters, skipping `except`
// and keys `to` already owns; preserves the source descriptor's enumerability.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Wraps a module namespace object, tagging it __esModule for interop.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
// src/index.ts
// Public entry point: register every exported symbol as a lazy getter on the
// CommonJS exports object (symbols are defined later in this bundle).
var index_exports = {};
__export(index_exports, {
  LLMAgent: () => LLMAgent,
  convertMessagesToAnthropic: () => convertMessagesToAnthropic,
  convertMessagesToOpenAI: () => convertMessagesToOpenAI,
  convertToolsToAnthropic: () => convertToolsToAnthropic,
  convertToolsToOpenAI: () => convertToolsToOpenAI,
  createAnthropicStreamOptions: () => createAnthropicStreamOptions,
  createOpenAIStreamOptions: () => createOpenAIStreamOptions,
  processAnthropicStream: () => processAnthropicStream,
  processOpenAIStream: () => processOpenAIStream
});
module.exports = __toCommonJS(index_exports);
34
+
35
+ // src/agent.ts
36
+ var import_client3 = require("@ag-ui/client");
37
+ var import_rxjs = require("rxjs");
38
+
39
+ // src/converters/openai.ts
40
/**
 * Maps AG-UI messages to the OpenAI Chat Completions message format.
 * Non-string content is serialized with JSON.stringify; roles other than
 * user/assistant/tool are dropped.
 *
 * @param messages     AG-UI message list.
 * @param systemPrompt optional system prompt prepended as a "system" message.
 * @returns array of OpenAI-shaped chat messages.
 */
function convertMessagesToOpenAI(messages, systemPrompt) {
  const asText = (value) => typeof value === "string" ? value : JSON.stringify(value);
  const result = [];
  if (systemPrompt) {
    result.push({ role: "system", content: systemPrompt });
  }
  for (const message of messages) {
    switch (message.role) {
      case "user":
        result.push({ role: "user", content: asText(message.content) });
        break;
      case "assistant":
        result.push({
          role: "assistant",
          // JSON.stringify(undefined) yields undefined; coerce to "".
          content: asText(message.content) || "",
          tool_calls: message.toolCalls?.map((call) => ({
            id: call.id,
            type: "function",
            function: {
              name: call.function.name,
              arguments: call.function.arguments
            }
          }))
        });
        break;
      case "tool":
        result.push({
          role: "tool",
          tool_call_id: message.toolCallId,
          content: asText(message.content)
        });
        break;
    }
  }
  return result;
}
77
/**
 * Maps AG-UI tool definitions to OpenAI function-tool descriptors.
 * Stringified parameter schemas are parsed into objects.
 *
 * @param tools optional AG-UI tool list.
 * @returns OpenAI tool array, or undefined when no tools were given.
 */
function convertToolsToOpenAI(tools) {
  if (!tools?.length) {
    return void 0;
  }
  return tools.map(({ name, description, parameters }) => ({
    type: "function",
    function: {
      name,
      description,
      parameters: typeof parameters === "string" ? JSON.parse(parameters) : parameters
    }
  }));
}
90
/**
 * Builds the request body for a streaming OpenAI chat completion,
 * translating camelCase config keys to the provider's snake_case.
 *
 * @param config { modelName, messages, tools, temperature, maxTokens, user }
 * @returns OpenAI create() options with stream: true.
 */
function createOpenAIStreamOptions(config) {
  const { modelName, messages, tools, temperature, maxTokens, user } = config;
  return {
    model: modelName,
    messages,
    tools,
    temperature,
    max_tokens: maxTokens,
    user,
    stream: true
  };
}
101
+
102
+ // src/converters/anthropic.ts
103
/**
 * Maps AG-UI messages to the Anthropic Messages API format.
 * Assistant turns become content-block arrays (text + tool_use); tool
 * results become "user" turns carrying tool_result blocks, per the
 * Anthropic protocol. Roles other than user/assistant/tool are dropped.
 *
 * @param messages AG-UI message list.
 * @returns array of Anthropic-shaped messages.
 */
function convertMessagesToAnthropic(messages) {
  const asText = (value) => typeof value === "string" ? value : JSON.stringify(value);
  const anthropicMessages = [];
  for (const msg of messages) {
    if (msg.role === "user") {
      anthropicMessages.push({
        role: "user",
        content: asText(msg.content)
      });
    } else if (msg.role === "assistant") {
      const content = [];
      if (msg.content) {
        content.push({
          type: "text",
          text: asText(msg.content)
        });
      }
      if (msg.toolCalls) {
        for (const tc of msg.toolCalls) {
          content.push({
            type: "tool_use",
            id: tc.id,
            name: tc.function.name,
            // Tool calls for no-parameter tools may carry an empty arguments
            // string; JSON.parse("") throws, so fall back to an empty input.
            input: tc.function.arguments ? JSON.parse(tc.function.arguments) : {}
          });
        }
      }
      anthropicMessages.push({
        role: "assistant",
        content
      });
    } else if (msg.role === "tool") {
      anthropicMessages.push({
        role: "user",
        content: [{
          type: "tool_result",
          tool_use_id: msg.toolCallId,
          content: asText(msg.content)
        }]
      });
    }
  }
  return anthropicMessages;
}
149
/**
 * Maps AG-UI tool definitions to Anthropic tool descriptors
 * (`input_schema` instead of OpenAI's nested `function.parameters`).
 *
 * @param tools optional AG-UI tool list.
 * @returns Anthropic tool array, or undefined when no tools were given.
 */
function convertToolsToAnthropic(tools) {
  if (!tools?.length) {
    return void 0;
  }
  return tools.map(({ name, description, parameters }) => ({
    name,
    description,
    input_schema: typeof parameters === "string" ? JSON.parse(parameters) : parameters
  }));
}
159
/**
 * Builds the request body for a streaming Anthropic messages call.
 * `max_tokens` is mandatory for Anthropic, so it defaults to 4096.
 *
 * @param config { modelName, maxTokens, temperature, systemPrompt, messages, tools }
 * @returns Anthropic create() options with stream: true.
 */
function createAnthropicStreamOptions(config) {
  const { modelName, maxTokens, temperature, systemPrompt, messages, tools } = config;
  return {
    model: modelName,
    max_tokens: maxTokens || 4096,
    temperature,
    // Empty-string prompts intentionally collapse to undefined.
    system: systemPrompt || void 0,
    messages,
    tools,
    stream: true
  };
}
170
+
171
+ // src/processors/openai-stream.ts
172
+ var import_client = require("@ag-ui/client");
173
/**
 * Consumes an OpenAI chat-completions delta stream and yields AG-UI protocol
 * events: text messages, thinking (reasoning_content) sections, and the
 * tool-call lifecycle (START / ARGS / END, plus RESULT for "tool" deltas).
 * Any sections still open when the stream ends are closed afterwards.
 *
 * @param stream  async iterable of OpenAI streaming chunks.
 * @param context { threadId, runId, messageId } stamped onto every event.
 */
async function* processOpenAIStream(stream, context) {
  const { threadId, runId, messageId } = context;
  const state = {
    hasStarted: false,
    fullContent: "",
    toolCallsMap: /* @__PURE__ */ new Map(),
    // OpenAI continuation chunks omit the tool-call `id` and identify the
    // call only by `index`, so remember which id each index introduced.
    // (Previously `tool_${index}` was looked up directly, which never matched
    // the real id registered on the first chunk, silently dropping every
    // argument delta after the opening chunk.)
    indexToIdMap: /* @__PURE__ */ new Map()
  };
  const reasoningState = {
    hasStarted: false,
    fullContent: ""
  };
  for await (const chunk of stream) {
    const delta = chunk.choices[0]?.delta;
    if (!delta) continue;
    // Non-standard "tool" role deltas carry a tool execution result.
    if (delta.role === "tool") {
      const toolCallId = delta.tool_call_id;
      if (toolCallId) {
        if (state.toolCallsMap.has(toolCallId)) {
          yield {
            type: import_client.EventType.TOOL_CALL_END,
            threadId,
            runId,
            toolCallId
          };
          state.toolCallsMap.delete(toolCallId);
        }
        yield {
          type: import_client.EventType.TOOL_CALL_RESULT,
          threadId,
          runId,
          toolCallId,
          content: delta.content || ""
        };
      }
      continue;
    }
    if (delta.content) {
      // Regular text after reasoning: close the open thinking section first.
      if (reasoningState.hasStarted) {
        reasoningState.hasStarted = false;
        yield {
          type: import_client.EventType.THINKING_TEXT_MESSAGE_END,
          threadId,
          runId,
          messageId
        };
        yield {
          type: import_client.EventType.THINKING_END,
          threadId,
          runId,
          messageId
        };
      }
      if (!state.hasStarted) {
        yield {
          type: import_client.EventType.TEXT_MESSAGE_START,
          threadId,
          runId,
          messageId,
          role: "assistant"
        };
        state.hasStarted = true;
      }
      state.fullContent += delta.content;
      yield {
        type: import_client.EventType.TEXT_MESSAGE_CONTENT,
        threadId,
        runId,
        messageId,
        delta: delta.content
      };
    }
    if (delta.reasoning_content) {
      if (!reasoningState.hasStarted) {
        yield {
          type: import_client.EventType.THINKING_START,
          threadId,
          runId,
          messageId
        };
        yield {
          type: import_client.EventType.THINKING_TEXT_MESSAGE_START,
          threadId,
          runId,
          messageId,
          role: "assistant"
        };
        reasoningState.hasStarted = true;
      }
      reasoningState.fullContent += delta.reasoning_content;
      yield {
        type: import_client.EventType.THINKING_TEXT_MESSAGE_CONTENT,
        threadId,
        runId,
        messageId,
        delta: delta.reasoning_content
      };
    }
    if (delta.tool_calls) {
      for (const toolCall of delta.tool_calls) {
        if (toolCall.function?.name) {
          // Opening chunk of a tool call: carries id + name (args optional).
          const toolCallId = toolCall.id || `tool_${toolCall.index}`;
          state.indexToIdMap.set(toolCall.index, toolCallId);
          yield {
            type: import_client.EventType.TOOL_CALL_START,
            threadId,
            runId,
            toolCallId,
            toolCallName: toolCall.function.name
          };
          if (toolCall.function.arguments) {
            yield {
              type: import_client.EventType.TOOL_CALL_ARGS,
              threadId,
              runId,
              toolCallId,
              delta: toolCall.function.arguments
            };
          }
          state.toolCallsMap.set(toolCallId, {
            name: toolCall.function.name,
            args: toolCall.function.arguments || ""
          });
        } else if (toolCall.function?.arguments) {
          // Continuation chunk: resolve the id through the index map.
          const toolCallId = toolCall.id || state.indexToIdMap.get(toolCall.index) || `tool_${toolCall.index}`;
          const existing = state.toolCallsMap.get(toolCallId);
          if (existing) {
            existing.args += toolCall.function.arguments;
            yield {
              type: import_client.EventType.TOOL_CALL_ARGS,
              threadId,
              runId,
              toolCallId,
              delta: toolCall.function.arguments
            };
          }
        }
      }
    }
  }
  // Stream finished: close whatever is still open.
  if (state.hasStarted) {
    yield {
      type: import_client.EventType.TEXT_MESSAGE_END,
      threadId,
      runId,
      messageId
    };
  }
  if (reasoningState.hasStarted) {
    yield {
      type: import_client.EventType.THINKING_TEXT_MESSAGE_END,
      threadId,
      runId,
      messageId
    };
    yield {
      type: import_client.EventType.THINKING_END,
      threadId,
      runId,
      messageId
    };
  }
  for (const [toolCallId] of state.toolCallsMap) {
    yield {
      type: import_client.EventType.TOOL_CALL_END,
      threadId,
      runId,
      toolCallId
    };
  }
}
342
+
343
+ // src/processors/anthropic-stream.ts
344
+ var import_client2 = require("@ag-ui/client");
345
/**
 * Consumes an Anthropic Messages streaming event sequence and yields AG-UI
 * protocol events (text messages and the tool-call lifecycle).
 *
 * Run lifecycle events (RUN_STARTED / RUN_FINISHED) are the caller's job
 * (see LLMAgent._run): an earlier version emitted RUN_FINISHED here too,
 * which duplicated the event and fired it mid-run during multi-round tool
 * execution; that emission has been removed for parity with
 * processOpenAIStream.
 *
 * @param stream  async iterable of Anthropic stream events.
 * @param context { threadId, runId, messageId } stamped onto every event.
 */
async function* processAnthropicStream(stream, context) {
  const { threadId, runId, messageId } = context;
  const state = {
    hasStarted: false,
    fullContent: "",
    toolCallsMap: /* @__PURE__ */ new Map(),
    // Anthropic deltas reference content blocks by index, not id.
    indexToIdMap: /* @__PURE__ */ new Map()
  };
  for await (const event of stream) {
    if (event.type === "content_block_start") {
      const block = event.content_block;
      if (block.type === "text") {
        if (!state.hasStarted) {
          yield {
            type: import_client2.EventType.TEXT_MESSAGE_START,
            threadId,
            runId,
            messageId,
            role: "assistant"
          };
          state.hasStarted = true;
        }
      } else if (block.type === "tool_use") {
        state.indexToIdMap.set(event.index, block.id);
        yield {
          type: import_client2.EventType.TOOL_CALL_START,
          threadId,
          runId,
          toolCallId: block.id,
          toolCallName: block.name
        };
        state.toolCallsMap.set(block.id, {
          name: block.name,
          input: ""
        });
      }
    } else if (event.type === "content_block_delta") {
      const delta = event.delta;
      if (delta.type === "text_delta") {
        state.fullContent += delta.text;
        yield {
          type: import_client2.EventType.TEXT_MESSAGE_CONTENT,
          threadId,
          runId,
          messageId,
          delta: delta.text
        };
      } else if (delta.type === "input_json_delta") {
        const toolCallId = state.indexToIdMap.get(event.index);
        if (toolCallId) {
          const toolCall = state.toolCallsMap.get(toolCallId);
          if (toolCall) {
            toolCall.input += delta.partial_json;
            yield {
              type: import_client2.EventType.TOOL_CALL_ARGS,
              threadId,
              runId,
              toolCallId,
              delta: delta.partial_json
            };
          }
        }
      }
    } else if (event.type === "content_block_stop") {
      // Only tool_use blocks are registered in indexToIdMap, so text block
      // stops fall through; the text message is closed after the loop.
      const toolCallId = state.indexToIdMap.get(event.index);
      if (toolCallId && state.toolCallsMap.has(toolCallId)) {
        yield {
          type: import_client2.EventType.TOOL_CALL_END,
          threadId,
          runId,
          toolCallId
        };
      }
    }
  }
  if (state.hasStarted) {
    yield {
      type: import_client2.EventType.TEXT_MESSAGE_END,
      threadId,
      runId,
      messageId
    };
  }
}
434
+
435
+ // src/agent.ts
436
+ var import_crypto = require("crypto");
437
/**
 * Infers which provider SDK `model` is by duck-typing its surface:
 * OpenAI clients expose `chat.completions.create`, Anthropic clients
 * expose `messages.create`.
 *
 * @param model SDK client instance.
 * @returns "openai" | "anthropic"
 * @throws Error when neither shape matches.
 */
function detectModelProviderType(model) {
  const looksLikeOpenAI = "chat" in model && typeof model.chat?.completions?.create === "function";
  if (looksLikeOpenAI) {
    return "openai";
  }
  const looksLikeAnthropic = "messages" in model && typeof model.messages?.create === "function";
  if (looksLikeAnthropic) {
    return "anthropic";
  }
  throw new Error(
    "Unsupported model provider. Expected OpenAI or Anthropic SDK instance."
  );
}
448
/**
 * AG-UI agent backed directly by an OpenAI- or Anthropic-compatible SDK
 * client. Streams model output as AG-UI events and optionally executes
 * client-side tools for up to `maxToolRounds` rounds.
 */
var LLMAgent = class extends import_client3.AbstractAgent {
  /**
   * @param config.model          OpenAI or Anthropic SDK instance (required).
   * @param config.modelName      model identifier passed to the provider.
   * @param config.systemPrompt   optional system prompt.
   * @param config.temperature    optional sampling temperature.
   * @param config.maxTokens      optional completion token cap.
   * @param config.maxToolRounds  max tool-execution rounds (default 5).
   * @param config.onToolCall     optional observer invoked before a tool runs.
   * @param config.onToolResult   optional observer invoked with each result.
   * @throws Error when `config.model` is not a recognized SDK instance.
   */
  constructor(config) {
    super({
      agentId: config.agentId || config.name || "llm-agent",
      description: config.description || "",
      threadId: config.threadId || "",
      ...config
    });
    this.model = config.model;
    this.modelType = detectModelProviderType(config.model);
    this.modelName = config.modelName;
    this.systemPrompt = config.systemPrompt;
    this.temperature = config.temperature;
    this.maxTokens = config.maxTokens;
    this.maxToolRounds = config.maxToolRounds || 5;
    this.onToolCall = config.onToolCall;
    this.onToolResult = config.onToolResult;
    this.name = config.name || config.agentId || "llm-agent";
  }
  /**
   * Start a run; returns an Observable of AG-UI events. Failures are
   * surfaced both as a RUN_ERROR event and as an Observable error.
   */
  run(input) {
    return new import_rxjs.Observable((subscriber) => {
      this._run(subscriber, input).catch((error) => {
        subscriber.next({
          type: import_client3.EventType.RUN_ERROR,
          message: error instanceof Error ? error.message : String(error),
          code: error instanceof Error ? error.name : "UNKNOWN_ERROR"
        });
        subscriber.error(error);
      });
    });
  }
  /**
   * Drive a full run: a single model pass when no tools are provided,
   * otherwise a model/tool loop bounded by `maxToolRounds`. Errors
   * propagate to `run`, which converts them to RUN_ERROR.
   */
  async _run(subscriber, input) {
    const { runId, threadId, tools } = input;
    subscriber.next({
      type: import_client3.EventType.RUN_STARTED,
      threadId,
      runId
    });
    if (!tools || tools.length === 0) {
      await this.runOnce(subscriber, input);
      subscriber.next({
        type: import_client3.EventType.RUN_FINISHED,
        threadId,
        runId
      });
      subscriber.complete();
      return;
    }
    const messages = [...input.messages];
    let round = 0;
    while (round < this.maxToolRounds) {
      round++;
      const events = [];
      const toolCallsMap = /* @__PURE__ */ new Map();
      // Stream one model pass, recording tool calls as they are announced.
      await this.runOnce(subscriber, { ...input, messages }, (event) => {
        events.push(event);
        if (event.type === import_client3.EventType.TOOL_CALL_START) {
          toolCallsMap.set(event.toolCallId, {
            name: event.toolCallName,
            args: ""
          });
        } else if (event.type === import_client3.EventType.TOOL_CALL_ARGS) {
          const existing = toolCallsMap.get(event.toolCallId);
          if (existing) {
            existing.args += event.delta;
          }
        }
      });
      // No tool calls requested: the model finished on its own.
      if (toolCallsMap.size === 0) {
        subscriber.next({
          type: import_client3.EventType.RUN_FINISHED,
          threadId,
          runId
        });
        subscriber.complete();
        return;
      }
      const toolCalls = Array.from(toolCallsMap.entries()).map(
        ([id, data]) => ({
          id,
          name: data.name,
          arguments: data.args
        })
      );
      // Record the assistant turn (text + tool calls) before appending tool
      // results. randomUUID avoids id collisions that `Date.now()` produced
      // when several rounds completed within the same millisecond.
      const assistantMessage = {
        id: `msg_${(0, import_crypto.randomUUID)()}`,
        role: "assistant",
        content: this.extractTextFromEvents(events),
        toolCalls: toolCalls.map((tc) => ({
          id: tc.id,
          type: "function",
          function: {
            name: tc.name,
            arguments: tc.arguments
          }
        }))
      };
      messages.push(assistantMessage);
      const toolResults = await this.executeTools(toolCalls, tools);
      const toolMessages = this.toolResultsToMessages(toolResults);
      messages.push(...toolMessages);
    }
    subscriber.next({
      type: import_client3.EventType.RUN_ERROR,
      message: `Maximum tool execution rounds (${this.maxToolRounds}) reached`,
      code: "MAX_TOOL_ROUNDS_EXCEEDED"
    });
    subscriber.complete();
  }
  /**
   * One streaming pass against an OpenAI-compatible client.
   */
  async runOpenAI(subscriber, input) {
    const { messages, runId, threadId, tools, state, forwardedProps } = input;
    const openai = this.model;
    const openaiMessages = convertMessagesToOpenAI(messages, this.systemPrompt);
    const openaiTools = convertToolsToOpenAI(tools);
    const streamOptions = createOpenAIStreamOptions({
      modelName: this.modelName,
      messages: openaiMessages,
      tools: openaiTools,
      temperature: this.temperature,
      maxTokens: this.maxTokens,
      // Provider-side abuse tracking: prefer an authenticated user id, then
      // a forwarded one, finally a random id.
      user: state?.__request_context__?.user?.id || forwardedProps?.user || (0, import_crypto.randomUUID)()
    });
    const stream = await openai.chat.completions.create(streamOptions);
    const messageId = `msg_${(0, import_crypto.randomUUID)()}`;
    const context = { threadId, runId, messageId };
    for await (const event of processOpenAIStream(stream, context)) {
      subscriber.next(event);
    }
  }
  /**
   * One streaming pass against an Anthropic-compatible client.
   */
  async runAnthropic(subscriber, input) {
    const { messages, runId, threadId, tools } = input;
    const anthropic = this.model;
    const anthropicMessages = convertMessagesToAnthropic(messages);
    const anthropicTools = convertToolsToAnthropic(tools);
    const streamOptions = createAnthropicStreamOptions({
      modelName: this.modelName,
      messages: anthropicMessages,
      tools: anthropicTools,
      systemPrompt: this.systemPrompt,
      temperature: this.temperature,
      maxTokens: this.maxTokens
    });
    const stream = await anthropic.messages.create(streamOptions);
    const messageId = `msg_${(0, import_crypto.randomUUID)()}`;
    const context = { threadId, runId, messageId };
    for await (const event of processAnthropicStream(stream, context)) {
      subscriber.next(event);
    }
  }
  /**
   * Run model once (single round, no tool execution). When `eventCallback`
   * is given, every emitted event is mirrored to it so the caller can
   * observe the pass without re-subscribing.
   */
  async runOnce(subscriber, input, eventCallback) {
    const wrappedSubscriber = eventCallback ? {
      next: (event) => {
        subscriber.next(event);
        eventCallback(event);
      }
    } : subscriber;
    if (this.modelType === "openai") {
      await this.runOpenAI(wrappedSubscriber, input);
    } else if (this.modelType === "anthropic") {
      await this.runAnthropic(wrappedSubscriber, input);
    } else {
      throw new Error("Unsupported model provider");
    }
  }
  /**
   * Execute tool calls in parallel. Per-call failures are captured as
   * error results rather than rejecting the whole batch.
   */
  async executeTools(toolCalls, tools) {
    const toolsMap = new Map(tools.map((t) => [t.name, t]));
    return Promise.all(
      toolCalls.map(async (toolCall) => {
        if (this.onToolCall) {
          this.onToolCall(toolCall);
        }
        try {
          const tool = toolsMap.get(toolCall.name);
          if (!tool) {
            throw new Error(`Tool not found: ${toolCall.name}`);
          }
          const args = JSON.parse(toolCall.arguments);
          const result = await this.executeToolFunction(tool, args);
          const toolResult = {
            toolCallId: toolCall.id,
            result
          };
          if (this.onToolResult) {
            this.onToolResult(toolResult);
          }
          return toolResult;
        } catch (error) {
          const toolResult = {
            toolCallId: toolCall.id,
            result: null,
            error: error instanceof Error ? error.message : String(error)
          };
          if (this.onToolResult) {
            this.onToolResult(toolResult);
          }
          return toolResult;
        }
      })
    );
  }
  /**
   * Execute a single tool's `execute` function with parsed arguments.
   * @throws Error when the tool has no execute method.
   */
  async executeToolFunction(tool, args) {
    if (tool.execute) {
      return await tool.execute(args);
    }
    throw new Error(`Tool ${tool.name} does not have an execute method`);
  }
  /**
   * Convert tool results to AG-UI "tool" role messages for the next round.
   */
  toolResultsToMessages(results) {
    return results.map(
      (result) => ({
        id: `tool_result_${result.toolCallId}`,
        role: "tool",
        toolCallId: result.toolCallId,
        content: result.error ? `Error: ${result.error}` : JSON.stringify(result.result)
      })
    );
  }
  /**
   * Concatenate the text deltas of TEXT_MESSAGE_CONTENT events.
   */
  extractTextFromEvents(events) {
    let text = "";
    for (const event of events) {
      if (event.type === import_client3.EventType.TEXT_MESSAGE_CONTENT) {
        text += event.delta;
      }
    }
    return text;
  }
};
693
// Annotate the CommonJS export names for ESM import in node:
// (`0 &&` makes this dead code at runtime; Node's static cjs-module-lexer
// parses the pattern so ESM consumers get named imports.)
0 && (module.exports = {
  LLMAgent,
  convertMessagesToAnthropic,
  convertMessagesToOpenAI,
  convertToolsToAnthropic,
  convertToolsToOpenAI,
  createAnthropicStreamOptions,
  createOpenAIStreamOptions,
  processAnthropicStream,
  processOpenAIStream
});
//# sourceMappingURL=index.js.map