@cloudbase/agent-adapter-llm 0.0.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs ADDED
@@ -0,0 +1,673 @@
1
+ // src/agent.ts
2
+ import {
3
+ AbstractAgent,
4
+ EventType as EventType3
5
+ } from "@ag-ui/client";
6
+ import { Observable } from "rxjs";
7
+
8
+ // src/converters/openai.ts
9
// src/converters/openai.ts
/**
 * Map AG-UI style messages to OpenAI chat-completion messages.
 * Prepends a system message when `systemPrompt` is provided; roles other
 * than user/assistant/tool are dropped from the output.
 */
function convertMessagesToOpenAI(messages, systemPrompt) {
  // Non-string content is JSON-encoded so it survives the chat API.
  const asText = (value) => typeof value === "string" ? value : JSON.stringify(value);
  const result = systemPrompt ? [{ role: "system", content: systemPrompt }] : [];
  for (const msg of messages) {
    switch (msg.role) {
      case "user":
        result.push({ role: "user", content: asText(msg.content) });
        break;
      case "assistant":
        result.push({
          role: "assistant",
          // undefined content collapses to "" (JSON.stringify(undefined) is falsy)
          content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content) || "",
          tool_calls: msg.toolCalls?.map((tc) => ({
            id: tc.id,
            type: "function",
            function: {
              name: tc.function.name,
              arguments: tc.function.arguments
            }
          }))
        });
        break;
      case "tool":
        result.push({
          role: "tool",
          tool_call_id: msg.toolCallId,
          content: asText(msg.content)
        });
        break;
      default:
        // Other roles (e.g. stray "system" entries in history) are ignored.
        break;
    }
  }
  return result;
}
46
/**
 * Map tool definitions to OpenAI function-tool format.
 * Returns undefined when there are no tools so callers can omit the key.
 */
function convertToolsToOpenAI(tools) {
  if (!tools?.length) return undefined;
  return tools.map(({ name, description, parameters }) => ({
    type: "function",
    function: {
      name,
      description,
      // parameters may arrive pre-serialized as a JSON string
      parameters: typeof parameters === "string" ? JSON.parse(parameters) : parameters
    }
  }));
}
59
/**
 * Build the options object for an OpenAI streaming chat-completion request.
 * Always sets stream: true; undefined optional fields are passed through.
 */
function createOpenAIStreamOptions(config) {
  const { modelName, messages, tools, temperature, maxTokens, user } = config;
  return {
    model: modelName,
    messages,
    tools,
    temperature,
    max_tokens: maxTokens,
    user,
    stream: true
  };
}
70
+
71
+ // src/converters/anthropic.ts
72
// src/converters/anthropic.ts
/**
 * Map AG-UI style messages to Anthropic Messages API format.
 * Assistant tool calls become `tool_use` content blocks; tool results become
 * user-role `tool_result` blocks (Anthropic has no "tool" role). Roles other
 * than user/assistant/tool are dropped.
 */
function convertMessagesToAnthropic(messages) {
  const anthropicMessages = [];
  for (const msg of messages) {
    if (msg.role === "user") {
      const contentStr = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
      anthropicMessages.push({
        role: "user",
        content: contentStr
      });
    } else if (msg.role === "assistant") {
      const content = [];
      if (msg.content) {
        const contentStr = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
        content.push({
          type: "text",
          text: contentStr
        });
      }
      if (msg.toolCalls) {
        for (const tc of msg.toolCalls) {
          // BUG FIX: `arguments` is an empty string when a tool is invoked
          // with no parameters (streamed calls accumulate from ""), and
          // JSON.parse("") throws. Fall back to an empty input object.
          const rawArgs = tc.function.arguments;
          content.push({
            type: "tool_use",
            id: tc.id,
            name: tc.function.name,
            input: rawArgs && rawArgs.trim() ? JSON.parse(rawArgs) : {}
          });
        }
      }
      anthropicMessages.push({
        role: "assistant",
        content
      });
    } else if (msg.role === "tool") {
      const contentStr = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
      anthropicMessages.push({
        role: "user",
        content: [{
          type: "tool_result",
          tool_use_id: msg.toolCallId,
          content: contentStr
        }]
      });
    }
  }
  return anthropicMessages;
}
118
/**
 * Map tool definitions to Anthropic tool format (`input_schema` instead of
 * OpenAI's nested `function.parameters`). Returns undefined when empty.
 */
function convertToolsToAnthropic(tools) {
  if (!tools?.length) return undefined;
  return tools.map(({ name, description, parameters }) => ({
    name,
    description,
    // parameters may arrive pre-serialized as a JSON string
    input_schema: typeof parameters === "string" ? JSON.parse(parameters) : parameters
  }));
}
128
/**
 * Build the options object for an Anthropic streaming Messages request.
 * max_tokens is required by the API, so a 4096 default is applied.
 */
function createAnthropicStreamOptions(config) {
  const { modelName, maxTokens, temperature, systemPrompt, messages, tools } = config;
  return {
    model: modelName,
    max_tokens: maxTokens || 4096,
    temperature,
    system: systemPrompt || undefined,
    messages,
    tools,
    stream: true
  };
}
139
+
140
+ // src/processors/openai-stream.ts
141
+ import { EventType } from "@ag-ui/client";
142
/**
 * Translate an OpenAI streaming chat-completion response into AG-UI events.
 * Emits TEXT_MESSAGE_* for assistant text, THINKING_* for reasoning deltas
 * (`delta.reasoning_content`, as emitted by reasoning-capable backends), and
 * TOOL_CALL_* for streamed tool calls. Open text/thinking/tool-call spans are
 * closed when the stream ends. Does NOT emit RUN_* lifecycle events.
 */
async function* processOpenAIStream(stream, context) {
  const { threadId, runId, messageId } = context;
  // Accumulation for the assistant text message and in-flight tool calls.
  const state = {
    hasStarted: false,
    fullContent: "",
    toolCallsMap: /* @__PURE__ */ new Map()
  };
  // Separate accumulation for reasoning ("thinking") content.
  const reasoningState = {
    hasStarted: false,
    fullContent: ""
  };
  // BUG FIX: only the FIRST chunk of a streamed tool call carries `id`;
  // continuation chunks carry only `index`. The previous code keyed those
  // continuations as `tool_${index}` while the entry was stored under the
  // real id, so every argument delta after the first chunk was dropped.
  // Map index -> id so continuations resolve to the original call.
  const indexToIdMap = /* @__PURE__ */ new Map();
  for await (const chunk of stream) {
    const delta = chunk.choices[0]?.delta;
    if (!delta) continue;
    if (delta.role === "tool") {
      // Tool result echoed back through the stream: close the matching
      // tool-call span (if still open) and surface the result.
      const toolCallId = delta.tool_call_id;
      if (toolCallId) {
        if (state.toolCallsMap.has(toolCallId)) {
          yield {
            type: EventType.TOOL_CALL_END,
            threadId,
            runId,
            toolCallId
          };
          state.toolCallsMap.delete(toolCallId);
        }
        yield {
          type: EventType.TOOL_CALL_RESULT,
          threadId,
          runId,
          toolCallId,
          content: delta.content || ""
        };
      }
      continue;
    }
    if (delta.content) {
      // Visible text after reasoning: close the thinking span first.
      if (reasoningState.hasStarted) {
        reasoningState.hasStarted = false;
        yield {
          type: EventType.THINKING_TEXT_MESSAGE_END,
          threadId,
          runId,
          messageId
        };
        yield {
          type: EventType.THINKING_END,
          threadId,
          runId,
          messageId
        };
      }
      if (!state.hasStarted) {
        yield {
          type: EventType.TEXT_MESSAGE_START,
          threadId,
          runId,
          messageId,
          role: "assistant"
        };
        state.hasStarted = true;
      }
      state.fullContent += delta.content;
      yield {
        type: EventType.TEXT_MESSAGE_CONTENT,
        threadId,
        runId,
        messageId,
        delta: delta.content
      };
    }
    if (delta.reasoning_content) {
      if (!reasoningState.hasStarted) {
        yield {
          type: EventType.THINKING_START,
          threadId,
          runId,
          messageId
        };
        yield {
          type: EventType.THINKING_TEXT_MESSAGE_START,
          threadId,
          runId,
          messageId,
          role: "assistant"
        };
        reasoningState.hasStarted = true;
      }
      reasoningState.fullContent += delta.reasoning_content;
      yield {
        type: EventType.THINKING_TEXT_MESSAGE_CONTENT,
        threadId,
        runId,
        messageId,
        delta: delta.reasoning_content
      };
    }
    if (delta.tool_calls) {
      for (const toolCall of delta.tool_calls) {
        // Resolve the stable id for this call (see indexToIdMap above).
        let toolCallId = toolCall.id;
        if (toolCallId) {
          indexToIdMap.set(toolCall.index, toolCallId);
        } else {
          toolCallId = indexToIdMap.get(toolCall.index) ?? `tool_${toolCall.index}`;
        }
        if (toolCall.function?.name) {
          // First chunk of a tool call: has the function name.
          yield {
            type: EventType.TOOL_CALL_START,
            threadId,
            runId,
            toolCallId,
            toolCallName: toolCall.function.name
          };
          if (toolCall.function.arguments) {
            yield {
              type: EventType.TOOL_CALL_ARGS,
              threadId,
              runId,
              toolCallId,
              delta: toolCall.function.arguments
            };
          }
          state.toolCallsMap.set(toolCallId, {
            name: toolCall.function.name,
            args: toolCall.function.arguments || ""
          });
        } else if (toolCall.function?.arguments) {
          // Continuation chunk: append to the already-started call.
          const existing = state.toolCallsMap.get(toolCallId);
          if (existing) {
            existing.args += toolCall.function.arguments;
            yield {
              type: EventType.TOOL_CALL_ARGS,
              threadId,
              runId,
              toolCallId,
              delta: toolCall.function.arguments
            };
          }
        }
      }
    }
  }
  // Stream exhausted: close any spans still open.
  if (state.hasStarted) {
    yield {
      type: EventType.TEXT_MESSAGE_END,
      threadId,
      runId,
      messageId
    };
  }
  if (reasoningState.hasStarted) {
    yield {
      type: EventType.THINKING_TEXT_MESSAGE_END,
      threadId,
      runId,
      messageId
    };
    yield {
      type: EventType.THINKING_END,
      threadId,
      runId,
      messageId
    };
  }
  for (const [toolCallId] of state.toolCallsMap) {
    yield {
      type: EventType.TOOL_CALL_END,
      threadId,
      runId,
      toolCallId
    };
  }
}
311
+
312
+ // src/processors/anthropic-stream.ts
313
+ import { EventType as EventType2 } from "@ag-ui/client";
314
/**
 * Translate an Anthropic Messages streaming response into AG-UI events.
 * Emits TEXT_MESSAGE_* for text blocks and TOOL_CALL_* for tool_use blocks,
 * tracking tool calls by `content_block` index via indexToIdMap.
 *
 * BUG FIX: this processor no longer emits RUN_FINISHED. Run lifecycle events
 * belong to the caller — LLMAgent._run already emits RUN_FINISHED once per
 * run, so the extra emission here produced duplicate RUN_FINISHED events
 * (and a spurious one per round inside the tool loop), and was inconsistent
 * with processOpenAIStream, which emits no lifecycle events.
 */
async function* processAnthropicStream(stream, context) {
  const { threadId, runId, messageId } = context;
  const state = {
    hasStarted: false,
    fullContent: "",
    toolCallsMap: /* @__PURE__ */ new Map(),
    // Anthropic deltas reference blocks by index, not id.
    indexToIdMap: /* @__PURE__ */ new Map()
  };
  for await (const event of stream) {
    if (event.type === "content_block_start") {
      const block = event.content_block;
      if (block.type === "text") {
        if (!state.hasStarted) {
          yield {
            type: EventType2.TEXT_MESSAGE_START,
            threadId,
            runId,
            messageId,
            role: "assistant"
          };
          state.hasStarted = true;
        }
      } else if (block.type === "tool_use") {
        state.indexToIdMap.set(event.index, block.id);
        yield {
          type: EventType2.TOOL_CALL_START,
          threadId,
          runId,
          toolCallId: block.id,
          toolCallName: block.name
        };
        state.toolCallsMap.set(block.id, {
          name: block.name,
          input: ""
        });
      }
    } else if (event.type === "content_block_delta") {
      const delta = event.delta;
      if (delta.type === "text_delta") {
        state.fullContent += delta.text;
        yield {
          type: EventType2.TEXT_MESSAGE_CONTENT,
          threadId,
          runId,
          messageId,
          delta: delta.text
        };
      } else if (delta.type === "input_json_delta") {
        // Partial JSON for a tool call's input, attributed via block index.
        const toolCallId = state.indexToIdMap.get(event.index);
        if (toolCallId) {
          const toolCall = state.toolCallsMap.get(toolCallId);
          if (toolCall) {
            toolCall.input += delta.partial_json;
            yield {
              type: EventType2.TOOL_CALL_ARGS,
              threadId,
              runId,
              toolCallId,
              delta: delta.partial_json
            };
          }
        }
      }
    } else if (event.type === "content_block_stop") {
      // Close the tool-call span for this block (text blocks close below).
      const toolCallId = state.indexToIdMap.get(event.index);
      if (toolCallId && state.toolCallsMap.has(toolCallId)) {
        yield {
          type: EventType2.TOOL_CALL_END,
          threadId,
          runId,
          toolCallId
        };
      }
    }
  }
  if (state.hasStarted) {
    yield {
      type: EventType2.TEXT_MESSAGE_END,
      threadId,
      runId,
      messageId
    };
  }
}
403
+
404
+ // src/agent.ts
405
+ import { randomUUID } from "crypto";
406
/**
 * Detect which SDK instance was passed as `model` by duck-typing its
 * surface: OpenAI exposes chat.completions.create, Anthropic exposes
 * messages.create. OpenAI is checked first; throws for anything else.
 */
function detectModelProviderType(model) {
  const hasOpenAISurface = "chat" in model && typeof model.chat?.completions?.create === "function";
  if (hasOpenAISurface) {
    return "openai";
  }
  const hasAnthropicSurface = "messages" in model && typeof model.messages?.create === "function";
  if (hasAnthropicSurface) {
    return "anthropic";
  }
  throw new Error(
    "Unsupported model provider. Expected OpenAI or Anthropic SDK instance."
  );
}
417
/**
 * AG-UI agent backed directly by an OpenAI or Anthropic SDK instance.
 * Streams model output as AG-UI events and optionally runs a multi-round
 * tool-execution loop (up to maxToolRounds) when tools are provided.
 */
var LLMAgent = class extends AbstractAgent {
  /**
   * @param config - model (OpenAI or Anthropic SDK instance), modelName,
   *   optional systemPrompt / temperature / maxTokens, maxToolRounds
   *   (default 5), and onToolCall / onToolResult observer hooks.
   */
  constructor(config) {
    super({
      agentId: config.agentId || config.name || "llm-agent",
      description: config.description || "",
      threadId: config.threadId || "",
      ...config
    });
    this.model = config.model;
    // Throws early if the SDK instance is neither OpenAI nor Anthropic.
    this.modelType = detectModelProviderType(config.model);
    this.modelName = config.modelName;
    this.systemPrompt = config.systemPrompt;
    this.temperature = config.temperature;
    this.maxTokens = config.maxTokens;
    this.maxToolRounds = config.maxToolRounds || 5;
    this.onToolCall = config.onToolCall;
    this.onToolResult = config.onToolResult;
    this.name = config.name || config.agentId || "llm-agent";
  }
  /**
   * Start a run. Any rejection from the async pipeline is surfaced as a
   * RUN_ERROR event followed by an observable error.
   */
  run(input) {
    return new Observable((subscriber) => {
      this._run(subscriber, input).catch((error) => {
        subscriber.next({
          type: EventType3.RUN_ERROR,
          message: error instanceof Error ? error.message : String(error),
          code: error instanceof Error ? error.name : "UNKNOWN_ERROR"
        });
        subscriber.error(error);
      });
    });
  }
  /**
   * Main run loop: single model call when no tools are given, otherwise up
   * to maxToolRounds of model call -> tool execution -> feed results back.
   * NOTE: the previous `try { ... } catch (error) { throw error; }` wrapper
   * was a no-op and has been removed — run() already converts rejections
   * into RUN_ERROR events.
   */
  async _run(subscriber, input) {
    const { runId, threadId, tools } = input;
    subscriber.next({
      type: EventType3.RUN_STARTED,
      threadId,
      runId
    });
    if (!tools || tools.length === 0) {
      await this.runOnce(subscriber, input);
      subscriber.next({
        type: EventType3.RUN_FINISHED,
        threadId,
        runId
      });
      subscriber.complete();
      return;
    }
    const messages = [...input.messages];
    let round = 0;
    while (round < this.maxToolRounds) {
      round++;
      const events = [];
      const toolCallsMap = /* @__PURE__ */ new Map();
      // Reassemble streamed tool calls from START/ARGS events as they pass by.
      await this.runOnce(subscriber, { ...input, messages }, (event) => {
        events.push(event);
        if (event.type === EventType3.TOOL_CALL_START) {
          toolCallsMap.set(event.toolCallId, {
            name: event.toolCallName,
            args: ""
          });
        } else if (event.type === EventType3.TOOL_CALL_ARGS) {
          const existing = toolCallsMap.get(event.toolCallId);
          if (existing) {
            existing.args += event.delta;
          }
        }
      });
      // No tool calls this round: the model produced a final answer.
      if (toolCallsMap.size === 0) {
        subscriber.next({
          type: EventType3.RUN_FINISHED,
          threadId,
          runId
        });
        subscriber.complete();
        return;
      }
      const toolCalls = Array.from(toolCallsMap.entries()).map(
        ([id, data]) => ({
          id,
          name: data.name,
          arguments: data.args
        })
      );
      const assistantMessage = {
        // BUG FIX: was `msg_${Date.now()}`, which collides when two rounds
        // complete within the same millisecond; randomUUID is unique.
        id: `msg_${randomUUID()}`,
        role: "assistant",
        content: this.extractTextFromEvents(events),
        toolCalls: toolCalls.map((tc) => ({
          id: tc.id,
          type: "function",
          function: {
            name: tc.name,
            arguments: tc.arguments
          }
        }))
      };
      messages.push(assistantMessage);
      const toolResults = await this.executeTools(toolCalls, tools);
      messages.push(...this.toolResultsToMessages(toolResults));
    }
    // Loop exhausted without a final answer.
    subscriber.next({
      type: EventType3.RUN_ERROR,
      message: `Maximum tool execution rounds (${this.maxToolRounds}) reached`,
      code: "MAX_TOOL_ROUNDS_EXCEEDED"
    });
    subscriber.complete();
  }
  /** Single streaming call against an OpenAI SDK instance. */
  async runOpenAI(subscriber, input) {
    const { messages, runId, threadId, tools, state, forwardedProps } = input;
    const openai = this.model;
    const openaiMessages = convertMessagesToOpenAI(messages, this.systemPrompt);
    const openaiTools = convertToolsToOpenAI(tools);
    const streamOptions = createOpenAIStreamOptions({
      modelName: this.modelName,
      messages: openaiMessages,
      tools: openaiTools,
      temperature: this.temperature,
      maxTokens: this.maxTokens,
      // Prefer an authenticated user id; fall back to a random one.
      user: state?.__request_context__?.user?.id || forwardedProps?.user || randomUUID()
    });
    const stream = await openai.chat.completions.create(streamOptions);
    // BUG FIX: was `msg_${Date.now()}` — see _run.
    const messageId = `msg_${randomUUID()}`;
    const context = { threadId, runId, messageId };
    for await (const event of processOpenAIStream(stream, context)) {
      subscriber.next(event);
    }
  }
  /** Single streaming call against an Anthropic SDK instance. */
  async runAnthropic(subscriber, input) {
    const { messages, runId, threadId, tools } = input;
    const anthropic = this.model;
    const anthropicMessages = convertMessagesToAnthropic(messages);
    const anthropicTools = convertToolsToAnthropic(tools);
    const streamOptions = createAnthropicStreamOptions({
      modelName: this.modelName,
      messages: anthropicMessages,
      tools: anthropicTools,
      systemPrompt: this.systemPrompt,
      temperature: this.temperature,
      maxTokens: this.maxTokens
    });
    const stream = await anthropic.messages.create(streamOptions);
    // BUG FIX: was `msg_${Date.now()}` — see _run.
    const messageId = `msg_${randomUUID()}`;
    const context = { threadId, runId, messageId };
    for await (const event of processAnthropicStream(stream, context)) {
      subscriber.next(event);
    }
  }
  /**
   * Run model once (single round, no tool execution).
   * When eventCallback is given, every forwarded event is also mirrored to
   * it so _run can reassemble tool calls without a second subscription.
   */
  async runOnce(subscriber, input, eventCallback) {
    const wrappedSubscriber = eventCallback ? {
      next: (event) => {
        subscriber.next(event);
        eventCallback(event);
      }
    } : subscriber;
    if (this.modelType === "openai") {
      await this.runOpenAI(wrappedSubscriber, input);
    } else if (this.modelType === "anthropic") {
      await this.runAnthropic(wrappedSubscriber, input);
    } else {
      throw new Error("Unsupported model provider");
    }
  }
  /**
   * Execute tool calls in parallel. Failures are captured per-call as
   * { result: null, error } instead of rejecting the whole batch.
   */
  async executeTools(toolCalls, tools) {
    const toolsMap = new Map(tools.map((t) => [t.name, t]));
    return Promise.all(
      toolCalls.map(async (toolCall) => {
        if (this.onToolCall) {
          this.onToolCall(toolCall);
        }
        try {
          const tool = toolsMap.get(toolCall.name);
          if (!tool) {
            throw new Error(`Tool not found: ${toolCall.name}`);
          }
          const args = JSON.parse(toolCall.arguments);
          const result = await this.executeToolFunction(tool, args);
          const toolResult = {
            toolCallId: toolCall.id,
            result
          };
          if (this.onToolResult) {
            this.onToolResult(toolResult);
          }
          return toolResult;
        } catch (error) {
          const toolResult = {
            toolCallId: toolCall.id,
            result: null,
            error: error instanceof Error ? error.message : String(error)
          };
          if (this.onToolResult) {
            this.onToolResult(toolResult);
          }
          return toolResult;
        }
      })
    );
  }
  /**
   * Execute tool function; throws when the tool has no execute method.
   */
  async executeToolFunction(tool, args) {
    if (tool.execute) {
      return await tool.execute(args);
    }
    throw new Error(`Tool ${tool.name} does not have an execute method`);
  }
  /**
   * Convert tool results to tool-role messages the next round can consume.
   */
  toolResultsToMessages(results) {
    return results.map(
      (result) => ({
        id: `tool_result_${result.toolCallId}`,
        role: "tool",
        toolCallId: result.toolCallId,
        content: result.error ? `Error: ${result.error}` : JSON.stringify(result.result)
      })
    );
  }
  /**
   * Extract assistant text from a round's events (concatenated
   * TEXT_MESSAGE_CONTENT deltas).
   */
  extractTextFromEvents(events) {
    let text = "";
    for (const event of events) {
      if (event.type === EventType3.TEXT_MESSAGE_CONTENT) {
        text += event.delta;
      }
    }
    return text;
  }
};
662
+ export {
663
+ LLMAgent,
664
+ convertMessagesToAnthropic,
665
+ convertMessagesToOpenAI,
666
+ convertToolsToAnthropic,
667
+ convertToolsToOpenAI,
668
+ createAnthropicStreamOptions,
669
+ createOpenAIStreamOptions,
670
+ processAnthropicStream,
671
+ processOpenAIStream
672
+ };
673
+ //# sourceMappingURL=index.mjs.map