@agentick/ai-sdk 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,761 @@
1
+ /**
2
+ * AI SDK Adapter
3
+ *
4
+ * Wraps Vercel AI SDK models for use with the engine.
5
+ * Supports any LanguageModel from AI SDK providers (OpenAI, Anthropic, Google, etc.)
6
+ *
7
+ * Uses createAdapter for minimal boilerplate - the framework handles:
8
+ * - Stream lifecycle (message_start, content_start/delta/end, message_end)
9
+ * - Content accumulation and ModelOutput construction
10
+ * - Event generation with proper timing and IDs
11
+ */
12
+ import { StopReason,
13
+ // bufferToBase64Source,
14
+ // isUrlString,
15
+ } from "@agentick/shared";
16
+ import { createAdapter, } from "@agentick/core/model";
17
+ import {} from "@agentick/core";
18
+ import { Logger } from "@agentick/core";
19
+ import { mergeDeep } from "@agentick/shared/utils";
20
+ import { generateText, streamText, jsonSchema, } from "ai";
21
+ const logger = Logger.for("AiSdkAdapter");
22
+ // ============================================================================
23
+ // Stop Reason Mapping
24
+ // ============================================================================
25
/**
 * Translate an AI SDK `finishReason` string into the engine's StopReason enum.
 * Any value not in the table (or a missing value) maps to StopReason.UNSPECIFIED.
 *
 * @param {string} reason - Finish reason reported by the AI SDK.
 * @returns {StopReason} The engine-level stop reason.
 */
export function toStopReason(reason) {
    const table = {
        "length": StopReason.MAX_TOKENS,
        "other": StopReason.OTHER,
        "stop": StopReason.STOP,
        "content-filter": StopReason.CONTENT_FILTER,
        "tool-calls": StopReason.TOOL_USE,
        "error": StopReason.ERROR,
    };
    // Object.hasOwn guards against inherited keys (e.g. reason === "toString").
    return Object.hasOwn(table, reason) ? table[reason] : StopReason.UNSPECIFIED;
}
43
+ // ============================================================================
44
+ // Tool Conversion
45
+ // ============================================================================
46
/**
 * Convert ModelToolReference[] to AI SDK ToolSet format.
 * Tools are passed as definitions only - engine handles execution, so no
 * `execute` function is attached to any entry.
 *
 * @param {Array|undefined} tools - Tool references: strings (skipped),
 *   ExecutableTools (have `metadata` + `run`), or plain ToolDefinitions
 *   (have `name` + `input`).
 * @returns {object} AI SDK ToolSet keyed by tool name.
 */
export function convertToolsToToolSet(tools) {
    const toolSet = {};
    if (!tools?.length) {
        return toolSet;
    }
    for (const toolRef of tools) {
        if (typeof toolRef === "string") {
            // String references need a registry to resolve; none is available here.
            logger.warn(`🚨 Tool reference ${toolRef} is a string, skipping`);
            continue;
        }
        const isExecutableTool = "metadata" in toolRef && "run" in toolRef;
        const isToolDefinition = "name" in toolRef && "input" in toolRef;
        if (isExecutableTool) {
            // ExecutableTool: definition lives under `metadata`; input is
            // already a zod schema so it is passed through as-is.
            const meta = toolRef.metadata;
            const libraryOptions = meta?.libraryOptions || {};
            const libraryProviderOptions = libraryOptions["ai-sdk"]?.providerOptions || {};
            const providerOptions = mergeDeep({}, meta.providerOptions || {}, libraryProviderOptions || {});
            toolSet[meta.name] = {
                description: meta.description || "",
                inputSchema: meta.input, // zod schema already
                ...libraryOptions,
                providerOptions,
                // No execute - engine handles execution
            };
        }
        else if (isToolDefinition) {
            // ToolDefinition: input is a JSON schema and must be wrapped for the AI SDK.
            const libraryOptions = toolRef.libraryOptions || {};
            const libraryProviderOptions = libraryOptions["ai-sdk"]?.providerOptions || {};
            const providerOptions = mergeDeep({}, toolRef.providerOptions || {}, libraryProviderOptions || {});
            toolSet[toolRef.name] = {
                description: toolRef.description || "",
                inputSchema: jsonSchema(toolRef.input || {}),
                ...libraryOptions,
                providerOptions,
                // No execute - engine handles execution
            };
        }
    }
    return toolSet;
}
92
+ // ============================================================================
93
+ // Factory Function
94
+ // ============================================================================
95
+ /**
96
+ * Create an AI SDK adapter for use with the engine.
97
+ *
98
+ * @example
99
+ * ```typescript
100
+ * import { openai } from '@ai-sdk/openai';
101
+ *
102
+ * const model = createAiSdkModel({
103
+ * model: openai('gpt-4o'),
104
+ * temperature: 0.7,
105
+ * });
106
+ *
107
+ * // As JSX
108
+ * <model><MyAgent /></model>
109
+ *
110
+ * // With createApp
111
+ * const app = createApp(MyAgent, { model });
112
+ * ```
113
+ */
114
export function createAiSdkModel(config) {
    // Split config into: the wrapped AI SDK model, adapter-level defaults for
    // system prompt and tools, and everything else (temperature, maxTokens, ...)
    // which becomes per-call fallback parameters.
    const { model, system: defaultSystem, tools: defaultTools, ...defaultParams } = config;
    return createAdapter({
        metadata: {
            id: `ai-sdk:${model.modelId || "unknown"}`,
            provider: model.provider || "ai-sdk",
            type: "language",
            capabilities: [
                { stream: true, toolCalls: true },
                {
                    // Dynamic function that inspects the underlying model
                    messageTransformation: (modelId, provider) => {
                        // Determine renderer based on provider/model
                        // Anthropic/Claude models work best with XML structure
                        const isAnthropic = provider === "anthropic" || modelId.toLowerCase().includes("claude");
                        const preferredRenderer = isAnthropic ? "xml" : "markdown";
                        // Determine role mapping based on provider/model
                        const supportsDeveloper = provider === "anthropic" ||
                            (provider === "openai" &&
                                (modelId.startsWith("gpt-4") ||
                                    modelId.startsWith("o1") ||
                                    modelId.startsWith("gpt-5")));
                        return {
                            preferredRenderer,
                            roleMapping: {
                                event: supportsDeveloper ? "developer" : "user",
                                ephemeral: supportsDeveloper ? "developer" : "user",
                            },
                            delimiters: {
                                useDelimiters: !supportsDeveloper, // Only use delimiters if no developer role
                                event: "[Event]",
                                ephemeral: "[Context]",
                            },
                            ephemeralPosition: "flow",
                        };
                    },
                },
            ],
        },
        // =========================================================================
        // prepareInput: ModelInput → AI SDK format
        // =========================================================================
        prepareInput: (input) => {
            const { libraryOptions = {}, providerOptions = {}, ...params } = input;
            const sdkOptions = libraryOptions || {};
            const { tools: adapterTools, system: adapterSystem, ...restOfLibraryOptions } = sdkOptions;
            // Ensure messages is Message[]
            const messages = Array.isArray(params.messages)
                ? params.messages.filter((m) => typeof m !== "string")
                : [];
            const aiSdkMessages = toAiSdkMessages(messages, adapterSystem, defaultSystem);
            // Merge tools: default -> adapter -> input
            const inputToolSet = convertToolsToToolSet(params.tools);
            const mergedTools = {
                ...defaultTools,
                ...(adapterTools || {}),
                ...inputToolSet,
            };
            // Map responseFormat to AI SDK options
            let outputMode;
            let outputSchema;
            // Provider option precedence: factory defaults < call input < libraryOptions.
            const mergedProviderOptions = {
                ...defaultParams.providerOptions,
                ...providerOptions,
                ...(sdkOptions.providerOptions || {}),
            };
            if (params.responseFormat) {
                const rf = params.responseFormat;
                if (rf.type === "json") {
                    // Use providerOptions to request JSON mode
                    mergedProviderOptions.response_format = { type: "json_object" };
                }
                else if (rf.type === "json_schema") {
                    outputMode = "object";
                    outputSchema = jsonSchema(rf.schema);
                }
            }
            const result = {
                model,
                // AI SDK rejects an empty tool set; omit instead.
                tools: Object.keys(mergedTools).length > 0 ? mergedTools : undefined,
                messages: aiSdkMessages,
                // ?? keeps explicit 0 values from the caller (|| would drop them).
                temperature: params.temperature ?? defaultParams.temperature,
                maxOutputTokens: params.maxTokens ?? defaultParams.maxTokens,
                topP: params.topP ?? defaultParams.topP,
                frequencyPenalty: params.frequencyPenalty ?? defaultParams.frequencyPenalty,
                presencePenalty: params.presencePenalty ?? defaultParams.presencePenalty,
                ...restOfLibraryOptions,
                providerOptions: mergedProviderOptions,
            };
            if (outputMode === "object" && outputSchema) {
                // NOTE(review): some AI SDK versions expose structured output as
                // `experimental_output` rather than `output`/`schema` — confirm
                // against the pinned "ai" package version.
                result.output = "object";
                result.schema = outputSchema;
            }
            return result;
        },
        // =========================================================================
        // mapChunk: AI SDK chunk → AdapterDelta (~50 lines vs 240 lines)
        // The framework handles lifecycle (content_start/end) automatically
        // =========================================================================
        mapChunk: (chunk) => {
            switch (chunk.type) {
                // Text content
                case "text-delta":
                    return { type: "text", delta: chunk.text || "" };
                // Reasoning/thinking
                case "reasoning-delta":
                    return { type: "reasoning", delta: chunk.text || "" };
                // Tool calls (streamed)
                case "tool-input-start":
                    return { type: "tool_call_start", id: chunk.id || "", name: chunk.toolName || "" };
                case "tool-input-delta":
                    return { type: "tool_call_delta", id: chunk.id || "", delta: chunk.delta || "" };
                case "tool-input-end":
                    // input stays undefined here; the complete "tool-call" chunk carries it.
                    return { type: "tool_call_end", id: chunk.id || "", input: undefined };
                // Tool call (complete)
                case "tool-call":
                    return {
                        type: "tool_call",
                        id: chunk.toolCallId,
                        name: chunk.toolName,
                        // args (older SDK) vs input (newer SDK) — accept either.
                        input: chunk.args || chunk.input || {},
                    };
                // Message lifecycle
                case "start":
                    return { type: "message_start" };
                case "finish": {
                    const tu = chunk.totalUsage;
                    // Field names differ across SDK versions (inputTokens vs promptTokens).
                    const inTokens = tu?.inputTokens ?? tu?.promptTokens ?? 0;
                    const outTokens = tu?.outputTokens ?? tu?.completionTokens ?? 0;
                    const totalTokens = tu?.totalTokens ?? inTokens + outTokens;
                    return {
                        type: "message_end",
                        stopReason: toStopReason(chunk.finishReason),
                        usage: tu
                            ? {
                                inputTokens: inTokens,
                                outputTokens: outTokens,
                                totalTokens,
                                ...(tu.reasoningTokens !== undefined && { reasoningTokens: tu.reasoningTokens }),
                                ...(tu.cachedInputTokens !== undefined && {
                                    cachedInputTokens: tu.cachedInputTokens,
                                }),
                            }
                            : undefined,
                    };
                }
                // Errors
                case "abort":
                    return { type: "error", error: "Stream aborted", code: "abort" };
                case "error":
                    return {
                        type: "error",
                        error: chunk.error?.message || "Stream error",
                        code: "stream_error",
                    };
                // Pass through as raw - sources, files, steps
                case "source":
                case "file":
                case "start-step":
                case "finish-step":
                case "tool-result":
                case "tool-error":
                case "raw":
                    return { type: "raw", data: chunk };
                // Lifecycle events we don't need (handled by framework)
                case "text-start":
                case "text-end":
                case "reasoning-start":
                case "reasoning-end":
                    return null;
                default:
                    // Unknown chunk type - pass through as raw
                    return { type: "raw", data: chunk };
            }
        },
        // =========================================================================
        // processOutput: Non-streaming result → ModelOutput
        // =========================================================================
        processOutput: (output) => {
            const messages = fromAiSdkMessages(output.response.messages) ?? [];
            return {
                messages,
                // Lazy getter: last assistant message, computed on access.
                get message() {
                    return messages.filter((msg) => msg.role === "assistant").at(-1);
                },
                usage: {
                    inputTokens: output.usage?.inputTokens ?? 0,
                    outputTokens: output.usage?.outputTokens ?? 0,
                    totalTokens: output.usage?.totalTokens ?? 0,
                    reasoningTokens: output.usage?.reasoningTokens ?? 0,
                    cachedInputTokens: output.usage?.cachedInputTokens ?? 0,
                },
                toolCalls: output.toolCalls?.map((toolCall) => {
                    return {
                        id: toolCall.toolCallId,
                        name: toolCall.toolName,
                        input: toolCall.args || toolCall.input || {},
                        metadata: toolCall.providerMetadata,
                        executedBy: toolCall.providerExecuted ? "provider" : undefined,
                    };
                }) || [],
                stopReason: toStopReason(output.finishReason),
                model: output.response.modelId,
                createdAt: output.response.timestamp.toISOString(),
                raw: output,
            };
        },
        // =========================================================================
        // Executors
        // =========================================================================
        execute: (params) => {
            logger.info({ params }, "execute");
            return generateText(params);
        },
        executeStream: (params) => {
            logger.info({ params }, "executeStream");
            return streamText(params).fullStream;
        },
        reconstructRaw: (accumulated) => {
            // Reconstruct a GenerateTextResult-like object from streaming data
            // This provides a consistent format regardless of streaming vs non-streaming
            // Build tool calls in AI SDK format (with proper type field and input instead of args)
            const toolCalls = accumulated.toolCalls.map((tc) => ({
                type: "tool-call",
                toolCallId: tc.id,
                toolName: tc.name,
                input: tc.input,
            }));
            // Build response messages
            const content = [];
            if (accumulated.text) {
                content.push({ type: "text", text: accumulated.text });
            }
            if (accumulated.reasoning) {
                content.push({ type: "reasoning", text: accumulated.reasoning });
            }
            for (const tc of toolCalls) {
                content.push({
                    type: "tool-call",
                    toolCallId: tc.toolCallId,
                    toolName: tc.toolName,
                    input: tc.input,
                });
            }
            // Map internal stop reason to AI SDK FinishReason
            const finishReason = (() => {
                switch (accumulated.stopReason) {
                    case StopReason.STOP:
                        return "stop";
                    case StopReason.MAX_TOKENS:
                        return "length";
                    case StopReason.TOOL_USE:
                        return "tool-calls";
                    case StopReason.CONTENT_FILTER:
                        return "content-filter";
                    case StopReason.ERROR:
                        return "error";
                    default:
                        return "stop";
                }
            })();
            // Reconstruct the GenerateTextResult format
            const reconstructed = {
                text: accumulated.text || "",
                toolCalls: toolCalls.length > 0 ? toolCalls : [],
                finishReason,
                usage: {
                    inputTokens: accumulated.usage.inputTokens,
                    outputTokens: accumulated.usage.outputTokens,
                    totalTokens: accumulated.usage.totalTokens,
                },
                response: {
                    // Synthetic response id; streaming does not carry one through.
                    id: `gen-${Date.now()}`,
                    modelId: accumulated.model,
                    timestamp: new Date(),
                    messages: [
                        {
                            role: "assistant",
                            content,
                        },
                    ],
                },
            };
            return reconstructed;
        },
    });
}
401
/**
 * Shorthand factory for creating an AI SDK adapter.
 * Thin alias for {@link createAiSdkModel}; accepts the same config object
 * (`model` plus optional `system`, `tools`, and default generation params).
 *
 * @example
 * ```typescript
 * import { openai } from '@ai-sdk/openai';
 *
 * const model = aiSdk({ model: openai('gpt-4o') });
 * ```
 */
export function aiSdk(config) {
    return createAiSdkModel(config);
}
414
+ // ============================================================================
415
+ // Message Conversion
416
+ // ============================================================================
417
/**
 * Convert engine messages into AI SDK ModelMessage[] form.
 *
 * System messages are extracted (last one wins) and re-inserted at the front;
 * tool messages carry only their tool_result blocks; user/assistant/event
 * messages have their content blocks converted, dropping empty messages.
 *
 * @param {Array} messages - Engine messages.
 * @param {string} [adapterSystemPrompt=""] - System prompt from library options.
 * @param {string} [defaultSystem] - Factory-level default system prompt.
 * @returns {Array} AI SDK messages, system message first when present.
 */
export function toAiSdkMessages(messages, adapterSystemPrompt = "", defaultSystem) {
    const result = [];
    let systemText;
    for (const msg of messages) {
        switch (msg.role) {
            case "system": {
                // Join all text blocks; a later system message replaces an earlier one.
                const parts = [];
                for (const block of msg.content) {
                    if (block.type === "text") {
                        parts.push(block.text);
                    }
                }
                systemText = parts.join("\n\n");
                break;
            }
            case "tool": {
                // Only tool_result blocks are meaningful on a tool-role message.
                const toolResults = [];
                for (const block of msg.content) {
                    if (block.type !== "tool_result") {
                        continue;
                    }
                    toolResults.push({
                        type: "tool-result",
                        toolCallId: block.toolUseId,
                        toolName: block.name || "unknown",
                        output: mapToolResultContent(block.content, block.isError),
                    });
                }
                if (toolResults.length > 0) {
                    result.push({ role: "tool", content: toolResults });
                }
                break;
            }
            default: {
                // By this point, fromEngineState should have transformed 'event' to
                // 'user' and interleaved ephemeral content as regular messages; this
                // mapping is a safety fallback when the adapter is used directly.
                const role = msg.role === "event" ? "user" : msg.role;
                if (role !== "user" && role !== "assistant") {
                    break;
                }
                const converted = mapContentBlocksToAiSdkContent(msg.content);
                // Skip messages with empty content - these confuse the model.
                if (converted.length > 0) {
                    result.push({ role, content: converted });
                }
                break;
            }
        }
    }
    // Precedence: in-conversation system > adapter option > factory default.
    const system = systemText || adapterSystemPrompt || defaultSystem;
    if (system) {
        result.unshift({ role: "system", content: system });
    }
    return result;
}
471
/**
 * Convert tool result content blocks to AI SDK LanguageModelV2ToolResultOutput format.
 *
 * The output must be one of:
 * - { type: 'text', value: string }
 * - { type: 'json', value: JSONValue }
 * - { type: 'error-text', value: string }
 * - { type: 'error-json', value: JSONValue }
 * - { type: 'content', value: Array<{ type: 'text', text: string } | { type: 'media', data: string, mediaType: string }> }
 *
 * @param {Array|undefined} content - Engine tool_result content blocks.
 * @param {boolean|undefined} isError - Whether the tool invocation failed.
 * @returns {object} One of the output shapes listed above.
 */
export function mapToolResultContent(content, isError) {
    // No content at all: synthesize a generic status message.
    if (!content || content.length === 0) {
        return isError
            ? { type: "error-text", value: "Tool execution failed" }
            : { type: "text", value: "Tool execution succeeded" };
    }
    // Single text block
    if (content.length === 1 && content[0].type === "text") {
        const text = content[0].text;
        return isError
            ? { type: "error-text", value: text }
            : { type: "text", value: text };
    }
    // Single JSON block
    if (content.length === 1 && content[0].type === "json") {
        const jsonBlock = content[0];
        let data = jsonBlock.data;
        if (data === undefined || data === null) {
            // Fix: previously this called JSON.parse(jsonBlock.text) unconditionally,
            // which threw a SyntaxError when `text` was missing or malformed
            // (JSON.parse(undefined) parses the string "undefined"). Degrade to a
            // text result instead of crashing the whole message conversion.
            try {
                data = JSON.parse(jsonBlock.text);
            }
            catch {
                const fallback = jsonBlock.text ?? "";
                return isError
                    ? { type: "error-text", value: fallback }
                    : { type: "text", value: fallback };
            }
        }
        return isError
            ? { type: "error-json", value: data }
            : { type: "json", value: data };
    }
    // Multiple blocks → use 'content' type with array
    const value = content
        .map((block) => {
            if (block.type === "text") {
                // Skip empty text blocks to avoid AI SDK validation errors
                if (!block.text)
                    return null;
                return { type: "text", text: block.text };
            }
            else if (block.type === "json") {
                // JSON blocks can have either data (object) or text (string)
                const jsonText = block.text || JSON.stringify(block.data, null, 2);
                // Skip if both are empty/undefined
                if (!jsonText)
                    return null;
                return { type: "text", text: jsonText };
            }
            else if (block.type === "image") {
                const source = block.source;
                if (source.type === "base64") {
                    return {
                        type: "media",
                        data: source.data,
                        mediaType: block.mimeType || "image/png",
                    };
                }
                else if (source.type === "url") {
                    // URL sources can't be inlined as media; pass the URL as text.
                    return { type: "text", text: source.url };
                }
                else if (source.type === "s3") {
                    return {
                        type: "text",
                        text: `s3://${source.bucket}/${source.key}`,
                    };
                }
                else if (source.type === "gcs") {
                    return {
                        type: "text",
                        text: `gs://${source.bucket}/${source.object}`,
                    };
                }
                // file_id source fallback to text
                return {
                    type: "text",
                    text: `file_id:${source.fileId}`,
                };
            }
            // Fallback: serialize as text
            return { type: "text", text: JSON.stringify(block) };
        })
        .filter((item) => item !== null);
    return { type: "content", value };
}
557
/**
 * Convert AI SDK response messages back into engine messages.
 * Messages whose converted content is empty are dropped; no fake empty
 * assistant messages are produced for an empty or missing input.
 *
 * @param {Array|undefined} messages - AI SDK response messages.
 * @returns {Array} Engine messages with non-empty content.
 */
export function fromAiSdkMessages(messages) {
    if (!messages?.length) {
        return [];
    }
    const converted = [];
    for (const msg of messages) {
        const blocks = mapAiSdkContentToContentBlocks(msg.content);
        // Only keep messages that still have content after conversion.
        if (blocks.length > 0) {
            converted.push({ role: msg.role, content: blocks });
        }
    }
    return converted;
}
568
+ // ============================================================================
569
+ // Content Block Conversion: Engine → AI SDK
570
+ // ============================================================================
571
/**
 * Convert a list of engine content blocks to AI SDK parts,
 * dropping blocks that have no AI SDK representation.
 *
 * @param {Array} content - Engine content blocks.
 * @returns {Array} AI SDK content parts.
 */
export function mapContentBlocksToAiSdkContent(content) {
    const parts = [];
    for (const block of content) {
        const part = mapContentBlockToAiSdkPart(block);
        if (part !== undefined) {
            parts.push(part);
        }
    }
    return parts;
}
576
/**
 * Convert one engine content block to its AI SDK part equivalent.
 *
 * - text/reasoning map 1:1
 * - image maps to an AI SDK "image" part (url or base64 sources only)
 * - document/audio/video map to "file" parts (url or base64 sources only)
 * - tool_use/tool_result map to tool-call/tool-result parts
 * - unknown block types are serialized to a text part
 *
 * @param {object} block - Engine content block.
 * @returns {object|undefined} AI SDK part, or undefined when the source type
 *   (e.g. s3/gcs/file_id media) has no AI SDK representation.
 */
export function mapContentBlockToAiSdkPart(block) {
    // Shared conversion for media kinds that become AI SDK "file" parts.
    const toFilePart = (source, mediaType) => {
        if (source.type === "url") {
            return { type: "file", data: source.url, mediaType };
        }
        if (source.type === "base64") {
            return { type: "file", data: source.data, mediaType };
        }
        return undefined;
    };
    switch (block.type) {
        case "text":
            return { type: "text", text: block.text };
        case "reasoning":
            return { type: "reasoning", text: block.text };
        case "image": {
            const { source, mimeType } = block;
            if (source.type === "url") {
                return { type: "image", image: source.url, mediaType: mimeType };
            }
            if (source.type === "base64") {
                return { type: "image", image: source.data, mediaType: mimeType };
            }
            return undefined;
        }
        case "document":
        case "audio":
        case "video":
            return toFilePart(block.source, block.mimeType);
        case "tool_use":
            return {
                type: "tool-call",
                toolCallId: block.toolUseId,
                toolName: block.name,
                input: block.input,
            };
        case "tool_result":
            return {
                type: "tool-result",
                toolCallId: block.toolUseId,
                toolName: block.name || "unknown",
                output: mapToolResultContent(block.content, block.isError),
            };
        default:
            // Fallback: serialize as text
            return { type: "text", text: JSON.stringify(block) };
    }
}
684
+ // ============================================================================
685
+ // Content Block Conversion: AI SDK → Engine
686
+ // ============================================================================
687
/**
 * Convert AI SDK message content into engine content blocks.
 * Accepts a plain string (one text block), an array of parts (converted
 * individually, unconvertible parts dropped), or anything else (empty result).
 *
 * @param {string|Array|*} content - AI SDK message content.
 * @returns {Array} Engine content blocks.
 */
export function mapAiSdkContentToContentBlocks(content) {
    if (typeof content === "string") {
        return [{ type: "text", text: content }];
    }
    if (!Array.isArray(content)) {
        return [];
    }
    const blocks = [];
    for (const part of content) {
        const block = mapAiSdkPartToContentBlock(part);
        if (block !== undefined) {
            blocks.push(block);
        }
    }
    return blocks;
}
698
/**
 * Convert one AI SDK content part into an engine content block.
 * Strings become text blocks; text/reasoning/tool-call/tool-result parts are
 * mapped structurally; anything else yields undefined (dropped by callers).
 *
 * @param {string|object} part - AI SDK content part.
 * @returns {object|undefined} Engine content block, or undefined if unmappable.
 */
export function mapAiSdkPartToContentBlock(part) {
    if (typeof part === "string") {
        return { type: "text", text: part };
    }
    if (!("type" in part)) {
        return undefined;
    }
    switch (part.type) {
        case "text":
            return { type: "text", text: part.text };
        case "reasoning":
            return { type: "reasoning", text: part.text };
        case "tool-call":
            return {
                type: "tool_use",
                toolUseId: part.toolCallId,
                name: part.toolName,
                // args (older SDK) vs input (newer SDK) — accept either.
                input: (part.args || part.input),
            };
        case "tool-result": {
            const toolResultPart = part;
            const output = toolResultPart.output;
            // Fix: the previous `a && b && c`-chain could yield undefined instead of
            // a boolean, and `"type" in output` threw a TypeError when output was a
            // truthy primitive (e.g. a bare string). Coerce and guard explicitly.
            const isError = Boolean(output &&
                typeof output === "object" &&
                typeof output.type === "string" &&
                output.type.startsWith("error"));
            return {
                type: "tool_result",
                toolUseId: toolResultPart.toolCallId,
                name: toolResultPart.toolName || "unknown",
                content: mapToolResultOutputToContentBlocks(output),
                isError,
            };
        }
        default:
            return undefined;
    }
}
733
/**
 * Convert an AI SDK tool-result output object back into engine content blocks.
 * text/error-text → one text block; json/error-json → one json block;
 * content arrays are expanded item by item (media items become base64 image
 * blocks); anything unrecognized is stringified into a text block.
 *
 * @param {object|*} output - AI SDK LanguageModelV2ToolResultOutput value.
 * @returns {Array} Engine content blocks.
 */
function mapToolResultOutputToContentBlocks(output) {
    // Primitives (or null/undefined) are rendered via String().
    if (!output || typeof output !== "object") {
        return [{ type: "text", text: String(output) }];
    }
    switch (output.type) {
        case "text":
        case "error-text":
            return [{ type: "text", text: output.value }];
        case "json":
        case "error-json":
            return [{ type: "json", data: output.value, text: "" }];
        case "content": {
            const blocks = [];
            for (const item of output.value) {
                if (item.type === "text") {
                    blocks.push({ type: "text", text: item.text });
                }
                else {
                    // media item → base64-sourced image block
                    blocks.push({
                        type: "image",
                        mimeType: item.mediaType,
                        source: { type: "base64", data: item.data, mimeType: item.mediaType },
                    });
                }
            }
            return blocks;
        }
        default:
            return [{ type: "text", text: JSON.stringify(output) }];
    }
}
761
+ //# sourceMappingURL=adapter.js.map