@agentick/ai-sdk 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,704 @@
/**
 * ============================================================================
 * AI SDK Compiler Adapter
 * ============================================================================
 *
 * This adapter provides progressive adoption for AI SDK users who want to use
 * our JSX compilation without fully committing to our Engine.
 *
 * Direction of adaptation: Engine → ai-sdk
 * (For the ai-sdk → Engine direction, use adapter.ts directly.)
 *
 * ============================================================================
 * PROGRESSIVE ADOPTION LEVELS
 * ============================================================================
 *
 * Level 1: compile() only
 * Returns library-native input. The user calls generateText themselves.
 *
 * @example
 * ```typescript
 * import { compile } from '@tentickle/ai-sdk';
 * import { generateText } from 'ai';
 * import { openai } from '@ai-sdk/openai';
 *
 * const compiled = await compile(<MyAgent />);
 *
 * const result = await generateText({
 *     model: compiled.model ?? openai('gpt-4o'),
 *     messages: compiled.messages,
 *     tools: compiled.tools,
 *     system: compiled.system,
 * });
 * ```
 *
 * Level 2: run() with executor
 * The user controls model execution; we handle the tick loop.
 *
 * @example
 * ```typescript
 * import { createCompiler } from '@tentickle/ai-sdk';
 * import { generateText } from 'ai';
 * import { openai } from '@ai-sdk/openai';
 *
 * const compiler = createCompiler();
 *
 * const result = await compiler.run(<MyAgent />, async (input) => {
 *     return await generateText({
 *         model: openai('gpt-4o'),
 *         ...input,
 *     });
 * });
 * ```
 *
 * Level 3: run() / stream() - managed execution
 * We handle everything. The model comes from a <Model> component or config.
 *
 * @example
 * ```typescript
 * import { createCompiler } from '@tentickle/ai-sdk';
 * import { openai } from '@ai-sdk/openai';
 *
 * const compiler = createCompiler({ model: openai('gpt-4o') });
 *
 * const result = await compiler.run(<MyAgent />);
 *
 * // Or streaming (stream() yields typed events, not bare chunks):
 * for await (const event of compiler.stream(<MyAgent />)) {
 *     if (event.type === 'chunk' && event.chunk?.type === 'text-delta') {
 *         process.stdout.write(event.chunk.textDelta ?? '');
 *     }
 * }
 * ```
 *
 * Level 4: generateText() / streamText() - mirror the library API
 * Same API as ai-sdk, but with JSX as the first argument.
 *
 * @example
 * ```typescript
 * import { generateText, streamText } from '@tentickle/ai-sdk';
 * import { openai } from '@ai-sdk/openai';
 *
 * // compile() is run internally
 *
 * // Exact same return type as ai-sdk's generateText
 * const result = await generateText(<MyAgent />, {
 *     temperature: 0.8,
 * });
 *
 * // Exact same return type as ai-sdk's streamText
 * const { fullStream, text } = streamText(<MyAgent />);
 * for await (const chunk of fullStream) {
 *     // Native ai-sdk chunks
 * }
 * ```
 *
 * ============================================================================
 * COMPONENT PORTABILITY
 * ============================================================================
 *
 * All adapter packages export the same component names:
 * - Model: configure the model declaratively
 * - Tool: define tools in JSX
 * - Message, System, User, Assistant: message components
 *
 * Switch adapters without changing agent code (see the sketch below).
 *
 * ============================================================================
 */
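/**
 * A sketch of a portable agent, to make the component contract above concrete.
 * This is an illustrative assumption about authoring style, not part of this
 * module's API; see the component docs for the real props.
 *
 * ```typescript
 * import { System, User } from '@tentickle/ai-sdk';
 *
 * function MyAgent({ question }: { question: string }) {
 *     return (
 *         <>
 *             <System>You are a helpful assistant.</System>
 *             <User>{question}</User>
 *         </>
 *     );
 * }
 * ```
 *
 * Because every adapter exports the same component names, moving this agent to
 * another adapter package is an import change only.
 */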
import { Runtime } from "@tentickle/core";
import { generateText as aiSdkGenerateText, streamText as aiSdkStreamText } from "ai";
import { EventEmitter } from "node:events";
import { randomUUID } from "node:crypto";
import { createAiSdkModel, aiSdkMessagesToEngineInput, toAiSdkCompiledInput, fromAiSdkMessages } from "./adapter";
import { AbortError, ValidationError } from "@tentickle/shared";
// ============================================================================
// Conversion Utilities
// ============================================================================
// Use adapter functions for conversions - see adapter.ts for implementations:
// - aiSdkMessagesToEngineInput: AI SDK messages → EngineInput
// - toAiSdkCompiledInput: TickResult → CompiledInput
/**
 * Convert compiled output to library-native CompiledInput.
 * Wraps the adapter function with the CompiledInput interface.
 */
function toCompiledInput(compiled, tick, extractedModel) {
    return toAiSdkCompiledInput(compiled.formatted, compiled.tools, tick, extractedModel);
}
/**
 * Convert an ai-sdk result to TickResultInput for state ingestion.
 * Uses the adapter's fromAiSdkMessages for content conversion.
 */
function toTickResultInput(result) {
    // Use the adapter's conversion for response messages
    const messages = fromAiSdkMessages(result.response?.messages);
    // Find the assistant message
    const assistantMessage = messages.find((m) => m.role === "assistant");
    // Build timeline entries from converted messages
    const newTimelineEntries = assistantMessage
        ? [
            {
                kind: "message",
                message: assistantMessage,
            },
        ]
        : undefined;
    const response = {
        shouldStop: !result.toolCalls || result.toolCalls.length === 0,
        newTimelineEntries,
        toolCalls: result.toolCalls?.map((tc) => ({
            id: tc.toolCallId,
            name: tc.toolName,
            input: tc.args || {},
        })),
    };
    return { response };
}
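/**
 * A worked example of the conversion above (shapes are a sketch based only on
 * the fields this function reads, not the full ai-sdk result type):
 *
 * ```typescript
 * // Given an ai-sdk result like:
 * //   { response: { messages: [assistantMessage, ...] },
 * //     toolCalls: [{ toolCallId: 'call_1', toolName: 'search', args: { q: 'x' } }] }
 * //
 * // toTickResultInput produces:
 * //   { response: {
 * //       shouldStop: false, // false because tool calls are pending
 * //       newTimelineEntries: [{ kind: 'message', message: convertedAssistantMessage }],
 * //       toolCalls: [{ id: 'call_1', name: 'search', input: { q: 'x' } }],
 * //   } }
 * ```
 */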
// ============================================================================
// Process Handle (for Fork/Spawn support)
// ============================================================================
class ProcessHandle {
    pid;
    rootPid;
    type;
    parentPid;
    _status = "pending";
    _result;
    _events = new EventEmitter();
    _completionPromise;
    _resolve;
    _reject;
    _abortController = new AbortController();
    constructor(type, parentPid, rootPid) {
        this.pid = `aisdk-${type}-${randomUUID().slice(0, 8)}`;
        this.type = type;
        this.parentPid = parentPid;
        this.rootPid = rootPid || this.pid;
        this._completionPromise = new Promise((resolve, reject) => {
            this._resolve = resolve;
            this._reject = reject;
        });
    }
    get status() {
        return this._status;
    }
    get result() {
        return this._result;
    }
    get tick() {
        return 0;
    }
    start() {
        this._status = "running";
        this._events.emit("start");
    }
    complete(result) {
        this._status = "completed";
        this._result = result;
        this._events.emit("complete", result);
        this._resolve(result);
    }
    fail(error) {
        this._status = "failed";
        this._events.emit("error", error);
        this._reject(error);
    }
    cancel() {
        if (this._status === "running" || this._status === "pending") {
            this._status = "cancelled";
            this._abortController.abort();
            this._events.emit("cancelled");
            this._reject(new AbortError("Execution cancelled"));
        }
    }
    getCancelSignal() {
        return this._abortController.signal;
    }
    waitForCompletion() {
        return this._completionPromise;
    }
    on(event, handler) {
        this._events.on(event, handler);
        return this;
    }
    off(event, handler) {
        this._events.off(event, handler);
        return this;
    }
}
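/**
 * Lifecycle sketch for ProcessHandle (internal class, shown for orientation;
 * in practice handles are created by the fork/spawn process methods below,
 * and `someResult` is a placeholder):
 *
 * ```typescript
 * const handle = new ProcessHandle('fork');
 * handle.on('complete', (result) => console.log(handle.pid, 'done'));
 * handle.start();                  // status: 'running'
 * handle.complete(someResult);     // status: 'completed', resolves the promise
 * await handle.waitForCompletion();
 * // cancel() (while pending/running) aborts getCancelSignal() and rejects
 * // waitForCompletion() with an AbortError.
 * ```
 */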
// ============================================================================
// Standalone compile() function (Level 1)
// ============================================================================
/**
 * Compile JSX to library-native input.
 *
 * This is the simplest entry point. You get back messages, tools, and system
 * in ai-sdk format, ready to pass to generateText/streamText.
 *
 * @example
 * ```typescript
 * import { compile } from '@tentickle/ai-sdk';
 * import { generateText } from 'ai';
 * import { openai } from '@ai-sdk/openai';
 *
 * const { messages, tools, system, model } = await compile(<MyAgent />);
 *
 * const result = await generateText({
 *     model: model ?? openai('gpt-4o'),
 *     messages,
 *     tools,
 *     system,
 * });
 * ```
 */
export async function compile(jsx, initialMessages) {
    const service = new Runtime();
    const engineInput = aiSdkMessagesToEngineInput(initialMessages);
    const { formatted } = await service.compile(jsx, engineInput);
    // Extract model from COM if a <Model> component was used
    // TODO: Need to expose model extraction from COM
    const extractedModel = undefined; // com.getModel()?.raw as LanguageModel | undefined;
    const compiled = {
        compiled: {},
        formatted,
        tools: service.getTools(),
        shouldStop: false,
    };
    return toCompiledInput(compiled, 1, extractedModel);
}
// ============================================================================
// Compiler Class (Levels 2-4)
// ============================================================================
/**
 * AI SDK Compiler.
 *
 * Provides progressive adoption from simple compilation to full execution management.
 *
 * @example Level 2: User-controlled execution
 * ```typescript
 * const compiler = createCompiler();
 *
 * const result = await compiler.run(<MyAgent />, async (input) => {
 *     return await generateText({ model: openai('gpt-4o'), ...input });
 * });
 * ```
 *
 * @example Level 3: Managed execution
 * ```typescript
 * const compiler = createCompiler({ model: openai('gpt-4o') });
 * const result = await compiler.run(<MyAgent />);
 * ```
 *
 * @example Level 4: Library-mirroring API (standalone functions, not methods)
 * ```typescript
 * import { generateText } from '@tentickle/ai-sdk';
 *
 * const result = await generateText(<MyAgent />, {
 *     model: openai('gpt-4o'),
 *     temperature: 0.8,
 * });
 * ```
 */
export class AiSdkCompiler {
    service;
    defaultModel;
    defaultOptions;
    maxTicks;
    executions = new Map();
    currentExecutor;
    constructor(config = {}) {
        this.defaultModel = config.model;
        this.defaultOptions = {
            temperature: config.temperature,
            maxOutputTokens: config.maxTokens,
        };
        this.maxTicks = config.maxTicks ?? 10;
        // Create process methods for Fork/Spawn
        const processMethods = this.createProcessMethods();
        // Create the service with a model getter
        const modelGetter = this.defaultModel
            ? () => createAiSdkModel({ model: this.defaultModel })
            : undefined;
        this.service = new Runtime({
            ...config.serviceConfig,
            modelGetter,
            processMethods,
        });
    }
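    /**
     * Config sketch for the constructor above: model, temperature, maxTokens
     * (mapped to ai-sdk's maxOutputTokens), maxTicks (default 10), and
     * serviceConfig, which is passed through to the Runtime.
     *
     * ```typescript
     * const compiler = createCompiler({
     *     model: openai('gpt-4o'),
     *     temperature: 0.7,
     *     maxTokens: 1024, // becomes maxOutputTokens
     *     maxTicks: 5,     // cap on tick-loop iterations
     * });
     * ```
     */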
    createProcessMethods() {
        return {
            fork: (input, root, options) => {
                const parentHandle = options?.parentPid
                    ? this.executions.get(options.parentPid)
                    : undefined;
                const handle = new ProcessHandle("fork", options?.parentPid, parentHandle?.rootPid);
                this.executions.set(handle.pid, handle);
                if (root && this.currentExecutor) {
                    const element = typeof root === "function" ? { type: root, props: {}, key: null } : root;
                    const messages = this.engineInputToMessages(input);
                    this.runInternal(element, messages, this.currentExecutor)
                        .then((result) => handle.complete(result))
                        .catch((error) => handle.fail(error instanceof Error ? error : new Error(String(error))));
                }
                return handle;
            },
            spawn: (input, root, _options) => {
                const handle = new ProcessHandle("spawn");
                this.executions.set(handle.pid, handle);
                if (root && this.currentExecutor) {
                    const element = typeof root === "function" ? { type: root, props: {}, key: null } : root;
                    const messages = this.engineInputToMessages(input);
                    this.runInternal(element, messages, this.currentExecutor)
                        .then((result) => handle.complete(result))
                        .catch((error) => handle.fail(error instanceof Error ? error : new Error(String(error))));
                }
                return handle;
            },
            signal: (pid, signal) => {
                const handle = this.executions.get(pid);
                if (handle && signal === "abort") {
                    handle.cancel();
                }
            },
            kill: (pid) => {
                this.executions.get(pid)?.cancel();
            },
            // Clean Process interface methods
            all: () => {
                return Array.from(this.executions.values()).filter((h) => h.status === "running");
            },
            get: (pid) => {
                return this.executions.get(pid);
            },
            // Deprecated COMProcess methods (backwards compat)
            list: () => {
                return Array.from(this.executions.values()).filter((h) => h.status === "running");
            },
        };
    }
    engineInputToMessages(input) {
        return (input.timeline || [])
            .filter((entry) => entry.kind === "message")
            .map((entry) => {
                const msg = entry.message;
                const content = msg.content.map((c) => {
                    if (c.type === "text")
                        return { type: "text", text: c.text };
                    return { type: "text", text: JSON.stringify(c) };
                });
                return { role: msg.role, content };
            });
    }
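    /**
     * A worked example of engineInputToMessages (timeline shapes are a sketch;
     * only the fields the method reads are shown). Non-"message" entries are
     * filtered out, and non-text content parts are flattened to JSON text:
     *
     * ```typescript
     * // input.timeline = [
     * //   { kind: 'message', message: { role: 'user', content: [
     * //       { type: 'text', text: 'hi' },
     * //       { type: 'image', url: 'https://…' }, // hypothetical non-text part
     * //   ] } },
     * // ]
     * // =>
     * // [{ role: 'user', content: [
     * //     { type: 'text', text: 'hi' },
     * //     { type: 'text', text: '{"type":"image","url":"https://…"}' },
     * // ] }]
     * ```
     */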
    // ============================================================================
    // Level 2-3: run() - with or without executor
    // ============================================================================
    /**
     * Execute a JSX program.
     *
     * If an executor is provided (Level 2), the user controls model execution.
     * If not (Level 3), we manage execution using the configured model.
     *
     * @param jsx Root JSX element
     * @param executorOrMessages Optional executor function OR initial messages
     * @param maybeExecutor Optional executor (if the second arg was messages)
     */
    async run(jsx, executorOrMessages, maybeExecutor) {
        // Parse overloaded arguments
        let initialMessages;
        let executor;
        if (typeof executorOrMessages === "function") {
            executor = executorOrMessages;
        }
        else if (Array.isArray(executorOrMessages)) {
            initialMessages = executorOrMessages;
            executor = maybeExecutor;
        }
        // If no executor, use managed execution
        if (!executor) {
            executor = this.createManagedExecutor();
        }
        return this.runInternal(jsx, initialMessages, executor);
    }
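    /**
     * Call shapes accepted by the overload parsing above (a usage sketch;
     * `priorMessages` stands in for an ai-sdk message array):
     *
     * ```typescript
     * await compiler.run(<MyAgent />);                           // managed (Level 3)
     * await compiler.run(<MyAgent />, priorMessages);            // managed, with history
     * await compiler.run(<MyAgent />, async (input) => {         // user executor (Level 2)
     *     return generateText({ model: openai('gpt-4o'), ...input });
     * });
     * await compiler.run(<MyAgent />, priorMessages, executor);  // history + executor
     * ```
     */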
    async runInternal(jsx, initialMessages, executor) {
        // Store the executor for fork/spawn
        this.currentExecutor = executor;
        const engineInput = aiSdkMessagesToEngineInput(initialMessages);
        const config = {
            input: engineInput,
            rootElement: jsx,
            maxTicks: this.maxTicks,
        };
        let tick = 1;
        let lastResult;
        await this.service._run(config, async (compiled) => {
            const input = toCompiledInput(compiled, tick);
            const result = await executor(input);
            lastResult = result;
            tick++;
            return toTickResultInput(result);
        });
        return lastResult;
    }
    createManagedExecutor() {
        if (!this.defaultModel) {
            throw new ValidationError("model", "No model configured. Either pass an executor function, " +
                "configure a model in createCompiler(), or use a <Model> component.");
        }
        return async (input) => {
            return await aiSdkGenerateText({
                model: input.model ?? this.defaultModel,
                messages: input.messages,
                system: input.system,
                ...this.defaultOptions,
                tools: Object.assign({}, input.tools, this.defaultOptions?.tools || {}),
            });
        };
    }
    // ============================================================================
    // Level 2-3: stream() - with or without executor
    // ============================================================================
    /**
     * Execute a JSX program with streaming.
     *
     * @param jsx Root JSX element
     * @param executorOrMessages Optional executor function OR initial messages
     * @param maybeExecutor Optional executor (if the second arg was messages)
     */
    async *stream(jsx, executorOrMessages, maybeExecutor) {
        // Parse overloaded arguments
        let initialMessages;
        let executor;
        if (typeof executorOrMessages === "function") {
            executor = executorOrMessages;
        }
        else if (Array.isArray(executorOrMessages)) {
            initialMessages = executorOrMessages;
            executor = maybeExecutor;
        }
        // If no executor, use managed execution
        if (!executor) {
            executor = this.createManagedStreamExecutor();
        }
        yield* this.streamInternal(jsx, initialMessages, executor);
    }
    async *streamInternal(jsx, initialMessages, executor) {
        const engineInput = aiSdkMessagesToEngineInput(initialMessages);
        const config = {
            input: engineInput,
            rootElement: jsx,
            maxTicks: this.maxTicks,
        };
        for await (const event of this.service._runStream(config, {
            onTick: async function* (compiled, tick) {
                const input = toCompiledInput(compiled, tick);
                yield { type: "compiled", tick, input };
                const streamResult = executor(input);
                for await (const chunk of streamResult.fullStream) {
                    yield chunk;
                }
                // Mark the end with the accumulated result
                const result = await streamResult;
                yield { __result: result };
            },
            finalizeChunks: (chunks) => {
                // Find the result marker
                const resultMarker = chunks.find((c) => c?.__result);
                if (resultMarker) {
                    return toTickResultInput(resultMarker.__result);
                }
                // Fallback: aggregate chunks
                let text = "";
                const toolCalls = [];
                for (const chunk of chunks) {
                    if (chunk?.type === "text-delta") {
                        text += chunk.textDelta ?? "";
                    }
                    if (chunk?.type === "tool-call") {
                        toolCalls.push({
                            toolCallId: chunk.toolCallId,
                            toolName: chunk.toolName,
                            args: chunk.args,
                        });
                    }
                }
                const response = {
                    shouldStop: toolCalls.length === 0,
                    newTimelineEntries: text
                        ? [
                            {
                                kind: "message",
                                message: {
                                    role: "assistant",
                                    content: [{ type: "text", text }],
                                },
                            },
                        ]
                        : undefined,
                    toolCalls: toolCalls.length > 0
                        ? toolCalls.map((tc) => ({
                            id: tc.toolCallId,
                            name: tc.toolName,
                            input: tc.args,
                        }))
                        : undefined,
                };
                return { response };
            },
        })) {
            // Transform internal events into our public event type
            if (event.type === "tick_start") {
                yield { type: "tick_start", tick: event.tick };
            }
            else if (event.type === "chunk") {
                yield { type: "chunk", tick: event.tick, chunk: event.chunk };
            }
            else if (event.type === "tick_end") {
                yield { type: "tick_end", tick: event.tick };
            }
            else if (event.type === "complete") {
                yield { type: "complete", result: event.output };
            }
        }
    }
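    /**
     * Consuming the public event stream produced above (a sketch; chunk payloads
     * are whatever the executor's fullStream yields, e.g. ai-sdk stream parts):
     *
     * ```typescript
     * for await (const event of compiler.stream(<MyAgent />)) {
     *     switch (event.type) {
     *         case 'tick_start':
     *             console.error(`tick ${event.tick} started`);
     *             break;
     *         case 'chunk':
     *             if (event.chunk?.type === 'text-delta') {
     *                 process.stdout.write(event.chunk.textDelta ?? '');
     *             }
     *             break;
     *         case 'tick_end':
     *             console.error(`tick ${event.tick} ended`);
     *             break;
     *         case 'complete':
     *             console.error('run complete');
     *             break;
     *     }
     * }
     * ```
     */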
    createManagedStreamExecutor() {
        if (!this.defaultModel) {
            throw new ValidationError("model", "No model configured. Either pass an executor function, " +
                "configure a model in createCompiler(), or use a <Model> component.");
        }
        return (input) => {
            return aiSdkStreamText({
                model: input.model ?? this.defaultModel,
                messages: input.messages,
                system: input.system,
                ...this.defaultOptions,
                tools: Object.assign({}, input.tools, this.defaultOptions?.tools || {}),
            });
        };
    }
    // ============================================================================
    // Cleanup
    // ============================================================================
    async destroy() {
        for (const handle of this.executions.values()) {
            if (handle.status === "running") {
                handle.cancel();
            }
        }
        this.executions.clear();
    }
}
// ============================================================================
// Level 4: generateText() / streamText() - mirror the library API
// ============================================================================
/**
 * Generate text using JSX.
 *
 * Mirrors ai-sdk's generateText API exactly.
 * Returns the same type for seamless integration.
 *
 * @param jsx Root JSX element
 * @param options Additional options (merged with defaults and JSX config)
 */
export async function generateText(jsx, options) {
    const compiled = await compile(jsx);
    const model = compiled.model ?? options?.model;
    if (!model) {
        throw new ValidationError("model", "No model available. Configure via createCompiler({ model }), " +
            "<Model> component, or options.model parameter.");
    }
    return await aiSdkGenerateText({
        model,
        messages: compiled.messages,
        system: compiled.system,
        ...options,
        tools: Object.assign({}, compiled.tools, options?.tools || {}),
    });
}
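/**
 * Note on merging in the call above: explicit options spread after the
 * compiled fields, so options (including options.model) override compiled
 * values, and options.tools win on name conflicts with JSX-compiled tools.
 *
 * ```typescript
 * // Hypothetical usage: JSX-defined tools plus an extra ad-hoc tool.
 * const result = await generateText(<MyAgent />, {
 *     model: openai('gpt-4o'),
 *     tools: { extraTool }, // `extraTool` is a placeholder ai-sdk tool
 * });
 * ```
 */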
/**
 * Stream text using JSX.
 *
 * Mirrors ai-sdk's streamText API exactly.
 * Returns the same type for seamless integration.
 *
 * @param jsx Root JSX element
 * @param options Additional options (merged with defaults and JSX config)
 */
export function streamText(jsx, options) {
    // We need to return the stream immediately, but compile() is async while
    // streamText is expected to be synchronous.
    // Solution: return a proxy that kicks off compilation first.
    // Create a deferred stream that compiles before streaming
    const streamPromise = (async () => {
        const compiled = await compile(jsx);
        const model = compiled.model ?? options?.model;
        if (!model) {
            throw new ValidationError("model", "No model available. Configure via createCompiler({ model }), " +
                "<Model> component, or options.model parameter.");
        }
        return aiSdkStreamText({
            model,
            messages: compiled.messages,
            system: compiled.system,
            ...options,
            tools: Object.assign({}, compiled.tools, options?.tools || {}),
        });
    })();
    // Return a proxy object that looks like StreamTextResult
    // but waits for compilation before accessing properties
    return {
        get fullStream() {
            return (async function* () {
                const stream = await streamPromise;
                for await (const chunk of stream.fullStream) {
                    yield chunk;
                }
            })();
        },
        get text() {
            return streamPromise.then((s) => s.text);
        },
        get toolCalls() {
            return streamPromise.then((s) => s.toolCalls);
        },
        get toolResults() {
            return streamPromise.then((s) => s.toolResults);
        },
        get usage() {
            return streamPromise.then((s) => s.usage);
        },
        get finishReason() {
            return streamPromise.then((s) => s.finishReason);
        },
        get response() {
            return streamPromise.then((s) => s.response);
        },
        get steps() {
            return streamPromise.then((s) => s.steps);
        },
        // ... other StreamTextResult properties
    };
}
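/**
 * Usage sketch for the deferred proxy above: every property defers to
 * streamPromise, so the result can be consumed like a native StreamTextResult
 * even though compilation has not finished when streamText() returns.
 *
 * ```typescript
 * const result = streamText(<MyAgent />, { model: openai('gpt-4o') });
 *
 * // Iteration waits for compilation, then forwards native ai-sdk chunks
 * for await (const chunk of result.fullStream) {
 *     // ...
 * }
 *
 * console.log(await result.text); // promise-valued properties resolve later
 * ```
 *
 * A compilation error surfaces wherever the proxy is first consumed, since
 * every getter chains off the same streamPromise.
 */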
// ============================================================================
// Factory Function
// ============================================================================
/**
 * Create an AI SDK compiler.
 *
 * @example Without a model (requires an executor or a <Model> component)
 * ```typescript
 * const compiler = createCompiler();
 *
 * // Use with an executor
 * const result = await compiler.run(<MyAgent />, async (input) => {
 *     return await generateText({ model: openai('gpt-4o'), ...input });
 * });
 * ```
 *
 * @example With a model (managed execution)
 * ```typescript
 * const compiler = createCompiler({ model: openai('gpt-4o') });
 *
 * const result = await compiler.run(<MyAgent />);
 * ```
 */
export function createCompiler(config) {
    return new AiSdkCompiler(config);
}
// Also exported as createAiSdkCompiler for backwards compatibility
export { createCompiler as createAiSdkCompiler };
//# sourceMappingURL=compiler.js.map