@output.ai/llm 0.2.7 → 0.2.9

This diff shows the contents of publicly released package versions as they appear in their public registries, and is provided for informational purposes only.
package/README.md CHANGED
@@ -74,6 +74,49 @@ You are a helpful assistant.
  - **OpenAI** - Requires `OPENAI_API_KEY`
  - **Azure OpenAI** - Requires Azure-specific environment variables

+ ## Advanced Features
+
+ ### AI SDK Pass-Through
+
+ All generate functions accept additional [AI SDK options](https://sdk.vercel.ai/docs) that are passed through to the underlying provider. This enables tool calling, retry configuration, and other advanced features.
+
+ #### Tool Calling
+
+ ```typescript
+ import { generateText, tool } from '@output.ai/llm';
+ import { z } from '@output.ai/core';
+
+ const result = await generateText({
+   prompt: 'agent@v1',
+   variables: { task: 'Research competitor pricing' },
+   tools: {
+     searchWeb: tool({
+       description: 'Search the web for information',
+       parameters: z.object({ query: z.string() }),
+       execute: async ({ query }) => fetchSearchResults(query)
+     })
+   },
+   toolChoice: 'auto'
+ });
+
+ // Access tool calls made by the model
+ console.log(result.toolCalls);
+ ```
+
+ #### Common Pass-Through Options
+
+ | Option | Type | Description |
+ |--------|------|-------------|
+ | `tools` | `ToolSet` | Tools the model can call (`generateText` only) |
+ | `toolChoice` | `'auto' \| 'none' \| 'required'` | Tool selection strategy |
+ | `maxRetries` | `number` | Max retry attempts (default: 2) |
+ | `seed` | `number` | Seed for deterministic output |
+ | `abortSignal` | `AbortSignal` | Cancel the request |
+ | `topP` | `number` | Nucleus sampling (0-1) |
+ | `topK` | `number` | Top-K sampling |
+
+ Options set in the prompt file (`temperature`, `maxTokens`) can be overridden at call time.
+
  ## Documentation

  For comprehensive documentation, visit:
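
A minimal sketch of the call-time override behavior the README describes, assuming a hypothetical prompt file `summarize@v1` whose config sets `temperature: 0.7`:

```typescript
import { generateText } from '@output.ai/llm';

// 'summarize@v1' is a hypothetical prompt name used only for illustration.
// Call-time options are spread after the prompt-file options, so they take precedence.
const response = await generateText({
  prompt: 'summarize@v1',
  variables: { text: 'Quarterly revenue grew 12%...' },
  temperature: 0.2, // overrides the 0.7 configured in the prompt file
  maxRetries: 5     // AI SDK pass-through option
});

console.log(response.text);
```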
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@output.ai/llm",
-   "version": "0.2.7",
+   "version": "0.2.9",
    "description": "Framework abstraction to interact with LLM models",
    "type": "module",
    "main": "src/index.js",
package/src/ai_sdk.js CHANGED
@@ -5,24 +5,25 @@ import * as AI from 'ai';
  import { validateGenerateTextArgs, validateGenerateObjectArgs, validateGenerateArrayArgs, validateGenerateEnumArgs } from './validations.js';
  import { loadPrompt } from './prompt_loader.js';

- /*
-   Word of wisdom:
-   We could retrieve the result object using the rest operator:
-   ```js
-   const { usage, providerMetadata, ...rest } = response;
-   const result = rest[resultProperty];
-   ```
-   But we CAN'T because the response of the generateText is an instance of `DefaultGenerateTextResult`
-   and 'text' is a getter (`get text()`).
-   Be aware of this when refactoring.
- */
  const traceWrapper = async ( { traceId, resultProperty, fn } ) => {
    try {
      const response = await fn();
      const { usage, providerMetadata } = response;
      const result = response[resultProperty];
      Tracing.addEventEnd( { id: traceId, details: { result, usage, providerMetadata } } );
-     return result;
+
+     // Use a Proxy to add 'result' as a unified field name without mutating the AI SDK response.
+     // This preserves the original response object (with its getters/prototype) while allowing
+     // developers to use 'result' consistently across all generate* functions.
+     // Note: Don't use spread/rest on response - AI SDK uses getters that won't copy correctly.
+     return new Proxy( response, {
+       get( target, prop, receiver ) {
+         if ( prop === 'result' ) {
+           return target[resultProperty];
+         }
+         return Reflect.get( target, prop, receiver );
+       }
+     } );
    } catch ( error ) {
      Tracing.addEventError( { id: traceId, details: error } );
      throw error;
@@ -37,14 +38,14 @@ const startTrace = ( name, details ) => {
    return traceId;
  };

- const extraAiSdkOptionsFromPrompt = prompt => {
+ const aiSdkOptionsFromPrompt = prompt => {
    const options = {
      model: loadModel( prompt ),
      messages: prompt.messages,
      providerOptions: prompt.config.providerOptions
    };

-   if ( prompt.config.temperature ) {
+   if ( prompt.config.temperature !== undefined ) {
      options.temperature = prompt.config.temperature;
    }

@@ -58,42 +59,59 @@ const extraAiSdkOptionsFromPrompt = prompt => {
  /**
   * Use an LLM model to generate text.
   *
+  * Accepts additional AI SDK options (tools, maxRetries, seed, etc.) that are passed through
+  * to the underlying provider. Options from the prompt file can be overridden at call time.
+  *
   * @param {object} args - Generation arguments
   * @param {string} args.prompt - Prompt file name
   * @param {Record<string, string | number>} [args.variables] - Variables to interpolate
+  * @param {object} [args.tools] - AI SDK tools the model can call
+  * @param {'auto'|'none'|'required'|object} [args.toolChoice] - Tool selection strategy
+  * @param {number} [args.maxRetries] - Max retry attempts (default: 2)
+  * @param {number} [args.seed] - Seed for deterministic output
+  * @param {AbortSignal} [args.abortSignal] - Signal to abort the request
   * @throws {ValidationError} If the prompt config is invalid (e.g., snake_case fields)
   * @throws {FatalError} If the prompt file is not found or template rendering fails
-  * @returns {Promise<string>} Generated text
+  * @returns {Promise<GenerateTextResult>} AI SDK response with text, toolCalls, and metadata
   */
- export async function generateText( { prompt, variables } ) {
+ export async function generateText( { prompt, variables, ...extraAiSdkOptions } ) {
    validateGenerateTextArgs( { prompt, variables } );
    const loadedPrompt = loadPrompt( prompt, variables );
    const traceId = startTrace( 'generateText', { prompt, variables, loadedPrompt } );

    return traceWrapper( {
      traceId, resultProperty: 'text', fn: async () =>
-       AI.generateText( extraAiSdkOptionsFromPrompt( loadedPrompt ) )
+       AI.generateText( {
+         ...aiSdkOptionsFromPrompt( loadedPrompt ),
+         ...extraAiSdkOptions
+       } )
    } );
  }

  /**
   * Use an LLM model to generate an object with a fixed schema.
   *
+  * Accepts additional AI SDK options (maxRetries, seed, etc.) that are passed through
+  * to the underlying provider. Options from the prompt file can be overridden at call time.
+  *
   * @param {object} args - Generation arguments
   * @param {string} args.prompt - Prompt file name
   * @param {Record<string, string | number>} [args.variables] - Variables to interpolate
   * @param {z.ZodObject} args.schema - Output schema
   * @param {string} [args.schemaName] - Output schema name
   * @param {string} [args.schemaDescription] - Output schema description
+  * @param {number} [args.maxRetries] - Max retry attempts (default: 2)
+  * @param {number} [args.seed] - Seed for deterministic output
+  * @param {AbortSignal} [args.abortSignal] - Signal to abort the request
   * @throws {ValidationError} If the prompt config is invalid (e.g., snake_case fields)
   * @throws {FatalError} If the prompt file is not found or template rendering fails
-  * @returns {Promise<object>} Object matching the provided schema
+  * @returns {Promise<GenerateObjectResult>} AI SDK response with object and metadata
   */
  export async function generateObject( args ) {
    validateGenerateObjectArgs( args );
-   const { prompt, variables, schema, schemaName, schemaDescription } = args;
+   const { prompt, variables, schema, schemaName, schemaDescription, ...extraAiSdkOptions } = args;
    const loadedPrompt = loadPrompt( prompt, variables );
-   const traceId = startTrace( 'generateObject', { ...args, schema: z.toJSONSchema( schema ), loadedPrompt } );
+   const traceId = startTrace( 'generateObject', { prompt, variables, schema: z.toJSONSchema( schema ), loadedPrompt } );

    return traceWrapper( {
      traceId, resultProperty: 'object', fn: async () =>
@@ -102,7 +120,8 @@ export async function generateObject( args ) {
        schema,
        schemaName,
        schemaDescription,
-       ...extraAiSdkOptionsFromPrompt( loadedPrompt )
+       ...aiSdkOptionsFromPrompt( loadedPrompt ),
+       ...extraAiSdkOptions
      } )
    } );
  }
@@ -110,21 +129,27 @@ export async function generateObject( args ) {
  /**
   * Use an LLM model to generate an array of values with a fixed schema.
   *
+  * Accepts additional AI SDK options (maxRetries, seed, etc.) that are passed through
+  * to the underlying provider. Options from the prompt file can be overridden at call time.
+  *
   * @param {object} args - Generation arguments
   * @param {string} args.prompt - Prompt file name
   * @param {Record<string, string | number>} [args.variables] - Variables to interpolate
   * @param {z.ZodType} args.schema - Output schema (array item)
   * @param {string} [args.schemaName] - Output schema name
   * @param {string} [args.schemaDescription] - Output schema description
+  * @param {number} [args.maxRetries] - Max retry attempts (default: 2)
+  * @param {number} [args.seed] - Seed for deterministic output
+  * @param {AbortSignal} [args.abortSignal] - Signal to abort the request
   * @throws {ValidationError} If the prompt config is invalid (e.g., snake_case fields)
   * @throws {FatalError} If the prompt file is not found or template rendering fails
-  * @returns {Promise<object>} Array where each element matches the schema
+  * @returns {Promise<GenerateObjectResult>} AI SDK response with array and metadata
   */
  export async function generateArray( args ) {
    validateGenerateArrayArgs( args );
-   const { prompt, variables, schema, schemaName, schemaDescription } = args;
+   const { prompt, variables, schema, schemaName, schemaDescription, ...extraAiSdkOptions } = args;
    const loadedPrompt = loadPrompt( prompt, variables );
-   const traceId = startTrace( 'generateArray', { ...args, schema: z.toJSONSchema( schema ), loadedPrompt } );
+   const traceId = startTrace( 'generateArray', { prompt, variables, schema: z.toJSONSchema( schema ), loadedPrompt } );

    return traceWrapper( {
      traceId, resultProperty: 'object', fn: async () =>
@@ -133,7 +158,8 @@ export async function generateArray( args ) {
        schema,
        schemaName,
        schemaDescription,
-       ...extraAiSdkOptionsFromPrompt( loadedPrompt )
+       ...aiSdkOptionsFromPrompt( loadedPrompt ),
+       ...extraAiSdkOptions
      } )
    } );
  }
@@ -141,26 +167,33 @@ export async function generateArray( args ) {
  /**
   * Use an LLM model to generate a result from an enum (array of string values).
   *
+  * Accepts additional AI SDK options (maxRetries, seed, etc.) that are passed through
+  * to the underlying provider. Options from the prompt file can be overridden at call time.
+  *
   * @param {object} args - Generation arguments
   * @param {string} args.prompt - Prompt file name
   * @param {Record<string, string | number>} [args.variables] - Variables to interpolate
   * @param {string[]} args.enum - Allowed values for the generation
+  * @param {number} [args.maxRetries] - Max retry attempts (default: 2)
+  * @param {number} [args.seed] - Seed for deterministic output
+  * @param {AbortSignal} [args.abortSignal] - Signal to abort the request
   * @throws {ValidationError} If the prompt config is invalid (e.g., snake_case fields)
   * @throws {FatalError} If the prompt file is not found or template rendering fails
-  * @returns {Promise<string>} One of the provided enum values
+  * @returns {Promise<GenerateObjectResult>} AI SDK response with enum value and metadata
   */
  export async function generateEnum( args ) {
    validateGenerateEnumArgs( args );
-   const { prompt, variables, enum: _enum } = args;
+   const { prompt, variables, enum: _enum, ...extraAiSdkOptions } = args;
    const loadedPrompt = loadPrompt( prompt, variables );
-   const traceId = startTrace( 'generateEnum', { ...args, loadedPrompt } );
+   const traceId = startTrace( 'generateEnum', { prompt, variables, loadedPrompt } );

    return traceWrapper( {
      traceId, resultProperty: 'object', fn: async () =>
        AI.generateObject( {
          output: 'enum',
          enum: _enum,
-         ...extraAiSdkOptionsFromPrompt( loadedPrompt )
+         ...aiSdkOptionsFromPrompt( loadedPrompt ),
+         ...extraAiSdkOptions
        } )
    } );
  }
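
A minimal sketch of how the Proxy-based `result` alias behaves from the caller's side, assuming a hypothetical prompt file `summary@v1`:

```typescript
import { generateText } from '@output.ai/llm';

// 'summary@v1' is a hypothetical prompt name used only for illustration.
const response = await generateText({ prompt: 'summary@v1' });

// 'result' is served by the Proxy and aliases the function-specific field
// ('text' for generateText, 'object' for the other generate* functions).
console.log(response.result === response.text); // true

// The rest of the AI SDK response remains reachable through the same Proxy.
console.log(response.usage, response.finishReason);
```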
package/src/ai_sdk.test.js CHANGED
@@ -52,8 +52,17 @@ beforeEach( () => {
    loadModelImpl.mockReset().mockReturnValue( 'MODEL' );
    loadPromptImpl.mockReset().mockReturnValue( basePrompt );

-   aiFns.generateText.mockReset().mockResolvedValue( { text: 'TEXT' } );
-   aiFns.generateObject.mockReset().mockResolvedValue( { object: 'OBJECT' } );
+   aiFns.generateText.mockReset().mockResolvedValue( {
+     text: 'TEXT',
+     sources: [],
+     usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+     finishReason: 'stop'
+   } );
+   aiFns.generateObject.mockReset().mockResolvedValue( {
+     object: 'OBJECT',
+     usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+     finishReason: 'stop'
+   } );

    validators.validateGenerateTextArgs.mockClear();
    validators.validateGenerateObjectArgs.mockClear();
@@ -83,12 +92,19 @@ describe( 'ai_sdk', () => {
        temperature: 0.3,
        providerOptions: basePrompt.config.providerOptions
      } );
-     expect( result ).toBe( 'TEXT' );
+     expect( result.text ).toBe( 'TEXT' );
+     expect( result.sources ).toEqual( [] );
+     expect( result.usage ).toEqual( { inputTokens: 10, outputTokens: 5, totalTokens: 15 } );
+     expect( result.finishReason ).toBe( 'stop' );
    } );

    it( 'generateObject: validates, traces, calls AI with output object and returns object', async () => {
      const { generateObject } = await importSut();
-     aiFns.generateObject.mockResolvedValueOnce( { object: { a: 1 } } );
+     aiFns.generateObject.mockResolvedValueOnce( {
+       object: { a: 1 },
+       usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+       finishReason: 'stop'
+     } );

      const schema = z.object( { a: z.number() } );
      const result = await generateObject( {
@@ -113,12 +129,16 @@ describe( 'ai_sdk', () => {
        temperature: 0.3,
        providerOptions: basePrompt.config.providerOptions
      } );
-     expect( result ).toEqual( { a: 1 } );
+     expect( result.object ).toEqual( { a: 1 } );
    } );

    it( 'generateArray: validates, traces, calls AI (item schema) and returns array', async () => {
      const { generateArray } = await importSut();
-     aiFns.generateObject.mockResolvedValueOnce( { object: [ 1, 2 ] } );
+     aiFns.generateObject.mockResolvedValueOnce( {
+       object: [ 1, 2 ],
+       usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+       finishReason: 'stop'
+     } );

      const schema = z.number();
      const result = await generateArray( {
@@ -143,12 +163,16 @@ describe( 'ai_sdk', () => {
        temperature: 0.3,
        providerOptions: basePrompt.config.providerOptions
      } );
-     expect( result ).toEqual( [ 1, 2 ] );
+     expect( result.object ).toEqual( [ 1, 2 ] );
    } );

    it( 'generateEnum: validates, traces, calls AI with output enum and returns value', async () => {
      const { generateEnum } = await importSut();
-     aiFns.generateObject.mockResolvedValueOnce( { object: 'B' } );
+     aiFns.generateObject.mockResolvedValueOnce( {
+       object: 'B',
+       usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+       finishReason: 'stop'
+     } );

      const result = await generateEnum( { prompt: 'test_prompt@v1', enum: [ 'A', 'B', 'C' ] } );

@@ -165,7 +189,7 @@ describe( 'ai_sdk', () => {
        temperature: 0.3,
        providerOptions: basePrompt.config.providerOptions
      } );
-     expect( result ).toBe( 'B' );
+     expect( result.object ).toBe( 'B' );
    } );

    it( 'generateText: passes provider-specific options to AI SDK', async () => {
@@ -325,4 +349,283 @@ describe( 'ai_sdk', () => {
        }
      } );
    } );
+
+   it( 'generateText: passes through providerMetadata', async () => {
+     aiFns.generateText.mockResolvedValueOnce( {
+       text: 'TEXT',
+       sources: [],
+       usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+       finishReason: 'stop',
+       providerMetadata: { anthropic: { cacheReadInputTokens: 50 } }
+     } );
+
+     const { generateText } = await importSut();
+     const result = await generateText( { prompt: 'test_prompt@v1' } );
+
+     expect( result.providerMetadata ).toEqual( { anthropic: { cacheReadInputTokens: 50 } } );
+   } );
+
+   it( 'generateText: passes through warnings and response metadata', async () => {
+     aiFns.generateText.mockResolvedValueOnce( {
+       text: 'TEXT',
+       sources: [],
+       usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+       finishReason: 'stop',
+       warnings: [ { type: 'other', message: 'Test warning' } ],
+       response: { id: 'req_123', modelId: 'gpt-4o-2024-05-13' }
+     } );
+
+     const { generateText } = await importSut();
+     const result = await generateText( { prompt: 'test_prompt@v1' } );
+
+     expect( result.warnings ).toEqual( [ { type: 'other', message: 'Test warning' } ] );
+     expect( result.response ).toEqual( { id: 'req_123', modelId: 'gpt-4o-2024-05-13' } );
+   } );
+
+   it( 'generateText: includes unified result field that matches text', async () => {
+     const { generateText } = await importSut();
+     const response = await generateText( { prompt: 'test_prompt@v1' } );
+
+     expect( response.result ).toBe( 'TEXT' );
+     expect( response.result ).toBe( response.text );
+   } );
+
+   it( 'generateObject: includes unified result field that matches object', async () => {
+     const { generateObject } = await importSut();
+     aiFns.generateObject.mockResolvedValueOnce( {
+       object: { a: 1, b: 'test' },
+       usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+       finishReason: 'stop'
+     } );

+     const schema = z.object( { a: z.number(), b: z.string() } );
+     const response = await generateObject( { prompt: 'test_prompt@v1', schema } );
+
+     expect( response.result ).toEqual( { a: 1, b: 'test' } );
+     expect( response.result ).toEqual( response.object );
+   } );
+
+   it( 'generateArray: includes unified result field that matches object', async () => {
+     const { generateArray } = await importSut();
+     aiFns.generateObject.mockResolvedValueOnce( {
+       object: [ 'item1', 'item2', 'item3' ],
+       usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+       finishReason: 'stop'
+     } );
+
+     const schema = z.string();
+     const response = await generateArray( { prompt: 'test_prompt@v1', schema } );
+
+     expect( response.result ).toEqual( [ 'item1', 'item2', 'item3' ] );
+     expect( response.result ).toEqual( response.object );
+   } );
+
+   it( 'generateEnum: includes unified result field that matches object', async () => {
+     const { generateEnum } = await importSut();
+     aiFns.generateObject.mockResolvedValueOnce( {
+       object: 'yes',
+       usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+       finishReason: 'stop'
+     } );
+
+     const response = await generateEnum( { prompt: 'test_prompt@v1', enum: [ 'yes', 'no' ] } );
+
+     expect( response.result ).toBe( 'yes' );
+     expect( response.result ).toBe( response.object );
+   } );
+
+   it( 'generateText: traces error and rethrows when AI SDK fails', async () => {
+     const error = new Error( 'API rate limit exceeded' );
+     aiFns.generateText.mockRejectedValueOnce( error );
+     const { generateText } = await importSut();
+
+     await expect( generateText( { prompt: 'test_prompt@v1' } ) ).rejects.toThrow( 'API rate limit exceeded' );
+     expect( tracingSpies.addEventError ).toHaveBeenCalledWith(
+       expect.objectContaining( { details: error } )
+     );
+   } );
+
+   it( 'generateObject: traces error and rethrows when AI SDK fails', async () => {
+     const error = new Error( 'Invalid schema' );
+     aiFns.generateObject.mockRejectedValueOnce( error );
+     const { generateObject } = await importSut();
+
+     const schema = z.object( { a: z.number() } );
+     await expect( generateObject( { prompt: 'test_prompt@v1', schema } ) ).rejects.toThrow( 'Invalid schema' );
+     expect( tracingSpies.addEventError ).toHaveBeenCalledWith(
+       expect.objectContaining( { details: error } )
+     );
+   } );
+
+   it( 'generateText: Proxy correctly handles AI SDK response with getter', async () => {
+     const responseWithGetter = {
+       _internalText: 'TEXT_FROM_GETTER',
+       get text() {
+         return this._internalText;
+       },
+       sources: [],
+       usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+       finishReason: 'stop'
+     };
+     aiFns.generateText.mockResolvedValueOnce( responseWithGetter );
+
+     const { generateText } = await importSut();
+     const response = await generateText( { prompt: 'test_prompt@v1' } );
+
+     expect( response.text ).toBe( 'TEXT_FROM_GETTER' );
+     expect( response.result ).toBe( 'TEXT_FROM_GETTER' );
+   } );
+
+   it( 'generateObject: Proxy correctly handles AI SDK response with getter', async () => {
+     const responseWithGetter = {
+       _internalObject: { value: 42 },
+       get object() {
+         return this._internalObject;
+       },
+       usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+       finishReason: 'stop'
+     };
+     aiFns.generateObject.mockResolvedValueOnce( responseWithGetter );
+
+     const { generateObject } = await importSut();
+     const schema = z.object( { value: z.number() } );
+     const response = await generateObject( { prompt: 'test_prompt@v1', schema } );
+
+     expect( response.object ).toEqual( { value: 42 } );
+     expect( response.result ).toEqual( { value: 42 } );
+   } );
+
+   it( 'generateText: passes through AI SDK options like tools and maxRetries', async () => {
+     const { generateText } = await importSut();
+     const mockTools = { calculator: { description: 'A calculator tool' } };
+
+     await generateText( {
+       prompt: 'test_prompt@v1',
+       tools: mockTools,
+       toolChoice: 'required',
+       maxRetries: 5,
+       seed: 42
+     } );
+
+     expect( aiFns.generateText ).toHaveBeenCalledWith(
+       expect.objectContaining( {
+         tools: mockTools,
+         toolChoice: 'required',
+         maxRetries: 5,
+         seed: 42
+       } )
+     );
+   } );
+
+   it( 'generateText: user-provided temperature overrides prompt temperature', async () => {
+     loadPromptImpl.mockReturnValueOnce( {
+       config: {
+         provider: 'openai',
+         model: 'gpt-4o',
+         temperature: 0.7
+       },
+       messages: [ { role: 'user', content: 'Hi' } ]
+     } );
+
+     const { generateText } = await importSut();
+     await generateText( { prompt: 'test_prompt@v1', temperature: 0.2 } );
+
+     expect( aiFns.generateText ).toHaveBeenCalledWith(
+       expect.objectContaining( { temperature: 0.2 } )
+     );
+   } );
+
+   it( 'generateText: passes through temperature: 0 from prompt', async () => {
+     loadPromptImpl.mockReturnValueOnce( {
+       config: {
+         provider: 'openai',
+         model: 'gpt-4o',
+         temperature: 0
+       },
+       messages: [ { role: 'user', content: 'Hi' } ]
+     } );
+
+     const { generateText } = await importSut();
+     await generateText( { prompt: 'test_prompt@v1' } );
+
+     expect( aiFns.generateText ).toHaveBeenCalledWith(
+       expect.objectContaining( { temperature: 0 } )
+     );
+   } );
+
+   it( 'generateObject: passes through AI SDK options like maxRetries and seed', async () => {
+     const { generateObject } = await importSut();
+     const schema = z.object( { a: z.number() } );
+
+     await generateObject( {
+       prompt: 'test_prompt@v1',
+       schema,
+       maxRetries: 3,
+       seed: 123,
+       topP: 0.9
+     } );
+
+     expect( aiFns.generateObject ).toHaveBeenCalledWith(
+       expect.objectContaining( {
+         maxRetries: 3,
+         seed: 123,
+         topP: 0.9
+       } )
+     );
+   } );
+
+   it( 'generateArray: passes through AI SDK options', async () => {
+     const { generateArray } = await importSut();
+     const schema = z.string();
+     const controller = new AbortController();
+
+     await generateArray( {
+       prompt: 'test_prompt@v1',
+       schema,
+       abortSignal: controller.signal,
+       headers: { 'X-Custom': 'value' }
+     } );
+
+     expect( aiFns.generateObject ).toHaveBeenCalledWith(
+       expect.objectContaining( {
+         abortSignal: controller.signal,
+         headers: { 'X-Custom': 'value' }
+       } )
+     );
+   } );
+
+   it( 'generateEnum: passes through AI SDK options', async () => {
+     const { generateEnum } = await importSut();
+
+     await generateEnum( {
+       prompt: 'test_prompt@v1',
+       enum: [ 'A', 'B' ],
+       stopSequences: [ 'END' ],
+       presencePenalty: 0.5
+     } );
+
+     expect( aiFns.generateObject ).toHaveBeenCalledWith(
+       expect.objectContaining( {
+         stopSequences: [ 'END' ],
+         presencePenalty: 0.5
+       } )
+     );
+   } );
+
+   it( 'generateText: passes through unknown future options for forward compatibility', async () => {
+     const { generateText } = await importSut();
+
+     await generateText( {
+       prompt: 'test_prompt@v1',
+       experimental_futureOption: { key: 'value' },
+       unknownOption: true
+     } );
+
+     expect( aiFns.generateText ).toHaveBeenCalledWith(
+       expect.objectContaining( {
+         experimental_futureOption: { key: 'value' },
+         unknownOption: true
+       } )
+     );
+   } );
  } );
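
As the updated tests show, the generate functions now resolve to the full AI SDK response rather than a bare value. A minimal sketch of updating an existing call site, assuming a hypothetical prompt file `classify@v1`:

```typescript
import { generateEnum } from '@output.ai/llm';

// 'classify@v1' is a hypothetical prompt name used only for illustration.

// 0.2.7 resolved to the bare enum value:
// const label = await generateEnum({ prompt: 'classify@v1', enum: ['spam', 'ham'] });

// 0.2.9 resolves to the full response; read 'object' or the unified 'result' alias.
const response = await generateEnum({ prompt: 'classify@v1', enum: ['spam', 'ham'] });
const label = response.result; // 'spam' | 'ham'
console.log(label, response.usage);
```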
package/src/index.d.ts CHANGED
@@ -1,4 +1,11 @@
  import type { z } from '@output.ai/core';
+ import type {
+   GenerateTextResult as AIGenerateTextResult,
+   GenerateObjectResult as AIGenerateObjectResult,
+   CallSettings,
+   ToolSet,
+   ToolChoice
+ } from 'ai';

  /**
   * Represents a single message in a prompt conversation.
@@ -61,6 +68,62 @@ export type Prompt = {
    messages: PromptMessage[];
  };

+ // Re-export AI SDK types directly (auto-synced with AI SDK updates)
+ export type {
+   LanguageModelUsage,
+   FinishReason,
+   LanguageModelResponseMetadata,
+   ProviderMetadata,
+   CallWarning,
+   CallSettings,
+   ToolSet,
+   ToolChoice,
+   Tool
+ } from 'ai';
+
+ // Re-export the tool helper function for creating tools
+ export { tool } from 'ai';
+
+ /**
+  * Common AI SDK options that can be passed through to all generate functions.
+  * These options are passed directly to the underlying AI SDK call.
+  */
+ type AiSdkOptions = Partial<Omit<CallSettings, 'maxOutputTokens'>>;
+
+ /**
+  * AI SDK options specific to generateText, including tool calling support.
+  * @typeParam TOOLS - The tools available for the model to call
+  */
+ type GenerateTextAiSdkOptions<TOOLS extends ToolSet = ToolSet> = AiSdkOptions & {
+   /** Tools the model can call */
+   tools?: TOOLS;
+   /** Tool choice strategy: 'auto', 'none', 'required', or specific tool */
+   toolChoice?: ToolChoice<TOOLS>;
+   /** Limit which tools are active without changing types */
+   activeTools?: Array<keyof TOOLS>;
+ };
+
+ /**
+  * Result from generateText including full AI SDK response metadata.
+  * Extends AI SDK's GenerateTextResult with a unified `result` field.
+  */
+ export type GenerateTextResult =
+   AIGenerateTextResult<Record<string, never>, unknown> & {
+     /** Unified field name alias for 'text' - provides consistency across all generate* functions */
+     result: string;
+   };
+
+ /**
+  * Result from generateObject/generateArray/generateEnum including full AI SDK response metadata.
+  * Extends AI SDK's GenerateObjectResult with a unified `result` field.
+  * @typeParam T - The type of the generated object, inferred from the schema parameter
+  */
+ export type GenerateObjectResult<T> =
+   AIGenerateObjectResult<T> & {
+     /** Unified field name alias for 'object' - provides consistency across all generate* functions */
+     result: T;
+   };
+
  /**
   * Loads a prompt file and interpolates variables into its content.
   *
@@ -78,24 +141,28 @@ export function loadPrompt(
   *
   * This function is a wrapper over the AI SDK's `generateText`.
   * The prompt file sets `model`, `messages`, `temperature`, `maxTokens`, and `providerOptions`.
+  * Additional AI SDK options (tools, maxRetries, etc.) can be passed through.
   *
   * @param args - Generation arguments.
   * @param args.prompt - Prompt file name.
   * @param args.variables - Variables to interpolate.
-  * @returns Generated text.
+  * @param args.tools - Tools the model can call (optional).
+  * @param args.toolChoice - Tool selection strategy (optional).
+  * @returns AI SDK response with text and metadata.
   */
- export function generateText(
+ export function generateText<TOOLS extends ToolSet = ToolSet>(
    args: {
      prompt: string,
      variables?: Record<string, string | number | boolean>
-   }
- ): Promise<string>;
+   } & GenerateTextAiSdkOptions<TOOLS>
+ ): Promise<GenerateTextResult>;

  /**
   * Use an LLM model to generate an object with a fixed schema.
   *
   * This function is a wrapper over the AI SDK's `generateObject`.
   * The prompt file sets `model`, `messages`, `temperature`, `maxTokens`, and `providerOptions`.
+  * Additional AI SDK options (maxRetries, seed, etc.) can be passed through.
   *
   * @param args - Generation arguments.
   * @param args.prompt - Prompt file name.
@@ -103,7 +170,7 @@ export function generateText(
   * @param args.schema - Output schema.
   * @param args.schemaName - Output schema name.
   * @param args.schemaDescription - Output schema description.
-  * @returns Resolves to an object matching the provided schema.
+  * @returns AI SDK response with object and metadata.
   */
  export function generateObject<TSchema extends z.ZodObject>(
    args: {
@@ -112,14 +179,15 @@ export function generateObject<TSchema extends z.ZodObject>(
      schema: TSchema,
      schemaName?: string,
      schemaDescription?: string
-   }
- ): Promise<z.infer<TSchema>>;
+   } & AiSdkOptions
+ ): Promise<GenerateObjectResult<z.infer<TSchema>>>;

  /**
   * Use an LLM model to generate an array of values with a fixed schema.
   *
   * This function is a wrapper over the AI SDK's `generateObject` with `output: 'array'`.
   * The prompt file sets `model`, `messages`, `temperature`, `maxTokens`, and `providerOptions`.
+  * Additional AI SDK options (maxRetries, seed, etc.) can be passed through.
   *
   * @param args - Generation arguments.
   * @param args.prompt - Prompt file name.
@@ -127,7 +195,7 @@ export function generateObject<TSchema extends z.ZodObject>(
   * @param args.schema - Output schema (array item).
   * @param args.schemaName - Output schema name.
   * @param args.schemaDescription - Output schema description.
-  * @returns Resolves to an array where each element matches the schema.
+  * @returns AI SDK response with array and metadata.
   */
  export function generateArray<TSchema extends z.ZodType>(
    args: {
@@ -136,25 +204,26 @@ export function generateArray<TSchema extends z.ZodType>(
      schema: TSchema,
      schemaName?: string,
      schemaDescription?: string
-   }
- ): Promise<Array<z.infer<TSchema>>>;
+   } & AiSdkOptions
+ ): Promise<GenerateObjectResult<Array<z.infer<TSchema>>>>;

  /**
   * Use an LLM model to generate a result from an enum (array of string values).
   *
   * This function is a wrapper over the AI SDK's `generateObject` with `output: 'enum'`.
   * The prompt file sets `model`, `messages`, `temperature`, `maxTokens`, and `providerOptions`.
+  * Additional AI SDK options (maxRetries, seed, etc.) can be passed through.
   *
   * @param args - Generation arguments.
   * @param args.prompt - Prompt file name.
   * @param args.variables - Variables to interpolate.
   * @param args.enum - Allowed values for the generation.
-  * @returns Resolves to one of the provided enum values.
+  * @returns AI SDK response with enum value and metadata.
   */
  export function generateEnum<const TEnum extends readonly [string, ...string[]]>(
    args: {
      prompt: string,
      variables?: Record<string, string | number | boolean>,
      enum: TEnum
-   }
- ): Promise<TEnum[number]>;
+   } & AiSdkOptions
+ ): Promise<GenerateObjectResult<TEnum[number]>>;
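
A minimal sketch of the type-level behavior these declarations give callers, assuming a hypothetical prompt file `extract_price@v1`:

```typescript
import { generateObject } from '@output.ai/llm';
import { z } from '@output.ai/core';

// 'extract_price@v1' is a hypothetical prompt name used only for illustration.
const schema = z.object({ price: z.number(), currency: z.string() });

// Inferred as GenerateObjectResult<{ price: number; currency: string }>,
// so 'object' and the 'result' alias are both fully typed, and pass-through
// options such as 'seed' type-check against the AI SDK's CallSettings.
const response = await generateObject({
  prompt: 'extract_price@v1',
  schema,
  seed: 42
});

const total: number = response.result.price;
console.log(total, response.result.currency);
```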
package/src/index.js CHANGED
@@ -1,3 +1,4 @@
  export { generateText, generateArray, generateObject, generateEnum } from './ai_sdk.js';
  export { loadPrompt } from './prompt_loader.js';
+ export { tool } from 'ai';
  export * as ai from 'ai';
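
With the new top-level re-export, `tool` can be imported directly, while the rest of the AI SDK stays reachable through the `ai` namespace. A short sketch, following the `tool({ description, parameters, execute })` shape used in the README above:

```typescript
import { tool, ai } from '@output.ai/llm';
import { z } from '@output.ai/core';

// 'tool' is re-exported from the 'ai' package as of this release.
const weather = tool({
  description: 'Get the current weather for a city',
  parameters: z.object({ city: z.string() }),
  execute: async ({ city }) => ({ city, tempC: 21 }) // stub for illustration
});

// Anything not re-exported at the top level is still available via the namespace.
console.log(typeof ai.generateText); // 'function'
```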