@output.ai/llm 0.2.6 → 0.2.8

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@output.ai/llm",
-  "version": "0.2.6",
+  "version": "0.2.8",
   "description": "Framework abstraction to interact with LLM models",
   "type": "module",
   "main": "src/index.js",
package/src/ai_sdk.js CHANGED
@@ -5,24 +5,25 @@ import * as AI from 'ai';
 import { validateGenerateTextArgs, validateGenerateObjectArgs, validateGenerateArrayArgs, validateGenerateEnumArgs } from './validations.js';
 import { loadPrompt } from './prompt_loader.js';
 
-/*
-Word of wisdom:
-We could retrieve the result object using the rest operator:
-```js
-const { usage, providerMetadata, ...rest } = response;
-const result = rest[resultProperty];
-```
-But we CAN'T because the response of the generateText is an instance of `DefaultGenerateTextResult`
-and 'text' is a getter (`get text()`).
-Be aware of this when refactoring.
-*/
 const traceWrapper = async ( { traceId, resultProperty, fn } ) => {
   try {
     const response = await fn();
     const { usage, providerMetadata } = response;
     const result = response[resultProperty];
     Tracing.addEventEnd( { id: traceId, details: { result, usage, providerMetadata } } );
-    return result;
+
+    // Use a Proxy to add 'result' as a unified field name without mutating the AI SDK response.
+    // This preserves the original response object (with its getters/prototype) while allowing
+    // developers to use 'result' consistently across all generate* functions.
+    // Note: Don't use spread/rest on response - AI SDK uses getters that won't copy correctly.
+    return new Proxy( response, {
+      get( target, prop, receiver ) {
+        if ( prop === 'result' ) {
+          return target[resultProperty];
+        }
+        return Reflect.get( target, prop, receiver );
+      }
+    } );
   } catch ( error ) {
     Tracing.addEventError( { id: traceId, details: error } );
     throw error;
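The effect for callers: every wrapped response now exposes `result` as an alias of whichever property the wrapper traces (`text` or `object`), while the original AI SDK fields keep working. A minimal sketch of the observable behavior (the prompt name `summarize@v1` is hypothetical):

```ts
import { generateText } from '@output.ai/llm';

const response = await generateText( { prompt: 'summarize@v1' } );

console.log( response.result === response.text ); // true: 'result' is served by the Proxy
console.log( response.usage, response.finishReason ); // AI SDK fields pass through unchanged
```

The Proxy also sidesteps the getter problem called out in the removed comment: `text` is a getter on the response's prototype, so object spread would not copy it, whereas the Proxy delegates every property read to the original object.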
@@ -63,7 +64,7 @@ const extraAiSdkOptionsFromPrompt = prompt => {
  * @param {Record<string, string | number>} [args.variables] - Variables to interpolate
  * @throws {ValidationError} If the prompt config is invalid (e.g., snake_case fields)
  * @throws {FatalError} If the prompt file is not found or template rendering fails
- * @returns {Promise<string>} Generated text
+ * @returns {Promise<GenerateTextResult>} AI SDK response with text and metadata
  */
 export async function generateText( { prompt, variables } ) {
   validateGenerateTextArgs( { prompt, variables } );
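This return-type change is breaking for callers that used the resolved value directly. A hedged before/after sketch (prompt name hypothetical):

```ts
// 0.2.6: the promise resolved to the generated string itself.
// const text = await generateText( { prompt: 'summarize@v1' } );

// 0.2.8: destructure the field you need, or use the unified 'result' alias.
const { text } = await generateText( { prompt: 'summarize@v1' } );
```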
@@ -87,7 +88,7 @@ export async function generateText( { prompt, variables } ) {
  * @param {string} [args.schemaDescription] - Output schema description
  * @throws {ValidationError} If the prompt config is invalid (e.g., snake_case fields)
  * @throws {FatalError} If the prompt file is not found or template rendering fails
- * @returns {Promise<object>} Object matching the provided schema
+ * @returns {Promise<GenerateObjectResult>} AI SDK response with object and metadata
  */
 export async function generateObject( args ) {
   validateGenerateObjectArgs( args );
@@ -118,7 +119,7 @@ export async function generateObject( args ) {
  * @param {string} [args.schemaDescription] - Output schema description
  * @throws {ValidationError} If the prompt config is invalid (e.g., snake_case fields)
  * @throws {FatalError} If the prompt file is not found or template rendering fails
- * @returns {Promise<object>} Array where each element matches the schema
+ * @returns {Promise<GenerateObjectResult>} AI SDK response with array and metadata
  */
 export async function generateArray( args ) {
   validateGenerateArrayArgs( args );
@@ -147,7 +148,7 @@ export async function generateArray( args ) {
  * @param {string[]} args.enum - Allowed values for the generation
  * @throws {ValidationError} If the prompt config is invalid (e.g., snake_case fields)
  * @throws {FatalError} If the prompt file is not found or template rendering fails
- * @returns {Promise<string>} One of the provided enum values
+ * @returns {Promise<GenerateObjectResult>} AI SDK response with enum value and metadata
  */
 export async function generateEnum( args ) {
   validateGenerateEnumArgs( args );
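The structured-output wrappers change the same way: all three now resolve to the full AI SDK result rather than the bare value. A sketch under the same assumptions:

```ts
import { z } from '@output.ai/core';
import { generateObject } from '@output.ai/llm';

const schema = z.object( { title: z.string() } );
const res = await generateObject( { prompt: 'summarize@v1', schema } );

res.object; // the parsed value, typed from the schema
res.result; // unified alias, identical to res.object
res.usage;  // token accounting now travels with the value
```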
@@ -52,8 +52,17 @@ beforeEach( () => {
   loadModelImpl.mockReset().mockReturnValue( 'MODEL' );
   loadPromptImpl.mockReset().mockReturnValue( basePrompt );
 
-  aiFns.generateText.mockReset().mockResolvedValue( { text: 'TEXT' } );
-  aiFns.generateObject.mockReset().mockResolvedValue( { object: 'OBJECT' } );
+  aiFns.generateText.mockReset().mockResolvedValue( {
+    text: 'TEXT',
+    sources: [],
+    usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+    finishReason: 'stop'
+  } );
+  aiFns.generateObject.mockReset().mockResolvedValue( {
+    object: 'OBJECT',
+    usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+    finishReason: 'stop'
+  } );
 
   validators.validateGenerateTextArgs.mockClear();
   validators.validateGenerateObjectArgs.mockClear();
@@ -83,12 +92,19 @@ describe( 'ai_sdk', () => {
       temperature: 0.3,
       providerOptions: basePrompt.config.providerOptions
     } );
-    expect( result ).toBe( 'TEXT' );
+    expect( result.text ).toBe( 'TEXT' );
+    expect( result.sources ).toEqual( [] );
+    expect( result.usage ).toEqual( { inputTokens: 10, outputTokens: 5, totalTokens: 15 } );
+    expect( result.finishReason ).toBe( 'stop' );
   } );
 
   it( 'generateObject: validates, traces, calls AI with output object and returns object', async () => {
     const { generateObject } = await importSut();
-    aiFns.generateObject.mockResolvedValueOnce( { object: { a: 1 } } );
+    aiFns.generateObject.mockResolvedValueOnce( {
+      object: { a: 1 },
+      usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+      finishReason: 'stop'
+    } );
 
     const schema = z.object( { a: z.number() } );
     const result = await generateObject( {
@@ -113,12 +129,16 @@ describe( 'ai_sdk', () => {
       temperature: 0.3,
       providerOptions: basePrompt.config.providerOptions
     } );
-    expect( result ).toEqual( { a: 1 } );
+    expect( result.object ).toEqual( { a: 1 } );
   } );
 
   it( 'generateArray: validates, traces, calls AI (item schema) and returns array', async () => {
     const { generateArray } = await importSut();
-    aiFns.generateObject.mockResolvedValueOnce( { object: [ 1, 2 ] } );
+    aiFns.generateObject.mockResolvedValueOnce( {
+      object: [ 1, 2 ],
+      usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+      finishReason: 'stop'
+    } );
 
     const schema = z.number();
     const result = await generateArray( {
@@ -143,12 +163,16 @@ describe( 'ai_sdk', () => {
       temperature: 0.3,
       providerOptions: basePrompt.config.providerOptions
     } );
-    expect( result ).toEqual( [ 1, 2 ] );
+    expect( result.object ).toEqual( [ 1, 2 ] );
   } );
 
   it( 'generateEnum: validates, traces, calls AI with output enum and returns value', async () => {
     const { generateEnum } = await importSut();
-    aiFns.generateObject.mockResolvedValueOnce( { object: 'B' } );
+    aiFns.generateObject.mockResolvedValueOnce( {
+      object: 'B',
+      usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+      finishReason: 'stop'
+    } );
 
     const result = await generateEnum( { prompt: 'test_prompt@v1', enum: [ 'A', 'B', 'C' ] } );
 
@@ -165,6 +189,309 @@ describe( 'ai_sdk', () => {
       temperature: 0.3,
       providerOptions: basePrompt.config.providerOptions
     } );
-    expect( result ).toBe( 'B' );
+    expect( result.object ).toBe( 'B' );
+  } );
+
+  it( 'generateText: passes provider-specific options to AI SDK', async () => {
+    const promptWithProviderOptions = {
+      config: {
+        provider: 'anthropic',
+        model: 'claude-sonnet-4-20250514',
+        providerOptions: {
+          thinking: {
+            type: 'enabled',
+            budgetTokens: 5000
+          },
+          anthropic: {
+            effort: 'medium',
+            customOption: 'value'
+          },
+          customField: 'should-be-passed'
+        }
+      },
+      messages: [ { role: 'user', content: 'Test' } ]
+    };
+    loadPromptImpl.mockReturnValueOnce( promptWithProviderOptions );
+
+    const { generateText } = await importSut();
+    await generateText( { prompt: 'test_prompt@v1' } );
+
+    expect( aiFns.generateText ).toHaveBeenCalledWith( {
+      model: 'MODEL',
+      messages: promptWithProviderOptions.messages,
+      providerOptions: {
+        thinking: {
+          type: 'enabled',
+          budgetTokens: 5000
+        },
+        anthropic: {
+          effort: 'medium',
+          customOption: 'value'
+        },
+        customField: 'should-be-passed'
+      }
+    } );
+  } );
+
+  it( 'generateObject: passes provider-specific options to AI SDK', async () => {
+    const promptWithOpenAIOptions = {
+      config: {
+        provider: 'openai',
+        model: 'o3-mini',
+        temperature: 0.8,
+        providerOptions: {
+          openai: {
+            reasoningEffort: 'high',
+            reasoningSummary: 'detailed'
+          }
+        }
+      },
+      messages: [ { role: 'user', content: 'Generate object' } ]
+    };
+    loadPromptImpl.mockReturnValueOnce( promptWithOpenAIOptions );
+
+    const { generateObject } = await importSut();
+    const schema = z.object( { result: z.string() } );
+    await generateObject( { prompt: 'test_prompt@v1', schema } );
+
+    expect( aiFns.generateObject ).toHaveBeenCalledWith( {
+      output: 'object',
+      schema,
+      schemaName: undefined,
+      schemaDescription: undefined,
+      model: 'MODEL',
+      messages: promptWithOpenAIOptions.messages,
+      temperature: 0.8,
+      providerOptions: {
+        openai: {
+          reasoningEffort: 'high',
+          reasoningSummary: 'detailed'
+        }
+      }
+    } );
+  } );
+
+  it( 'generateArray: passes azure-specific options to AI SDK', async () => {
+    const promptWithAzureOptions = {
+      config: {
+        provider: 'azure',
+        model: 'gpt-4',
+        maxTokens: 2000,
+        providerOptions: {
+          azure: {
+            deploymentName: 'my-deployment',
+            apiVersion: '2023-12-01-preview'
+          }
+        }
+      },
+      messages: [ { role: 'user', content: 'Generate array' } ]
+    };
+    loadPromptImpl.mockReturnValueOnce( promptWithAzureOptions );
+
+    const { generateArray } = await importSut();
+    const schema = z.string();
+    await generateArray( { prompt: 'test_prompt@v1', schema } );
+
+    expect( aiFns.generateObject ).toHaveBeenCalledWith( {
+      output: 'array',
+      schema,
+      schemaName: undefined,
+      schemaDescription: undefined,
+      model: 'MODEL',
+      messages: promptWithAzureOptions.messages,
+      maxOutputTokens: 2000,
+      providerOptions: {
+        azure: {
+          deploymentName: 'my-deployment',
+          apiVersion: '2023-12-01-preview'
+        }
+      }
+    } );
+  } );
+
+  it( 'generateEnum: passes mixed provider options to AI SDK', async () => {
+    const promptWithMixedOptions = {
+      config: {
+        provider: 'anthropic',
+        model: 'claude-3-opus-20240229',
+        providerOptions: {
+          thinking: {
+            type: 'enabled',
+            budgetTokens: 3000
+          },
+          anthropic: {
+            effort: 'high'
+          },
+          customField: { nested: 'value' }
+        }
+      },
+      messages: [ { role: 'user', content: 'Choose option' } ]
+    };
+    loadPromptImpl.mockReturnValueOnce( promptWithMixedOptions );
+
+    const { generateEnum } = await importSut();
+    await generateEnum( { prompt: 'test_prompt@v1', enum: [ 'A', 'B', 'C' ] } );
+
+    expect( aiFns.generateObject ).toHaveBeenCalledWith( {
+      output: 'enum',
+      enum: [ 'A', 'B', 'C' ],
+      model: 'MODEL',
+      messages: promptWithMixedOptions.messages,
+      providerOptions: {
+        thinking: {
+          type: 'enabled',
+          budgetTokens: 3000
+        },
+        anthropic: {
+          effort: 'high'
+        },
+        customField: { nested: 'value' }
+      }
+    } );
+  } );
+
+  it( 'generateText: passes through providerMetadata', async () => {
+    aiFns.generateText.mockResolvedValueOnce( {
+      text: 'TEXT',
+      sources: [],
+      usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+      finishReason: 'stop',
+      providerMetadata: { anthropic: { cacheReadInputTokens: 50 } }
+    } );
+
+    const { generateText } = await importSut();
+    const result = await generateText( { prompt: 'test_prompt@v1' } );
+
+    expect( result.providerMetadata ).toEqual( { anthropic: { cacheReadInputTokens: 50 } } );
+  } );
+
+  it( 'generateText: passes through warnings and response metadata', async () => {
+    aiFns.generateText.mockResolvedValueOnce( {
+      text: 'TEXT',
+      sources: [],
+      usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+      finishReason: 'stop',
+      warnings: [ { type: 'other', message: 'Test warning' } ],
+      response: { id: 'req_123', modelId: 'gpt-4o-2024-05-13' }
+    } );
+
+    const { generateText } = await importSut();
+    const result = await generateText( { prompt: 'test_prompt@v1' } );
+
+    expect( result.warnings ).toEqual( [ { type: 'other', message: 'Test warning' } ] );
+    expect( result.response ).toEqual( { id: 'req_123', modelId: 'gpt-4o-2024-05-13' } );
+  } );
+
+  it( 'generateText: includes unified result field that matches text', async () => {
+    const { generateText } = await importSut();
+    const response = await generateText( { prompt: 'test_prompt@v1' } );
+
+    expect( response.result ).toBe( 'TEXT' );
+    expect( response.result ).toBe( response.text );
+  } );
+
+  it( 'generateObject: includes unified result field that matches object', async () => {
+    const { generateObject } = await importSut();
+    aiFns.generateObject.mockResolvedValueOnce( {
+      object: { a: 1, b: 'test' },
+      usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+      finishReason: 'stop'
+    } );
+
+    const schema = z.object( { a: z.number(), b: z.string() } );
+    const response = await generateObject( { prompt: 'test_prompt@v1', schema } );
+
+    expect( response.result ).toEqual( { a: 1, b: 'test' } );
+    expect( response.result ).toEqual( response.object );
+  } );
+
+  it( 'generateArray: includes unified result field that matches object', async () => {
+    const { generateArray } = await importSut();
+    aiFns.generateObject.mockResolvedValueOnce( {
+      object: [ 'item1', 'item2', 'item3' ],
+      usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+      finishReason: 'stop'
+    } );
+
+    const schema = z.string();
+    const response = await generateArray( { prompt: 'test_prompt@v1', schema } );
+
+    expect( response.result ).toEqual( [ 'item1', 'item2', 'item3' ] );
+    expect( response.result ).toEqual( response.object );
+  } );
+
+  it( 'generateEnum: includes unified result field that matches object', async () => {
+    const { generateEnum } = await importSut();
+    aiFns.generateObject.mockResolvedValueOnce( {
+      object: 'yes',
+      usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+      finishReason: 'stop'
+    } );
+
+    const response = await generateEnum( { prompt: 'test_prompt@v1', enum: [ 'yes', 'no' ] } );
+
+    expect( response.result ).toBe( 'yes' );
+    expect( response.result ).toBe( response.object );
+  } );
+
+  it( 'generateText: traces error and rethrows when AI SDK fails', async () => {
+    const error = new Error( 'API rate limit exceeded' );
+    aiFns.generateText.mockRejectedValueOnce( error );
+    const { generateText } = await importSut();
+
+    await expect( generateText( { prompt: 'test_prompt@v1' } ) ).rejects.toThrow( 'API rate limit exceeded' );
+    expect( tracingSpies.addEventError ).toHaveBeenCalledWith(
+      expect.objectContaining( { details: error } )
+    );
+  } );
+
+  it( 'generateObject: traces error and rethrows when AI SDK fails', async () => {
+    const error = new Error( 'Invalid schema' );
+    aiFns.generateObject.mockRejectedValueOnce( error );
+    const { generateObject } = await importSut();
+
+    const schema = z.object( { a: z.number() } );
+    await expect( generateObject( { prompt: 'test_prompt@v1', schema } ) ).rejects.toThrow( 'Invalid schema' );
+    expect( tracingSpies.addEventError ).toHaveBeenCalledWith(
+      expect.objectContaining( { details: error } )
+    );
+  } );
+
+  it( 'generateText: Proxy correctly handles AI SDK response with getter', async () => {
+    const responseWithGetter = {
+      _internalText: 'TEXT_FROM_GETTER',
+      get text() {
+        return this._internalText;
+      },
+      sources: [],
+      usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+      finishReason: 'stop'
+    };
+    aiFns.generateText.mockResolvedValueOnce( responseWithGetter );
+
+    const { generateText } = await importSut();
+    const response = await generateText( { prompt: 'test_prompt@v1' } );
+
+    expect( response.text ).toBe( 'TEXT_FROM_GETTER' );
+    expect( response.result ).toBe( 'TEXT_FROM_GETTER' );
+  } );
+
+  it( 'generateObject: Proxy correctly handles AI SDK response with getter', async () => {
+    const responseWithGetter = {
+      _internalObject: { value: 42 },
+      get object() {
+        return this._internalObject;
+      },
+      usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
+      finishReason: 'stop'
+    };
+    aiFns.generateObject.mockResolvedValueOnce( responseWithGetter );
+
+    const { generateObject } = await importSut();
+    const schema = z.object( { value: z.number() } );
+    const response = await generateObject( { prompt: 'test_prompt@v1', schema } );
+
+    expect( response.object ).toEqual( { value: 42 } );
+    expect( response.result ).toEqual( { value: 42 } );
   } );
 } );
package/src/index.d.ts CHANGED
@@ -1,23 +1,32 @@
 import type { z } from '@output.ai/core';
+import type {
+  GenerateTextResult as AIGenerateTextResult,
+  GenerateObjectResult as AIGenerateObjectResult
+} from 'ai';
 
 /**
- * Represents a single message in a prompt conversation
+ * Represents a single message in a prompt conversation.
+ *
  * @example
+ * ```ts
  * const msg: PromptMessage = {
  *   role: 'user',
  *   content: 'Hello, Claude!'
  * };
+ * ```
  */
 export type PromptMessage = {
-  /** The role of the message. Examples: 'system', 'user', 'assistant' */
+  /** The role of the message. Examples include 'system', 'user', and 'assistant'. */
   role: string;
   /** The content of the message */
   content: string;
 };
 
 /**
- * Configuration for LLM prompt generation
+ * Configuration for LLM prompt generation.
+ *
  * @example
+ * ```ts
  * const prompt: Prompt = {
  *   name: 'summarizePrompt',
  *   config: {
@@ -28,14 +37,15 @@ export type PromptMessage = {
  *   },
  *   messages: [...]
  * };
+ * ```
  */
 export type Prompt = {
   /** Name of the prompt file */
   name: string;
 
-  /** General configurations for the LLM */
+  /** General configuration for the LLM */
   config: {
-    /** LLM Provider */
+    /** LLM provider */
     provider: 'anthropic' | 'openai' | 'azure' | 'vertex';
 
     /** Model name/identifier */
@@ -44,10 +54,10 @@ export type Prompt = {
     /** Generation temperature (0-2). Lower = more deterministic */
     temperature?: number;
 
-    /** Maximum tokens in the response */
+    /** Maximum number of tokens in the response */
     maxTokens?: number;
 
-    /** Provider-specific configurations */
+    /** Provider-specific options */
     providerOptions?: Record<string, unknown>;
   };
 
@@ -55,12 +65,42 @@ export type Prompt = {
   messages: PromptMessage[];
 };
 
+// Re-export AI SDK types directly (auto-synced with AI SDK updates)
+export type {
+  LanguageModelUsage,
+  FinishReason,
+  LanguageModelResponseMetadata,
+  ProviderMetadata,
+  CallWarning
+} from 'ai';
+
+/**
+ * Result from generateText including full AI SDK response metadata.
+ * Extends AI SDK's GenerateTextResult with a unified `result` field.
+ */
+export type GenerateTextResult =
+  AIGenerateTextResult<Record<string, never>, unknown> & {
+    /** Unified field name alias for 'text' - provides consistency across all generate* functions */
+    result: string;
+  };
+
+/**
+ * Result from generateObject/generateArray/generateEnum including full AI SDK response metadata.
+ * Extends AI SDK's GenerateObjectResult with a unified `result` field.
+ * @typeParam T - The type of the generated object, inferred from the schema parameter
+ */
+export type GenerateObjectResult<T> =
+  AIGenerateObjectResult<T> & {
+    /** Unified field name alias for 'object' - provides consistency across all generate* functions */
+    result: T;
+  };
+
 /**
- * Load a prompt file and render it with variables.
+ * Loads a prompt file and interpolates variables into its content.
  *
- * @param {string} name - Name of the prompt file (without .prompt extension)
- * @param {Record<string, string | number | boolean>} [variables] - Variables to interpolate
- * @returns {Prompt} Loaded and rendered prompt object
+ * @param name - Name of the prompt file (without `.prompt` extension).
+ * @param variables - Variables to interpolate.
+ * @returns The loaded prompt object.
  */
 export function loadPrompt(
   name: string,
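A hedged sketch of how these intersection types flow through at a call site (prompt name hypothetical):

```ts
import { z } from '@output.ai/core';
import { generateObject, type GenerateObjectResult } from '@output.ai/llm';

const schema = z.object( { summary: z.string() } );

// res: GenerateObjectResult<{ summary: string }> - AI SDK fields plus the 'result' alias.
const res = await generateObject( { prompt: 'summarize@v1', schema } );
const summary: string = res.result.summary; // same value as res.object.summary
```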
@@ -71,33 +111,33 @@ export function loadPrompt(
  * Use an LLM model to generate text.
  *
  * This function is a wrapper over the AI SDK's `generateText`.
- * The prompt file sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
+ * The prompt file sets `model`, `messages`, `temperature`, `maxTokens`, and `providerOptions`.
  *
- * @param {object} args - Generation arguments
- * @param {string} args.prompt - Prompt file name
- * @param {Record<string, string | number | boolean>} args.variables - Variables to interpolate
- * @returns {Promise<string>} Generated text
+ * @param args - Generation arguments.
+ * @param args.prompt - Prompt file name.
+ * @param args.variables - Variables to interpolate.
+ * @returns AI SDK response with text and metadata.
  */
 export function generateText(
   args: {
     prompt: string,
     variables?: Record<string, string | number | boolean>
   }
-): Promise<string>;
+): Promise<GenerateTextResult>;
 
 /**
  * Use an LLM model to generate an object with a fixed schema.
  *
  * This function is a wrapper over the AI SDK's `generateObject`.
- * The prompt file sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
+ * The prompt file sets `model`, `messages`, `temperature`, `maxTokens`, and `providerOptions`.
  *
- * @param {object} args - Generation arguments
- * @param {string} args.prompt - Prompt file name
- * @param {Record<string, string | number | boolean>} args.variables - Variables to interpolate
- * @param {z.ZodObject} args.schema - Output schema
- * @param {string} [args.schemaName] - Output schema name
- * @param {string} [args.schemaDescription] - Output schema description
- * @returns {Promise<object>} Object matching the provided schema
+ * @param args - Generation arguments.
+ * @param args.prompt - Prompt file name.
+ * @param args.variables - Variables to interpolate.
+ * @param args.schema - Output schema.
+ * @param args.schemaName - Output schema name.
+ * @param args.schemaDescription - Output schema description.
+ * @returns AI SDK response with object and metadata.
  */
 export function generateObject<TSchema extends z.ZodObject>(
   args: {
@@ -107,21 +147,21 @@ export function generateObject<TSchema extends z.ZodObject>(
     schemaName?: string,
     schemaDescription?: string
   }
-): Promise<z.infer<TSchema>>;
+): Promise<GenerateObjectResult<z.infer<TSchema>>>;
 
 /**
  * Use an LLM model to generate an array of values with a fixed schema.
  *
  * This function is a wrapper over the AI SDK's `generateObject` with `output: 'array'`.
- * The prompt file sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
+ * The prompt file sets `model`, `messages`, `temperature`, `maxTokens`, and `providerOptions`.
  *
- * @param {object} args - Generation arguments
- * @param {string} args.prompt - Prompt file name
- * @param {Record<string, string | number | boolean>} args.variables - Variables to interpolate
- * @param {z.ZodType} args.schema - Output schema (array item)
- * @param {string} [args.schemaName] - Output schema name
- * @param {string} [args.schemaDescription] - Output schema description
- * @returns {Promise<object>} Array where each element matches the schema
+ * @param args - Generation arguments.
+ * @param args.prompt - Prompt file name.
+ * @param args.variables - Variables to interpolate.
+ * @param args.schema - Output schema (array item).
+ * @param args.schemaName - Output schema name.
+ * @param args.schemaDescription - Output schema description.
+ * @returns AI SDK response with array and metadata.
  */
 export function generateArray<TSchema extends z.ZodType>(
   args: {
@@ -131,19 +171,19 @@ export function generateArray<TSchema extends z.ZodType>(
     schemaName?: string,
     schemaDescription?: string
   }
-): Promise<Array<z.infer<TSchema>>>;
+): Promise<GenerateObjectResult<Array<z.infer<TSchema>>>>;
 
 /**
  * Use an LLM model to generate a result from an enum (array of string values).
  *
  * This function is a wrapper over the AI SDK's `generateObject` with `output: 'enum'`.
- * The prompt file sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
+ * The prompt file sets `model`, `messages`, `temperature`, `maxTokens`, and `providerOptions`.
  *
- * @param {object} args - Generation arguments
- * @param {string} args.prompt - Prompt file name
- * @param {Record<string, string | number | boolean>} args.variables - Variables to interpolate
- * @param {string[]} args.enum - Allowed values for the generation
- * @returns {Promise<string>} One of the provided enum values
+ * @param args - Generation arguments.
+ * @param args.prompt - Prompt file name.
+ * @param args.variables - Variables to interpolate.
+ * @param args.enum - Allowed values for the generation.
+ * @returns AI SDK response with enum value and metadata.
  */
 export function generateEnum<const TEnum extends readonly [string, ...string[]]>(
   args: {
@@ -151,4 +191,4 @@ export function generateEnum<const TEnum extends readonly [string, ...string[]]>
     variables?: Record<string, string | number | boolean>,
     enum: TEnum
   }
-): Promise<TEnum[number]>;
+): Promise<GenerateObjectResult<TEnum[number]>>;
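With the `const` type parameter, the enum result narrows to the literal union of the values passed in; a small sketch (prompt name hypothetical):

```ts
import { generateEnum } from '@output.ai/llm';

const res = await generateEnum( { prompt: 'triage@v1', enum: [ 'low', 'high' ] } );
const level: 'low' | 'high' = res.result; // narrowed to the provided literals
```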
@@ -11,8 +11,13 @@ export const promptSchema = z.object( {
       thinking: z.object( {
         type: z.literal( 'enabled' ),
         budgetTokens: z.number()
-      } ).strict().optional()
-    } ).strict().optional()
+      } ).optional(),
+      anthropic: z.record( z.string(), z.unknown() ).optional(),
+      openai: z.record( z.string(), z.unknown() ).optional(),
+      azure: z.record( z.string(), z.unknown() ).optional(),
+      vertex: z.record( z.string(), z.unknown() ).optional(),
+      google: z.record( z.string(), z.unknown() ).optional()
+    } ).passthrough().optional()
   } ).strict(),
   messages: z.array(
     z.object( {
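The move from `.strict()` to `.passthrough()` is what loosens validation here: provider-scoped blocks accept arbitrary keys, and unknown top-level fields are carried through instead of rejected. A sketch of a `providerOptions` value that 0.2.6 would have refused but 0.2.8 accepts (field values illustrative):

```ts
const providerOptions = {
  thinking: { type: 'enabled', budgetTokens: 5000 }, // still shape-checked by the schema
  anthropic: { effort: 'medium' },                   // provider-scoped: any keys allowed
  customField: 'passed through'                      // unknown key: accepted via passthrough()
};
```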
@@ -35,6 +40,19 @@ const getHintForError = errorMessage => {
   return '';
 };
 
+// Known providerOptions fields. Note: these don't map 1:1 to providers.
+// - 'google' is used by Vertex provider for Gemini language models
+// - 'vertex' is used by Vertex provider for Imagen image models
+// - 'anthropic' is used by both Anthropic provider and Vertex Anthropic
+const knownProviderOptionsFields = new Set( [
+  'thinking',
+  'anthropic',
+  'openai',
+  'azure',
+  'vertex',
+  'google'
+] );
+
 export function validatePrompt( prompt ) {
   const result = promptSchema.safeParse( prompt );
   if ( !result.success ) {
@@ -47,4 +65,12 @@ export function validatePrompt( prompt ) {
       { cause: result.error }
     );
   }
+
+  const providerOptions = prompt?.config?.providerOptions;
+  if ( providerOptions ) {
+    const unknownFields = Object.keys( providerOptions ).filter( k => !knownProviderOptionsFields.has( k ) );
+    if ( unknownFields.length > 0 ) {
+      console.warn( `Prompt "${prompt.name}": Unrecognized providerOptions fields: ${unknownFields.join( ', ' )}` );
+    }
+  }
 }
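So unrecognized keys still validate, but are flagged at load time. A sketch of the observable behavior when calling the internal validator directly (prompt contents illustrative):

```ts
validatePrompt( {
  name: 'my-prompt',
  config: {
    provider: 'openai',
    model: 'gpt-4o',
    providerOptions: { openai: {}, typoedField: true }
  },
  messages: [ { role: 'user', content: 'Hi' } ]
} );
// Logs: Prompt "my-prompt": Unrecognized providerOptions fields: typoedField
```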
@@ -65,6 +65,115 @@ describe( 'validatePrompt', () => {
     expect( () => validatePrompt( promptWithThinking ) ).not.toThrow();
   } );
 
+  it( 'should validate a prompt with anthropic-specific providerOptions', () => {
+    const promptWithAnthropicOptions = {
+      name: 'anthropic-options-prompt',
+      config: {
+        provider: 'anthropic',
+        model: 'claude-sonnet-4-20250514',
+        providerOptions: {
+          thinking: {
+            type: 'enabled',
+            budgetTokens: 5000
+          },
+          anthropic: {
+            effort: 'medium',
+            customOption: 'value'
+          }
+        }
+      },
+      messages: [
+        {
+          role: 'user',
+          content: 'Solve this problem.'
+        }
+      ]
+    };
+
+    expect( () => validatePrompt( promptWithAnthropicOptions ) ).not.toThrow();
+  } );
+
+  it( 'should validate a prompt with openai-specific providerOptions', () => {
+    const promptWithOpenAIOptions = {
+      name: 'openai-options-prompt',
+      config: {
+        provider: 'openai',
+        model: 'o3-mini',
+        providerOptions: {
+          openai: {
+            reasoningEffort: 'high',
+            reasoningSummary: 'detailed',
+            customParameter: 'test'
+          }
+        }
+      },
+      messages: [
+        {
+          role: 'user',
+          content: 'Analyze this data.'
+        }
+      ]
+    };
+
+    expect( () => validatePrompt( promptWithOpenAIOptions ) ).not.toThrow();
+  } );
+
+  it( 'should validate a prompt with azure-specific providerOptions', () => {
+    const promptWithAzureOptions = {
+      name: 'azure-options-prompt',
+      config: {
+        provider: 'azure',
+        model: 'gpt-4',
+        providerOptions: {
+          azure: {
+            deploymentName: 'my-deployment',
+            customConfig: { key: 'value' }
+          }
+        }
+      },
+      messages: [
+        {
+          role: 'user',
+          content: 'Process this request.'
+        }
+      ]
+    };
+
+    expect( () => validatePrompt( promptWithAzureOptions ) ).not.toThrow();
+  } );
+
+  it( 'should validate a prompt with mixed providerOptions including unknown fields', () => {
+    const promptWithMixedOptions = {
+      name: 'mixed-options-prompt',
+      config: {
+        provider: 'anthropic',
+        model: 'claude-3-opus-20240229',
+        providerOptions: {
+          thinking: {
+            type: 'enabled',
+            budgetTokens: 3000
+          },
+          anthropic: {
+            effort: 'high'
+          },
+          customProviderField: 'should-be-allowed',
+          anotherCustomField: {
+            nested: 'value',
+            array: [ 1, 2, 3 ]
+          }
+        }
+      },
+      messages: [
+        {
+          role: 'user',
+          content: 'Complex request with multiple options.'
+        }
+      ]
+    };
+
+    expect( () => validatePrompt( promptWithMixedOptions ) ).not.toThrow();
+  } );
+
   it( 'should throw ValidationError when provider is invalid', () => {
     const invalidPrompt = {
       name: 'invalid-provider',