@output.ai/llm 0.2.5 → 0.2.7

package/README.md CHANGED
@@ -1,99 +1,57 @@
- # LLM Module
+ # @output.ai/llm
 
- Framework abstraction to interact with LLM models, including prompt management and structured generation.
+ Unified LLM generation API with built-in prompt templating for Output Framework workflows.
 
- ## Quick Start
+ [![npm version](https://img.shields.io/npm/v/@output.ai/llm)](https://www.npmjs.com/package/@output.ai/llm)
+ [![Documentation](https://img.shields.io/badge/docs-docs.output.ai-blue)](https://docs.output.ai/packages/llm)
 
- ```js
- import { generateText } from '@output.ai/llm';
+ ## Installation
 
- const response = await generateText({
-   prompt: 'my_prompt@v1',
-   variables: { topic: 'AI workflows' }
- });
+ ```bash
+ npm install @output.ai/llm
  ```
 
- ## Features
-
- - **Unified API**: Single import for prompt loading and LLM generation
- - **Multiple Generation Types**: Text, objects, arrays, and enums
- - **Prompt Management**: Load and render `.prompt` files with variable interpolation
- - **Multi-Provider Support**: Anthropic, OpenAI, and Azure
- - **Type Safety**: Full TypeScript support with Zod schemas
-
- ## Generate Text
-
- Generate unstructured text from an LLM:
+ ## Quick Start
 
- ```js
+ ```typescript
  import { generateText } from '@output.ai/llm';
 
- const response = await generateText({
-   prompt: 'explain_topic@v1',
-   variables: { topic: 'machine learning' }
+ const result = await generateText({
+   prompt: 'summarize@v1',
+   variables: { text: 'Your content here' }
  });
  ```
 
- ## Generate Object
+ ## Generation Functions
+
+ | Function | Description |
+ |----------|-------------|
+ | `generateText` | Generate unstructured text |
+ | `generateObject` | Generate a structured object matching a Zod schema |
+ | `generateArray` | Generate an array of structured items |
+ | `generateEnum` | Generate a value from allowed options |
 
- Generate a structured object matching a Zod schema:
+ ### Example: Structured Output
 
- ```js
+ ```typescript
  import { generateObject } from '@output.ai/llm';
  import { z } from '@output.ai/core';
 
- const recipeSchema = z.object({
-   title: z.string(),
-   ingredients: z.array(z.string()),
-   steps: z.array(z.string())
- });
-
  const recipe = await generateObject({
    prompt: 'recipe@v1',
    variables: { dish: 'lasagna' },
-   schema: recipeSchema
- });
- ```
-
- ## Generate Array
-
- Generate an array of structured items:
-
- ```js
- import { generateArray } from '@output.ai/llm';
- import { z } from '@output.ai/core';
-
- const taskSchema = z.object({
-   title: z.string(),
-   priority: z.number()
- });
-
- const tasks = await generateArray({
-   prompt: 'task_list@v1',
-   variables: { project: 'website' },
-   schema: taskSchema
- });
- ```
-
- ## Generate Enum
-
- Generate a value from a list of allowed options:
-
- ```js
- import { generateEnum } from '@output.ai/llm';
-
- const category = await generateEnum({
-   prompt: 'categorize@v1',
-   variables: { text: 'Product announcement' },
-   enum: ['marketing', 'engineering', 'sales', 'support']
+   schema: z.object({
+     title: z.string(),
+     ingredients: z.array(z.string()),
+     steps: z.array(z.string())
+   })
  });
  ```
 
  ## Prompt Files
 
- Prompt files use YAML frontmatter for configuration and support LiquidJS templating:
+ Prompt files use YAML frontmatter for configuration and LiquidJS for templating:
 
- **File: `explain_topic@v1.prompt`**
  ```yaml
  ---
  provider: anthropic
@@ -102,27 +60,23 @@ temperature: 0.7
  ---
 
  <system>
- You are a concise technical explainer.
+ You are a helpful assistant.
  </system>
 
  <user>
- Explain {{ topic }} in 3 bullet points.
+ {{ user_message }}
  </user>
  ```
 
- ## Configuration Options
+ ### Supported Providers
 
- Prompt files support these configuration fields:
+ - **Anthropic** - Requires `ANTHROPIC_API_KEY`
+ - **OpenAI** - Requires `OPENAI_API_KEY`
+ - **Azure OpenAI** - Requires Azure-specific environment variables
 
- ```yaml
- ---
- provider: anthropic | openai | azure
- model: model-name
- temperature: 0.0-1.0 (optional)
- maxTokens: number (optional)
- providerOptions: (optional)
-   thinking:
-     type: enabled
-     budgetTokens: number
- ---
- ```
+ ## Documentation
+
+ For comprehensive documentation, visit:
+
+ - [Package Reference](https://docs.output.ai/packages/llm)
+ - [Getting Started](https://docs.output.ai/quickstart)
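
The rewritten README drops the per-function walkthroughs in favor of the table above; the functions themselves remain exported. For reference, the enum path works as in the example removed here (prompt name and labels are illustrative):

```js
// The enum path in brief, per the removed README example.
import { generateEnum } from '@output.ai/llm';

// 'categorize@v1' refers to a .prompt file resolved by name@version.
const category = await generateEnum({
  prompt: 'categorize@v1',
  variables: { text: 'Product announcement' },
  enum: ['marketing', 'engineering', 'sales', 'support']
});
// Resolves to exactly one of the four allowed values.
```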
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@output.ai/llm",
-   "version": "0.2.5",
+   "version": "0.2.7",
    "description": "Framework abstraction to interact with LLM models",
    "type": "module",
    "main": "src/index.js",
@@ -11,11 +11,15 @@
    "dependencies": {
      "@ai-sdk/anthropic": "2.0.28",
      "@ai-sdk/azure": "2.0.53",
+     "@ai-sdk/google-vertex": "3.0.96",
      "@ai-sdk/openai": "2.0.52",
      "@output.ai/core": ">=0.0.1",
      "ai": "5.0.52",
      "gray-matter": "4.0.3",
      "liquidjs": "10.22.0"
    },
-   "license": "UNLICENSED"
+   "license": "Apache-2.0",
+   "publishConfig": {
+     "access": "public"
+   }
  }
package/src/ai_model.js CHANGED
@@ -1,8 +1,9 @@
  import { anthropic } from '@ai-sdk/anthropic';
  import { azure } from '@ai-sdk/azure';
+ import { vertex } from '@ai-sdk/google-vertex';
  import { openai } from '@ai-sdk/openai';
 
- const providers = { azure, anthropic, openai };
+ const providers = { azure, anthropic, openai, vertex };
 
  export function loadModel( prompt ) {
    const config = prompt?.config;
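
The hunk truncates `loadModel` after its first statement, so the rest of the body is not visible in this diff. A minimal sketch of how the provider map is presumably consumed, assuming `loadModel` simply indexes `providers` by the prompt's `provider` field:

```js
// Sketch only — everything past `const config = ...` is an assumption,
// not the shipped code.
export function loadModel( prompt ) {
  const config = prompt?.config;
  const provider = providers[ config?.provider ]; // now includes `vertex`
  if ( !provider ) {
    throw new Error( `Unknown provider: ${ config?.provider }` );
  }
  // AI SDK provider factories take a model id, e.g. vertex( 'gemini-1.5-pro' ).
  return provider( config.model );
}
```

The next hunk, whose `describe( 'ai_sdk', ... )` context indicates the package's test suite, carries no file header in this diff.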
@@ -167,4 +167,162 @@ describe( 'ai_sdk', () => {
      } );
      expect( result ).toBe( 'B' );
    } );
+
+   it( 'generateText: passes provider-specific options to AI SDK', async () => {
+     const promptWithProviderOptions = {
+       config: {
+         provider: 'anthropic',
+         model: 'claude-sonnet-4-20250514',
+         providerOptions: {
+           thinking: {
+             type: 'enabled',
+             budgetTokens: 5000
+           },
+           anthropic: {
+             effort: 'medium',
+             customOption: 'value'
+           },
+           customField: 'should-be-passed'
+         }
+       },
+       messages: [ { role: 'user', content: 'Test' } ]
+     };
+     loadPromptImpl.mockReturnValueOnce( promptWithProviderOptions );
+
+     const { generateText } = await importSut();
+     await generateText( { prompt: 'test_prompt@v1' } );
+
+     expect( aiFns.generateText ).toHaveBeenCalledWith( {
+       model: 'MODEL',
+       messages: promptWithProviderOptions.messages,
+       providerOptions: {
+         thinking: {
+           type: 'enabled',
+           budgetTokens: 5000
+         },
+         anthropic: {
+           effort: 'medium',
+           customOption: 'value'
+         },
+         customField: 'should-be-passed'
+       }
+     } );
+   } );
+
+   it( 'generateObject: passes provider-specific options to AI SDK', async () => {
+     const promptWithOpenAIOptions = {
+       config: {
+         provider: 'openai',
+         model: 'o3-mini',
+         temperature: 0.8,
+         providerOptions: {
+           openai: {
+             reasoningEffort: 'high',
+             reasoningSummary: 'detailed'
+           }
+         }
+       },
+       messages: [ { role: 'user', content: 'Generate object' } ]
+     };
+     loadPromptImpl.mockReturnValueOnce( promptWithOpenAIOptions );
+
+     const { generateObject } = await importSut();
+     const schema = z.object( { result: z.string() } );
+     await generateObject( { prompt: 'test_prompt@v1', schema } );
+
+     expect( aiFns.generateObject ).toHaveBeenCalledWith( {
+       output: 'object',
+       schema,
+       schemaName: undefined,
+       schemaDescription: undefined,
+       model: 'MODEL',
+       messages: promptWithOpenAIOptions.messages,
+       temperature: 0.8,
+       providerOptions: {
+         openai: {
+           reasoningEffort: 'high',
+           reasoningSummary: 'detailed'
+         }
+       }
+     } );
+   } );
+
+   it( 'generateArray: passes azure-specific options to AI SDK', async () => {
+     const promptWithAzureOptions = {
+       config: {
+         provider: 'azure',
+         model: 'gpt-4',
+         maxTokens: 2000,
+         providerOptions: {
+           azure: {
+             deploymentName: 'my-deployment',
+             apiVersion: '2023-12-01-preview'
+           }
+         }
+       },
+       messages: [ { role: 'user', content: 'Generate array' } ]
+     };
+     loadPromptImpl.mockReturnValueOnce( promptWithAzureOptions );
+
+     const { generateArray } = await importSut();
+     const schema = z.string();
+     await generateArray( { prompt: 'test_prompt@v1', schema } );
+
+     expect( aiFns.generateObject ).toHaveBeenCalledWith( {
+       output: 'array',
+       schema,
+       schemaName: undefined,
+       schemaDescription: undefined,
+       model: 'MODEL',
+       messages: promptWithAzureOptions.messages,
+       maxOutputTokens: 2000,
+       providerOptions: {
+         azure: {
+           deploymentName: 'my-deployment',
+           apiVersion: '2023-12-01-preview'
+         }
+       }
+     } );
+   } );
+
+   it( 'generateEnum: passes mixed provider options to AI SDK', async () => {
+     const promptWithMixedOptions = {
+       config: {
+         provider: 'anthropic',
+         model: 'claude-3-opus-20240229',
+         providerOptions: {
+           thinking: {
+             type: 'enabled',
+             budgetTokens: 3000
+           },
+           anthropic: {
+             effort: 'high'
+           },
+           customField: { nested: 'value' }
+         }
+       },
+       messages: [ { role: 'user', content: 'Choose option' } ]
+     };
+     loadPromptImpl.mockReturnValueOnce( promptWithMixedOptions );
+
+     const { generateEnum } = await importSut();
+     await generateEnum( { prompt: 'test_prompt@v1', enum: [ 'A', 'B', 'C' ] } );
+
+     expect( aiFns.generateObject ).toHaveBeenCalledWith( {
+       output: 'enum',
+       enum: [ 'A', 'B', 'C' ],
+       model: 'MODEL',
+       messages: promptWithMixedOptions.messages,
+       providerOptions: {
+         thinking: {
+           type: 'enabled',
+           budgetTokens: 3000
+         },
+         anthropic: {
+           effort: 'high'
+         },
+         customField: { nested: 'value' }
+       }
+     } );
+   } );
  } );
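
Read together, the four new tests pin down one contract: `temperature` and `maxTokens` from the prompt config map to the AI SDK's `temperature` and `maxOutputTokens`, and `providerOptions` is forwarded verbatim, unknown keys included. A hedged sketch of the wrapper shape these assertions imply (the real implementation is not part of this diff):

```js
// Sketch inferred from the expected calls above — not the package's actual source.
import { generateText as aiGenerateText } from 'ai';
import { loadPrompt } from '@output.ai/llm'; // public export, per index.d.ts below
import { loadModel } from './ai_model.js';   // internal helper, shown above

async function generateTextSketch( { prompt, variables } ) {
  const loaded = loadPrompt( prompt, variables );
  const { temperature, maxTokens, providerOptions } = loaded.config;
  const result = await aiGenerateText( {
    model: loadModel( loaded ),
    messages: loaded.messages,
    // Optional fields appear only when set, matching the test expectations:
    ...( temperature !== undefined && { temperature } ),
    ...( maxTokens !== undefined && { maxOutputTokens: maxTokens } ), // AI SDK v5 key
    ...( providerOptions && { providerOptions } ) // forwarded untouched
  } );
  return result.text;
}
```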
package/src/index.d.ts CHANGED
@@ -1,23 +1,28 @@
  import type { z } from '@output.ai/core';
 
  /**
-  * Represents a single message in a prompt conversation
+  * Represents a single message in a prompt conversation.
+  *
   * @example
+  * ```ts
   * const msg: PromptMessage = {
   *   role: 'user',
   *   content: 'Hello, Claude!'
   * };
+  * ```
   */
  export type PromptMessage = {
-   /** The role of the message. Examples: 'system', 'user', 'assistant' */
+   /** The role of the message. Examples include 'system', 'user', and 'assistant'. */
    role: string;
    /** The content of the message */
    content: string;
  };
 
  /**
-  * Configuration for LLM prompt generation
+  * Configuration for LLM prompt generation.
+  *
   * @example
+  * ```ts
   * const prompt: Prompt = {
   *   name: 'summarizePrompt',
   *   config: {
@@ -28,15 +33,16 @@ export type PromptMessage = {
   *   },
   *   messages: [...]
   * };
+  * ```
   */
  export type Prompt = {
    /** Name of the prompt file */
    name: string;
 
-   /** General configurations for the LLM */
+   /** General configuration for the LLM */
    config: {
-     /** LLM Provider */
-     provider: 'anthropic' | 'openai' | 'azure';
+     /** LLM provider */
+     provider: 'anthropic' | 'openai' | 'azure' | 'vertex';
 
      /** Model name/identifier */
      model: string;
@@ -44,10 +50,10 @@ export type Prompt = {
      /** Generation temperature (0-2). Lower = more deterministic */
      temperature?: number;
 
-     /** Maximum tokens in the response */
+     /** Maximum number of tokens in the response */
      maxTokens?: number;
 
-     /** Provider-specific configurations */
+     /** Provider-specific options */
      providerOptions?: Record<string, unknown>;
    };
 
@@ -56,11 +62,11 @@ export type Prompt = {
  };
 
  /**
-  * Load a prompt file and render it with variables.
+  * Loads a prompt file and interpolates variables into its content.
   *
-  * @param {string} name - Name of the prompt file (without .prompt extension)
-  * @param {Record<string, string | number | boolean>} [variables] - Variables to interpolate
-  * @returns {Prompt} Loaded and rendered prompt object
+  * @param name - Name of the prompt file (without `.prompt` extension).
+  * @param variables - Variables to interpolate.
+  * @returns The loaded prompt object.
   */
  export function loadPrompt(
    name: string,
@@ -71,12 +77,12 @@ export function loadPrompt(
   * Use an LLM model to generate text.
   *
   * This function is a wrapper over the AI SDK's `generateText`.
-  * The prompt file sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
+  * The prompt file sets `model`, `messages`, `temperature`, `maxTokens`, and `providerOptions`.
   *
-  * @param {object} args - Generation arguments
-  * @param {string} args.prompt - Prompt file name
-  * @param {Record<string, string | number | boolean>} args.variables - Variables to interpolate
-  * @returns {Promise<string>} Generated text
+  * @param args - Generation arguments.
+  * @param args.prompt - Prompt file name.
+  * @param args.variables - Variables to interpolate.
+  * @returns Generated text.
   */
  export function generateText(
    args: {
@@ -89,15 +95,15 @@ export function generateText(
   * Use an LLM model to generate an object with a fixed schema.
   *
   * This function is a wrapper over the AI SDK's `generateObject`.
-  * The prompt file sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
+  * The prompt file sets `model`, `messages`, `temperature`, `maxTokens`, and `providerOptions`.
   *
-  * @param {object} args - Generation arguments
-  * @param {string} args.prompt - Prompt file name
-  * @param {Record<string, string | number | boolean>} args.variables - Variables to interpolate
-  * @param {z.ZodObject} args.schema - Output schema
-  * @param {string} [args.schemaName] - Output schema name
-  * @param {string} [args.schemaDescription] - Output schema description
-  * @returns {Promise<object>} Object matching the provided schema
+  * @param args - Generation arguments.
+  * @param args.prompt - Prompt file name.
+  * @param args.variables - Variables to interpolate.
+  * @param args.schema - Output schema.
+  * @param args.schemaName - Output schema name.
+  * @param args.schemaDescription - Output schema description.
+  * @returns Resolves to an object matching the provided schema.
   */
  export function generateObject<TSchema extends z.ZodObject>(
    args: {
@@ -113,15 +119,15 @@ export function generateObject<TSchema extends z.ZodObject>(
   * Use an LLM model to generate an array of values with a fixed schema.
   *
   * This function is a wrapper over the AI SDK's `generateObject` with `output: 'array'`.
-  * The prompt file sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
+  * The prompt file sets `model`, `messages`, `temperature`, `maxTokens`, and `providerOptions`.
   *
-  * @param {object} args - Generation arguments
-  * @param {string} args.prompt - Prompt file name
-  * @param {Record<string, string | number | boolean>} args.variables - Variables to interpolate
-  * @param {z.ZodType} args.schema - Output schema (array item)
-  * @param {string} [args.schemaName] - Output schema name
-  * @param {string} [args.schemaDescription] - Output schema description
-  * @returns {Promise<object>} Array where each element matches the schema
+  * @param args - Generation arguments.
+  * @param args.prompt - Prompt file name.
+  * @param args.variables - Variables to interpolate.
+  * @param args.schema - Output schema (array item).
+  * @param args.schemaName - Output schema name.
+  * @param args.schemaDescription - Output schema description.
+  * @returns Resolves to an array where each element matches the schema.
   */
  export function generateArray<TSchema extends z.ZodType>(
    args: {
@@ -137,13 +143,13 @@ export function generateArray<TSchema extends z.ZodType>(
   * Use an LLM model to generate a result from an enum (array of string values).
   *
   * This function is a wrapper over the AI SDK's `generateObject` with `output: 'enum'`.
-  * The prompt file sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
+  * The prompt file sets `model`, `messages`, `temperature`, `maxTokens`, and `providerOptions`.
   *
-  * @param {object} args - Generation arguments
-  * @param {string} args.prompt - Prompt file name
-  * @param {Record<string, string | number | boolean>} args.variables - Variables to interpolate
-  * @param {string[]} args.enum - Allowed values for the generation
-  * @returns {Promise<string>} One of the provided enum values
+  * @param args - Generation arguments.
+  * @param args.prompt - Prompt file name.
+  * @param args.variables - Variables to interpolate.
+  * @param args.enum - Allowed values for the generation.
+  * @returns Resolves to one of the provided enum values.
   */
  export function generateEnum<const TEnum extends readonly [string, ...string[]]>(
    args: {
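
A short usage sketch against the updated signatures; it reuses the `generateArray` example that was dropped from the README, so the prompt name and schema are illustrative:

```js
// Typed usage per the declarations above (illustrative prompt and schema).
import { generateArray } from '@output.ai/llm';
import { z } from '@output.ai/core';

const tasks = await generateArray( {
  prompt: 'task_list@v1',
  variables: { project: 'website' },
  schema: z.object( { // schema describes one array item
    title: z.string(),
    priority: z.number()
  } )
} );
// Per the typings, resolves to an array whose elements match the item schema.
```

The next two hunks come from the prompt validation module and its tests; their file headers are absent from this diff.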
@@ -3,7 +3,7 @@ import { ValidationError, z } from '@output.ai/core';
  export const promptSchema = z.object( {
    name: z.string(),
    config: z.object( {
-     provider: z.enum( [ 'anthropic', 'azure', 'openai' ] ),
+     provider: z.enum( [ 'anthropic', 'azure', 'openai', 'vertex' ] ),
      model: z.string(),
      temperature: z.number().optional(),
      maxTokens: z.number().optional(),
@@ -11,8 +11,13 @@ export const promptSchema = z.object( {
        thinking: z.object( {
          type: z.literal( 'enabled' ),
          budgetTokens: z.number()
-       } ).strict().optional()
-     } ).strict().optional()
+       } ).optional(),
+       anthropic: z.record( z.string(), z.unknown() ).optional(),
+       openai: z.record( z.string(), z.unknown() ).optional(),
+       azure: z.record( z.string(), z.unknown() ).optional(),
+       vertex: z.record( z.string(), z.unknown() ).optional(),
+       google: z.record( z.string(), z.unknown() ).optional()
+     } ).passthrough().optional()
    } ).strict(),
    messages: z.array(
      z.object( {
@@ -35,6 +40,19 @@ const getHintForError = errorMessage => {
    return '';
  };
 
+ // Known providerOptions fields. Note: these don't map 1:1 to providers.
+ // - 'google' is used by Vertex provider for Gemini language models
+ // - 'vertex' is used by Vertex provider for Imagen image models
+ // - 'anthropic' is used by both Anthropic provider and Vertex Anthropic
+ const knownProviderOptionsFields = new Set( [
+   'thinking',
+   'anthropic',
+   'openai',
+   'azure',
+   'vertex',
+   'google'
+ ] );
+
  export function validatePrompt( prompt ) {
    const result = promptSchema.safeParse( prompt );
    if ( !result.success ) {
@@ -47,4 +65,12 @@ export function validatePrompt( prompt ) {
        { cause: result.error }
      );
    }
+
+   const providerOptions = prompt?.config?.providerOptions;
+   if ( providerOptions ) {
+     const unknownFields = Object.keys( providerOptions ).filter( k => !knownProviderOptionsFields.has( k ) );
+     if ( unknownFields.length > 0 ) {
+       console.warn( `Prompt "${prompt.name}": Unrecognized providerOptions fields: ${unknownFields.join( ', ' )}` );
+     }
+   }
  }
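
The net effect of this hunk: `providerOptions` moves from `.strict()` to `.passthrough()`, so unknown keys now validate instead of throwing, and `validatePrompt` emits a soft `console.warn` for keys outside the known set. A small illustration of the new behavior (prompt object abbreviated, model name illustrative):

```js
// Unknown providerOptions keys now pass validation but trigger a warning.
validatePrompt( {
  name: 'demo',
  config: {
    provider: 'vertex',
    model: 'gemini-1.5-pro',
    providerOptions: {
      google: { safetySettings: [] },   // in the known set: no warning
      customField: { nested: 'value' }  // unknown: accepted, but warned about
    }
  },
  messages: [ { role: 'user', content: 'Hi' } ]
} );
// -> console.warn: Prompt "demo": Unrecognized providerOptions fields: customField
```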
@@ -65,6 +65,115 @@ describe( 'validatePrompt', () => {
      expect( () => validatePrompt( promptWithThinking ) ).not.toThrow();
    } );
 
+   it( 'should validate a prompt with anthropic-specific providerOptions', () => {
+     const promptWithAnthropicOptions = {
+       name: 'anthropic-options-prompt',
+       config: {
+         provider: 'anthropic',
+         model: 'claude-sonnet-4-20250514',
+         providerOptions: {
+           thinking: {
+             type: 'enabled',
+             budgetTokens: 5000
+           },
+           anthropic: {
+             effort: 'medium',
+             customOption: 'value'
+           }
+         }
+       },
+       messages: [
+         {
+           role: 'user',
+           content: 'Solve this problem.'
+         }
+       ]
+     };
+
+     expect( () => validatePrompt( promptWithAnthropicOptions ) ).not.toThrow();
+   } );
+
+   it( 'should validate a prompt with openai-specific providerOptions', () => {
+     const promptWithOpenAIOptions = {
+       name: 'openai-options-prompt',
+       config: {
+         provider: 'openai',
+         model: 'o3-mini',
+         providerOptions: {
+           openai: {
+             reasoningEffort: 'high',
+             reasoningSummary: 'detailed',
+             customParameter: 'test'
+           }
+         }
+       },
+       messages: [
+         {
+           role: 'user',
+           content: 'Analyze this data.'
+         }
+       ]
+     };
+
+     expect( () => validatePrompt( promptWithOpenAIOptions ) ).not.toThrow();
+   } );
+
+   it( 'should validate a prompt with azure-specific providerOptions', () => {
+     const promptWithAzureOptions = {
+       name: 'azure-options-prompt',
+       config: {
+         provider: 'azure',
+         model: 'gpt-4',
+         providerOptions: {
+           azure: {
+             deploymentName: 'my-deployment',
+             customConfig: { key: 'value' }
+           }
+         }
+       },
+       messages: [
+         {
+           role: 'user',
+           content: 'Process this request.'
+         }
+       ]
+     };
+
+     expect( () => validatePrompt( promptWithAzureOptions ) ).not.toThrow();
+   } );
+
+   it( 'should validate a prompt with mixed providerOptions including unknown fields', () => {
+     const promptWithMixedOptions = {
+       name: 'mixed-options-prompt',
+       config: {
+         provider: 'anthropic',
+         model: 'claude-3-opus-20240229',
+         providerOptions: {
+           thinking: {
+             type: 'enabled',
+             budgetTokens: 3000
+           },
+           anthropic: {
+             effort: 'high'
+           },
+           customProviderField: 'should-be-allowed',
+           anotherCustomField: {
+             nested: 'value',
+             array: [ 1, 2, 3 ]
+           }
+         }
+       },
+       messages: [
+         {
+           role: 'user',
+           content: 'Complex request with multiple options.'
+         }
+       ]
+     };
+
+     expect( () => validatePrompt( promptWithMixedOptions ) ).not.toThrow();
+   } );
+
    it( 'should throw ValidationError when provider is invalid', () => {
      const invalidPrompt = {
        name: 'invalid-provider',