@output.ai/llm 0.0.15 → 0.2.0

package/README.md CHANGED
@@ -1,30 +1,128 @@
1
1
  # LLM Module
2
2
 
3
- Provides llm calls abstractions.
3
+ Framework abstraction to interact with LLM models, including prompt management and structured generation.
4
4
 
5
- ## completions()
5
+ ## Quick Start
6
6
 
7
- Allow to use chat messages with a LLM model.
7
+ ```js
8
+ import { generateText } from '@output.ai/llm';
9
+
10
+ const response = await generateText({
11
+ prompt: 'my_prompt@v1',
12
+ variables: { topic: 'AI workflows' }
13
+ });
14
+ ```
15
+
16
+ ## Features
17
+
18
+ - **Unified API**: Single import for prompt loading and LLM generation
19
+ - **Multiple Generation Types**: Text, objects, arrays, and enums
20
+ - **Prompt Management**: Load and render `.prompt` files with variable interpolation
21
+ - **Multi-Provider Support**: Anthropic, OpenAI, and Azure
22
+ - **Type Safety**: Full TypeScript support with Zod schemas
23
+
24
+ ## Generate Text
25
+
26
+ Generate unstructured text from an LLM:
8
27
 
9
28
  ```js
10
29
  import { generateText } from '@output.ai/llm';
11
30
 
12
- const response = generateText({
13
- configs: {
14
- model: 'model-name', // eg claude-3.5
15
- provider: 'provider-name', // eg anthropic
16
- },
17
- messages: [
18
- {
19
- role: 'assistant',
20
- content: 'You are an assistant...',
21
- },
22
- {
23
- role: 'user',
24
- content: 'Whats the capital of Nicaragua?',
25
- },
26
- ],
31
+ const response = await generateText({
32
+ prompt: 'explain_topic@v1',
33
+ variables: { topic: 'machine learning' }
34
+ });
35
+ ```
36
+
37
+ ## Generate Object
38
+
39
+ Generate a structured object matching a Zod schema:
40
+
41
+ ```js
42
+ import { generateObject } from '@output.ai/llm';
43
+ import { z } from '@output.ai/core';
44
+
45
+ const recipeSchema = z.object({
46
+ title: z.string(),
47
+ ingredients: z.array(z.string()),
48
+ steps: z.array(z.string())
49
+ });
50
+
51
+ const recipe = await generateObject({
52
+ prompt: 'recipe@v1',
53
+ variables: { dish: 'lasagna' },
54
+ schema: recipeSchema
27
55
  });
28
56
  ```
29
57
 
30
- The response is a string.
58
+ ## Generate Array
59
+
60
+ Generate an array of structured items:
61
+
62
+ ```js
63
+ import { generateArray } from '@output.ai/llm';
64
+ import { z } from '@output.ai/core';
65
+
66
+ const taskSchema = z.object({
67
+ title: z.string(),
68
+ priority: z.number()
69
+ });
70
+
71
+ const tasks = await generateArray({
72
+ prompt: 'task_list@v1',
73
+ variables: { project: 'website' },
74
+ schema: taskSchema
75
+ });
76
+ ```
77
+
78
+ ## Generate Enum
79
+
80
+ Generate a value from a list of allowed options:
81
+
82
+ ```js
83
+ import { generateEnum } from '@output.ai/llm';
84
+
85
+ const category = await generateEnum({
86
+ prompt: 'categorize@v1',
87
+ variables: { text: 'Product announcement' },
88
+ enum: ['marketing', 'engineering', 'sales', 'support']
89
+ });
90
+ ```
91
+
92
+ ## Prompt Files
93
+
94
+ Prompt files use YAML frontmatter for configuration and support LiquidJS templating:
95
+
96
+ **File: `explain_topic@v1.prompt`**
97
+ ```yaml
98
+ ---
99
+ provider: anthropic
100
+ model: claude-sonnet-4-20250514
101
+ temperature: 0.7
102
+ ---
103
+
104
+ <system>
105
+ You are a concise technical explainer.
106
+ </system>
107
+
108
+ <user>
109
+ Explain {{ topic }} in 3 bullet points.
110
+ </user>
111
+ ```
112
+
113
+ ## Configuration Options
114
+
115
+ Prompt files support these configuration fields:
116
+
117
+ ```yaml
118
+ ---
119
+ provider: anthropic | openai | azure
120
+ model: model-name
121
+ temperature: 0.0-1.0 (optional)
122
+ maxTokens: number (optional)
123
+ providerOptions: (optional)
124
+   thinking:
125
+     type: enabled
126
+     budgetTokens: number
127
+ ---
128
+ ```
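
Arguments to every `generate*` call are validated before any model call; invalid arguments raise a `ValidationError` from `@output.ai/core` (see `src/validations.js` below). A minimal sketch of handling it, assuming the `recipe@v1` prompt file from the examples above exists:

```js
import { generateObject } from '@output.ai/llm';
import { ValidationError, z } from '@output.ai/core';

try {
  const recipe = await generateObject({
    prompt: 'recipe@v1',
    variables: { dish: 'lasagna' },
    schema: z.object({ title: z.string() })
  });
  console.log(recipe.title);
} catch (error) {
  if (error instanceof ValidationError) {
    // Thrown before any model call when the arguments fail schema validation
    console.error('Invalid arguments:', error.message);
  } else {
    throw error; // provider/model errors propagate to the caller
  }
}
```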
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@output.ai/llm",
3
- "version": "0.0.15",
3
+ "version": "0.2.0",
4
4
  "description": "Framework abstraction to interact with LLM models",
5
5
  "type": "module",
6
6
  "main": "src/index.js",
@@ -13,8 +13,9 @@
13
13
  "@ai-sdk/azure": "2.0.53",
14
14
  "@ai-sdk/openai": "2.0.52",
15
15
  "@output.ai/core": ">=0.0.1",
16
- "@output.ai/prompt": ">=0.0.1",
17
- "ai": "5.0.48"
16
+ "ai": "5.0.48",
17
+ "gray-matter": "4.0.3",
18
+ "liquidjs": "10.22.0"
18
19
  },
19
20
  "license": "UNLICENSED"
20
21
  }
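
The dependency swap is the heart of this release: `@output.ai/prompt` is gone, replaced by in-package prompt handling built on `gray-matter` (YAML frontmatter parsing) and `liquidjs` (templating). A minimal sketch of what the two new dependencies do, mirroring how `src/parser.js` and `src/prompt_loader.js` below use them:

```js
import matter from 'gray-matter';
import { Liquid } from 'liquidjs';

const raw = `---
provider: anthropic
model: claude-sonnet-4-20250514
---
<user>Explain {{ topic }} briefly.</user>`;

// gray-matter splits the YAML frontmatter from the body
const { data, content } = matter(raw);
// data -> { provider: 'anthropic', model: 'claude-sonnet-4-20250514' }

// liquidjs interpolates {{ variables }} in the body
const rendered = new Liquid().parseAndRenderSync(content.trim(), { topic: 'tracing' });
// rendered -> '<user>Explain tracing briefly.</user>'
```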
package/src/ai_model.js CHANGED
@@ -1,29 +1,34 @@
1
1
  import { anthropic } from '@ai-sdk/anthropic';
2
2
  import { azure } from '@ai-sdk/azure';
3
3
  import { openai } from '@ai-sdk/openai';
4
- import { ValidationError, z } from '@output.ai/core';
5
4
 
6
5
  const providers = { azure, anthropic, openai };
7
6
 
8
- const promptSchema = z.object( {
9
- config: z.object( {
10
- provider: z.enum( [ 'anthropic', 'azure', 'openai' ] ),
11
- model: z.string(),
12
- temperature: z.number().optional(),
13
- max_tokens: z.number().optional()
14
- } ),
15
- messages: z.array(
16
- z.object( {
17
- role: z.string(),
18
- content: z.string()
19
- } )
20
- )
21
- } );
22
-
23
- export const loadModel = prompt => {
24
- const result = promptSchema.safeParse( prompt );
25
- if ( !result.success ) {
26
- throw new ValidationError( `Invalid prompt object: ${result.error.message}` );
7
+ export function loadModel( prompt ) {
8
+ const config = prompt?.config;
9
+
10
+ if ( !config ) {
11
+ throw new Error( 'Prompt is missing config object' );
12
+ }
13
+
14
+ const { provider: providerName, model: modelName } = config;
15
+
16
+ if ( !providerName ) {
17
+ throw new Error( 'Prompt config is missing "provider" field' );
18
+ }
19
+
20
+ if ( !modelName ) {
21
+ throw new Error( 'Prompt config is missing "model" field' );
27
22
  }
28
- return providers[prompt.config.provider]( prompt.config.model );
29
- };
23
+
24
+ const provider = providers[providerName];
25
+
26
+ if ( !provider ) {
27
+ const validProviders = Object.keys( providers ).join( ', ' );
28
+ throw new Error(
29
+ `Invalid provider "${providerName}". Valid providers: ${validProviders}`
30
+ );
31
+ }
32
+
33
+ return provider( modelName );
34
+ }
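
The Zod-based prompt validation moved out of this module (it now lives in `prompt_validations.js`); `loadModel` itself only resolves the provider factory and fails fast with descriptive errors. A small usage sketch:

```js
import { loadModel } from './ai_model.js';

// Resolves providers[config.provider] and calls it with the model name,
// i.e. equivalent to anthropic('claude-sonnet-4-20250514')
const model = loadModel({
  config: { provider: 'anthropic', model: 'claude-sonnet-4-20250514' }
});

try {
  loadModel({ config: { provider: 'mistral', model: 'x' } });
} catch (error) {
  console.error(error.message);
  // Invalid provider "mistral". Valid providers: azure, anthropic, openai
}
```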
package/src/ai_model.spec.js ADDED
@@ -0,0 +1,33 @@
1
+ import { it, expect, vi, afterEach } from 'vitest';
2
+
3
+ const openaiImpl = vi.fn( model => `openai:${model}` );
4
+ const azureImpl = vi.fn( model => `azure:${model}` );
5
+ const anthropicImpl = vi.fn( model => `anthropic:${model}` );
6
+
7
+ vi.mock( '@ai-sdk/openai', () => ( {
8
+ openai: ( ...values ) => openaiImpl( ...values )
9
+ } ) );
10
+
11
+ vi.mock( '@ai-sdk/azure', () => ( {
12
+ azure: ( ...values ) => azureImpl( ...values )
13
+ } ) );
14
+
15
+ vi.mock( '@ai-sdk/anthropic', () => ( {
16
+ anthropic: ( ...values ) => anthropicImpl( ...values )
17
+ } ) );
18
+
19
+ import { loadModel } from './ai_model.js';
20
+
21
+ afterEach( async () => {
22
+ await vi.resetModules();
23
+ vi.clearAllMocks();
24
+ } );
25
+
26
+ it( 'loads model using selected provider', () => {
27
+ const result = loadModel( { config: { provider: 'openai', model: 'gpt-4o-mini' } } );
28
+
29
+ expect( result ).toBe( 'openai:gpt-4o-mini' );
30
+ expect( openaiImpl ).toHaveBeenCalledWith( 'gpt-4o-mini' );
31
+ expect( azureImpl ).not.toHaveBeenCalled();
32
+ expect( anthropicImpl ).not.toHaveBeenCalled();
33
+ } );
package/src/ai_sdk.js CHANGED
@@ -1,8 +1,10 @@
1
1
  import { Tracing } from '@output.ai/core/tracing';
2
2
  import { loadModel } from './ai_model.js';
3
3
  import * as AI from 'ai';
4
+ import { validateGenerateTextArgs, validateGenerateObjectArgs, validateGenerateArrayArgs, validateGenerateEnumArgs } from './validations.js';
5
+ import { loadPrompt } from './prompt_loader.js';
4
6
 
5
- const generationWrapper = async ( traceId, fn ) => {
7
+ const traceWrapper = async ( traceId, fn ) => {
6
8
  try {
7
9
  const result = await fn();
8
10
  Tracing.addEventEnd( { id: traceId, details: result } );
@@ -13,33 +15,126 @@ const generationWrapper = async ( traceId, fn ) => {
13
15
  }
14
16
  };
15
17
 
16
- export async function generateText( { prompt, ...nativeAiSdkArgs } ) {
17
- const traceId = `generateText-${Date.now()}`;
18
- Tracing.addEventStart( { kind: 'llm', name: 'generateText', id: traceId, details: { prompt, nativeAiSdkArgs } } );
19
-
20
- return generationWrapper( traceId, async () => {
21
- return ( await AI.generateText( {
22
- model: loadModel( prompt ),
23
- messages: prompt.messages,
24
- temperature: prompt.config.temperature,
25
- maxOutputTokens: prompt.config.max_tokens ?? 64000,
26
- ...nativeAiSdkArgs
27
- } ) ).text;
28
- } );
18
+ const createTraceId = name => `${name}-${Date.now()}`;
19
+
20
+ const startTrace = ( name, details ) => {
21
+ const traceId = createTraceId( name );
22
+ Tracing.addEventStart( { kind: 'llm', name, id: traceId, details } );
23
+ return traceId;
24
+ };
25
+
26
+ const extraAiSdkOptionsFromPrompt = prompt => {
27
+ const options = {
28
+ model: loadModel( prompt ),
29
+ messages: prompt.messages,
30
+ providerOptions: prompt.config.providerOptions
31
+ };
32
+
33
+ if ( prompt.config.temperature !== undefined ) {
34
+ options.temperature = prompt.config.temperature;
35
+ }
36
+
37
+ if ( prompt.config.maxTokens ) {
38
+ options.maxOutputTokens = prompt.config.maxTokens;
39
+ }
40
+
41
+ return options;
42
+ };
43
+
44
+ /**
45
+ * Use an LLM model to generate text.
46
+ *
47
+ * @param {object} args - Generation arguments
48
+ * @param {string} args.prompt - Prompt file name
49
+ * @param {Record<string, string | number>} [args.variables] - Variables to interpolate
50
+ * @returns {Promise<string>} Generated text
51
+ */
52
+ export async function generateText( { prompt, variables } ) {
53
+ validateGenerateTextArgs( { prompt, variables } );
54
+ const loadedPrompt = loadPrompt( prompt, variables );
55
+ const traceId = startTrace( 'generateText', { prompt: loadedPrompt } );
56
+
57
+ return traceWrapper( traceId, async () =>
58
+ AI.generateText( extraAiSdkOptionsFromPrompt( loadedPrompt ) ).then( r => r.text )
59
+ );
60
+ }
61
+
62
+ /**
63
+ * Use an LLM model to generate an object with a fixed schema.
64
+ *
65
+ * @param {object} args - Generation arguments
66
+ * @param {string} args.prompt - Prompt file name
67
+ * @param {Record<string, string | number>} [args.variables] - Variables to interpolate
68
+ * @param {z.ZodObject} args.schema - Output schema
69
+ * @param {string} [args.schemaName] - Output schema name
70
+ * @param {string} [args.schemaDescription] - Output schema description
71
+ * @returns {Promise<object>} Object matching the provided schema
72
+ */
73
+ export async function generateObject( args ) {
74
+ validateGenerateObjectArgs( args );
75
+ const { prompt, variables, schema, schemaName, schemaDescription } = args;
76
+ const loadedPrompt = loadPrompt( prompt, variables );
77
+ const traceId = startTrace( 'generateObject', { ...args, prompt: loadedPrompt } );
78
+
79
+ return traceWrapper( traceId, async () =>
80
+ AI.generateObject( {
81
+ output: 'object',
82
+ schema,
83
+ schemaName,
84
+ schemaDescription,
85
+ ...extraAiSdkOptionsFromPrompt( loadedPrompt )
86
+ } ).then( r => r.object )
87
+ );
29
88
  }
30
89
 
31
- export async function generateObject( { prompt, ...nativeAiSdkArgs } ) {
32
- const traceId = `generateObject-${Date.now()}`;
33
- Tracing.addEventStart( { kind: 'llm', name: 'generateObject', id: traceId, details: { prompt, nativeAiSdkArgs } } );
34
-
35
- return generationWrapper( traceId, async () => {
36
- return ( await AI.generateObject( {
37
- model: loadModel( prompt ),
38
- output: nativeAiSdkArgs.object ?? 'object',
39
- messages: prompt.messages,
40
- temperature: prompt.config.temperature,
41
- ...( prompt.config.max_tokens && { maxOutputTokens: prompt.config.max_tokens } ),
42
- ...nativeAiSdkArgs
43
- } ) ).object;
44
- } );
90
+ /**
91
+ * Use an LLM model to generate an array of values with a fixed schema.
92
+ *
93
+ * @param {object} args - Generation arguments
94
+ * @param {string} args.prompt - Prompt file name
95
+ * @param {Record<string, string | number>} [args.variables] - Variables to interpolate
96
+ * @param {z.ZodType} args.schema - Output schema (array item)
97
+ * @param {string} [args.schemaName] - Output schema name
98
+ * @param {string} [args.schemaDescription] - Output schema description
99
+ * @returns {Promise<Array<object>>} Array where each element matches the schema
100
+ */
101
+ export async function generateArray( args ) {
102
+ validateGenerateArrayArgs( args );
103
+ const { prompt, variables, schema, schemaName, schemaDescription } = args;
104
+ const loadedPrompt = loadPrompt( prompt, variables );
105
+ const traceId = startTrace( 'generateArray', { ...args, prompt: loadedPrompt } );
106
+
107
+ return traceWrapper( traceId, async () =>
108
+ AI.generateObject( {
109
+ output: 'array',
110
+ schema,
111
+ schemaName,
112
+ schemaDescription,
113
+ ...extraAiSdkOptionsFromPrompt( loadedPrompt )
114
+ } ).then( r => r.object )
115
+ );
116
+ }
117
+
118
+ /**
119
+ * Use an LLM model to generate a result from an enum (array of string values).
120
+ *
121
+ * @param {object} args - Generation arguments
122
+ * @param {string} args.prompt - Prompt file name
123
+ * @param {Record<string, string | number>} [args.variables] - Variables to interpolate
124
+ * @param {string[]} args.enum - Allowed values for the generation
125
+ * @returns {Promise<string>} One of the provided enum values
126
+ */
127
+ export async function generateEnum( args ) {
128
+ validateGenerateEnumArgs( args );
129
+ const { prompt, variables, enum: _enum } = args;
130
+ const loadedPrompt = loadPrompt( prompt, variables );
131
+ const traceId = startTrace( 'generateEnum', { ...args, prompt: loadedPrompt } );
132
+
133
+ return traceWrapper( traceId, async () =>
134
+ AI.generateObject( {
135
+ output: 'enum',
136
+ enum: _enum,
137
+ ...extraAiSdkOptionsFromPrompt( loadedPrompt )
138
+ } ).then( r => r.object )
139
+ );
45
140
  }
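
All four `generate*` functions now share one pipeline: validate the arguments, load and render the prompt file, start a trace, call the AI SDK, and unwrap the result. A sketch of what a call expands to, using `generateArray` as the example:

```js
import { generateArray } from '@output.ai/llm';
import { z } from '@output.ai/core';

const tasks = await generateArray({
  prompt: 'task_list@v1',            // resolved to task_list@v1.prompt on disk
  variables: { project: 'website' },
  schema: z.object({ title: z.string() })
});

// Roughly equivalent to:
// AI.generateObject({
//   output: 'array',
//   schema,
//   model: loadModel(loadedPrompt),
//   messages: loadedPrompt.messages,
//   providerOptions: loadedPrompt.config.providerOptions,
//   temperature: loadedPrompt.config.temperature,    // only when set
//   maxOutputTokens: loadedPrompt.config.maxTokens   // only when set
// }).then(r => r.object)
```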
package/src/ai_sdk.spec.js ADDED
@@ -0,0 +1,170 @@
1
+ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
2
+ import { z } from '@output.ai/core';
3
+
4
+ const tracingSpies = {
5
+ addEventStart: vi.fn(),
6
+ addEventEnd: vi.fn(),
7
+ addEventError: vi.fn()
8
+ };
9
+ vi.mock( '@output.ai/core/tracing', () => ( { Tracing: tracingSpies } ), { virtual: true } );
10
+
11
+ const loadModelImpl = vi.fn();
12
+ vi.mock( './ai_model.js', () => ( {
13
+ loadModel: ( ...values ) => loadModelImpl( ...values )
14
+ } ) );
15
+
16
+ const aiFns = {
17
+ generateText: vi.fn(),
18
+ generateObject: vi.fn()
19
+ };
20
+ vi.mock( 'ai', () => ( aiFns ) );
21
+
22
+ const validators = {
23
+ validateGenerateTextArgs: vi.fn(),
24
+ validateGenerateObjectArgs: vi.fn(),
25
+ validateGenerateArrayArgs: vi.fn(),
26
+ validateGenerateEnumArgs: vi.fn()
27
+ };
28
+ vi.mock( './validations.js', () => ( validators ) );
29
+
30
+ const loadPromptImpl = vi.fn();
31
+ vi.mock( './prompt_loader.js', () => ( {
32
+ loadPrompt: ( ...values ) => loadPromptImpl( ...values )
33
+ } ) );
34
+
35
+ const importSut = async () => import( './ai_sdk.js' );
36
+
37
+ const basePrompt = {
38
+ config: {
39
+ provider: 'openai',
40
+ model: 'gpt-4o-mini',
41
+ temperature: 0.3,
42
+ providerOptions: { thinking: { enabled: true } }
43
+ },
44
+ messages: [ { role: 'user', content: 'Hi' } ]
45
+ };
46
+
47
+ beforeEach( () => {
48
+ tracingSpies.addEventStart.mockClear();
49
+ tracingSpies.addEventEnd.mockClear();
50
+ tracingSpies.addEventError.mockClear();
51
+
52
+ loadModelImpl.mockReset().mockReturnValue( 'MODEL' );
53
+ loadPromptImpl.mockReset().mockReturnValue( basePrompt );
54
+
55
+ aiFns.generateText.mockReset().mockResolvedValue( { text: 'TEXT' } );
56
+ aiFns.generateObject.mockReset().mockResolvedValue( { object: 'OBJECT' } );
57
+
58
+ validators.validateGenerateTextArgs.mockClear();
59
+ validators.validateGenerateObjectArgs.mockClear();
60
+ validators.validateGenerateArrayArgs.mockClear();
61
+ validators.validateGenerateEnumArgs.mockClear();
62
+ } );
63
+
64
+ afterEach( async () => {
65
+ await vi.resetModules();
66
+ vi.clearAllMocks();
67
+ } );
68
+
69
+ describe( 'ai_sdk', () => {
70
+ it( 'generateText: validates, traces, calls AI and returns text', async () => {
71
+ const { generateText } = await importSut();
72
+ const result = await generateText( { prompt: 'test_prompt@v1' } );
73
+
74
+ expect( validators.validateGenerateTextArgs ).toHaveBeenCalledWith( { prompt: 'test_prompt@v1' } );
75
+ expect( loadPromptImpl ).toHaveBeenCalledWith( 'test_prompt@v1', undefined );
76
+ expect( tracingSpies.addEventStart ).toHaveBeenCalledTimes( 1 );
77
+ expect( tracingSpies.addEventEnd ).toHaveBeenCalledTimes( 1 );
78
+
79
+ expect( loadModelImpl ).toHaveBeenCalledWith( basePrompt );
80
+ expect( aiFns.generateText ).toHaveBeenCalledWith( {
81
+ model: 'MODEL',
82
+ messages: basePrompt.messages,
83
+ temperature: 0.3,
84
+ providerOptions: basePrompt.config.providerOptions
85
+ } );
86
+ expect( result ).toBe( 'TEXT' );
87
+ } );
88
+
89
+ it( 'generateObject: validates, traces, calls AI with output object and returns object', async () => {
90
+ const { generateObject } = await importSut();
91
+ aiFns.generateObject.mockResolvedValueOnce( { object: { a: 1 } } );
92
+
93
+ const schema = z.object( { a: z.number() } );
94
+ const result = await generateObject( {
95
+ prompt: 'test_prompt@v1',
96
+ schema,
97
+ schemaName: 'Thing',
98
+ schemaDescription: 'A thing'
99
+ } );
100
+
101
+ expect( validators.validateGenerateObjectArgs ).toHaveBeenCalled();
102
+ expect( loadPromptImpl ).toHaveBeenCalledWith( 'test_prompt@v1', undefined );
103
+ expect( tracingSpies.addEventStart ).toHaveBeenCalledTimes( 1 );
104
+ expect( tracingSpies.addEventEnd ).toHaveBeenCalledTimes( 1 );
105
+
106
+ expect( aiFns.generateObject ).toHaveBeenCalledWith( {
107
+ output: 'object',
108
+ schema,
109
+ schemaName: 'Thing',
110
+ schemaDescription: 'A thing',
111
+ model: 'MODEL',
112
+ messages: basePrompt.messages,
113
+ temperature: 0.3,
114
+ providerOptions: basePrompt.config.providerOptions
115
+ } );
116
+ expect( result ).toEqual( { a: 1 } );
117
+ } );
118
+
119
+ it( 'generateArray: validates, traces, calls AI (item schema) and returns array', async () => {
120
+ const { generateArray } = await importSut();
121
+ aiFns.generateObject.mockResolvedValueOnce( { object: [ 1, 2 ] } );
122
+
123
+ const schema = z.number();
124
+ const result = await generateArray( {
125
+ prompt: 'test_prompt@v1',
126
+ schema,
127
+ schemaName: 'Numbers',
128
+ schemaDescription: 'Two numbers'
129
+ } );
130
+
131
+ expect( validators.validateGenerateArrayArgs ).toHaveBeenCalled();
132
+ expect( loadPromptImpl ).toHaveBeenCalledWith( 'test_prompt@v1', undefined );
133
+ expect( tracingSpies.addEventStart ).toHaveBeenCalledTimes( 1 );
134
+ expect( tracingSpies.addEventEnd ).toHaveBeenCalledTimes( 1 );
135
+
136
+ expect( aiFns.generateObject ).toHaveBeenCalledWith( {
137
+ output: 'array',
138
+ schema,
139
+ schemaName: 'Numbers',
140
+ schemaDescription: 'Two numbers',
141
+ model: 'MODEL',
142
+ messages: basePrompt.messages,
143
+ temperature: 0.3,
144
+ providerOptions: basePrompt.config.providerOptions
145
+ } );
146
+ expect( result ).toEqual( [ 1, 2 ] );
147
+ } );
148
+
149
+ it( 'generateEnum: validates, traces, calls AI with output enum and returns value', async () => {
150
+ const { generateEnum } = await importSut();
151
+ aiFns.generateObject.mockResolvedValueOnce( { object: 'B' } );
152
+
153
+ const result = await generateEnum( { prompt: 'test_prompt@v1', enum: [ 'A', 'B', 'C' ] } );
154
+
155
+ expect( validators.validateGenerateEnumArgs ).toHaveBeenCalled();
156
+ expect( loadPromptImpl ).toHaveBeenCalledWith( 'test_prompt@v1', undefined );
157
+ expect( tracingSpies.addEventStart ).toHaveBeenCalledTimes( 1 );
158
+ expect( tracingSpies.addEventEnd ).toHaveBeenCalledTimes( 1 );
159
+
160
+ expect( aiFns.generateObject ).toHaveBeenCalledWith( {
161
+ output: 'enum',
162
+ enum: [ 'A', 'B', 'C' ],
163
+ model: 'MODEL',
164
+ messages: basePrompt.messages,
165
+ temperature: 0.3,
166
+ providerOptions: basePrompt.config.providerOptions
167
+ } );
168
+ expect( result ).toBe( 'B' );
169
+ } );
170
+ } );
package/src/index.d.ts CHANGED
@@ -1,110 +1,87 @@
1
- import type * as AiTypes from 'ai';
2
- import type { z as CoreZ } from '@output.ai/core';
3
- import type { Prompt } from '@output.ai/prompt';
4
-
5
- export type { Prompt };
6
-
7
- type NativeGenerateTextArgs = Parameters<typeof AiTypes.generateText>[0];
8
- type NativeGenerateObjectArgs = Parameters<typeof AiTypes.generateObject>[0];
9
-
10
- /**
11
- * Simplify types into a plain object while preserving unions
12
- * (distributes over union members instead of collapsing their keys)
13
- */
14
- type Simplify<T> = T extends unknown ? { [K in keyof T]: T[K] } & {} : never;
15
-
16
- /**
17
- * Replace keys K in T with V, preserving unions by distributing over T
18
- */
19
- type Replace<T, K extends PropertyKey, V> = T extends unknown
20
- ? Omit<T, Extract<K, keyof T>> & { [P in K]: V }
21
- : never;
1
+ import type { z } from '@output.ai/core';
22
2
 
23
3
  /**
24
- * Text generation arguments
25
- * Include all native AI SDK generateText options and a Prompt object from `@output.ai/prompt`
26
- */
27
- export type GenerateTextArgs = Simplify<
28
- Replace<Partial<NativeGenerateTextArgs>, 'prompt', Prompt>
29
- >;
30
-
31
- /**
32
- * Allow schemas from @output.ai/core's zod in addition to AI SDK's accepted schema types.
33
- */
34
- type WithCoreZodSchema<T> = T extends unknown
35
- ? T extends { schema?: infer S }
36
- ? Omit<T, 'schema'> & { schema?: S | CoreZ.ZodTypeAny }
37
- : T
38
- : never;
39
-
40
- /**
41
- * Object generation arguments
42
- * Include all native AI SDK generateObject options and a Prompt object from `@output.ai/prompt`
43
- */
44
- export type GenerateObjectArgs = Simplify<
45
- Replace<Partial<WithCoreZodSchema<NativeGenerateObjectArgs>>, 'prompt', Prompt>
46
- >;
47
-
48
- /**
49
- * Use a LLM Model to generate text
50
- *
51
- * This function a wrapper over AI SDK's generateText function.
52
- *
53
- * It accepts the same arguments of the the original function, plus a "Prompt" object, generated using the `output.ai/prompt`.
4
+ * Use an LLM model to generate text.
54
5
  *
55
- * The Prompt object will set `model`, `messages`, `temperature` and `max_tokens`, however all these can be overwritten by their AI SDK native argument values.
6
+ * This function is a wrapper over the AI SDK's `generateText`.
7
+ * The prompt file sets `model`, `messages`, `temperature`, `maxTokens`, and `providerOptions`.
56
8
  *
57
- * @param {GenerateTextArgs} args - Generation arguments
58
- * @returns {Promise<string>}
9
+ * @param {object} args - Generation arguments
10
+ * @param {string} args.prompt - Prompt file name
11
+ * @param {Record<string, string | number>} [args.variables] - Variables to interpolate
12
+ * @returns {Promise<string>} Generated text
59
13
  */
60
14
  export function generateText(
61
- args: GenerateTextArgs
15
+ args: {
16
+ prompt: string,
17
+ variables?: Record<string, string | number>
18
+ }
62
19
  ): Promise<string>;
63
20
 
64
21
  /**
65
- * Use a LLM Model to generate object
66
- *
67
- * This function a wrapper over AI SDK's generateObject function.
68
- *
69
- * It accepts the same arguments of the the original function, plus a "Prompt" object, generated using the `output.ai/prompt`.
70
- *
71
- * The Prompt object will set `model`, `messages`, `temperature` and `max_tokens`, however all these can be overwritten by their AI SDK native argument values.
72
- *
73
- * @param {GenerateObjectArgs} args - Generation arguments
74
- * @returns {Promise<object>} An object matching the provided schema
22
+ * Use an LLM model to generate an object with a fixed schema.
23
+ *
24
+ * This function is a wrapper over the AI SDK's `generateObject`.
25
+ * The prompt file sets `model`, `messages`, `temperature`, `maxTokens`, and `providerOptions`.
26
+ *
27
+ * @param {object} args - Generation arguments
28
+ * @param {string} args.prompt - Prompt file name
29
+ * @param {Record<string, string | number>} args.variables - Variables to interpolate
30
+ * @param {z.ZodObject} args.schema - Output schema
31
+ * @param {string} [args.schemaName] - Output schema name
32
+ * @param {string} [args.schemaDescription] - Output schema description
33
+ * @returns {Promise<object>} Object matching the provided schema
75
34
  */
76
- export function generateObject<A extends GenerateObjectArgs & { schema: CoreZ.ZodTypeAny }>(
77
- args: A
78
- ): Promise<CoreZ.infer<A['schema']>>;
35
+ export function generateObject<TSchema extends z.ZodObject>(
36
+ args: {
37
+ prompt: string,
38
+ variables?: Record<string, string | number>,
39
+ schema: TSchema,
40
+ schemaName?: string,
41
+ schemaDescription?: string
42
+ }
43
+ ): Promise<z.infer<TSchema>>;
79
44
 
80
45
  /**
81
- * Use a LLM Model to generate object
82
- *
83
- * This function a wrapper over AI SDK's generateObject function.
84
- *
85
- * It accepts the same arguments of the the original function, plus a "Prompt" object, generated using the `output.ai/prompt`.
86
- *
87
- * The Prompt object will set `model`, `messages`, `temperature` and `max_tokens`, however all these can be overwritten by their AI SDK native argument values.
88
- *
89
- * @param {GenerateObjectArgs} args - Generation arguments
90
- * @returns {Promise<object>} An object matching the provided enum
46
+ * Use an LLM model to generate an array of values with a fixed schema.
47
+ *
48
+ * This function is a wrapper over the AI SDK's `generateObject` with `output: 'array'`.
49
+ * The prompt file sets `model`, `messages`, `temperature`, `maxTokens`, and `providerOptions`.
50
+ *
51
+ * @param {object} args - Generation arguments
52
+ * @param {string} args.prompt - Prompt file name
53
+ * @param {Record<string, string | number>} [args.variables] - Variables to interpolate
54
+ * @param {z.ZodType} args.schema - Output schema (array item)
55
+ * @param {string} [args.schemaName] - Output schema name
56
+ * @param {string} [args.schemaDescription] - Output schema description
57
+ * @returns {Promise<Array<object>>} Array where each element matches the schema
91
58
  */
92
- export function generateObject<A extends GenerateObjectArgs & { enum: readonly unknown[]; output: 'enum' }>(
93
- args: A
94
- ): Promise<A['enum'][number]>;
59
+ export function generateArray<TSchema extends z.ZodType>(
60
+ args: {
61
+ prompt: string,
62
+ variables?: Record<string, string | number>,
63
+ schema: TSchema,
64
+ schemaName?: string,
65
+ schemaDescription?: string
66
+ }
67
+ ): Promise<Array<z.infer<TSchema>>>;
95
68
 
96
69
  /**
97
- * Use a LLM Model to generate object
98
- *
99
- * This function a wrapper over AI SDK's generateObject function.
100
- *
101
- * It accepts the same arguments of the the original function, plus a "Prompt" object, generated using the `output.ai/prompt`.
70
+ * Use an LLM model to generate a result from an enum (array of string values).
102
71
  *
103
- * The Prompt object will set `model`, `messages`, `temperature` and `max_tokens`, however all these can be overwritten by their AI SDK native argument values.
72
+ * This function is a wrapper over the AI SDK's `generateObject` with `output: 'enum'`.
73
+ * The prompt file sets `model`, `messages`, `temperature`, `maxTokens`, and `providerOptions`.
104
74
  *
105
- * @param {GenerateObjectArgs} args - Generation arguments
106
- * @returns {Promise<object>} An object without a pre-defined schema schema
75
+ * @param {object} args - Generation arguments
76
+ * @param {string} args.prompt - Prompt file name
77
+ * @param {Record<string, string | number>} args.variables - Variables to interpolate
78
+ * @param {string[]} args.enum - Allowed values for the generation
79
+ * @returns {Promise<string>} One of the provided enum values
107
80
  */
108
- export function generateObject(
109
- args: GenerateObjectArgs
110
- ): Promise<object>;
81
+ export function generateEnum<const TEnum extends readonly [string, ...string[]]>(
82
+ args: {
83
+ prompt: string,
84
+ variables?: Record<string, string | number>,
85
+ enum: TEnum
86
+ }
87
+ ): Promise<TEnum[number]>;
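
The new typings tie each return type to the inputs: `generateObject` infers from the Zod schema, and `generateEnum` uses a `const` type parameter to narrow the result to the literal union. A sketch of the inference in practice (the prompt names are placeholders):

```js
import { generateObject, generateEnum } from '@output.ai/llm';
import { z } from '@output.ai/core';

const user = await generateObject({
  prompt: 'user@v1',
  schema: z.object({ name: z.string(), age: z.number() })
});
// user: { name: string; age: number }

const label = await generateEnum({
  prompt: 'categorize@v1',
  enum: ['bug', 'feature']
});
// label: 'bug' | 'feature' (the const generic keeps the literals)
```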
package/src/index.js CHANGED
@@ -1 +1,2 @@
1
- export { generateText, generateObject } from './ai_sdk.js';
1
+ export { generateText, generateArray, generateObject, generateEnum } from './ai_sdk.js';
2
+ export * as ai from 'ai';
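
Besides the two new generators, `index.js` now re-exports the whole AI SDK under the `ai` namespace, an escape hatch for anything the high-level helpers don't cover:

```js
import { ai } from '@output.ai/llm';

// The full AI SDK surface is available without a separate dependency
console.log(typeof ai.generateText); // 'function'
console.log(typeof ai.streamText);   // 'function'
```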
package/src/load_content.js ADDED
@@ -0,0 +1,121 @@
1
+ import { join, dirname } from 'path';
2
+ import { readFileSync, readdirSync } from 'node:fs';
3
+ import { fileURLToPath } from 'node:url';
4
+
5
+ function extractFilePath( fileUrlMatch, parenMatch, directMatch ) {
6
+ if ( fileUrlMatch ) {
7
+ return fileURLToPath( 'file://' + fileUrlMatch[1] );
8
+ }
9
+ if ( parenMatch ) {
10
+ return parenMatch[1];
11
+ }
12
+ if ( directMatch ) {
13
+ return directMatch[1];
14
+ }
15
+ return null;
16
+ }
17
+
18
+ export const getInvocationDir = _ => {
19
+ const stack = new Error().stack;
20
+
21
+ if ( !stack ) {
22
+ throw new Error( 'Stack trace is unavailable - cannot determine invocation directory' );
23
+ }
24
+
25
+ const lines = stack.split( '\n' );
26
+
27
+ // Search through stack to find first valid file path that is NOT in SDK
28
+ // Skip first 2 lines (Error message and getInvocationDir itself)
29
+ for ( const line of lines.slice( 2 ) ) {
30
+ // Match file:// URLs (ESM) or regular file paths
31
+ // Pattern 1: file:///path/to/file.js:line:col
32
+ // Pattern 2: at func (/path/to/file.js:line:col)
33
+ // Pattern 3: /path/to/file.js:line:col
34
+ const fileUrlMatch = line.match( /file:\/\/([^:]+\.(?:js|mjs|ts|tsx|jsx))/ );
35
+ const parenMatch = line.match( /\(([^:)]+\.(?:js|mjs|ts|tsx|jsx)):\d+:\d+\)/ );
36
+ const directMatch = line.match( /^\s*at\s+[^(]*([^:]+\.(?:js|mjs|ts|tsx|jsx)):\d+:\d+/ );
37
+
38
+ // Determine the file path from different stack trace formats
39
+ const filePath = extractFilePath( fileUrlMatch, parenMatch, directMatch );
40
+
41
+ if ( filePath ) {
42
+ // Skip internal Node.js paths
43
+ if ( filePath.includes( 'node:' ) || filePath.includes( 'internal/' ) ) {
44
+ continue;
45
+ }
46
+
47
+ // Skip SDK internal files - we want the actual caller, not the SDK
48
+ // This includes @output.ai packages and sdk/ directory
49
+ if ( filePath.includes( '/sdk/' ) || filePath.includes( 'node_modules/@output.ai/' ) ) {
50
+ continue;
51
+ }
52
+
53
+ // Found a valid caller file outside the SDK
54
+ return dirname( filePath );
55
+ }
56
+ }
57
+
58
+ throw new Error(
59
+ 'Unable to determine invocation directory from stack trace. ' +
60
+ `Stack preview:\n${lines.slice( 0, 10 ).join( '\n' )}`
61
+ );
62
+ };
63
+
64
+ /**
65
+ * Recursively search for a file by its name and load its content.
66
+ *
67
+ * @param {string} name - Name of the file to load
68
+ * @param {string} [dir=<invocation directory>] - The directory to search for the file
69
+ * @param {number} [depth=0] - Current recursion depth
70
+ * @param {number} [maxDepth=5] - Maximum recursion depth
71
+ * @returns {string | null} - File content or null if not found
72
+ */
73
+ export const loadContent = ( name, dir = getInvocationDir(), depth = 0, maxDepth = 5 ) => {
74
+ // Stop recursion if max depth exceeded
75
+ if ( depth > maxDepth ) {
76
+ return null;
77
+ }
78
+
79
+ // Validate filename doesn't contain path separators (prevent directory traversal)
80
+ if ( name.includes( '..' ) || name.includes( '/' ) || name.includes( '\\' ) ) {
81
+ throw new Error( `Invalid file name "${name}" - must not contain path separators or ".."` );
82
+ }
83
+
84
+ try {
85
+ const entries = readdirSync( dir, { withFileTypes: true } );
86
+
87
+ for ( const entry of entries ) {
88
+ if ( entry.name === name ) {
89
+ try {
90
+ return readFileSync( join( dir, entry.name ), 'utf-8' );
91
+ } catch ( error ) {
92
+ throw new Error(
93
+ `Found file "${name}" in "${dir}" but failed to read it: ${error.message}`,
94
+ { cause: error }
95
+ );
96
+ }
97
+ }
98
+
99
+ // Recurse into subdirectories, but skip symlinks to prevent infinite loops
100
+ if ( entry.isDirectory() && !entry.isSymbolicLink() ) {
101
+ const content = loadContent( name, join( dir, entry.name ), depth + 1, maxDepth );
102
+ if ( content !== null ) {
103
+ return content;
104
+ }
105
+ }
106
+ }
107
+
108
+ return null;
109
+ } catch ( error ) {
110
+ // Only suppress ENOENT (directory doesn't exist) during recursion
111
+ // This is expected when searching through nested directories
112
+ if ( error.code === 'ENOENT' ) {
113
+ return null;
114
+ }
115
+ // Propagate all other errors (permission denied, I/O errors, etc.)
116
+ throw new Error(
117
+ `Failed to read directory "${dir}" while searching for "${name}": ${error.message}`,
118
+ { cause: error }
119
+ );
120
+ }
121
+ };
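
`getInvocationDir` walks the V8 stack trace, skips Node internals and SDK frames, and returns the directory of the first caller file; that directory seeds the recursive `.prompt` search in `loadContent`. A sketch of the extraction step on a hypothetical ESM stack frame:

```js
import { dirname } from 'node:path';
import { fileURLToPath } from 'node:url';

// A hypothetical stack line as Node produces for an ESM module
const frame = '    at run (file:///app/flows/demo.js:10:5)';

// Same pattern as the fileUrlMatch branch above
const match = frame.match(/file:\/\/([^:]+\.(?:js|mjs|ts|tsx|jsx))/);
const filePath = fileURLToPath('file://' + match[1]);
console.log(dirname(filePath)); // '/app/flows'
```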
package/src/load_content.spec.js ADDED
@@ -0,0 +1,97 @@
1
+ import { describe, it, expect } from 'vitest';
2
+ import { getInvocationDir, loadContent } from './load_content.js';
3
+ import { join } from 'path';
4
+ import { mkdtempSync, writeFileSync, mkdirSync } from 'node:fs';
5
+ import { tmpdir } from 'node:os';
6
+
7
+ describe( 'getInvocationDir', () => {
8
+ it( 'extracts directory from stack trace', () => {
9
+ const dir = getInvocationDir();
10
+ expect( dir ).toBeTruthy();
11
+ expect( typeof dir ).toBe( 'string' );
12
+ // Should not contain filename, only directory
13
+ expect( dir ).not.toContain( 'load_content.spec.js' );
14
+ } );
15
+
16
+ it( 'returns consistent directory when called multiple times', () => {
17
+ const dir1 = getInvocationDir();
18
+ const dir2 = getInvocationDir();
19
+ expect( dir1 ).toBe( dir2 );
20
+ } );
21
+
22
+ it( 'returns a valid filesystem path', () => {
23
+ const dir = getInvocationDir();
24
+ // Should be a non-empty string that looks like a directory path
25
+ expect( dir ).toBeTruthy();
26
+ expect( typeof dir ).toBe( 'string' );
27
+ expect( dir.length ).toBeGreaterThan( 0 );
28
+ } );
29
+ } );
30
+
31
+ describe( 'loadContent', () => {
32
+ it( 'loads file from root directory', () => {
33
+ const tempDir = mkdtempSync( join( tmpdir(), 'load-content-test-' ) );
34
+ const testContent = 'test file content';
35
+ writeFileSync( join( tempDir, 'test.txt' ), testContent );
36
+
37
+ const content = loadContent( 'test.txt', tempDir );
38
+
39
+ expect( content ).toBe( testContent );
40
+ } );
41
+
42
+ it( 'loads file from nested subdirectory', () => {
43
+ const tempDir = mkdtempSync( join( tmpdir(), 'load-content-test-' ) );
44
+ const subDir = join( tempDir, 'subdir' );
45
+ mkdirSync( subDir );
46
+
47
+ const testContent = 'nested file content';
48
+ writeFileSync( join( subDir, 'nested.txt' ), testContent );
49
+
50
+ const content = loadContent( 'nested.txt', tempDir );
51
+
52
+ expect( content ).toBe( testContent );
53
+ } );
54
+
55
+ it( 'returns null when file does not exist', () => {
56
+ const tempDir = mkdtempSync( join( tmpdir(), 'load-content-test-' ) );
57
+
58
+ const content = loadContent( 'nonexistent.txt', tempDir );
59
+
60
+ expect( content ).toBeNull();
61
+ } );
62
+
63
+ it( 'returns null when starting directory does not exist', () => {
64
+ // ENOENT is suppressed and returns null (expected behavior)
65
+ const content = loadContent( 'test.txt', '/nonexistent-root-path-12345' );
66
+ expect( content ).toBeNull();
67
+ } );
68
+
69
+ it( 'respects depth limit', () => {
70
+ const tempDir = mkdtempSync( join( tmpdir(), 'load-content-test-' ) );
71
+
72
+ // Create deeply nested structure: dir/1/2/3/4/5/6/deep.txt (7 levels)
73
+ const levels = [ 1, 2, 3, 4, 5, 6 ];
74
+ const deepPath = levels.reduce( ( acc, level ) => {
75
+ const newPath = join( acc, `level${level}` );
76
+ mkdirSync( newPath );
77
+ return newPath;
78
+ }, tempDir );
79
+ writeFileSync( join( deepPath, 'deep.txt' ), 'deeply nested' );
80
+
81
+ // Should NOT find it with default maxDepth of 5
82
+ const content = loadContent( 'deep.txt', tempDir );
83
+ expect( content ).toBeNull();
84
+ } );
85
+
86
+ it( 'throws error when filename contains path separators', () => {
87
+ const tempDir = mkdtempSync( join( tmpdir(), 'load-content-test-' ) );
88
+
89
+ expect( () => {
90
+ loadContent( '../test.txt', tempDir );
91
+ } ).toThrow( /Invalid file name/ );
92
+
93
+ expect( () => {
94
+ loadContent( 'foo/bar.txt', tempDir );
95
+ } ).toThrow( /Invalid file name/ );
96
+ } );
97
+ } );
package/src/parser.js ADDED
@@ -0,0 +1,27 @@
1
+ import matter from 'gray-matter';
2
+
3
+ export function parsePrompt( raw ) {
4
+ const { data, content } = matter( raw );
5
+
6
+ if ( !content || content.trim() === '' ) {
7
+ throw new Error( 'Prompt file has no content after frontmatter' );
8
+ }
9
+
10
+ const infoExtractor = /<(system|user|assistant|tool)>([\s\S]*?)<\/\1>/gm;
11
+ const messages = [ ...content.matchAll( infoExtractor ) ].map(
12
+ ( [ _, role, text ] ) => ( { role, content: text.trim() } )
13
+ );
14
+
15
+ if ( messages.length === 0 ) {
16
+ const contentPreview = content.substring( 0, 200 );
17
+ const ellipsis = content.length > 200 ? '...' : '';
18
+
19
+ throw new Error(
20
+ `No valid message blocks found in prompt file.
21
+ Expected format: <system>...</system>, <user>...</user>, etc.
22
+ Content preview: ${contentPreview}${ellipsis}`
23
+ );
24
+ }
25
+
26
+ return { data, messages };
27
+ }
package/src/parser.spec.js ADDED
@@ -0,0 +1,59 @@
1
+ import { describe, it, expect } from 'vitest';
2
+ import { parsePrompt } from './parser.js';
3
+
4
+ describe( 'parsePrompt', () => {
5
+ it( 'parses frontmatter config and message blocks', () => {
6
+ const raw = `---
7
+ provider: anthropic
8
+ model: claude-3-5-sonnet-20241022
9
+ ---
10
+
11
+ <system>You are a helpful assistant.</system>
12
+ <user>Hello!</user>`;
13
+
14
+ const result = parsePrompt( raw );
15
+
16
+ expect( result.data ).toEqual( {
17
+ provider: 'anthropic',
18
+ model: 'claude-3-5-sonnet-20241022'
19
+ } );
20
+ expect( result.messages ).toHaveLength( 2 );
21
+ expect( result.messages[0] ).toEqual( {
22
+ role: 'system',
23
+ content: 'You are a helpful assistant.'
24
+ } );
25
+ expect( result.messages[1] ).toEqual( {
26
+ role: 'user',
27
+ content: 'Hello!'
28
+ } );
29
+ } );
30
+
31
+ it( 'throws error when content is empty', () => {
32
+ const raw = `---
33
+ provider: anthropic
34
+ model: claude-3-5-sonnet-20241022
35
+ ---
36
+
37
+ `;
38
+
39
+ expect( () => {
40
+ parsePrompt( raw );
41
+ } ).toThrow( /no content after frontmatter/ );
42
+ } );
43
+
44
+ it( 'throws error when no valid message blocks found', () => {
45
+ const raw = `---
46
+ provider: anthropic
47
+ model: claude-3-5-sonnet-20241022
48
+ ---
49
+
50
+ This is just plain text without any message tags.`;
51
+
52
+ expect( () => {
53
+ parsePrompt( raw );
54
+ } ).toThrow( /No valid message blocks found/ );
55
+ expect( () => {
56
+ parsePrompt( raw );
57
+ } ).toThrow( /Expected format/ );
58
+ } );
59
+ } );
package/src/prompt_loader.js ADDED
@@ -0,0 +1,59 @@
1
+ import { parsePrompt } from './parser.js';
2
+ import { Liquid } from 'liquidjs';
3
+ import { loadContent } from './load_content.js';
4
+ import { validatePrompt } from './prompt_validations.js';
5
+
6
+ const liquid = new Liquid();
7
+
8
+ /**
9
+ * Render a single message with template variables.
10
+ *
11
+ * @param {string} role - The message role
12
+ * @param {string} content - The message content template
13
+ * @param {Record<string, string | number>} values - Variables to interpolate
14
+ * @param {string} promptName - Name of the prompt (for error messages)
15
+ * @param {number} index - Message index (for error messages)
16
+ * @returns {{role: string, content: string}} Rendered message object
17
+ */
18
+ const renderMessage = ( role, content, values, promptName, index ) => {
19
+ try {
20
+ return {
21
+ role,
22
+ content: liquid.parseAndRenderSync( content, values )
23
+ };
24
+ } catch ( error ) {
25
+ throw new Error(
26
+ `Failed to render template in message ${index + 1} (role: ${role}) of prompt "${promptName}": ${error.message}`,
27
+ { cause: error }
28
+ );
29
+ }
30
+ };
31
+
32
+ /**
33
+ * Load a prompt file and render it with variables.
34
+ *
35
+ * @param {string} name - Name of the prompt file (without .prompt extension)
36
+ * @param {Record<string, string | number>} [values] - Variables to interpolate
37
+ * @returns {object} Loaded and rendered prompt object ({ name, config, messages })
38
+ */
39
+ export const loadPrompt = ( name, values ) => {
40
+ const promptContent = loadContent( `${name}.prompt` );
41
+ if ( !promptContent ) {
42
+ throw new Error( `Prompt ${name} not found.` );
43
+ }
44
+
45
+ const { data: config, messages } = parsePrompt( promptContent );
46
+
47
+ const prompt = {
48
+ name,
49
+ config,
50
+ messages: messages.map( ( { role, content }, index ) =>
51
+ renderMessage( role, content, values, name, index )
52
+ )
53
+ };
54
+
55
+ validatePrompt( prompt );
56
+
57
+ return prompt;
58
+ };
59
+
package/src/prompt_loader.spec.js ADDED
@@ -0,0 +1,56 @@
1
+ import { describe, it, expect, vi, beforeEach } from 'vitest';
2
+ import { loadPrompt } from './prompt_loader.js';
3
+
4
+ // Mock dependencies
5
+ vi.mock( './load_content.js', () => ( {
6
+ loadContent: vi.fn()
7
+ } ) );
8
+
9
+ vi.mock( './parser.js', () => ( {
10
+ parsePrompt: vi.fn()
11
+ } ) );
12
+
13
+ vi.mock( './prompt_validations.js', () => ( {
14
+ validatePrompt: vi.fn()
15
+ } ) );
16
+
17
+ import { loadContent } from './load_content.js';
18
+ import { parsePrompt } from './parser.js';
19
+ import { validatePrompt } from './prompt_validations.js';
20
+
21
+ describe( 'loadPrompt', () => {
22
+ beforeEach( () => {
23
+ vi.clearAllMocks();
24
+ } );
25
+
26
+ it( 'loads prompt file and renders with variables', () => {
27
+ const promptContent = `---
28
+ provider: anthropic
29
+ model: claude-3-5-sonnet-20241022
30
+ ---
31
+ <user>Hello {{ name }}!</user>`;
32
+
33
+ loadContent.mockReturnValue( promptContent );
34
+ parsePrompt.mockReturnValue( {
35
+ data: { provider: 'anthropic', model: 'claude-3-5-sonnet-20241022' },
36
+ messages: [ { role: 'user', content: 'Hello {{ name }}!' } ]
37
+ } );
38
+
39
+ const result = loadPrompt( 'test', { name: 'World' } );
40
+
41
+ expect( result.name ).toBe( 'test' );
42
+ expect( result.config ).toEqual( { provider: 'anthropic', model: 'claude-3-5-sonnet-20241022' } );
43
+ expect( result.messages ).toHaveLength( 1 );
44
+ expect( result.messages[0].content ).toBe( 'Hello World!' );
45
+ expect( validatePrompt ).toHaveBeenCalledWith( result );
46
+ } );
47
+
48
+ it( 'throws error when prompt file not found', () => {
49
+ loadContent.mockReturnValue( null );
50
+
51
+ expect( () => {
52
+ loadPrompt( 'nonexistent' );
53
+ } ).toThrow( /Prompt nonexistent not found/ );
54
+ } );
55
+
56
+ } );
package/src/prompt_validations.js ADDED
@@ -0,0 +1,30 @@
1
+ import { ValidationError, z } from '@output.ai/core';
2
+
3
+ export const promptSchema = z.object( {
4
+ name: z.string(),
5
+ config: z.object( {
6
+ provider: z.enum( [ 'anthropic', 'azure', 'openai' ] ),
7
+ model: z.string(),
8
+ temperature: z.number().optional(),
9
+ maxTokens: z.number().optional(),
10
+ providerOptions: z.object( {
11
+ thinking: z.object( {
12
+ type: z.literal( 'enabled' ),
13
+ budgetTokens: z.number()
14
+ } ).optional()
15
+ } ).optional()
16
+ } ),
17
+ messages: z.array(
18
+ z.object( {
19
+ role: z.string(),
20
+ content: z.string()
21
+ } )
22
+ )
23
+ } );
24
+
25
+ export function validatePrompt( prompt ) {
26
+ const result = promptSchema.safeParse( prompt );
27
+ if ( !result.success ) {
28
+ throw new ValidationError( `Invalid prompt file: ${z.prettifyError( result.error )}` );
29
+ }
30
+ }
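
`validatePrompt` is the check that `loadPrompt` runs after rendering; failures surface as a `ValidationError` carrying zod's prettified message (this assumes `@output.ai/core` re-exports zod v4, which is what `z.prettifyError` above requires). A sketch:

```js
import { validatePrompt } from './prompt_validations.js';

try {
  validatePrompt({
    name: 'bad@v1',
    config: { provider: 'huggingface', model: 'x' }, // provider not in the enum
    messages: []
  });
} catch (error) {
  console.error(error.message);
  // Invalid prompt file: <prettified zod error naming config.provider>
}
```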
package/src/validations.js ADDED
@@ -0,0 +1,55 @@
1
+ import { ValidationError, z } from '@output.ai/core';
2
+
3
+ const generateTextArgsSchema = z.object( {
4
+ prompt: z.string(),
5
+ variables: z.any().optional()
6
+ } );
7
+
8
+ const generateObjectArgsSchema = z.object( {
9
+ prompt: z.string(),
10
+ variables: z.any().optional(),
11
+ schema: z.custom( v => v instanceof z.ZodObject, {
12
+ message: 'schema must be a ZodObject'
13
+ } ),
14
+ schemaName: z.string().optional(),
15
+ schemaDescription: z.string().optional()
16
+ } );
17
+
18
+ const generateArrayArgsSchema = z.object( {
19
+ prompt: z.string(),
20
+ variables: z.any().optional(),
21
+ schema: z.custom( v => v instanceof z.ZodType, {
22
+ message: 'schema must be a ZodType'
23
+ } ),
24
+ schemaName: z.string().optional(),
25
+ schemaDescription: z.string().optional()
26
+ } );
27
+
28
+ const generateEnumArgsSchema = z.object( {
29
+ prompt: z.string(),
30
+ variables: z.any().optional(),
31
+ enum: z.array( z.string() )
32
+ } );
33
+
34
+ function validateSchema( schema, input, errorPrefix ) {
35
+ const result = schema.safeParse( input );
36
+ if ( !result.success ) {
37
+ throw new ValidationError( `${errorPrefix}: ${z.prettifyError( result.error )}` );
38
+ }
39
+ }
40
+
41
+ export function validateGenerateTextArgs( args ) {
42
+ validateSchema( generateTextArgsSchema, args, 'Invalid generateText() arguments' );
43
+ }
44
+
45
+ export function validateGenerateObjectArgs( args ) {
46
+ validateSchema( generateObjectArgsSchema, args, 'Invalid generateObject() arguments' );
47
+ }
48
+
49
+ export function validateGenerateArrayArgs( args ) {
50
+ validateSchema( generateArrayArgsSchema, args, 'Invalid generateArray() arguments' );
51
+ }
52
+
53
+ export function validateGenerateEnumArgs( args ) {
54
+ validateSchema( generateEnumArgsSchema, args, 'Invalid generateEnum() arguments' );
55
+ }