@output.ai/llm 0.1.0 → 0.2.1

package/README.md CHANGED
@@ -1,30 +1,128 @@
  # LLM Module
 
- Provides llm calls abstractions.
+ Framework abstraction to interact with LLM models, including prompt management and structured generation.
 
- ## completions()
+ ## Quick Start
 
- Allow to use chat messages with a LLM model.
+ ```js
+ import { generateText } from '@output.ai/llm';
+
+ const response = await generateText({
+   prompt: 'my_prompt@v1',
+   variables: { topic: 'AI workflows' }
+ });
+ ```
+
+ ## Features
+
+ - **Unified API**: Single import for prompt loading and LLM generation
+ - **Multiple Generation Types**: Text, objects, arrays, and enums
+ - **Prompt Management**: Load and render `.prompt` files with variable interpolation
+ - **Multi-Provider Support**: Anthropic, OpenAI, and Azure
+ - **Type Safety**: Full TypeScript support with Zod schemas
+
+ ## Generate Text
+
+ Generate unstructured text from an LLM:
 
  ```js
  import { generateText } from '@output.ai/llm';
 
- const response = generateText({
-   configs: {
-     model: 'model-name', // eg claude-3.5
-     provider: 'provider-name', // eg anthropic
-   },
-   messages: [
-     {
-       role: 'assistant',
-       content: 'You are an assistant...',
-     },
-     {
-       role: 'user',
-       content: 'Whats the capital of Nicaragua?',
-     },
-   ],
+ const response = await generateText({
+   prompt: 'explain_topic@v1',
+   variables: { topic: 'machine learning' }
+ });
+ ```
+
+ ## Generate Object
+
+ Generate a structured object matching a Zod schema:
+
+ ```js
+ import { generateObject } from '@output.ai/llm';
+ import { z } from '@output.ai/core';
+
+ const recipeSchema = z.object({
+   title: z.string(),
+   ingredients: z.array(z.string()),
+   steps: z.array(z.string())
+ });
+
+ const recipe = await generateObject({
+   prompt: 'recipe@v1',
+   variables: { dish: 'lasagna' },
+   schema: recipeSchema
  });
  ```
 
- The response is a string.
+ ## Generate Array
+
+ Generate an array of structured items:
+
+ ```js
+ import { generateArray } from '@output.ai/llm';
+ import { z } from '@output.ai/core';
+
+ const taskSchema = z.object({
+   title: z.string(),
+   priority: z.number()
+ });
+
+ const tasks = await generateArray({
+   prompt: 'task_list@v1',
+   variables: { project: 'website' },
+   schema: taskSchema
+ });
+ ```
+
+ ## Generate Enum
+
+ Generate a value from a list of allowed options:
+
+ ```js
+ import { generateEnum } from '@output.ai/llm';
+
+ const category = await generateEnum({
+   prompt: 'categorize@v1',
+   variables: { text: 'Product announcement' },
+   enum: ['marketing', 'engineering', 'sales', 'support']
+ });
+ ```
+
+ ## Prompt Files
+
+ Prompt files use YAML frontmatter for configuration and support LiquidJS templating:
+
+ **File: `explain_topic@v1.prompt`**
+ ```yaml
+ ---
+ provider: anthropic
+ model: claude-sonnet-4-20250514
+ temperature: 0.7
+ ---
+
+ <system>
+ You are a concise technical explainer.
+ </system>
+
+ <user>
+ Explain {{ topic }} in 3 bullet points.
+ </user>
+ ```
+
+ ## Configuration Options
+
+ Prompt files support these configuration fields:
+
+ ```yaml
+ ---
+ provider: anthropic | openai | azure
+ model: model-name
+ temperature: 0.0-1.0 (optional)
+ maxTokens: number (optional)
+ providerOptions: (optional)
+   thinking:
+     type: enabled
+     budgetTokens: number
+ ---
+ ```
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@output.ai/llm",
-   "version": "0.1.0",
+   "version": "0.2.1",
    "description": "Framework abstraction to interact with LLM models",
    "type": "module",
    "main": "src/index.js",
@@ -13,8 +13,9 @@
    "@ai-sdk/azure": "2.0.53",
    "@ai-sdk/openai": "2.0.52",
    "@output.ai/core": ">=0.0.1",
-   "@output.ai/prompt": ">=0.0.1",
-   "ai": "5.0.48"
+   "ai": "5.0.48",
+   "gray-matter": "4.0.3",
+   "liquidjs": "10.22.0"
  },
  "license": "UNLICENSED"
  }
package/src/ai_model.js CHANGED
@@ -4,6 +4,31 @@ import { openai } from '@ai-sdk/openai';
 
  const providers = { azure, anthropic, openai };
 
- export const loadModel = prompt => {
-   return providers[prompt.config.provider]( prompt.config.model );
- };
+ export function loadModel( prompt ) {
+   const config = prompt?.config;
+
+   if ( !config ) {
+     throw new Error( 'Prompt is missing config object' );
+   }
+
+   const { provider: providerName, model: modelName } = config;
+
+   if ( !providerName ) {
+     throw new Error( 'Prompt config is missing "provider" field' );
+   }
+
+   if ( !modelName ) {
+     throw new Error( 'Prompt config is missing "model" field' );
+   }
+
+   const provider = providers[providerName];
+
+   if ( !provider ) {
+     const validProviders = Object.keys( providers ).join( ', ' );
+     throw new Error(
+       `Invalid provider "${providerName}". Valid providers: ${validProviders}`
+     );
+   }
+
+   return provider( modelName );
+ }
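For orientation, a minimal sketch of what the rewritten `loadModel` consumes; the prompt shape follows the `Prompt` type declared in `index.d.ts` further down (model name illustrative):

```js
// The config's provider name selects one of { azure, anthropic, openai }
// from the AI SDK; the model name is passed through to that provider factory.
const model = loadModel( {
  name: 'explain_topic@v1',
  config: { provider: 'anthropic', model: 'claude-sonnet-4-20250514' },
  messages: [ { role: 'user', content: 'Hello' } ]
} );
// A missing config, provider, or model, or an unknown provider, now throws
// a descriptive Error rather than the old code's bare TypeError.
```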
package/src/ai_sdk.js CHANGED
@@ -2,6 +2,7 @@ import { Tracing } from '@output.ai/core/tracing';
  import { loadModel } from './ai_model.js';
  import * as AI from 'ai';
  import { validateGenerateTextArgs, validateGenerateObjectArgs, validateGenerateArrayArgs, validateGenerateEnumArgs } from './validations.js';
+ import { loadPrompt } from './prompt_loader.js';
 
  const traceWrapper = async ( traceId, fn ) => {
    try {
@@ -14,28 +15,47 @@ const traceWrapper = async ( traceId, fn ) => {
    }
  };
 
- const extraAiSdkOptionsFromPrompt = prompt => ( {
-   model: loadModel( prompt ),
-   messages: prompt.messages,
-   ...( prompt.config.temperature && { temperature: prompt.config.temperature } ),
-   ...( prompt.config.max_tokens && { maxOutputTokens: prompt.config.maxTokens } ),
-   providerOptions: prompt.providerOptions
- } );
+ const createTraceId = name => `${name}-${Date.now()}`;
+
+ const startTrace = ( name, details ) => {
+   const traceId = createTraceId( name );
+   Tracing.addEventStart( { kind: 'llm', name, id: traceId, details } );
+   return traceId;
+ };
+
+ const extraAiSdkOptionsFromPrompt = prompt => {
+   const options = {
+     model: loadModel( prompt ),
+     messages: prompt.messages,
+     providerOptions: prompt.config.providerOptions
+   };
+
+   if ( prompt.config.temperature ) {
+     options.temperature = prompt.config.temperature;
+   }
+
+   if ( prompt.config.maxTokens ) {
+     options.maxOutputTokens = prompt.config.maxTokens;
+   }
+
+   return options;
+ };
 
  /**
   * Use an LLM model to generate text.
   *
   * @param {object} args - Generation arguments
-  * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+  * @param {string} args.prompt - Prompt file name
+  * @param {Record<string, string | number>} [args.variables] - Variables to interpolate
   * @returns {Promise<string>} Generated text
   */
- export async function generateText( { prompt } ) {
-   validateGenerateTextArgs( { prompt } );
-   const traceId = `generateText-${Date.now()}`;
-   Tracing.addEventStart( { kind: 'llm', name: 'generateText', id: traceId, details: { prompt } } );
+ export async function generateText( { prompt, variables } ) {
+   validateGenerateTextArgs( { prompt, variables } );
+   const loadedPrompt = loadPrompt( prompt, variables );
+   const traceId = startTrace( 'generateText', { prompt: loadedPrompt } );
 
    return traceWrapper( traceId, async () =>
-     AI.generateText( extraAiSdkOptionsFromPrompt( prompt ) ).then( r => r.text )
+     AI.generateText( extraAiSdkOptionsFromPrompt( loadedPrompt ) ).then( r => r.text )
    );
  }
 
@@ -43,7 +63,8 @@ export async function generateText( { prompt } ) {
   * Use an LLM model to generate an object with a fixed schema.
   *
   * @param {object} args - Generation arguments
-  * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+  * @param {string} args.prompt - Prompt file name
+  * @param {Record<string, string | number>} [args.variables] - Variables to interpolate
   * @param {z.ZodObject} args.schema - Output schema
   * @param {string} [args.schemaName] - Output schema name
   * @param {string} [args.schemaDescription] - Output schema description
@@ -51,10 +72,9 @@ export async function generateText( { prompt } ) {
   */
  export async function generateObject( args ) {
    validateGenerateObjectArgs( args );
-   const { prompt, schema, schemaName, schemaDescription } = args;
-
-   const traceId = `generateObject-${Date.now()}`;
-   Tracing.addEventStart( { kind: 'llm', name: 'generateObject', id: traceId, details: args } );
+   const { prompt, variables, schema, schemaName, schemaDescription } = args;
+   const loadedPrompt = loadPrompt( prompt, variables );
+   const traceId = startTrace( 'generateObject', { ...args, prompt: loadedPrompt } );
 
    return traceWrapper( traceId, async () =>
      AI.generateObject( {
@@ -62,7 +82,7 @@ export async function generateObject( args ) {
        schema,
        schemaName,
        schemaDescription,
-       ...extraAiSdkOptionsFromPrompt( prompt )
+       ...extraAiSdkOptionsFromPrompt( loadedPrompt )
      } ).then( r => r.object )
    );
  }
@@ -71,7 +91,8 @@ export async function generateObject( args ) {
   * Use an LLM model to generate an array of values with a fixed schema.
   *
   * @param {object} args - Generation arguments
-  * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+  * @param {string} args.prompt - Prompt file name
+  * @param {Record<string, string | number>} [args.variables] - Variables to interpolate
   * @param {z.ZodType} args.schema - Output schema (array item)
   * @param {string} [args.schemaName] - Output schema name
   * @param {string} [args.schemaDescription] - Output schema description
@@ -79,10 +100,9 @@ export async function generateObject( args ) {
   */
  export async function generateArray( args ) {
    validateGenerateArrayArgs( args );
-   const { prompt, schema, schemaName, schemaDescription } = args;
-
-   const traceId = `generateArray-${Date.now()}`;
-   Tracing.addEventStart( { kind: 'llm', name: 'generateArray', id: traceId, details: args } );
+   const { prompt, variables, schema, schemaName, schemaDescription } = args;
+   const loadedPrompt = loadPrompt( prompt, variables );
+   const traceId = startTrace( 'generateArray', { ...args, prompt: loadedPrompt } );
 
    return traceWrapper( traceId, async () =>
      AI.generateObject( {
@@ -90,7 +110,7 @@ export async function generateArray( args ) {
        schema,
        schemaName,
        schemaDescription,
-       ...extraAiSdkOptionsFromPrompt( prompt )
+       ...extraAiSdkOptionsFromPrompt( loadedPrompt )
      } ).then( r => r.object )
    );
  }
@@ -99,18 +119,22 @@ export async function generateArray( args ) {
   * Use an LLM model to generate a result from an enum (array of string values).
   *
   * @param {object} args - Generation arguments
-  * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+  * @param {string} args.prompt - Prompt file name
+  * @param {Record<string, string | number>} [args.variables] - Variables to interpolate
   * @param {string[]} args.enum - Allowed values for the generation
   * @returns {Promise<string>} One of the provided enum values
   */
  export async function generateEnum( args ) {
    validateGenerateEnumArgs( args );
-   const { prompt, enum: _enum } = args;
-
-   const traceId = `generateEnum-${Date.now()}`;
-   Tracing.addEventStart( { kind: 'llm', name: 'generateEnum', id: traceId, details: args } );
+   const { prompt, variables, enum: _enum } = args;
+   const loadedPrompt = loadPrompt( prompt, variables );
+   const traceId = startTrace( 'generateEnum', { ...args, prompt: loadedPrompt } );
 
    return traceWrapper( traceId, async () =>
-     AI.generateObject( { output: 'enum', enum: _enum, ...extraAiSdkOptionsFromPrompt( prompt ) } ).then( r => r.object )
+     AI.generateObject( {
+       output: 'enum',
+       enum: _enum,
+       ...extraAiSdkOptionsFromPrompt( loadedPrompt )
+     } ).then( r => r.object )
    );
  }
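The net effect of this file's changes is a new calling convention: callers pass a prompt file name plus variables instead of a prebuilt `Prompt` object. A before/after sketch (prompt name taken from the README above):

```js
// 0.1.0: the caller built a Prompt object elsewhere and passed it in.
// await generateText( { prompt: somePromptObject } );

// 0.2.1: the module resolves and renders the .prompt file itself via
// loadPrompt(), then traces with the loaded prompt in the event details.
const text = await generateText( {
  prompt: 'explain_topic@v1',
  variables: { topic: 'machine learning' }
} );
```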
package/src/ai_sdk.spec.js CHANGED
@@ -27,12 +27,21 @@ const validators = {
  };
  vi.mock( './validations.js', () => ( validators ) );
 
+ const loadPromptImpl = vi.fn();
+ vi.mock( './prompt_loader.js', () => ( {
+   loadPrompt: ( ...values ) => loadPromptImpl( ...values )
+ } ) );
+
  const importSut = async () => import( './ai_sdk.js' );
 
  const basePrompt = {
-   config: { provider: 'openai', model: 'gpt-4o-mini', temperature: 0.3 },
-   messages: [ { role: 'user', content: 'Hi' } ],
-   providerOptions: { thinking: { enabled: true } }
+   config: {
+     provider: 'openai',
+     model: 'gpt-4o-mini',
+     temperature: 0.3,
+     providerOptions: { thinking: { enabled: true } }
+   },
+   messages: [ { role: 'user', content: 'Hi' } ]
  };
 
  beforeEach( () => {
@@ -41,6 +50,7 @@ beforeEach( () => {
    tracingSpies.addEventError.mockClear();
 
    loadModelImpl.mockReset().mockReturnValue( 'MODEL' );
+   loadPromptImpl.mockReset().mockReturnValue( basePrompt );
 
    aiFns.generateText.mockReset().mockResolvedValue( { text: 'TEXT' } );
    aiFns.generateObject.mockReset().mockResolvedValue( { object: 'OBJECT' } );
@@ -59,9 +69,10 @@ afterEach( async () => {
  describe( 'ai_sdk', () => {
    it( 'generateText: validates, traces, calls AI and returns text', async () => {
      const { generateText } = await importSut();
-     const result = await generateText( { prompt: basePrompt } );
+     const result = await generateText( { prompt: 'test_prompt@v1' } );
 
-     expect( validators.validateGenerateTextArgs ).toHaveBeenCalledWith( { prompt: basePrompt } );
+     expect( validators.validateGenerateTextArgs ).toHaveBeenCalledWith( { prompt: 'test_prompt@v1' } );
+     expect( loadPromptImpl ).toHaveBeenCalledWith( 'test_prompt@v1', undefined );
      expect( tracingSpies.addEventStart ).toHaveBeenCalledTimes( 1 );
      expect( tracingSpies.addEventEnd ).toHaveBeenCalledTimes( 1 );
 
@@ -70,7 +81,7 @@ describe( 'ai_sdk', () => {
        model: 'MODEL',
        messages: basePrompt.messages,
        temperature: 0.3,
-       providerOptions: basePrompt.providerOptions
+       providerOptions: basePrompt.config.providerOptions
      } );
      expect( result ).toBe( 'TEXT' );
    } );
@@ -81,13 +92,14 @@ describe( 'ai_sdk', () => {
 
      const schema = z.object( { a: z.number() } );
      const result = await generateObject( {
-       prompt: basePrompt,
+       prompt: 'test_prompt@v1',
        schema,
        schemaName: 'Thing',
        schemaDescription: 'A thing'
      } );
 
      expect( validators.validateGenerateObjectArgs ).toHaveBeenCalled();
+     expect( loadPromptImpl ).toHaveBeenCalledWith( 'test_prompt@v1', undefined );
      expect( tracingSpies.addEventStart ).toHaveBeenCalledTimes( 1 );
      expect( tracingSpies.addEventEnd ).toHaveBeenCalledTimes( 1 );
 
@@ -99,7 +111,7 @@ describe( 'ai_sdk', () => {
        model: 'MODEL',
        messages: basePrompt.messages,
        temperature: 0.3,
-       providerOptions: basePrompt.providerOptions
+       providerOptions: basePrompt.config.providerOptions
      } );
      expect( result ).toEqual( { a: 1 } );
    } );
@@ -110,13 +122,14 @@ describe( 'ai_sdk', () => {
 
      const schema = z.number();
      const result = await generateArray( {
-       prompt: basePrompt,
+       prompt: 'test_prompt@v1',
        schema,
        schemaName: 'Numbers',
        schemaDescription: 'Two numbers'
      } );
 
      expect( validators.validateGenerateArrayArgs ).toHaveBeenCalled();
+     expect( loadPromptImpl ).toHaveBeenCalledWith( 'test_prompt@v1', undefined );
      expect( tracingSpies.addEventStart ).toHaveBeenCalledTimes( 1 );
      expect( tracingSpies.addEventEnd ).toHaveBeenCalledTimes( 1 );
 
@@ -128,7 +141,7 @@ describe( 'ai_sdk', () => {
        model: 'MODEL',
        messages: basePrompt.messages,
        temperature: 0.3,
-       providerOptions: basePrompt.providerOptions
+       providerOptions: basePrompt.config.providerOptions
      } );
      expect( result ).toEqual( [ 1, 2 ] );
    } );
@@ -137,9 +150,10 @@ describe( 'ai_sdk', () => {
      const { generateEnum } = await importSut();
      aiFns.generateObject.mockResolvedValueOnce( { object: 'B' } );
 
-     const result = await generateEnum( { prompt: basePrompt, enum: [ 'A', 'B', 'C' ] } );
+     const result = await generateEnum( { prompt: 'test_prompt@v1', enum: [ 'A', 'B', 'C' ] } );
 
      expect( validators.validateGenerateEnumArgs ).toHaveBeenCalled();
+     expect( loadPromptImpl ).toHaveBeenCalledWith( 'test_prompt@v1', undefined );
      expect( tracingSpies.addEventStart ).toHaveBeenCalledTimes( 1 );
      expect( tracingSpies.addEventEnd ).toHaveBeenCalledTimes( 1 );
 
@@ -149,7 +163,7 @@ describe( 'ai_sdk', () => {
        model: 'MODEL',
        messages: basePrompt.messages,
        temperature: 0.3,
-       providerOptions: basePrompt.providerOptions
+       providerOptions: basePrompt.config.providerOptions
      } );
      expect( result ).toBe( 'B' );
    } );
package/src/index.d.ts CHANGED
@@ -1,21 +1,90 @@
  import type { z } from '@output.ai/core';
- import type { Prompt } from '@output.ai/prompt';
 
- export type { Prompt };
+ /**
+  * Represents a single message in a prompt conversation
+  * @example
+  * const msg: PromptMessage = {
+  *   role: 'user',
+  *   content: 'Hello, Claude!'
+  * };
+  */
+ export type PromptMessage = {
+   /** The role of the message. Examples: 'system', 'user', 'assistant' */
+   role: string;
+   /** The content of the message */
+   content: string;
+ };
+
+ /**
+  * Configuration for LLM prompt generation
+  * @example
+  * const prompt: Prompt = {
+  *   name: 'summarizePrompt',
+  *   config: {
+  *     provider: 'anthropic',
+  *     model: 'claude-opus-4-1',
+  *     temperature: 0.7,
+  *     maxTokens: 2048
+  *   },
+  *   messages: [...]
+  * };
+  */
+ export type Prompt = {
+   /** Name of the prompt file */
+   name: string;
+
+   /** General configurations for the LLM */
+   config: {
+     /** LLM Provider */
+     provider: 'anthropic' | 'openai' | 'azure';
+
+     /** Model name/identifier */
+     model: string;
+
+     /** Generation temperature (0-2). Lower = more deterministic */
+     temperature?: number;
+
+     /** Maximum tokens in the response */
+     maxTokens?: number;
+
+     /** Additional provider-specific options */
+     options?: Record<string, Record<string, JSONValue>>;
+
+     /** Provider-specific configurations */
+     providerOptions?: Record<string, unknown>;
+   };
+
+   /** Array of messages in the conversation */
+   messages: PromptMessage[];
+ };
+
+ /**
+  * Load a prompt file and render it with variables.
+  *
+  * @param {string} name - Name of the prompt file (without .prompt extension)
+  * @param {Record<string, string | number>} [variables] - Variables to interpolate
+  * @returns {Prompt} Loaded and rendered prompt object
+  */
+ export function loadPrompt(
+   name: string,
+   variables?: Record<string, string | number>
+ ): Prompt;
 
  /**
   * Use an LLM model to generate text.
   *
   * This function is a wrapper over the AI SDK's `generateText`.
-  * The `Prompt` sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
+  * The prompt file sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
   *
   * @param {object} args - Generation arguments
-  * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+  * @param {string} args.prompt - Prompt file name
+  * @param {Record<string, string | number>} args.variables - Variables to interpolate
   * @returns {Promise<string>} Generated text
   */
  export function generateText(
    args: {
-     prompt: Prompt
+     prompt: string,
+     variables?: Record<string, string | number>
    }
  ): Promise<string>;
 
@@ -23,10 +92,11 @@ export function generateText(
   * Use an LLM model to generate an object with a fixed schema.
   *
   * This function is a wrapper over the AI SDK's `generateObject`.
-  * The `Prompt` sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
+  * The prompt file sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
   *
   * @param {object} args - Generation arguments
-  * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+  * @param {string} args.prompt - Prompt file name
+  * @param {Record<string, string | number>} args.variables - Variables to interpolate
   * @param {z.ZodObject} args.schema - Output schema
   * @param {string} [args.schemaName] - Output schema name
   * @param {string} [args.schemaDescription] - Output schema description
@@ -34,8 +104,9 @@ export function generateText(
   */
  export function generateObject<TSchema extends z.ZodObject>(
    args: {
-     prompt: Prompt,
-     schema?: TSchema,
+     prompt: string,
+     variables?: Record<string, string | number>,
+     schema: TSchema,
      schemaName?: string,
      schemaDescription?: string
    }
@@ -45,10 +116,11 @@ export function generateObject<TSchema extends z.ZodObject>(
   * Use an LLM model to generate an array of values with a fixed schema.
   *
   * This function is a wrapper over the AI SDK's `generateObject` with `output: 'array'`.
-  * The `Prompt` sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
+  * The prompt file sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
   *
   * @param {object} args - Generation arguments
-  * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+  * @param {string} args.prompt - Prompt file name
+  * @param {Record<string, string | number>} args.variables - Variables to interpolate
   * @param {z.ZodType} args.schema - Output schema (array item)
   * @param {string} [args.schemaName] - Output schema name
   * @param {string} [args.schemaDescription] - Output schema description
@@ -56,8 +128,9 @@ export function generateObject<TSchema extends z.ZodObject>(
   */
  export function generateArray<TSchema extends z.ZodType>(
    args: {
-     prompt: Prompt,
-     schema?: TSchema,
+     prompt: string,
+     variables?: Record<string, string | number>,
+     schema: TSchema,
      schemaName?: string,
      schemaDescription?: string
    }
@@ -67,16 +140,18 @@ export function generateArray<TSchema extends z.ZodType>(
   * Use an LLM model to generate a result from an enum (array of string values).
   *
   * This function is a wrapper over the AI SDK's `generateObject` with `output: 'enum'`.
-  * The `Prompt` sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
+  * The prompt file sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
   *
   * @param {object} args - Generation arguments
-  * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+  * @param {string} args.prompt - Prompt file name
+  * @param {Record<string, string | number>} args.variables - Variables to interpolate
   * @param {string[]} args.enum - Allowed values for the generation
   * @returns {Promise<string>} One of the provided enum values
   */
  export function generateEnum<const TEnum extends readonly [string, ...string[]]>(
    args: {
-     prompt: Prompt,
+     prompt: string,
+     variables?: Record<string, string | number>,
      enum: TEnum
    }
  ): Promise<TEnum[number]>;
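Since `loadPrompt` is now part of the public surface, a short sketch of its return value under these declarations (prompt name and variables illustrative):

```js
import { loadPrompt } from '@output.ai/llm';

// Loads explain_topic@v1.prompt, renders {{ topic }} via LiquidJS, and
// returns a Prompt: { name, config: { provider, model, temperature?,
// maxTokens?, providerOptions? }, messages: [ { role, content }, ... ] }
const prompt = loadPrompt( 'explain_topic@v1', { topic: 'vector search' } );
```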
package/src/index.js CHANGED
@@ -1,2 +1,3 @@
  export { generateText, generateArray, generateObject, generateEnum } from './ai_sdk.js';
+ export { loadPrompt } from './prompt_loader.js';
  export * as ai from 'ai';
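After this change the package root exports the prompt loader alongside the generators and the re-exported AI SDK:

```js
// All public entry points in 0.2.1, per src/index.js above.
import {
  generateText, generateObject, generateArray, generateEnum,
  loadPrompt, ai
} from '@output.ai/llm';
```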
package/src/load_content.js ADDED
@@ -0,0 +1,121 @@
+ import { join, dirname } from 'path';
+ import { readFileSync, readdirSync } from 'node:fs';
+ import { fileURLToPath } from 'node:url';
+
+ function extractFilePath( fileUrlMatch, parenMatch, directMatch ) {
+   if ( fileUrlMatch ) {
+     return fileURLToPath( 'file://' + fileUrlMatch[1] );
+   }
+   if ( parenMatch ) {
+     return parenMatch[1];
+   }
+   if ( directMatch ) {
+     return directMatch[1];
+   }
+   return null;
+ }
+
+ export const getInvocationDir = _ => {
+   const stack = new Error().stack;
+
+   if ( !stack ) {
+     throw new Error( 'Stack trace is unavailable - cannot determine invocation directory' );
+   }
+
+   const lines = stack.split( '\n' );
+
+   // Search through stack to find first valid file path that is NOT in SDK
+   // Skip first 2 lines (Error message and getInvocationDir itself)
+   for ( const line of lines.slice( 2 ) ) {
+     // Match file:// URLs (ESM) or regular file paths
+     // Pattern 1: file:///path/to/file.js:line:col
+     // Pattern 2: at func (/path/to/file.js:line:col)
+     // Pattern 3: /path/to/file.js:line:col
+     const fileUrlMatch = line.match( /file:\/\/([^:]+\.(?:js|mjs|ts|tsx|jsx))/ );
+     const parenMatch = line.match( /\(([^:)]+\.(?:js|mjs|ts|tsx|jsx)):\d+:\d+\)/ );
+     const directMatch = line.match( /^\s*at\s+[^(]*([^:]+\.(?:js|mjs|ts|tsx|jsx)):\d+:\d+/ );
+
+     // Determine the file path from different stack trace formats
+     const filePath = extractFilePath( fileUrlMatch, parenMatch, directMatch );
+
+     if ( filePath ) {
+       // Skip internal Node.js paths
+       if ( filePath.includes( 'node:' ) || filePath.includes( 'internal/' ) ) {
+         continue;
+       }
+
+       // Skip SDK internal files - we want the actual caller, not the SDK
+       // This includes @output.ai packages and sdk/ directory
+       if ( filePath.includes( '/sdk/' ) || filePath.includes( 'node_modules/@output.ai/' ) ) {
+         continue;
+       }
+
+       // Found a valid caller file outside the SDK
+       return dirname( filePath );
+     }
+   }
+
+   throw new Error(
+     'Unable to determine invocation directory from stack trace. ' +
+     `Stack preview:\n${lines.slice( 0, 10 ).join( '\n' )}`
+   );
+ };
+
+ /**
+  * Recursively search for a file by its name and load its content.
+  *
+  * @param {string} name - Name of the file to load
+  * @param {string} [dir=<invocation directory>] - The directory to search for the file
+  * @param {number} [depth=0] - Current recursion depth
+  * @param {number} [maxDepth=5] - Maximum recursion depth
+  * @returns {string | null} - File content or null if not found
+  */
+ export const loadContent = ( name, dir = getInvocationDir(), depth = 0, maxDepth = 5 ) => {
+   // Stop recursion if max depth exceeded
+   if ( depth > maxDepth ) {
+     return null;
+   }
+
+   // Validate filename doesn't contain path separators (prevent directory traversal)
+   if ( name.includes( '..' ) || name.includes( '/' ) || name.includes( '\\' ) ) {
+     throw new Error( `Invalid file name "${name}" - must not contain path separators or ".."` );
+   }
+
+   try {
+     const entries = readdirSync( dir, { withFileTypes: true } );
+
+     for ( const entry of entries ) {
+       if ( entry.name === name ) {
+         try {
+           return readFileSync( join( dir, entry.name ), 'utf-8' );
+         } catch ( error ) {
+           throw new Error(
+             `Found file "${name}" in "${dir}" but failed to read it: ${error.message}`,
+             { cause: error }
+           );
+         }
+       }
+
+       // Recurse into subdirectories, but skip symlinks to prevent infinite loops
+       if ( entry.isDirectory() && !entry.isSymbolicLink() ) {
+         const content = loadContent( name, join( dir, entry.name ), depth + 1, maxDepth );
+         if ( content !== null ) {
+           return content;
+         }
+       }
+     }
+
+     return null;
+   } catch ( error ) {
+     // Only suppress ENOENT (directory doesn't exist) during recursion
+     // This is expected when searching through nested directories
+     if ( error.code === 'ENOENT' ) {
+       return null;
+     }
+     // Propagate all other errors (permission denied, I/O errors, etc.)
+     throw new Error(
+       `Failed to read directory "${dir}" while searching for "${name}": ${error.message}`,
+       { cause: error }
+     );
+   }
+ };
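A sketch of how these helpers behave (file name illustrative): the default search root is the calling module's directory, recovered from the stack trace, and the walk stops five directory levels down.

```js
import { loadContent } from './load_content.js';

// Equivalent to loadContent( 'explain_topic@v1.prompt', getInvocationDir(), 0, 5 ):
// walks the caller's directory tree, skipping symlinked directories, and
// returns the file's text, or null if nothing matched within maxDepth.
const raw = loadContent( 'explain_topic@v1.prompt' );
```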
package/src/load_content.spec.js ADDED
@@ -0,0 +1,97 @@
+ import { describe, it, expect } from 'vitest';
+ import { getInvocationDir, loadContent } from './load_content.js';
+ import { join } from 'path';
+ import { mkdtempSync, writeFileSync, mkdirSync } from 'node:fs';
+ import { tmpdir } from 'node:os';
+
+ describe( 'getInvocationDir', () => {
+   it( 'extracts directory from stack trace', () => {
+     const dir = getInvocationDir();
+     expect( dir ).toBeTruthy();
+     expect( typeof dir ).toBe( 'string' );
+     // Should not contain filename, only directory
+     expect( dir ).not.toContain( 'load_content.spec.js' );
+   } );
+
+   it( 'returns consistent directory when called multiple times', () => {
+     const dir1 = getInvocationDir();
+     const dir2 = getInvocationDir();
+     expect( dir1 ).toBe( dir2 );
+   } );
+
+   it( 'returns a valid filesystem path', () => {
+     const dir = getInvocationDir();
+     // Should be a non-empty string that looks like a directory path
+     expect( dir ).toBeTruthy();
+     expect( typeof dir ).toBe( 'string' );
+     expect( dir.length ).toBeGreaterThan( 0 );
+   } );
+ } );
+
+ describe( 'loadContent', () => {
+   it( 'loads file from root directory', () => {
+     const tempDir = mkdtempSync( join( tmpdir(), 'load-content-test-' ) );
+     const testContent = 'test file content';
+     writeFileSync( join( tempDir, 'test.txt' ), testContent );
+
+     const content = loadContent( 'test.txt', tempDir );
+
+     expect( content ).toBe( testContent );
+   } );
+
+   it( 'loads file from nested subdirectory', () => {
+     const tempDir = mkdtempSync( join( tmpdir(), 'load-content-test-' ) );
+     const subDir = join( tempDir, 'subdir' );
+     mkdirSync( subDir );
+
+     const testContent = 'nested file content';
+     writeFileSync( join( subDir, 'nested.txt' ), testContent );
+
+     const content = loadContent( 'nested.txt', tempDir );
+
+     expect( content ).toBe( testContent );
+   } );
+
+   it( 'returns null when file does not exist', () => {
+     const tempDir = mkdtempSync( join( tmpdir(), 'load-content-test-' ) );
+
+     const content = loadContent( 'nonexistent.txt', tempDir );
+
+     expect( content ).toBeNull();
+   } );
+
+   it( 'returns null when starting directory does not exist', () => {
+     // ENOENT is suppressed and returns null (expected behavior)
+     const content = loadContent( 'test.txt', '/nonexistent-root-path-12345' );
+     expect( content ).toBeNull();
+   } );
+
+   it( 'respects depth limit', () => {
+     const tempDir = mkdtempSync( join( tmpdir(), 'load-content-test-' ) );
+
+     // Create deeply nested structure: dir/1/2/3/4/5/6/deep.txt (7 levels)
+     const levels = [ 1, 2, 3, 4, 5, 6 ];
+     const deepPath = levels.reduce( ( acc, level ) => {
+       const newPath = join( acc, `level${level}` );
+       mkdirSync( newPath );
+       return newPath;
+     }, tempDir );
+     writeFileSync( join( deepPath, 'deep.txt' ), 'deeply nested' );
+
+     // Should NOT find it with default maxDepth of 5
+     const content = loadContent( 'deep.txt', tempDir );
+     expect( content ).toBeNull();
+   } );
+
+   it( 'throws error when filename contains path separators', () => {
+     const tempDir = mkdtempSync( join( tmpdir(), 'load-content-test-' ) );
+
+     expect( () => {
+       loadContent( '../test.txt', tempDir );
+     } ).toThrow( /Invalid file name/ );
+
+     expect( () => {
+       loadContent( 'foo/bar.txt', tempDir );
+     } ).toThrow( /Invalid file name/ );
+   } );
+ } );
package/src/parser.js ADDED
@@ -0,0 +1,27 @@
+ import matter from 'gray-matter';
+
+ export function parsePrompt( raw ) {
+   const { data, content } = matter( raw );
+
+   if ( !content || content.trim() === '' ) {
+     throw new Error( 'Prompt file has no content after frontmatter' );
+   }
+
+   const infoExtractor = /<(system|user|assistant|tool)>([\s\S]*?)<\/\1>/gm;
+   const messages = [ ...content.matchAll( infoExtractor ) ].map(
+     ( [ _, role, text ] ) => ( { role, content: text.trim() } )
+   );
+
+   if ( messages.length === 0 ) {
+     const contentPreview = content.substring( 0, 200 );
+     const ellipsis = content.length > 200 ? '...' : '';
+
+     throw new Error(
+       `No valid message blocks found in prompt file.
+ Expected format: <system>...</system>, <user>...</user>, etc.
+ Content preview: ${contentPreview}${ellipsis}`
+     );
+   }
+
+   return { data, messages };
+ }
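A sketch of what `parsePrompt` returns for a minimal `.prompt` file (behavior mirrored from the spec below):

```js
// gray-matter splits the YAML frontmatter from the body; the regex then
// extracts <system|user|assistant|tool> blocks in document order.
const raw = `---
provider: openai
model: gpt-4o-mini
---
<system>Be terse.</system>
<user>Summarize {{ text }}.</user>`;

const { data, messages } = parsePrompt( raw );
// data     -> { provider: 'openai', model: 'gpt-4o-mini' }
// messages -> [ { role: 'system', content: 'Be terse.' },
//               { role: 'user', content: 'Summarize {{ text }}.' } ]
```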
package/src/parser.spec.js ADDED
@@ -0,0 +1,59 @@
+ import { describe, it, expect } from 'vitest';
+ import { parsePrompt } from './parser.js';
+
+ describe( 'parsePrompt', () => {
+   it( 'parses frontmatter config and message blocks', () => {
+     const raw = `---
+ provider: anthropic
+ model: claude-3-5-sonnet-20241022
+ ---
+
+ <system>You are a helpful assistant.</system>
+ <user>Hello!</user>`;
+
+     const result = parsePrompt( raw );
+
+     expect( result.data ).toEqual( {
+       provider: 'anthropic',
+       model: 'claude-3-5-sonnet-20241022'
+     } );
+     expect( result.messages ).toHaveLength( 2 );
+     expect( result.messages[0] ).toEqual( {
+       role: 'system',
+       content: 'You are a helpful assistant.'
+     } );
+     expect( result.messages[1] ).toEqual( {
+       role: 'user',
+       content: 'Hello!'
+     } );
+   } );
+
+   it( 'throws error when content is empty', () => {
+     const raw = `---
+ provider: anthropic
+ model: claude-3-5-sonnet-20241022
+ ---
+
+ `;
+
+     expect( () => {
+       parsePrompt( raw );
+     } ).toThrow( /no content after frontmatter/ );
+   } );
+
+   it( 'throws error when no valid message blocks found', () => {
+     const raw = `---
+ provider: anthropic
+ model: claude-3-5-sonnet-20241022
+ ---
+
+ This is just plain text without any message tags.`;
+
+     expect( () => {
+       parsePrompt( raw );
+     } ).toThrow( /No valid message blocks found/ );
+     expect( () => {
+       parsePrompt( raw );
+     } ).toThrow( /Expected format/ );
+   } );
+ } );
package/src/prompt_loader.js ADDED
@@ -0,0 +1,59 @@
+ import { parsePrompt } from './parser.js';
+ import { Liquid } from 'liquidjs';
+ import { loadContent } from './load_content.js';
+ import { validatePrompt } from './prompt_validations.js';
+
+ const liquid = new Liquid();
+
+ /**
+  * Render a single message with template variables.
+  *
+  * @param {string} role - The message role
+  * @param {string} content - The message content template
+  * @param {Record<string, string | number>} values - Variables to interpolate
+  * @param {string} promptName - Name of the prompt (for error messages)
+  * @param {number} index - Message index (for error messages)
+  * @returns {{role: string, content: string}} Rendered message object
+  */
+ const renderMessage = ( role, content, values, promptName, index ) => {
+   try {
+     return {
+       role,
+       content: liquid.parseAndRenderSync( content, values )
+     };
+   } catch ( error ) {
+     throw new Error(
+       `Failed to render template in message ${index + 1} (role: ${role}) of prompt "${promptName}": ${error.message}`,
+       { cause: error }
+     );
+   }
+ };
+
+ /**
+  * Load a prompt file and render it with variables.
+  *
+  * @param {string} name - Name of the prompt file (without .prompt extension)
+  * @param {Record<string, string | number>} [values] - Variables to interpolate
+  * @returns {Prompt} Loaded and rendered prompt object
+  */
+ export const loadPrompt = ( name, values ) => {
+   const promptContent = loadContent( `${name}.prompt` );
+   if ( !promptContent ) {
+     throw new Error( `Prompt ${name} not found.` );
+   }
+
+   const { data: config, messages } = parsePrompt( promptContent );
+
+   const prompt = {
+     name,
+     config,
+     messages: messages.map( ( { role, content }, index ) =>
+       renderMessage( role, content, values, name, index )
+     )
+   };
+
+   validatePrompt( prompt );
+
+   return prompt;
+ };
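In short, `loadPrompt` chains the pieces added in this release. A sketch of the flow for a prompt with one Liquid variable (file name and contents illustrative, matching the spec below):

```js
// loadContent() finds greet.prompt -> parsePrompt() splits frontmatter and
// <role> blocks -> LiquidJS renders each message -> validatePrompt() checks
// the final shape before it is returned.
const prompt = loadPrompt( 'greet', { name: 'World' } );
// Given a greet.prompt containing <user>Hello {{ name }}!</user>, this yields
// { name: 'greet', config: { ... }, messages: [ { role: 'user', content: 'Hello World!' } ] }
```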
package/src/prompt_loader.spec.js ADDED
@@ -0,0 +1,56 @@
+ import { describe, it, expect, vi, beforeEach } from 'vitest';
+ import { loadPrompt } from './prompt_loader.js';
+
+ // Mock dependencies
+ vi.mock( './load_content.js', () => ( {
+   loadContent: vi.fn()
+ } ) );
+
+ vi.mock( './parser.js', () => ( {
+   parsePrompt: vi.fn()
+ } ) );
+
+ vi.mock( './prompt_validations.js', () => ( {
+   validatePrompt: vi.fn()
+ } ) );
+
+ import { loadContent } from './load_content.js';
+ import { parsePrompt } from './parser.js';
+ import { validatePrompt } from './prompt_validations.js';
+
+ describe( 'loadPrompt', () => {
+   beforeEach( () => {
+     vi.clearAllMocks();
+   } );
+
+   it( 'loads prompt file and renders with variables', () => {
+     const promptContent = `---
+ provider: anthropic
+ model: claude-3-5-sonnet-20241022
+ ---
+ <user>Hello {{ name }}!</user>`;
+
+     loadContent.mockReturnValue( promptContent );
+     parsePrompt.mockReturnValue( {
+       data: { provider: 'anthropic', model: 'claude-3-5-sonnet-20241022' },
+       messages: [ { role: 'user', content: 'Hello {{ name }}!' } ]
+     } );
+
+     const result = loadPrompt( 'test', { name: 'World' } );
+
+     expect( result.name ).toBe( 'test' );
+     expect( result.config ).toEqual( { provider: 'anthropic', model: 'claude-3-5-sonnet-20241022' } );
+     expect( result.messages ).toHaveLength( 1 );
+     expect( result.messages[0].content ).toBe( 'Hello World!' );
+     expect( validatePrompt ).toHaveBeenCalledWith( result );
+   } );
+
+   it( 'throws error when prompt file not found', () => {
+     loadContent.mockReturnValue( null );
+
+     expect( () => {
+       loadPrompt( 'nonexistent' );
+     } ).toThrow( /Prompt nonexistent not found/ );
+   } );
+
+ } );
package/src/prompt_validations.js ADDED
@@ -0,0 +1,30 @@
+ import { ValidationError, z } from '@output.ai/core';
+
+ export const promptSchema = z.object( {
+   name: z.string(),
+   config: z.object( {
+     provider: z.enum( [ 'anthropic', 'azure', 'openai' ] ),
+     model: z.string(),
+     temperature: z.number().optional(),
+     maxTokens: z.number().optional(),
+     providerOptions: z.object( {
+       thinking: z.object( {
+         type: z.literal( 'enabled' ),
+         budgetTokens: z.number()
+       } ).optional()
+     } ).optional()
+   } ),
+   messages: z.array(
+     z.object( {
+       role: z.string(),
+       content: z.string()
+     } )
+   )
+ } );
+
+ export function validatePrompt( prompt ) {
+   const result = promptSchema.safeParse( prompt );
+   if ( !result.success ) {
+     throw new ValidationError( `Invalid prompt file: ${z.prettifyError( result.error )}` );
+   }
+ }
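The provider enum is the gate here; a sketch of the failure mode (prompt values illustrative):

```js
// 'mistral' is not in the provider enum above, so validatePrompt() throws a
// ValidationError whose message starts with "Invalid prompt file:".
validatePrompt( {
  name: 'bad_prompt',
  config: { provider: 'mistral', model: 'some-model' },
  messages: [ { role: 'user', content: 'hi' } ]
} );
```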
package/src/validations.js CHANGED
@@ -1,12 +1,13 @@
  import { ValidationError, z } from '@output.ai/core';
- import { promptSchema } from '@output.ai/prompt';
 
  const generateTextArgsSchema = z.object( {
-   prompt: promptSchema
+   prompt: z.string(),
+   variables: z.any().optional()
  } );
 
  const generateObjectArgsSchema = z.object( {
-   prompt: promptSchema,
+   prompt: z.string(),
+   variables: z.any().optional(),
    schema: z.custom( v => v instanceof z.ZodObject, {
      message: 'schema must be a ZodObject'
    } ),
@@ -15,7 +16,8 @@ const generateObjectArgsSchema = z.object( {
  } );
 
  const generateArrayArgsSchema = z.object( {
-   prompt: promptSchema,
+   prompt: z.string(),
+   variables: z.any().optional(),
    schema: z.custom( v => v instanceof z.ZodType, {
      message: 'schema must be a ZodType'
    } ),
@@ -24,29 +26,30 @@ const generateArrayArgsSchema = z.object( {
  } );
 
  const generateEnumArgsSchema = z.object( {
-   prompt: promptSchema,
+   prompt: z.string(),
+   variables: z.any().optional(),
    enum: z.array( z.string() )
  } );
 
- export function validateSchema( schema, input, errorPrefix ) {
+ function validateSchema( schema, input, errorPrefix ) {
    const result = schema.safeParse( input );
    if ( !result.success ) {
      throw new ValidationError( `${errorPrefix}: ${z.prettifyError( result.error )}` );
    }
- };
+ }
 
  export function validateGenerateTextArgs( args ) {
    validateSchema( generateTextArgsSchema, args, 'Invalid generateText() arguments' );
- };
+ }
 
  export function validateGenerateObjectArgs( args ) {
    validateSchema( generateObjectArgsSchema, args, 'Invalid generateObject() arguments' );
- };
+ }
 
  export function validateGenerateArrayArgs( args ) {
    validateSchema( generateArrayArgsSchema, args, 'Invalid generateArray() arguments' );
- };
+ }
 
  export function validateGenerateEnumArgs( args ) {
    validateSchema( generateEnumArgsSchema, args, 'Invalid generateEnum() arguments' );
- };
+ }
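With these schemas, `prompt` must now be a string; anything else fails fast, before any prompt file is looked up. A sketch of the new failure mode:

```js
import { generateText } from '@output.ai/llm';

// Passing a 0.1.0-style Prompt object is rejected by validateGenerateTextArgs()
// before loadPrompt() runs:
// -> ValidationError: Invalid generateText() arguments: ...
await generateText( { prompt: { config: {}, messages: [] } } );
```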