@output.ai/llm 0.0.15 → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@output.ai/llm",
-   "version": "0.0.15",
+   "version": "0.1.0",
    "description": "Framework abstraction to interact with LLM models",
    "type": "module",
    "main": "src/index.js",
package/src/ai_model.js CHANGED
@@ -1,29 +1,9 @@
  import { anthropic } from '@ai-sdk/anthropic';
  import { azure } from '@ai-sdk/azure';
  import { openai } from '@ai-sdk/openai';
- import { ValidationError, z } from '@output.ai/core';

  const providers = { azure, anthropic, openai };

- const promptSchema = z.object( {
-   config: z.object( {
-     provider: z.enum( [ 'anthropic', 'azure', 'openai' ] ),
-     model: z.string(),
-     temperature: z.number().optional(),
-     max_tokens: z.number().optional()
-   } ),
-   messages: z.array(
-     z.object( {
-       role: z.string(),
-       content: z.string()
-     } )
-   )
- } );
-
  export const loadModel = prompt => {
-   const result = promptSchema.safeParse( prompt );
-   if ( !result.success ) {
-     throw new ValidationError( `Invalid prompt object: ${result.error.message}` );
-   }
    return providers[prompt.config.provider]( prompt.config.model );
  };
@@ -0,0 +1,33 @@
+ import { it, expect, vi, afterEach } from 'vitest';
+
+ const openaiImpl = vi.fn( model => `openai:${model}` );
+ const azureImpl = vi.fn( model => `azure:${model}` );
+ const anthropicImpl = vi.fn( model => `anthropic:${model}` );
+
+ vi.mock( '@ai-sdk/openai', () => ( {
+   openai: ( ...values ) => openaiImpl( ...values )
+ } ) );
+
+ vi.mock( '@ai-sdk/azure', () => ( {
+   azure: ( ...values ) => azureImpl( ...values )
+ } ) );
+
+ vi.mock( '@ai-sdk/anthropic', () => ( {
+   anthropic: ( ...values ) => anthropicImpl( ...values )
+ } ) );
+
+ import { loadModel } from './ai_model.js';
+
+ afterEach( async () => {
+   await vi.resetModules();
+   vi.clearAllMocks();
+ } );
+
+ it( 'loads model using selected provider', () => {
+   const result = loadModel( { config: { provider: 'openai', model: 'gpt-4o-mini' } } );
+
+   expect( result ).toBe( 'openai:gpt-4o-mini' );
+   expect( openaiImpl ).toHaveBeenCalledWith( 'gpt-4o-mini' );
+   expect( azureImpl ).not.toHaveBeenCalled();
+   expect( anthropicImpl ).not.toHaveBeenCalled();
+ } );
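Note: `loadModel` is now a plain provider lookup; the prompt validation it used to perform has moved into the new validations module and is applied by the public `generate*` functions instead. A minimal usage sketch (the prompt literal below is illustrative, not taken from the package):

import { loadModel } from './ai_model.js';

// Hypothetical prompt object; loadModel only reads config.provider and config.model.
const prompt = {
  config: { provider: 'openai', model: 'gpt-4o-mini' },
  messages: [ { role: 'user', content: 'Hello' } ]
};

// Equivalent to calling openai( 'gpt-4o-mini' ) from '@ai-sdk/openai'.
const model = loadModel( prompt );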
package/src/ai_sdk.js CHANGED
@@ -1,8 +1,9 @@
  import { Tracing } from '@output.ai/core/tracing';
  import { loadModel } from './ai_model.js';
  import * as AI from 'ai';
+ import { validateGenerateTextArgs, validateGenerateObjectArgs, validateGenerateArrayArgs, validateGenerateEnumArgs } from './validations.js';

- const generationWrapper = async ( traceId, fn ) => {
+ const traceWrapper = async ( traceId, fn ) => {
    try {
      const result = await fn();
      Tracing.addEventEnd( { id: traceId, details: result } );
@@ -13,33 +14,103 @@ const generationWrapper = async ( traceId, fn ) => {
    }
  };

- export async function generateText( { prompt, ...nativeAiSdkArgs } ) {
+ const extraAiSdkOptionsFromPrompt = prompt => ( {
+   model: loadModel( prompt ),
+   messages: prompt.messages,
+   ...( prompt.config.temperature && { temperature: prompt.config.temperature } ),
+   ...( prompt.config.max_tokens && { maxOutputTokens: prompt.config.maxTokens } ),
+   providerOptions: prompt.providerOptions
+ } );
+
+ /**
+  * Use an LLM model to generate text.
+  *
+  * @param {object} args - Generation arguments
+  * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+  * @returns {Promise<string>} Generated text
+  */
+ export async function generateText( { prompt } ) {
+   validateGenerateTextArgs( { prompt } );
    const traceId = `generateText-${Date.now()}`;
-   Tracing.addEventStart( { kind: 'llm', name: 'generateText', id: traceId, details: { prompt, nativeAiSdkArgs } } );
-
-   return generationWrapper( traceId, async () => {
-     return ( await AI.generateText( {
-       model: loadModel( prompt ),
-       messages: prompt.messages,
-       temperature: prompt.config.temperature,
-       maxOutputTokens: prompt.config.max_tokens ?? 64000,
-       ...nativeAiSdkArgs
-     } ) ).text;
-   } );
+   Tracing.addEventStart( { kind: 'llm', name: 'generateText', id: traceId, details: { prompt } } );
+
+   return traceWrapper( traceId, async () =>
+     AI.generateText( extraAiSdkOptionsFromPrompt( prompt ) ).then( r => r.text )
+   );
  }

- export async function generateObject( { prompt, ...nativeAiSdkArgs } ) {
+ /**
+  * Use an LLM model to generate an object with a fixed schema.
+  *
+  * @param {object} args - Generation arguments
+  * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+  * @param {z.ZodObject} args.schema - Output schema
+  * @param {string} [args.schemaName] - Output schema name
+  * @param {string} [args.schemaDescription] - Output schema description
+  * @returns {Promise<object>} Object matching the provided schema
+  */
+ export async function generateObject( args ) {
+   validateGenerateObjectArgs( args );
+   const { prompt, schema, schemaName, schemaDescription } = args;
+
    const traceId = `generateObject-${Date.now()}`;
-   Tracing.addEventStart( { kind: 'llm', name: 'generateObject', id: traceId, details: { prompt, nativeAiSdkArgs } } );
-
-   return generationWrapper( traceId, async () => {
-     return ( await AI.generateObject( {
-       model: loadModel( prompt ),
-       output: nativeAiSdkArgs.object ?? 'object',
-       messages: prompt.messages,
-       temperature: prompt.config.temperature,
-       ...( prompt.config.max_tokens && { maxOutputTokens: prompt.config.max_tokens } ),
-       ...nativeAiSdkArgs
-     } ) ).object;
-   } );
+   Tracing.addEventStart( { kind: 'llm', name: 'generateObject', id: traceId, details: args } );
+
+   return traceWrapper( traceId, async () =>
+     AI.generateObject( {
+       output: 'object',
+       schema,
+       schemaName,
+       schemaDescription,
+       ...extraAiSdkOptionsFromPrompt( prompt )
+     } ).then( r => r.object )
+   );
+ }
+
+ /**
+  * Use an LLM model to generate an array of values with a fixed schema.
+  *
+  * @param {object} args - Generation arguments
+  * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+  * @param {z.ZodType} args.schema - Output schema (array item)
+  * @param {string} [args.schemaName] - Output schema name
+  * @param {string} [args.schemaDescription] - Output schema description
+  * @returns {Promise<object>} Array where each element matches the schema
+  */
+ export async function generateArray( args ) {
+   validateGenerateArrayArgs( args );
+   const { prompt, schema, schemaName, schemaDescription } = args;
+
+   const traceId = `generateArray-${Date.now()}`;
+   Tracing.addEventStart( { kind: 'llm', name: 'generateArray', id: traceId, details: args } );
+
+   return traceWrapper( traceId, async () =>
+     AI.generateObject( {
+       output: 'array',
+       schema,
+       schemaName,
+       schemaDescription,
+       ...extraAiSdkOptionsFromPrompt( prompt )
+     } ).then( r => r.object )
+   );
+ }
+
+ /**
+  * Use an LLM model to generate a result from an enum (array of string values).
+  *
+  * @param {object} args - Generation arguments
+  * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+  * @param {string[]} args.enum - Allowed values for the generation
+  * @returns {Promise<string>} One of the provided enum values
+  */
+ export async function generateEnum( args ) {
+   validateGenerateEnumArgs( args );
+   const { prompt, enum: _enum } = args;
+
+   const traceId = `generateEnum-${Date.now()}`;
+   Tracing.addEventStart( { kind: 'llm', name: 'generateEnum', id: traceId, details: args } );
+
+   return traceWrapper( traceId, async () =>
+     AI.generateObject( { output: 'enum', enum: _enum, ...extraAiSdkOptionsFromPrompt( prompt ) } ).then( r => r.object )
+   );
  }
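The public API is now prompt-driven: each `generate*` function validates its arguments, opens a tracing event, and maps the prompt onto AI SDK options via `extraAiSdkOptionsFromPrompt`. A minimal sketch of the new call shapes, assuming a prompt object of the shape the validators expect (normally produced by `@output.ai/prompt`):

import { generateText, generateObject } from '@output.ai/llm';
import { z } from '@output.ai/core';

// Hypothetical prompt; in real code this comes from `@output.ai/prompt`.
const prompt = {
  config: { provider: 'openai', model: 'gpt-4o-mini', temperature: 0.2 },
  messages: [ { role: 'user', content: 'Summarize the release in one line.' } ]
};

const text = await generateText( { prompt } );

const release = await generateObject( {
  prompt,
  schema: z.object( { version: z.string(), breaking: z.boolean() } ),
  schemaName: 'Release'
} );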
@@ -0,0 +1,156 @@
+ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
+ import { z } from '@output.ai/core';
+
+ const tracingSpies = {
+   addEventStart: vi.fn(),
+   addEventEnd: vi.fn(),
+   addEventError: vi.fn()
+ };
+ vi.mock( '@output.ai/core/tracing', () => ( { Tracing: tracingSpies } ), { virtual: true } );
+
+ const loadModelImpl = vi.fn();
+ vi.mock( './ai_model.js', () => ( {
+   loadModel: ( ...values ) => loadModelImpl( ...values )
+ } ) );
+
+ const aiFns = {
+   generateText: vi.fn(),
+   generateObject: vi.fn()
+ };
+ vi.mock( 'ai', () => ( aiFns ) );
+
+ const validators = {
+   validateGenerateTextArgs: vi.fn(),
+   validateGenerateObjectArgs: vi.fn(),
+   validateGenerateArrayArgs: vi.fn(),
+   validateGenerateEnumArgs: vi.fn()
+ };
+ vi.mock( './validations.js', () => ( validators ) );
+
+ const importSut = async () => import( './ai_sdk.js' );
+
+ const basePrompt = {
+   config: { provider: 'openai', model: 'gpt-4o-mini', temperature: 0.3 },
+   messages: [ { role: 'user', content: 'Hi' } ],
+   providerOptions: { thinking: { enabled: true } }
+ };
+
+ beforeEach( () => {
+   tracingSpies.addEventStart.mockClear();
+   tracingSpies.addEventEnd.mockClear();
+   tracingSpies.addEventError.mockClear();
+
+   loadModelImpl.mockReset().mockReturnValue( 'MODEL' );
+
+   aiFns.generateText.mockReset().mockResolvedValue( { text: 'TEXT' } );
+   aiFns.generateObject.mockReset().mockResolvedValue( { object: 'OBJECT' } );
+
+   validators.validateGenerateTextArgs.mockClear();
+   validators.validateGenerateObjectArgs.mockClear();
+   validators.validateGenerateArrayArgs.mockClear();
+   validators.validateGenerateEnumArgs.mockClear();
+ } );
+
+ afterEach( async () => {
+   await vi.resetModules();
+   vi.clearAllMocks();
+ } );
+
+ describe( 'ai_sdk', () => {
+   it( 'generateText: validates, traces, calls AI and returns text', async () => {
+     const { generateText } = await importSut();
+     const result = await generateText( { prompt: basePrompt } );
+
+     expect( validators.validateGenerateTextArgs ).toHaveBeenCalledWith( { prompt: basePrompt } );
+     expect( tracingSpies.addEventStart ).toHaveBeenCalledTimes( 1 );
+     expect( tracingSpies.addEventEnd ).toHaveBeenCalledTimes( 1 );
+
+     expect( loadModelImpl ).toHaveBeenCalledWith( basePrompt );
+     expect( aiFns.generateText ).toHaveBeenCalledWith( {
+       model: 'MODEL',
+       messages: basePrompt.messages,
+       temperature: 0.3,
+       providerOptions: basePrompt.providerOptions
+     } );
+     expect( result ).toBe( 'TEXT' );
+   } );
+
+   it( 'generateObject: validates, traces, calls AI with output object and returns object', async () => {
+     const { generateObject } = await importSut();
+     aiFns.generateObject.mockResolvedValueOnce( { object: { a: 1 } } );
+
+     const schema = z.object( { a: z.number() } );
+     const result = await generateObject( {
+       prompt: basePrompt,
+       schema,
+       schemaName: 'Thing',
+       schemaDescription: 'A thing'
+     } );
+
+     expect( validators.validateGenerateObjectArgs ).toHaveBeenCalled();
+     expect( tracingSpies.addEventStart ).toHaveBeenCalledTimes( 1 );
+     expect( tracingSpies.addEventEnd ).toHaveBeenCalledTimes( 1 );
+
+     expect( aiFns.generateObject ).toHaveBeenCalledWith( {
+       output: 'object',
+       schema,
+       schemaName: 'Thing',
+       schemaDescription: 'A thing',
+       model: 'MODEL',
+       messages: basePrompt.messages,
+       temperature: 0.3,
+       providerOptions: basePrompt.providerOptions
+     } );
+     expect( result ).toEqual( { a: 1 } );
+   } );
+
+   it( 'generateArray: validates, traces, calls AI (item schema) and returns array', async () => {
+     const { generateArray } = await importSut();
+     aiFns.generateObject.mockResolvedValueOnce( { object: [ 1, 2 ] } );
+
+     const schema = z.number();
+     const result = await generateArray( {
+       prompt: basePrompt,
+       schema,
+       schemaName: 'Numbers',
+       schemaDescription: 'Two numbers'
+     } );
+
+     expect( validators.validateGenerateArrayArgs ).toHaveBeenCalled();
+     expect( tracingSpies.addEventStart ).toHaveBeenCalledTimes( 1 );
+     expect( tracingSpies.addEventEnd ).toHaveBeenCalledTimes( 1 );
+
+     expect( aiFns.generateObject ).toHaveBeenCalledWith( {
+       output: 'array',
+       schema,
+       schemaName: 'Numbers',
+       schemaDescription: 'Two numbers',
+       model: 'MODEL',
+       messages: basePrompt.messages,
+       temperature: 0.3,
+       providerOptions: basePrompt.providerOptions
+     } );
+     expect( result ).toEqual( [ 1, 2 ] );
+   } );
+
+   it( 'generateEnum: validates, traces, calls AI with output enum and returns value', async () => {
+     const { generateEnum } = await importSut();
+     aiFns.generateObject.mockResolvedValueOnce( { object: 'B' } );
+
+     const result = await generateEnum( { prompt: basePrompt, enum: [ 'A', 'B', 'C' ] } );
+
+     expect( validators.validateGenerateEnumArgs ).toHaveBeenCalled();
+     expect( tracingSpies.addEventStart ).toHaveBeenCalledTimes( 1 );
+     expect( tracingSpies.addEventEnd ).toHaveBeenCalledTimes( 1 );
+
+     expect( aiFns.generateObject ).toHaveBeenCalledWith( {
+       output: 'enum',
+       enum: [ 'A', 'B', 'C' ],
+       model: 'MODEL',
+       messages: basePrompt.messages,
+       temperature: 0.3,
+       providerOptions: basePrompt.providerOptions
+     } );
+     expect( result ).toBe( 'B' );
+   } );
+ } );
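As the tests above assert, `generateArray` and `generateEnum` both delegate to the AI SDK's `generateObject` with `output: 'array'` and `output: 'enum'`. A short sketch along the same lines (same hypothetical prompt shape as above):

import { generateArray, generateEnum } from '@output.ai/llm';
import { z } from '@output.ai/core';

const prompt = {
  config: { provider: 'openai', model: 'gpt-4o-mini' },
  messages: [ { role: 'user', content: 'Suggest three tags and pick a severity.' } ]
};

// The schema describes one array item, not the whole array.
const tags = await generateArray( { prompt, schema: z.string(), schemaName: 'Tags' } );

// The result is constrained to one of the listed values.
const severity = await generateEnum( { prompt, enum: [ 'low', 'medium', 'high' ] } );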
package/src/index.d.ts CHANGED
@@ -1,110 +1,82 @@
- import type * as AiTypes from 'ai';
- import type { z as CoreZ } from '@output.ai/core';
+ import type { z } from '@output.ai/core';
  import type { Prompt } from '@output.ai/prompt';

  export type { Prompt };

- type NativeGenerateTextArgs = Parameters<typeof AiTypes.generateText>[0];
- type NativeGenerateObjectArgs = Parameters<typeof AiTypes.generateObject>[0];
-
- /**
-  * Simplify types into a plain object while preserving unions
-  * (distributes over union members instead of collapsing their keys)
-  */
- type Simplify<T> = T extends unknown ? { [K in keyof T]: T[K] } & {} : never;
-
- /**
-  * Replace keys K in T with V, preserving unions by distributing over T
-  */
- type Replace<T, K extends PropertyKey, V> = T extends unknown
-   ? Omit<T, Extract<K, keyof T>> & { [P in K]: V }
-   : never;
-
- /**
-  * Text generation arguments
-  * Include all native AI SDK generateText options and a Prompt object from `@output.ai/prompt`
-  */
- export type GenerateTextArgs = Simplify<
-   Replace<Partial<NativeGenerateTextArgs>, 'prompt', Prompt>
- >;
-
- /**
-  * Allow schemas from @output.ai/core's zod in addition to AI SDK's accepted schema types.
-  */
- type WithCoreZodSchema<T> = T extends unknown
-   ? T extends { schema?: infer S }
-     ? Omit<T, 'schema'> & { schema?: S | CoreZ.ZodTypeAny }
-     : T
-   : never;
-
- /**
-  * Object generation arguments
-  * Include all native AI SDK generateObject options and a Prompt object from `@output.ai/prompt`
-  */
- export type GenerateObjectArgs = Simplify<
-   Replace<Partial<WithCoreZodSchema<NativeGenerateObjectArgs>>, 'prompt', Prompt>
- >;
-
  /**
-  * Use a LLM Model to generate text
-  *
-  * This function a wrapper over AI SDK's generateText function.
-  *
-  * It accepts the same arguments of the the original function, plus a "Prompt" object, generated using the `output.ai/prompt`.
+  * Use an LLM model to generate text.
   *
-  * The Prompt object will set `model`, `messages`, `temperature` and `max_tokens`, however all these can be overwritten by their AI SDK native argument values.
+  * This function is a wrapper over the AI SDK's `generateText`.
+  * The `Prompt` sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
   *
-  * @param {GenerateTextArgs} args - Generation arguments
-  * @returns {Promise<string>}
+  * @param {object} args - Generation arguments
+  * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+  * @returns {Promise<string>} Generated text
   */
  export function generateText(
-   args: GenerateTextArgs
+   args: {
+     prompt: Prompt
+   }
  ): Promise<string>;

  /**
-  * Use a LLM Model to generate object
+  * Use an LLM model to generate an object with a fixed schema.
   *
-  * This function a wrapper over AI SDK's generateObject function.
+  * This function is a wrapper over the AI SDK's `generateObject`.
+  * The `Prompt` sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
   *
-  * It accepts the same arguments of the the original function, plus a "Prompt" object, generated using the `output.ai/prompt`.
-  *
-  * The Prompt object will set `model`, `messages`, `temperature` and `max_tokens`, however all these can be overwritten by their AI SDK native argument values.
-  *
-  * @param {GenerateObjectArgs} args - Generation arguments
-  * @returns {Promise<object>} An object matching the provided schema
+  * @param {object} args - Generation arguments
+  * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+  * @param {z.ZodObject} args.schema - Output schema
+  * @param {string} [args.schemaName] - Output schema name
+  * @param {string} [args.schemaDescription] - Output schema description
+  * @returns {Promise<object>} Object matching the provided schema
   */
- export function generateObject<A extends GenerateObjectArgs & { schema: CoreZ.ZodTypeAny }>(
-   args: A
- ): Promise<CoreZ.infer<A['schema']>>;
+ export function generateObject<TSchema extends z.ZodObject>(
+   args: {
+     prompt: Prompt,
+     schema?: TSchema,
+     schemaName?: string,
+     schemaDescription?: string
+   }
+ ): Promise<z.infer<TSchema>>;

  /**
-  * Use a LLM Model to generate object
-  *
-  * This function a wrapper over AI SDK's generateObject function.
+  * Use an LLM model to generate an array of values with a fixed schema.
   *
-  * It accepts the same arguments of the the original function, plus a "Prompt" object, generated using the `output.ai/prompt`.
+  * This function is a wrapper over the AI SDK's `generateObject` with `output: 'array'`.
+  * The `Prompt` sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
   *
-  * The Prompt object will set `model`, `messages`, `temperature` and `max_tokens`, however all these can be overwritten by their AI SDK native argument values.
-  *
-  * @param {GenerateObjectArgs} args - Generation arguments
-  * @returns {Promise<object>} An object matching the provided enum
+  * @param {object} args - Generation arguments
+  * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+  * @param {z.ZodType} args.schema - Output schema (array item)
+  * @param {string} [args.schemaName] - Output schema name
+  * @param {string} [args.schemaDescription] - Output schema description
+  * @returns {Promise<object>} Array where each element matches the schema
   */
- export function generateObject<A extends GenerateObjectArgs & { enum: readonly unknown[]; output: 'enum' }>(
-   args: A
- ): Promise<A['enum'][number]>;
+ export function generateArray<TSchema extends z.ZodType>(
+   args: {
+     prompt: Prompt,
+     schema?: TSchema,
+     schemaName?: string,
+     schemaDescription?: string
+   }
+ ): Promise<Array<z.infer<TSchema>>>;

  /**
-  * Use a LLM Model to generate object
-  *
-  * This function a wrapper over AI SDK's generateObject function.
-  *
-  * It accepts the same arguments of the the original function, plus a "Prompt" object, generated using the `output.ai/prompt`.
+  * Use an LLM model to generate a result from an enum (array of string values).
   *
-  * The Prompt object will set `model`, `messages`, `temperature` and `max_tokens`, however all these can be overwritten by their AI SDK native argument values.
+  * This function is a wrapper over the AI SDK's `generateObject` with `output: 'enum'`.
+  * The `Prompt` sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
   *
-  * @param {GenerateObjectArgs} args - Generation arguments
-  * @returns {Promise<object>} An object without a pre-defined schema schema
+  * @param {object} args - Generation arguments
+  * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+  * @param {string[]} args.enum - Allowed values for the generation
+  * @returns {Promise<string>} One of the provided enum values
   */
- export function generateObject(
-   args: GenerateObjectArgs
- ): Promise<object>;
+ export function generateEnum<const TEnum extends readonly [string, ...string[]]>(
+   args: {
+     prompt: Prompt,
+     enum: TEnum
+   }
+ ): Promise<TEnum[number]>;
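With the rewritten declarations, return types are inferred from the schema or enum argument rather than from AI SDK option types. A type-level sketch (TypeScript, with a hypothetical `prompt` value of the exported `Prompt` type):

import { generateObject, generateEnum, type Prompt } from '@output.ai/llm';
import { z } from '@output.ai/core';

declare const prompt: Prompt;

const user = await generateObject( {
  prompt,
  schema: z.object( { name: z.string(), age: z.number() } )
} );
// user: { name: string; age: number } via Promise<z.infer<TSchema>>

const status = await generateEnum( { prompt, enum: [ 'active', 'inactive' ] } );
// status: 'active' | 'inactive' via Promise<TEnum[number]>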
package/src/index.js CHANGED
@@ -1 +1,2 @@
- export { generateText, generateObject } from './ai_sdk.js';
+ export { generateText, generateArray, generateObject, generateEnum } from './ai_sdk.js';
+ export * as ai from 'ai';
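Besides re-exporting the new wrappers, the index now also exposes the whole AI SDK under the `ai` namespace, so callers can reach the native API without declaring a direct dependency on it. A hypothetical escape-hatch usage (not from the package):

import { ai } from '@output.ai/llm';
import { openai } from '@ai-sdk/openai';

// `ai` is the raw AI SDK namespace, so the unwrapped functions stay available:
const { text } = await ai.generateText( {
  model: openai( 'gpt-4o-mini' ),
  prompt: 'Say hello'
} );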
@@ -0,0 +1,52 @@
+ import { ValidationError, z } from '@output.ai/core';
+ import { promptSchema } from '@output.ai/prompt';
+
+ const generateTextArgsSchema = z.object( {
+   prompt: promptSchema
+ } );
+
+ const generateObjectArgsSchema = z.object( {
+   prompt: promptSchema,
+   schema: z.custom( v => v instanceof z.ZodObject, {
+     message: 'schema must be a ZodObject'
+   } ),
+   schemaName: z.string().optional(),
+   schemaDescription: z.string().optional()
+ } );
+
+ const generateArrayArgsSchema = z.object( {
+   prompt: promptSchema,
+   schema: z.custom( v => v instanceof z.ZodType, {
+     message: 'schema must be a ZodType'
+   } ),
+   schemaName: z.string().optional(),
+   schemaDescription: z.string().optional()
+ } );
+
+ const generateEnumArgsSchema = z.object( {
+   prompt: promptSchema,
+   enum: z.array( z.string() )
+ } );
+
+ export function validateSchema( schema, input, errorPrefix ) {
+   const result = schema.safeParse( input );
+   if ( !result.success ) {
+     throw new ValidationError( `${errorPrefix}: ${z.prettifyError( result.error )}` );
+   }
+ };
+
+ export function validateGenerateTextArgs( args ) {
+   validateSchema( generateTextArgsSchema, args, 'Invalid generateText() arguments' );
+ };
+
+ export function validateGenerateObjectArgs( args ) {
+   validateSchema( generateObjectArgsSchema, args, 'Invalid generateObject() arguments' );
+ };
+
+ export function validateGenerateArrayArgs( args ) {
+   validateSchema( generateArrayArgsSchema, args, 'Invalid generateArray() arguments' );
+ };
+
+ export function validateGenerateEnumArgs( args ) {
+   validateSchema( generateEnumArgsSchema, args, 'Invalid generateEnum() arguments' );
+ };
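Every public function routes its arguments through one of these schemas before any model call, so malformed calls fail fast with a `ValidationError` instead of surfacing as a provider error. A sketch of the observable behavior (the exact message depends on `z.prettifyError`; the prompt literal is hypothetical):

import { generateEnum } from '@output.ai/llm';
import { ValidationError } from '@output.ai/core';

const prompt = {
  config: { provider: 'openai', model: 'gpt-4o-mini' },
  messages: [ { role: 'user', content: 'Pick one.' } ]
};

try {
  // `enum` must be an array of strings, so this is rejected before any model call.
  await generateEnum( { prompt, enum: [ 1, 2, 3 ] } );
} catch ( err ) {
  if ( err instanceof ValidationError ) {
    console.error( err.message ); // "Invalid generateEnum() arguments: ..."
  }
}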