@output.ai/llm 0.0.14 → 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/ai_model.js +0 -20
- package/src/ai_model.spec.js +33 -0
- package/src/ai_sdk.js +97 -26
- package/src/ai_sdk.spec.js +156 -0
- package/src/index.d.ts +63 -41
- package/src/index.js +2 -1
- package/src/validations.js +52 -0
package/package.json
CHANGED
package/src/ai_model.js
CHANGED
@@ -1,29 +1,9 @@
 import { anthropic } from '@ai-sdk/anthropic';
 import { azure } from '@ai-sdk/azure';
 import { openai } from '@ai-sdk/openai';
-import { ValidationError, z } from '@output.ai/core';
 
 const providers = { azure, anthropic, openai };
 
-const promptSchema = z.object( {
-  config: z.object( {
-    provider: z.enum( [ 'anthropic', 'azure', 'openai' ] ),
-    model: z.string(),
-    temperature: z.number().optional(),
-    max_tokens: z.number().optional()
-  } ),
-  messages: z.array(
-    z.object( {
-      role: z.string(),
-      content: z.string()
-    } )
-  )
-} );
-
 export const loadModel = prompt => {
-  const result = promptSchema.safeParse( prompt );
-  if ( !result.success ) {
-    throw new ValidationError( `Invalid prompt object: ${result.error.message}` );
-  }
   return providers[prompt.config.provider]( prompt.config.model );
 };
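
After this change, loadModel is a bare provider lookup: prompt validation moved out of this module and into the new validations.js (see that file below). A minimal, self-contained sketch of the lookup pattern with the @ai-sdk/* provider factories stubbed out; the stub return values are illustrative, not package code:

type ModelFactory = ( model: string ) => unknown;

// Stand-ins for the real @ai-sdk/* provider factories.
const providers: Record<string, ModelFactory> = {
  openai: m => `openai:${m}`,
  azure: m => `azure:${m}`,
  anthropic: m => `anthropic:${m}`
};

// Mirrors the post-0.1.0 loadModel: no schema check before the lookup, so an
// unknown provider now surfaces as a TypeError at the call site rather than a
// ValidationError — the schema check runs earlier, in validations.js.
const loadModel = ( prompt: { config: { provider: string; model: string } } ) =>
  providers[prompt.config.provider]( prompt.config.model );

console.log( loadModel( { config: { provider: 'openai', model: 'gpt-4o-mini' } } ) );
// -> "openai:gpt-4o-mini"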
package/src/ai_model.spec.js
ADDED
@@ -0,0 +1,33 @@
+import { it, expect, vi, afterEach } from 'vitest';
+
+const openaiImpl = vi.fn( model => `openai:${model}` );
+const azureImpl = vi.fn( model => `azure:${model}` );
+const anthropicImpl = vi.fn( model => `anthropic:${model}` );
+
+vi.mock( '@ai-sdk/openai', () => ( {
+  openai: ( ...values ) => openaiImpl( ...values )
+} ) );
+
+vi.mock( '@ai-sdk/azure', () => ( {
+  azure: ( ...values ) => azureImpl( ...values )
+} ) );
+
+vi.mock( '@ai-sdk/anthropic', () => ( {
+  anthropic: ( ...values ) => anthropicImpl( ...values )
+} ) );
+
+import { loadModel } from './ai_model.js';
+
+afterEach( async () => {
+  await vi.resetModules();
+  vi.clearAllMocks();
+} );
+
+it( 'loads model using selected provider', () => {
+  const result = loadModel( { config: { provider: 'openai', model: 'gpt-4o-mini' } } );
+
+  expect( result ).toBe( 'openai:gpt-4o-mini' );
+  expect( openaiImpl ).toHaveBeenCalledWith( 'gpt-4o-mini' );
+  expect( azureImpl ).not.toHaveBeenCalled();
+  expect( anthropicImpl ).not.toHaveBeenCalled();
+} );
package/src/ai_sdk.js
CHANGED
@@ -1,8 +1,9 @@
 import { Tracing } from '@output.ai/core/tracing';
 import { loadModel } from './ai_model.js';
 import * as AI from 'ai';
+import { validateGenerateTextArgs, validateGenerateObjectArgs, validateGenerateArrayArgs, validateGenerateEnumArgs } from './validations.js';
 
-const generationWrapper = async ( traceId, fn ) => {
+const traceWrapper = async ( traceId, fn ) => {
   try {
     const result = await fn();
     Tracing.addEventEnd( { id: traceId, details: result } );
@@ -13,33 +14,103 @@ const generationWrapper = async ( traceId, fn ) => {
   }
 };
 
-
+const extraAiSdkOptionsFromPrompt = prompt => ( {
+  model: loadModel( prompt ),
+  messages: prompt.messages,
+  ...( prompt.config.temperature && { temperature: prompt.config.temperature } ),
+  ...( prompt.config.max_tokens && { maxOutputTokens: prompt.config.max_tokens } ),
+  providerOptions: prompt.providerOptions
+} );
+
+/**
+ * Use an LLM model to generate text.
+ *
+ * @param {object} args - Generation arguments
+ * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+ * @returns {Promise<string>} Generated text
+ */
+export async function generateText( { prompt } ) {
+  validateGenerateTextArgs( { prompt } );
   const traceId = `generateText-${Date.now()}`;
-  Tracing.addEventStart( { kind: 'llm', name: 'generateText', id: traceId, details: { prompt
-
-  return
-
-
-      messages: prompt.messages,
-      temperature: prompt.config.temperature,
-      maxOutputTokens: prompt.config.max_tokens ?? 64000,
-      ...nativeAiSdkArgs
-    } ) ).text;
-  } );
+  Tracing.addEventStart( { kind: 'llm', name: 'generateText', id: traceId, details: { prompt } } );
+
+  return traceWrapper( traceId, async () =>
+    AI.generateText( extraAiSdkOptionsFromPrompt( prompt ) ).then( r => r.text )
+  );
 }
 
-
+/**
+ * Use an LLM model to generate an object with a fixed schema.
+ *
+ * @param {object} args - Generation arguments
+ * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+ * @param {z.ZodObject} args.schema - Output schema
+ * @param {string} [args.schemaName] - Output schema name
+ * @param {string} [args.schemaDescription] - Output schema description
+ * @returns {Promise<object>} Object matching the provided schema
+ */
+export async function generateObject( args ) {
+  validateGenerateObjectArgs( args );
+  const { prompt, schema, schemaName, schemaDescription } = args;
+
   const traceId = `generateObject-${Date.now()}`;
-  Tracing.addEventStart( { kind: 'llm', name: 'generateObject', id: traceId, details:
-
-  return
-
-
-
-
-
-      ...( prompt
-
-
-
+  Tracing.addEventStart( { kind: 'llm', name: 'generateObject', id: traceId, details: args } );
+
+  return traceWrapper( traceId, async () =>
+    AI.generateObject( {
+      output: 'object',
+      schema,
+      schemaName,
+      schemaDescription,
+      ...extraAiSdkOptionsFromPrompt( prompt )
+    } ).then( r => r.object )
+  );
+}
+
+/**
+ * Use an LLM model to generate an array of values with a fixed schema.
+ *
+ * @param {object} args - Generation arguments
+ * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+ * @param {z.ZodType} args.schema - Output schema (array item)
+ * @param {string} [args.schemaName] - Output schema name
+ * @param {string} [args.schemaDescription] - Output schema description
+ * @returns {Promise<object>} Array where each element matches the schema
+ */
+export async function generateArray( args ) {
+  validateGenerateArrayArgs( args );
+  const { prompt, schema, schemaName, schemaDescription } = args;
+
+  const traceId = `generateArray-${Date.now()}`;
+  Tracing.addEventStart( { kind: 'llm', name: 'generateArray', id: traceId, details: args } );
+
+  return traceWrapper( traceId, async () =>
+    AI.generateObject( {
+      output: 'array',
+      schema,
+      schemaName,
+      schemaDescription,
+      ...extraAiSdkOptionsFromPrompt( prompt )
+    } ).then( r => r.object )
+  );
+}
+
+/**
+ * Use an LLM model to generate a result from an enum (array of string values).
+ *
+ * @param {object} args - Generation arguments
+ * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+ * @param {string[]} args.enum - Allowed values for the generation
+ * @returns {Promise<string>} One of the provided enum values
+ */
+export async function generateEnum( args ) {
+  validateGenerateEnumArgs( args );
+  const { prompt, enum: _enum } = args;
+
+  const traceId = `generateEnum-${Date.now()}`;
+  Tracing.addEventStart( { kind: 'llm', name: 'generateEnum', id: traceId, details: args } );
+
+  return traceWrapper( traceId, async () =>
+    AI.generateObject( { output: 'enum', enum: _enum, ...extraAiSdkOptionsFromPrompt( prompt ) } ).then( r => r.object )
+  );
 }
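
Taken together, the rewrite funnels all four generators through traceWrapper and extraAiSdkOptionsFromPrompt. A hypothetical caller's view of the new surface, assuming the package root re-exports these functions (see index.js below); the prompt literal is hand-built to match the shape the code reads and would normally come from @output.ai/prompt:

import { generateText, generateObject, generateEnum, type Prompt } from '@output.ai/llm';
import { z } from '@output.ai/core';

// Illustrative prompt shape only (config, messages, providerOptions);
// the cast stands in for a real Prompt built with @output.ai/prompt.
const prompt = {
  config: { provider: 'openai', model: 'gpt-4o-mini', temperature: 0.2 },
  messages: [ { role: 'user', content: 'Is the sky blue?' } ],
  providerOptions: {}
} as unknown as Prompt;

const text = await generateText( { prompt } ); // plain string

const parsed = await generateObject( {
  prompt,
  schema: z.object( { answer: z.string() } ),
  schemaName: 'Answer'
} ); // object matching the schema

const verdict = await generateEnum( { prompt, enum: [ 'yes', 'no' ] } ); // 'yes' or 'no'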
package/src/ai_sdk.spec.js
ADDED
@@ -0,0 +1,156 @@
+import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
+import { z } from '@output.ai/core';
+
+const tracingSpies = {
+  addEventStart: vi.fn(),
+  addEventEnd: vi.fn(),
+  addEventError: vi.fn()
+};
+vi.mock( '@output.ai/core/tracing', () => ( { Tracing: tracingSpies } ), { virtual: true } );
+
+const loadModelImpl = vi.fn();
+vi.mock( './ai_model.js', () => ( {
+  loadModel: ( ...values ) => loadModelImpl( ...values )
+} ) );
+
+const aiFns = {
+  generateText: vi.fn(),
+  generateObject: vi.fn()
+};
+vi.mock( 'ai', () => ( aiFns ) );
+
+const validators = {
+  validateGenerateTextArgs: vi.fn(),
+  validateGenerateObjectArgs: vi.fn(),
+  validateGenerateArrayArgs: vi.fn(),
+  validateGenerateEnumArgs: vi.fn()
+};
+vi.mock( './validations.js', () => ( validators ) );
+
+const importSut = async () => import( './ai_sdk.js' );
+
+const basePrompt = {
+  config: { provider: 'openai', model: 'gpt-4o-mini', temperature: 0.3 },
+  messages: [ { role: 'user', content: 'Hi' } ],
+  providerOptions: { thinking: { enabled: true } }
+};
+
+beforeEach( () => {
+  tracingSpies.addEventStart.mockClear();
+  tracingSpies.addEventEnd.mockClear();
+  tracingSpies.addEventError.mockClear();
+
+  loadModelImpl.mockReset().mockReturnValue( 'MODEL' );
+
+  aiFns.generateText.mockReset().mockResolvedValue( { text: 'TEXT' } );
+  aiFns.generateObject.mockReset().mockResolvedValue( { object: 'OBJECT' } );
+
+  validators.validateGenerateTextArgs.mockClear();
+  validators.validateGenerateObjectArgs.mockClear();
+  validators.validateGenerateArrayArgs.mockClear();
+  validators.validateGenerateEnumArgs.mockClear();
+} );
+
+afterEach( async () => {
+  await vi.resetModules();
+  vi.clearAllMocks();
+} );
+
+describe( 'ai_sdk', () => {
+  it( 'generateText: validates, traces, calls AI and returns text', async () => {
+    const { generateText } = await importSut();
+    const result = await generateText( { prompt: basePrompt } );
+
+    expect( validators.validateGenerateTextArgs ).toHaveBeenCalledWith( { prompt: basePrompt } );
+    expect( tracingSpies.addEventStart ).toHaveBeenCalledTimes( 1 );
+    expect( tracingSpies.addEventEnd ).toHaveBeenCalledTimes( 1 );
+
+    expect( loadModelImpl ).toHaveBeenCalledWith( basePrompt );
+    expect( aiFns.generateText ).toHaveBeenCalledWith( {
+      model: 'MODEL',
+      messages: basePrompt.messages,
+      temperature: 0.3,
+      providerOptions: basePrompt.providerOptions
+    } );
+    expect( result ).toBe( 'TEXT' );
+  } );
+
+  it( 'generateObject: validates, traces, calls AI with output object and returns object', async () => {
+    const { generateObject } = await importSut();
+    aiFns.generateObject.mockResolvedValueOnce( { object: { a: 1 } } );
+
+    const schema = z.object( { a: z.number() } );
+    const result = await generateObject( {
+      prompt: basePrompt,
+      schema,
+      schemaName: 'Thing',
+      schemaDescription: 'A thing'
+    } );
+
+    expect( validators.validateGenerateObjectArgs ).toHaveBeenCalled();
+    expect( tracingSpies.addEventStart ).toHaveBeenCalledTimes( 1 );
+    expect( tracingSpies.addEventEnd ).toHaveBeenCalledTimes( 1 );
+
+    expect( aiFns.generateObject ).toHaveBeenCalledWith( {
+      output: 'object',
+      schema,
+      schemaName: 'Thing',
+      schemaDescription: 'A thing',
+      model: 'MODEL',
+      messages: basePrompt.messages,
+      temperature: 0.3,
+      providerOptions: basePrompt.providerOptions
+    } );
+    expect( result ).toEqual( { a: 1 } );
+  } );
+
+  it( 'generateArray: validates, traces, calls AI (item schema) and returns array', async () => {
+    const { generateArray } = await importSut();
+    aiFns.generateObject.mockResolvedValueOnce( { object: [ 1, 2 ] } );
+
+    const schema = z.number();
+    const result = await generateArray( {
+      prompt: basePrompt,
+      schema,
+      schemaName: 'Numbers',
+      schemaDescription: 'Two numbers'
+    } );
+
+    expect( validators.validateGenerateArrayArgs ).toHaveBeenCalled();
+    expect( tracingSpies.addEventStart ).toHaveBeenCalledTimes( 1 );
+    expect( tracingSpies.addEventEnd ).toHaveBeenCalledTimes( 1 );
+
+    expect( aiFns.generateObject ).toHaveBeenCalledWith( {
+      output: 'array',
+      schema,
+      schemaName: 'Numbers',
+      schemaDescription: 'Two numbers',
+      model: 'MODEL',
+      messages: basePrompt.messages,
+      temperature: 0.3,
+      providerOptions: basePrompt.providerOptions
+    } );
+    expect( result ).toEqual( [ 1, 2 ] );
+  } );
+
+  it( 'generateEnum: validates, traces, calls AI with output enum and returns value', async () => {
+    const { generateEnum } = await importSut();
+    aiFns.generateObject.mockResolvedValueOnce( { object: 'B' } );
+
+    const result = await generateEnum( { prompt: basePrompt, enum: [ 'A', 'B', 'C' ] } );
+
+    expect( validators.validateGenerateEnumArgs ).toHaveBeenCalled();
+    expect( tracingSpies.addEventStart ).toHaveBeenCalledTimes( 1 );
+    expect( tracingSpies.addEventEnd ).toHaveBeenCalledTimes( 1 );
+
+    expect( aiFns.generateObject ).toHaveBeenCalledWith( {
+      output: 'enum',
+      enum: [ 'A', 'B', 'C' ],
+      model: 'MODEL',
+      messages: basePrompt.messages,
+      temperature: 0.3,
+      providerOptions: basePrompt.providerOptions
+    } );
+    expect( result ).toBe( 'B' );
+  } );
+} );
package/src/index.d.ts
CHANGED
@@ -1,60 +1,82 @@
-import type
+import type { z } from '@output.ai/core';
 import type { Prompt } from '@output.ai/prompt';
 
 export type { Prompt };
 
-type NativeGenerateTextArgs = Parameters<typeof AiTypes.generateText>[0];
-type NativeGenerateObjectArgs = Parameters<typeof AiTypes.generateObject>[0];
-
-/**
- * Simplify types into a plain object
- */
-type Simplify<T> = { [K in keyof T]: T[K] } & {};
-
 /**
- *
- *
+ * Use an LLM model to generate text.
+ *
+ * This function is a wrapper over the AI SDK's `generateText`.
+ * The `Prompt` sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
+ *
+ * @param {object} args - Generation arguments
+ * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+ * @returns {Promise<string>} Generated text
  */
-export
-
-
+export function generateText(
+  args: {
+    prompt: Prompt
+  }
+): Promise<string>;
 
 /**
- *
- *
+ * Use an LLM model to generate an object with a fixed schema.
+ *
+ * This function is a wrapper over the AI SDK's `generateObject`.
+ * The `Prompt` sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
+ *
+ * @param {object} args - Generation arguments
+ * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+ * @param {z.ZodObject} args.schema - Output schema
+ * @param {string} [args.schemaName] - Output schema name
+ * @param {string} [args.schemaDescription] - Output schema description
+ * @returns {Promise<object>} Object matching the provided schema
  */
-export
-
-
+export function generateObject<TSchema extends z.ZodObject>(
+  args: {
+    prompt: Prompt,
+    schema?: TSchema,
+    schemaName?: string,
+    schemaDescription?: string
+  }
+): Promise<z.infer<TSchema>>;
 
 /**
- * Use
- *
- * This function a wrapper over AI SDK's generateText function.
- *
- * It accepts the same arguments of the the original function, plus a "Prompt" object, generated using the `output.ai/prompt`.
+ * Use an LLM model to generate an array of values with a fixed schema.
  *
- *
+ * This function is a wrapper over the AI SDK's `generateObject` with `output: 'array'`.
+ * The `Prompt` sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
  *
- * @param {
- * @
+ * @param {object} args - Generation arguments
+ * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+ * @param {z.ZodType} args.schema - Output schema (array item)
+ * @param {string} [args.schemaName] - Output schema name
+ * @param {string} [args.schemaDescription] - Output schema description
+ * @returns {Promise<object>} Array where each element matches the schema
  */
-export function
-  args:
-
+export function generateArray<TSchema extends z.ZodType>(
+  args: {
+    prompt: Prompt,
+    schema?: TSchema,
+    schemaName?: string,
+    schemaDescription?: string
+  }
+): Promise<Array<z.infer<TSchema>>>;
 
 /**
- * Use
- *
- * This function a wrapper over AI SDK's generateObject function.
- *
- * It accepts the same arguments of the the original function, plus a "Prompt" object, generated using the `output.ai/prompt`.
+ * Use an LLM model to generate a result from an enum (array of string values).
  *
- *
+ * This function is a wrapper over the AI SDK's `generateObject` with `output: 'enum'`.
+ * The `Prompt` sets `model`, `messages`, `temperature`, `max_tokens`, and `provider_options`.
  *
- * @param {
- * @
+ * @param {object} args - Generation arguments
+ * @param {Prompt} args.prompt - `@output.ai/prompt` Prompt object
+ * @param {string[]} args.enum - Allowed values for the generation
+ * @returns {Promise<string>} One of the provided enum values
  */
-export function
-  args:
-
+export function generateEnum<const TEnum extends readonly [string, ...string[]]>(
+  args: {
+    prompt: Prompt,
+    enum: TEnum
+  }
+): Promise<TEnum[number]>;
package/src/index.js
CHANGED
@@ -1 +1,2 @@
-export { generateText, generateObject } from './ai_sdk.js';
+export { generateText, generateArray, generateObject, generateEnum } from './ai_sdk.js';
+export * as ai from 'ai';
package/src/validations.js
ADDED
@@ -0,0 +1,52 @@
+import { ValidationError, z } from '@output.ai/core';
+import { promptSchema } from '@output.ai/prompt';
+
+const generateTextArgsSchema = z.object( {
+  prompt: promptSchema
+} );
+
+const generateObjectArgsSchema = z.object( {
+  prompt: promptSchema,
+  schema: z.custom( v => v instanceof z.ZodObject, {
+    message: 'schema must be a ZodObject'
+  } ),
+  schemaName: z.string().optional(),
+  schemaDescription: z.string().optional()
+} );
+
+const generateArrayArgsSchema = z.object( {
+  prompt: promptSchema,
+  schema: z.custom( v => v instanceof z.ZodType, {
+    message: 'schema must be a ZodType'
+  } ),
+  schemaName: z.string().optional(),
+  schemaDescription: z.string().optional()
+} );
+
+const generateEnumArgsSchema = z.object( {
+  prompt: promptSchema,
+  enum: z.array( z.string() )
+} );
+
+export function validateSchema( schema, input, errorPrefix ) {
+  const result = schema.safeParse( input );
+  if ( !result.success ) {
+    throw new ValidationError( `${errorPrefix}: ${z.prettifyError( result.error )}` );
+  }
+};
+
+export function validateGenerateTextArgs( args ) {
+  validateSchema( generateTextArgsSchema, args, 'Invalid generateText() arguments' );
+};
+
+export function validateGenerateObjectArgs( args ) {
+  validateSchema( generateObjectArgsSchema, args, 'Invalid generateObject() arguments' );
+};
+
+export function validateGenerateArrayArgs( args ) {
+  validateSchema( generateArrayArgsSchema, args, 'Invalid generateArray() arguments' );
+};
+
+export function validateGenerateEnumArgs( args ) {
+  validateSchema( generateEnumArgsSchema, args, 'Invalid generateEnum() arguments' );
+};