@output.ai/llm 0.0.11 → 0.0.13
- package/package.json +7 -9
- package/src/ai_model.js +29 -0
- package/src/ai_sdk.js +38 -54
- package/src/index.d.ts +1 -1
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@output.ai/llm",
-  "version": "0.0.11",
+  "version": "0.0.13",
   "description": "Framework abstraction to interact with LLM models",
   "type": "module",
   "main": "src/index.js",
@@ -8,14 +8,12 @@
   "files": [
     "./src"
   ],
-  "repository": {
-    "type": "git",
-    "url": "git+https://github.com/growthxai/flow-sdk"
-  },
   "dependencies": {
-    "@ai-sdk/anthropic": "2.0.
-    "@ai-sdk/
-    "@
+    "@ai-sdk/anthropic": "2.0.28",
+    "@ai-sdk/azure": "2.0.53",
+    "@ai-sdk/openai": "2.0.52",
+    "@output.ai/core": ">=0.0.1",
     "ai": "5.0.48"
-  }
+  },
+  "license": "UNLICENSED"
 }
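For downstream projects this release is a one-line dependency bump. A minimal sketch of the consuming package.json fragment (only the package name and version come from this diff; the surrounding block is illustrative):

{
  "dependencies": {
    "@output.ai/llm": "0.0.13"
  }
}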
package/src/ai_model.js
ADDED

@@ -0,0 +1,29 @@
+import { anthropic } from '@ai-sdk/anthropic';
+import { azure } from '@ai-sdk/azure';
+import { openai } from '@ai-sdk/openai';
+import { ValidationError, z } from '@output.ai/core';
+
+const providers = { azure, anthropic, openai };
+
+const promptSchema = z.object( {
+  config: z.object( {
+    provider: z.enum( [ 'anthropic', 'azure', 'openai' ] ),
+    model: z.string(),
+    temperature: z.number().optional(),
+    max_tokens: z.number().optional()
+  } ),
+  messages: z.array(
+    z.object( {
+      role: z.string(),
+      content: z.string()
+    } )
+  )
+} );
+
+export const loadModel = prompt => {
+  const result = promptSchema.safeParse( prompt );
+  if ( !result.success ) {
+    throw new ValidationError( `Invalid prompt object: ${result.error.message}` );
+  }
+  return providers[prompt.config.provider]( prompt.config.model );
+};
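The new loadModel helper validates a prompt object against promptSchema before resolving a provider. A minimal usage sketch, assuming the deep import path src/ai_model.js is reachable (the package ships ./src, though only main is declared) and using placeholder model and message values:

import { loadModel } from '@output.ai/llm/src/ai_model.js';

// Shape accepted by promptSchema; provider must be 'anthropic', 'azure' or 'openai'.
const prompt = {
  config: {
    provider: 'openai',
    model: 'gpt-4o-mini',   // placeholder model id
    temperature: 0.2,       // optional
    max_tokens: 1024        // optional
  },
  messages: [
    { role: 'user', content: 'Hello' }
  ]
};

// Returns providers[ provider ]( model ); throws ValidationError on a bad shape.
const model = loadModel( prompt );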
package/src/ai_sdk.js
CHANGED

@@ -1,61 +1,45 @@
-import {
-
-
-
-
-
-
-
-
-
+import { Tracing } from '@output.ai/core/tracing';
+import { loadModel } from './ai_model.js';
+import * as AI from 'ai';
+
+const generationWrapper = async ( traceId, fn ) => {
+  try {
+    const result = await fn();
+    Tracing.addEventEnd( { id: traceId, details: result } );
+    return result;
+  } catch ( error ) {
+    Tracing.addEventError( { id: traceId, details: error } );
+    throw error;
+  }
 };
 
-export async function generateText(
-  const
-
-
-
-
-
-
-
-
-
+export async function generateText( { prompt, ...nativeAiSdkArgs } ) {
+  const traceId = `generateText-${Date.now()}`;
+  Tracing.addEventStart( { kind: 'llm', name: 'generateText', id: traceId, details: { prompt, nativeAiSdkArgs } } );
+
+  return generationWrapper( traceId, async () => {
+    return ( await AI.generateText( {
+      model: loadModel( prompt ),
+      messages: prompt.messages,
+      temperature: prompt.config.temperature,
+      maxOutputTokens: prompt.config.max_tokens ?? 64000,
+      ...nativeAiSdkArgs
+    } ) ).text;
   } );
-
-  trace( { lib: 'llm', event: 'generateText', input: options, output: result } );
-
-  return result.text;
 }
 
-export async function generateObject(
-  const {
-
-
-
-
-
-
-
-
-
-
-  const result = await aiGenerateObject( {
-    model,
-    schema,
-    schemaName,
-    schemaDescription,
-    output,
-    messages: prompt.messages,
-    temperature: prompt.config.temperature,
-    maxOutputTokens: prompt.config.max_tokens ?? 64000,
-    ...aiSdkOptions // Spread remaining AI SDK options
+export async function generateObject( { prompt, ...nativeAiSdkArgs } ) {
+  const traceId = `generateObject-${Date.now()}`;
+  Tracing.addEventStart( { kind: 'llm', name: 'generateObject', id: traceId, details: { prompt, nativeAiSdkArgs } } );
+
+  return generationWrapper( traceId, async () => {
+    return ( await AI.generateObject( {
+      model: loadModel( prompt ),
+      output: nativeAiSdkArgs.object ?? 'object',
+      messages: prompt.messages,
+      temperature: prompt.config.temperature,
+      ...( prompt.config.max_tokens && { maxOutputTokens: prompt.config.max_tokens } ),
+      ...nativeAiSdkArgs
+    } ) ).object;
   } );
-
-  trace( { lib: 'llm', event: 'generateObject', input: options, output: result } );
-
-  return result.object;
 }
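The refactor routes both entry points through generationWrapper, so every call emits a paired addEventStart/addEventEnd (or addEventError) trace event, and any extra keys pass straight through to the underlying AI SDK call. A usage sketch, assuming generateText and generateObject are re-exported from the package root via src/index.js (not shown in this diff); model ids and messages are placeholders:

import { generateText, generateObject } from '@output.ai/llm';
import { z } from '@output.ai/core';

const prompt = {
  config: { provider: 'anthropic', model: 'claude-sonnet-4' },  // placeholder model id
  messages: [ { role: 'user', content: 'Summarize this release.' } ]
};

// Extra keys ride along into AI.generateText via ...nativeAiSdkArgs.
const text = await generateText( { prompt, topP: 0.9 } );

// For objects, a zod schema is forwarded the same way; output defaults to 'object'.
const release = await generateObject( {
  prompt,
  schema: z.object( { name: z.string(), version: z.string() } )
} );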