@output.ai/llm 0.0.12 → 0.0.14

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@output.ai/llm",
-  "version": "0.0.12",
+  "version": "0.0.14",
   "description": "Framework abstraction to interact with LLM models",
   "type": "module",
   "main": "src/index.js",
@@ -9,10 +9,11 @@
     "./src"
   ],
   "dependencies": {
-    "@ai-sdk/anthropic": "2.0.4",
-    "@ai-sdk/azure": "2.0.49",
-    "@ai-sdk/openai": "2.0.18",
-    "@output.ai/trace": "0.0.1",
+    "@ai-sdk/anthropic": "2.0.28",
+    "@ai-sdk/azure": "2.0.53",
+    "@ai-sdk/openai": "2.0.52",
+    "@output.ai/core": ">=0.0.1",
+    "@output.ai/prompt": ">=0.0.1",
     "ai": "5.0.48"
   },
   "license": "UNLICENSED"
package/src/ai_model.js ADDED
@@ -0,0 +1,29 @@
+import { anthropic } from '@ai-sdk/anthropic';
+import { azure } from '@ai-sdk/azure';
+import { openai } from '@ai-sdk/openai';
+import { ValidationError, z } from '@output.ai/core';
+
+const providers = { azure, anthropic, openai };
+
+const promptSchema = z.object( {
+  config: z.object( {
+    provider: z.enum( [ 'anthropic', 'azure', 'openai' ] ),
+    model: z.string(),
+    temperature: z.number().optional(),
+    max_tokens: z.number().optional()
+  } ),
+  messages: z.array(
+    z.object( {
+      role: z.string(),
+      content: z.string()
+    } )
+  )
+} );
+
+export const loadModel = prompt => {
+  const result = promptSchema.safeParse( prompt );
+  if ( !result.success ) {
+    throw new ValidationError( `Invalid prompt object: ${result.error.message}` );
+  }
+  return providers[prompt.config.provider]( prompt.config.model );
+};
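
For context, a minimal sketch of how the new loadModel helper is meant to be called. The prompt values below are illustrative (the model id is not prescribed by the package, and provider credentials are assumed to come from the usual environment variables); only the shape follows the zod schema above.

// Hypothetical caller of loadModel; values are illustrative.
import { loadModel } from './ai_model.js';

const prompt = {
  config: {
    provider: 'openai',    // must be 'anthropic' | 'azure' | 'openai'
    model: 'gpt-4o-mini',  // assumed model id, not prescribed by the package
    temperature: 0.2,
    max_tokens: 1024
  },
  messages: [ { role: 'user', content: 'Say hello.' } ]
};

// Validates against promptSchema, then returns a provider-bound model,
// e.g. openai( 'gpt-4o-mini' ); a bad shape throws ValidationError.
const model = loadModel( prompt );

Note that the provider lookup itself is unchecked; an unknown provider is rejected only because the zod enum fails validation first.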
package/src/ai_sdk.js CHANGED
@@ -1,73 +1,45 @@
-import {
-  generateText as aiGenerateText,
-  generateObject as aiGenerateObject
-} from 'ai';
-import { anthropic } from '@ai-sdk/anthropic';
-import { openai } from '@ai-sdk/openai';
-import { azure } from '@ai-sdk/azure';
-import { trace } from '@output.ai/trace';
-
-const providers = {
-  anthropic,
-  azure,
-  openai
-};
-
-export async function generateText( options ) {
-  const { prompt, ...aiSdkOptions } = options;
-
-  const provider = providers[prompt.config.provider];
-  const model = provider( prompt.config.model );
-
-  const aiOptions = {
-    model,
-    messages: prompt.messages,
-    temperature: prompt.config.temperature,
-    ...aiSdkOptions // Spread remaining AI SDK options (thinking, etc.)
-  };
-
-  if ( prompt.config.max_tokens ) {
-    aiOptions.maxOutputTokens = prompt.config.max_tokens;
+import { Tracing } from '@output.ai/core/tracing';
+import { loadModel } from './ai_model.js';
+import * as AI from 'ai';
+
+const generationWrapper = async ( traceId, fn ) => {
+  try {
+    const result = await fn();
+    Tracing.addEventEnd( { id: traceId, details: result } );
+    return result;
+  } catch ( error ) {
+    Tracing.addEventError( { id: traceId, details: error } );
+    throw error;
   }
+};
 
-  const result = await aiGenerateText( aiOptions );
-
-  trace( { lib: 'llm', event: 'generateText', input: options, output: result } );
-
-  return result.text;
+export async function generateText( { prompt, ...nativeAiSdkArgs } ) {
+  const traceId = `generateText-${Date.now()}`;
+  Tracing.addEventStart( { kind: 'llm', name: 'generateText', id: traceId, details: { prompt, nativeAiSdkArgs } } );
+
+  return generationWrapper( traceId, async () => {
+    return ( await AI.generateText( {
+      model: loadModel( prompt ),
+      messages: prompt.messages,
+      temperature: prompt.config.temperature,
+      maxOutputTokens: prompt.config.max_tokens ?? 64000,
+      ...nativeAiSdkArgs
+    } ) ).text;
+  } );
 }
 
-export async function generateObject( options ) {
-  const {
-    prompt,
-    schema,
-    schemaName,
-    schemaDescription,
-    output = 'object',
-    ...aiSdkOptions
-  } = options;
-
-  const provider = providers[prompt.config.provider];
-  const model = provider( prompt.config.model );
-
-  const aiOptions = {
-    model,
-    schema,
-    schemaName,
-    schemaDescription,
-    output,
-    messages: prompt.messages,
-    temperature: prompt.config.temperature,
-    ...aiSdkOptions // Spread remaining AI SDK options
-  };
-
-  if ( prompt.config.max_tokens ) {
-    aiOptions.maxOutputTokens = prompt.config.max_tokens;
-  }
-
-  const result = await aiGenerateObject( aiOptions );
-
-  trace( { lib: 'llm', event: 'generateObject', input: options, output: result } );
-
-  return result.object;
+export async function generateObject( { prompt, ...nativeAiSdkArgs } ) {
+  const traceId = `generateObject-${Date.now()}`;
+  Tracing.addEventStart( { kind: 'llm', name: 'generateObject', id: traceId, details: { prompt, nativeAiSdkArgs } } );
+
+  return generationWrapper( traceId, async () => {
+    return ( await AI.generateObject( {
+      model: loadModel( prompt ),
+      output: nativeAiSdkArgs.object ?? 'object',
+      messages: prompt.messages,
+      temperature: prompt.config.temperature,
+      ...( prompt.config.max_tokens && { maxOutputTokens: prompt.config.max_tokens } ),
+      ...nativeAiSdkArgs
+    } ) ).object;
+  } );
 }
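
Taken together with ai_model.js, the new call pattern looks roughly like this. This is a sketch, not taken from package docs: the model id and schema are assumed, and z is imported from @output.ai/core just as ai_model.js does.

import { generateText, generateObject } from '@output.ai/llm';
import { z } from '@output.ai/core';

const prompt = {
  config: { provider: 'anthropic', model: 'claude-sonnet-4-0', temperature: 0 }, // model id is illustrative
  messages: [ { role: 'user', content: 'List three prime numbers.' } ]
};

// Emits a Tracing start event, runs AI.generateText, then records an end
// (or error) event through generationWrapper before returning plain text.
const text = await generateText( { prompt } );

// Native AI SDK args are spread last, so they override Prompt-derived values:
const data = await generateObject( {
  prompt,
  schema: z.object( { primes: z.array( z.number() ) } ),
  maxOutputTokens: 2048 // takes precedence over prompt.config.max_tokens
} );

One behavioral change worth noting: generateText now defaults maxOutputTokens to 64000 when the prompt omits max_tokens, whereas the old code left it unset.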
package/src/index.d.ts CHANGED
@@ -1,44 +1,60 @@
-import type {
-  GenerateTextOptions,
-  GenerateObjectOptions
-} from 'ai';
+import type * as AiTypes from 'ai';
+import type { Prompt } from '@output.ai/prompt';
 
-export interface Prompt {
-  config: {
-    provider: 'anthropic' | 'openai' | 'azure';
-    model: string;
-    temperature?: number;
-    max_tokens?: number;
-  };
-  messages: Array<{
-    role: string;
-    content: string;
-  }>;
-}
+export type { Prompt };
 
-// Omit prompt/messages from AI SDK options and add our Prompt type
-export type GenerateTextOptionsWithPrompt = Omit<
-  GenerateTextOptions,
-  'prompt' | 'messages' | 'model'
-> & {
-  prompt: Prompt,
-};
+type NativeGenerateTextArgs = Parameters<typeof AiTypes.generateText>[0];
+type NativeGenerateObjectArgs = Parameters<typeof AiTypes.generateObject>[0];
 
-export type GenerateObjectOptionsWithPrompt<SCHEMA> = Omit<
-  GenerateObjectOptions<SCHEMA>,
-  'prompt' | 'messages' | 'model' | 'schema' | 'schemaName' | 'schemaDescription' | 'output'
-> & {
-  prompt: Prompt,
-  schema: Record<string, unknown> | object,
-  schemaName?: string,
-  schemaDescription?: string,
-  output?: 'object' | 'array',
-};
+/**
+ * Simplify types into a plain object
+ */
+type Simplify<T> = { [K in keyof T]: T[K] } & {};
 
+/**
+ * Text generation arguments
+ * Includes all native AI SDK generateText options plus a Prompt object from `@output.ai/prompt`
+ */
+export type GenerateTextArgs = Simplify<
+  Partial<Omit<NativeGenerateTextArgs, 'prompt'>> & { prompt: Prompt }
+>;
+
+/**
+ * Object generation arguments
+ * Includes all native AI SDK generateObject options plus a Prompt object from `@output.ai/prompt`
+ */
+export type GenerateObjectArgs = Simplify<
+  Partial<Omit<NativeGenerateObjectArgs, 'prompt'>> & { prompt: Prompt }
+>;
+
+/**
+ * Use an LLM model to generate text
+ *
+ * This function is a wrapper over the AI SDK's generateText function.
+ *
+ * It accepts the same arguments as the original function, plus a "Prompt" object generated with `@output.ai/prompt`.
+ *
+ * The Prompt object sets `model`, `messages`, `temperature` and `max_tokens`; all of these can be overridden by their native AI SDK argument values.
+ *
+ * @param {GenerateTextArgs} args - Generation arguments
+ * @returns {Promise<string>}
+ */
 export function generateText(
-  options: GenerateTextOptionsWithPrompt
+  args: GenerateTextArgs
 ): Promise<string>;
 
-export function generateObject<T = unknown>(
-  options: GenerateObjectOptionsWithPrompt<T>
-): Promise<T>;
+/**
+ * Use an LLM model to generate an object
+ *
+ * This function is a wrapper over the AI SDK's generateObject function.
+ *
+ * It accepts the same arguments as the original function, plus a "Prompt" object generated with `@output.ai/prompt`.
+ *
+ * The Prompt object sets `model`, `messages`, `temperature` and `max_tokens`; all of these can be overridden by their native AI SDK argument values.
+ *
+ * @param {GenerateObjectArgs} args - Generation arguments
+ * @returns {Promise<object>}
+ */
+export function generateObject(
+  args: GenerateObjectArgs
+): Promise<object>;
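
At the type level, the new declarations make every native option optional and require only prompt. A small sketch of what now type-checks, assuming Prompt from @output.ai/prompt keeps the shape that the runtime schema in ai_model.js validates:

import type { GenerateTextArgs, Prompt } from '@output.ai/llm';

// Prompt is re-exported from @output.ai/prompt; the literal below assumes
// it still matches the runtime zod schema (config + messages).
const prompt: Prompt = {
  config: { provider: 'azure', model: 'gpt-4o' }, // model id is illustrative
  messages: [ { role: 'system', content: 'You are terse.' } ]
};

// Partial<Omit<NativeGenerateTextArgs, 'prompt'>> makes temperature (and
// every other native generateText option) optional, so both compile:
const bare: GenerateTextArgs = { prompt };
const tuned: GenerateTextArgs = { prompt, temperature: 0.7 };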