@output.ai/llm 0.2.13 → 0.3.0-dev.pr341-d46aaf1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@output.ai/llm",
3
- "version": "0.2.13",
3
+ "version": "0.3.0-dev.pr341-d46aaf1",
4
4
  "description": "Framework abstraction to interact with LLM models",
5
5
  "type": "module",
6
6
  "main": "src/index.js",
@@ -9,12 +9,12 @@
9
9
  "./src"
10
10
  ],
11
11
  "dependencies": {
12
- "@ai-sdk/anthropic": "2.0.28",
13
- "@ai-sdk/azure": "2.0.53",
14
- "@ai-sdk/google-vertex": "3.0.96",
15
- "@ai-sdk/openai": "2.0.52",
16
- "@output.ai/core": ">=0.0.1",
17
- "ai": "5.0.52",
12
+ "@ai-sdk/anthropic": "3.0.43",
13
+ "@ai-sdk/azure": "3.0.29",
14
+ "@ai-sdk/google-vertex": "4.0.57",
15
+ "@ai-sdk/openai": "3.0.28",
16
+ "@output.ai/core": "^0.5.2",
17
+ "ai": "6.0.84",
18
18
  "gray-matter": "4.0.3",
19
19
  "liquidjs": "10.22.0"
20
20
  },
package/src/ai_model.js CHANGED
@@ -3,7 +3,30 @@ import { azure } from '@ai-sdk/azure';
3
3
  import { vertex } from '@ai-sdk/google-vertex';
4
4
  import { openai } from '@ai-sdk/openai';
5
5
 
6
- const providers = { azure, anthropic, openai, vertex };
6
+ const builtInProviders = { azure, anthropic, openai, vertex };
7
+ const providers = { ...builtInProviders };
8
+
9
+ /** @internal Resets providers to built-in state. For testing only. */
10
+ export function _resetProviders() {
11
+ for ( const key of Object.keys( providers ) ) {
12
+ delete providers[key];
13
+ }
14
+ Object.assign( providers, builtInProviders );
15
+ }
16
+
17
+ export function registerProvider( name, providerFn ) {
18
+ if ( typeof name !== 'string' || name.length === 0 ) {
19
+ throw new Error( 'Provider name must be a non-empty string' );
20
+ }
21
+ if ( typeof providerFn !== 'function' ) {
22
+ throw new Error( `Provider "${name}" must be a function that creates a model` );
23
+ }
24
+ providers[name] = providerFn;
25
+ }
26
+
27
+ export function getRegisteredProviders() {
28
+ return Object.keys( providers );
29
+ }
7
30
 
8
31
  export function loadModel( prompt ) {
9
32
  const config = prompt?.config;
@@ -56,7 +79,6 @@ export function loadTools( prompt ) {
56
79
  );
57
80
  }
58
81
 
59
- // Return null if empty object
60
82
  if ( Object.keys( toolsConfig ).length === 0 ) {
61
83
  return null;
62
84
  }
@@ -71,7 +93,6 @@ export function loadTools( prompt ) {
71
93
  );
72
94
  }
73
95
 
74
- // Check if provider has tools object
75
96
  if ( !provider.tools || typeof provider.tools !== 'object' ) {
76
97
  throw new Error(
77
98
  `Provider "${providerName}" does not support provider-specific tools.`
@@ -81,11 +102,9 @@ export function loadTools( prompt ) {
81
102
  const tools = {};
82
103
 
83
104
  for ( const [ toolName, toolConfig ] of Object.entries( toolsConfig ) ) {
84
- // Access tool factory directly from provider.tools (dynamic!)
85
105
  const toolFactory = provider.tools[toolName];
86
106
 
87
107
  if ( !toolFactory || typeof toolFactory !== 'function' ) {
88
- // Dynamically list available tools for this provider
89
108
  const availableTools = Object.keys( provider.tools )
90
109
  .filter( key => typeof provider.tools[key] === 'function' )
91
110
  .join( ', ' );
@@ -103,7 +122,6 @@ export function loadTools( prompt ) {
103
122
  );
104
123
  }
105
124
 
106
- // Call factory with config - this passes configuration to AI SDK
107
125
  tools[toolName] = toolFactory( toolConfig );
108
126
  }
109
127
 
package/src/ai_model.test.js CHANGED
@@ -46,7 +46,7 @@ vi.mock( '@ai-sdk/google-vertex', () => {
46
46
  return { vertex: vertexFn };
47
47
  } );
48
48
 
49
- import { loadModel, loadTools } from './ai_model.js';
49
+ import { loadModel, loadTools, registerProvider, getRegisteredProviders, _resetProviders } from './ai_model.js';
50
50
 
51
51
  afterEach( async () => {
52
52
  await vi.resetModules();
@@ -600,3 +600,67 @@ describe( 'loadTools', () => {
600
600
  } );
601
601
  } );
602
602
  } );
603
+
604
+ describe( 'registerProvider', () => {
605
+ afterEach( () => {
606
+ _resetProviders();
607
+ } );
608
+
609
+ it( 'registers a custom provider and uses it in loadModel', () => {
610
+ const customProvider = vi.fn( model => `custom:${model}` );
611
+ registerProvider( 'custom', customProvider );
612
+
613
+ const result = loadModel( { config: { provider: 'custom', model: 'my-model' } } );
614
+
615
+ expect( result ).toBe( 'custom:my-model' );
616
+ expect( customProvider ).toHaveBeenCalledWith( 'my-model' );
617
+ } );
618
+
619
+ it( 'overrides a built-in provider', () => {
620
+ const overrideOpenai = vi.fn( model => `override:${model}` );
621
+ registerProvider( 'openai', overrideOpenai );
622
+
623
+ const result = loadModel( { config: { provider: 'openai', model: 'gpt-custom' } } );
624
+
625
+ expect( result ).toBe( 'override:gpt-custom' );
626
+ } );
627
+
628
+ it( 'throws when name is empty string', () => {
629
+ expect( () => registerProvider( '', vi.fn() ) ).toThrow( 'non-empty string' );
630
+ } );
631
+
632
+ it( 'throws when name is not a string', () => {
633
+ expect( () => registerProvider( 123, vi.fn() ) ).toThrow( 'non-empty string' );
634
+ } );
635
+
636
+ it( 'throws when providerFn is not a function', () => {
637
+ expect( () => registerProvider( 'bad', 'not-a-function' ) ).toThrow( 'must be a function' );
638
+ } );
639
+
640
+ it( 'throws when providerFn is null', () => {
641
+ expect( () => registerProvider( 'bad', null ) ).toThrow( 'must be a function' );
642
+ } );
643
+ } );
644
+
645
+ describe( 'getRegisteredProviders', () => {
646
+ afterEach( () => {
647
+ _resetProviders();
648
+ } );
649
+
650
+ it( 'returns default providers', () => {
651
+ const providers = getRegisteredProviders();
652
+
653
+ expect( providers ).toContain( 'anthropic' );
654
+ expect( providers ).toContain( 'openai' );
655
+ expect( providers ).toContain( 'azure' );
656
+ expect( providers ).toContain( 'vertex' );
657
+ } );
658
+
659
+ it( 'includes dynamically registered providers', () => {
660
+ registerProvider( 'bedrock', vi.fn() );
661
+
662
+ const providers = getRegisteredProviders();
663
+
664
+ expect( providers ).toContain( 'bedrock' );
665
+ } );
666
+ } );
package/src/ai_sdk.js CHANGED
@@ -12,13 +12,11 @@ const traceWrapper = async ( { traceId, resultProperty, fn } ) => {
12
12
  const result = response[resultProperty];
13
13
  Tracing.addEventEnd( { id: traceId, details: { result, usage, providerMetadata } } );
14
14
 
15
- // Use a Proxy to add 'result' as a unified field name without mutating the AI SDK response.
16
- // This preserves the original response object (with its getters/prototype) while allowing
17
- // developers to use 'result' consistently across all generate* functions.
18
- // Note: Don't use spread/rest on response - AI SDK uses getters that won't copy correctly.
15
+ // Proxy adds unified 'result' alias and backward-compat 'object' alias without mutating the response.
16
+ // Don't use spread/rest on response - AI SDK uses getters that won't copy correctly.
19
17
  return new Proxy( response, {
20
18
  get( target, prop, receiver ) {
21
- if ( prop === 'result' ) {
19
+ if ( prop === 'result' || ( prop === 'object' && resultProperty === 'output' ) ) {
22
20
  return target[resultProperty];
23
21
  }
24
22
  return Reflect.get( target, prop, receiver );
@@ -99,20 +97,15 @@ export async function generateText( { prompt, variables, ...extraAiSdkOptions }
99
97
  /**
100
98
  * Use an LLM model to generate an object with a fixed schema.
101
99
  *
102
- * Accepts additional AI SDK options (maxRetries, seed, etc.) that are passed through
103
- * to the underlying provider. Options from the prompt file can be overridden at call time.
100
+ * @deprecated Use generateText() with Output.object({ schema }) instead:
101
+ * generateText({ prompt, output: Output.object({ schema }) })
104
102
  *
105
103
  * @param {object} args - Generation arguments
106
104
  * @param {string} args.prompt - Prompt file name
107
105
  * @param {Record<string, string | number>} [args.variables] - Variables to interpolate
108
- * @param {z.ZodObject} args.schema - Output schema
106
+ * @param {z.ZodType} args.schema - Output schema
109
107
  * @param {string} [args.schemaName] - Output schema name
110
108
  * @param {string} [args.schemaDescription] - Output schema description
111
- * @param {number} [args.maxRetries] - Max retry attempts (default: 2)
112
- * @param {number} [args.seed] - Seed for deterministic output
113
- * @param {AbortSignal} [args.abortSignal] - Signal to abort the request
114
- * @throws {ValidationError} If the prompt config is invalid (e.g., snake_case fields)
115
- * @throws {FatalError} If the prompt file is not found or template rendering fails
116
109
  * @returns {Promise<GenerateObjectResult>} AI SDK response with object and metadata
117
110
  */
118
111
  export async function generateObject( args ) {
@@ -122,12 +115,9 @@ export async function generateObject( args ) {
122
115
  const traceId = startTrace( 'generateObject', { prompt, variables, schema: z.toJSONSchema( schema ), loadedPrompt } );
123
116
 
124
117
  return traceWrapper( {
125
- traceId, resultProperty: 'object', fn: async () =>
126
- AI.generateObject( {
127
- output: 'object',
128
- schema,
129
- schemaName,
130
- schemaDescription,
118
+ traceId, resultProperty: 'output', fn: async () =>
119
+ AI.generateText( {
120
+ output: AI.Output.object( { schema, name: schemaName, description: schemaDescription } ),
131
121
  ...aiSdkOptionsFromPrompt( loadedPrompt ),
132
122
  ...extraAiSdkOptions
133
123
  } )
@@ -137,8 +127,8 @@ export async function generateObject( args ) {
137
127
  /**
138
128
  * Use an LLM model to generate an array of values with a fixed schema.
139
129
  *
140
- * Accepts additional AI SDK options (maxRetries, seed, etc.) that are passed through
141
- * to the underlying provider. Options from the prompt file can be overridden at call time.
130
+ * @deprecated Use generateText() with Output.array({ element }) instead:
131
+ * generateText({ prompt, output: Output.array({ element: schema }) })
142
132
  *
143
133
  * @param {object} args - Generation arguments
144
134
  * @param {string} args.prompt - Prompt file name
@@ -146,11 +136,6 @@ export async function generateObject( args ) {
146
136
  * @param {z.ZodType} args.schema - Output schema (array item)
147
137
  * @param {string} [args.schemaName] - Output schema name
148
138
  * @param {string} [args.schemaDescription] - Output schema description
149
- * @param {number} [args.maxRetries] - Max retry attempts (default: 2)
150
- * @param {number} [args.seed] - Seed for deterministic output
151
- * @param {AbortSignal} [args.abortSignal] - Signal to abort the request
152
- * @throws {ValidationError} If the prompt config is invalid (e.g., snake_case fields)
153
- * @throws {FatalError} If the prompt file is not found or template rendering fails
154
139
  * @returns {Promise<GenerateObjectResult>} AI SDK response with array and metadata
155
140
  */
156
141
  export async function generateArray( args ) {
@@ -160,12 +145,9 @@ export async function generateArray( args ) {
160
145
  const traceId = startTrace( 'generateArray', { prompt, variables, schema: z.toJSONSchema( schema ), loadedPrompt } );
161
146
 
162
147
  return traceWrapper( {
163
- traceId, resultProperty: 'object', fn: async () =>
164
- AI.generateObject( {
165
- output: 'array',
166
- schema,
167
- schemaName,
168
- schemaDescription,
148
+ traceId, resultProperty: 'output', fn: async () =>
149
+ AI.generateText( {
150
+ output: AI.Output.array( { element: schema, name: schemaName, description: schemaDescription } ),
169
151
  ...aiSdkOptionsFromPrompt( loadedPrompt ),
170
152
  ...extraAiSdkOptions
171
153
  } )
@@ -175,18 +157,13 @@ export async function generateArray( args ) {
175
157
  /**
176
158
  * Use an LLM model to generate a result from an enum (array of string values).
177
159
  *
178
- * Accepts additional AI SDK options (maxRetries, seed, etc.) that are passed through
179
- * to the underlying provider. Options from the prompt file can be overridden at call time.
160
+ * @deprecated Use generateText() with Output.choice({ options }) instead:
161
+ * generateText({ prompt, output: Output.choice({ options: enumValues }) })
180
162
  *
181
163
  * @param {object} args - Generation arguments
182
164
  * @param {string} args.prompt - Prompt file name
183
165
  * @param {Record<string, string | number>} [args.variables] - Variables to interpolate
184
166
  * @param {string[]} args.enum - Allowed values for the generation
185
- * @param {number} [args.maxRetries] - Max retry attempts (default: 2)
186
- * @param {number} [args.seed] - Seed for deterministic output
187
- * @param {AbortSignal} [args.abortSignal] - Signal to abort the request
188
- * @throws {ValidationError} If the prompt config is invalid (e.g., snake_case fields)
189
- * @throws {FatalError} If the prompt file is not found or template rendering fails
190
167
  * @returns {Promise<GenerateObjectResult>} AI SDK response with enum value and metadata
191
168
  */
192
169
  export async function generateEnum( args ) {
@@ -196,10 +173,9 @@ export async function generateEnum( args ) {
196
173
  const traceId = startTrace( 'generateEnum', { prompt, variables, loadedPrompt } );
197
174
 
198
175
  return traceWrapper( {
199
- traceId, resultProperty: 'object', fn: async () =>
200
- AI.generateObject( {
201
- output: 'enum',
202
- enum: _enum,
176
+ traceId, resultProperty: 'output', fn: async () =>
177
+ AI.generateText( {
178
+ output: AI.Output.choice( { options: _enum } ),
203
179
  ...aiSdkOptionsFromPrompt( loadedPrompt ),
204
180
  ...extraAiSdkOptions
205
181
  } )