@output.ai/llm 0.2.13 → 0.3.0-dev.pr341-daa6878

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@output.ai/llm",
-  "version": "0.2.13",
+  "version": "0.3.0-dev.pr341-daa6878",
   "description": "Framework abstraction to interact with LLM models",
   "type": "module",
   "main": "src/index.js",
@@ -9,12 +9,12 @@
     "./src"
   ],
   "dependencies": {
-    "@ai-sdk/anthropic": "2.0.28",
-    "@ai-sdk/azure": "2.0.53",
-    "@ai-sdk/google-vertex": "3.0.96",
-    "@ai-sdk/openai": "2.0.52",
-    "@output.ai/core": ">=0.0.1",
-    "ai": "5.0.52",
+    "@ai-sdk/anthropic": "3.0.43",
+    "@ai-sdk/azure": "3.0.29",
+    "@ai-sdk/google-vertex": "4.0.57",
+    "@ai-sdk/openai": "3.0.28",
+    "@output.ai/core": "^0.5.2",
+    "ai": "6.0.84",
     "gray-matter": "4.0.3",
     "liquidjs": "10.22.0"
   },
package/src/ai_model.js CHANGED
@@ -3,7 +3,30 @@ import { azure } from '@ai-sdk/azure';
 import { vertex } from '@ai-sdk/google-vertex';
 import { openai } from '@ai-sdk/openai';

-const providers = { azure, anthropic, openai, vertex };
+const builtInProviders = { azure, anthropic, openai, vertex };
+const providers = { ...builtInProviders };
+
+/** @internal Resets providers to built-in state. For testing only. */
+export function _resetProviders() {
+  for ( const key of Object.keys( providers ) ) {
+    delete providers[key];
+  }
+  Object.assign( providers, builtInProviders );
+}
+
+export function registerProvider( name, providerFn ) {
+  if ( typeof name !== 'string' || name.length === 0 ) {
+    throw new Error( 'Provider name must be a non-empty string' );
+  }
+  if ( typeof providerFn !== 'function' ) {
+    throw new Error( `Provider "${name}" must be a function that creates a model` );
+  }
+  providers[name] = providerFn;
+}
+
+export function getRegisteredProviders() {
+  return Object.keys( providers );
+}

 export function loadModel( prompt ) {
   const config = prompt?.config;
@@ -56,7 +79,6 @@ export function loadTools( prompt ) {
     );
   }

-  // Return null if empty object
   if ( Object.keys( toolsConfig ).length === 0 ) {
     return null;
   }
@@ -71,7 +93,6 @@ export function loadTools( prompt ) {
     );
   }

-  // Check if provider has tools object
   if ( !provider.tools || typeof provider.tools !== 'object' ) {
     throw new Error(
       `Provider "${providerName}" does not support provider-specific tools.`
@@ -81,11 +102,9 @@ export function loadTools( prompt ) {
   const tools = {};

   for ( const [ toolName, toolConfig ] of Object.entries( toolsConfig ) ) {
-    // Access tool factory directly from provider.tools (dynamic!)
     const toolFactory = provider.tools[toolName];

     if ( !toolFactory || typeof toolFactory !== 'function' ) {
-      // Dynamically list available tools for this provider
       const availableTools = Object.keys( provider.tools )
         .filter( key => typeof provider.tools[key] === 'function' )
         .join( ', ' );
@@ -103,7 +122,6 @@ export function loadTools( prompt ) {
       );
     }

-    // Call factory with config - this passes configuration to AI SDK
     tools[toolName] = toolFactory( toolConfig );
   }

@@ -46,7 +46,7 @@ vi.mock( '@ai-sdk/google-vertex', () => {
   return { vertex: vertexFn };
 } );

-import { loadModel, loadTools } from './ai_model.js';
+import { loadModel, loadTools, registerProvider, getRegisteredProviders, _resetProviders } from './ai_model.js';

 afterEach( async () => {
   await vi.resetModules();
@@ -600,3 +600,67 @@ describe( 'loadTools', () => {
     } );
   } );
 } );
+
+describe( 'registerProvider', () => {
+  afterEach( () => {
+    _resetProviders();
+  } );
+
+  it( 'registers a custom provider and uses it in loadModel', () => {
+    const customProvider = vi.fn( model => `custom:${model}` );
+    registerProvider( 'custom', customProvider );
+
+    const result = loadModel( { config: { provider: 'custom', model: 'my-model' } } );
+
+    expect( result ).toBe( 'custom:my-model' );
+    expect( customProvider ).toHaveBeenCalledWith( 'my-model' );
+  } );
+
+  it( 'overrides a built-in provider', () => {
+    const overrideOpenai = vi.fn( model => `override:${model}` );
+    registerProvider( 'openai', overrideOpenai );
+
+    const result = loadModel( { config: { provider: 'openai', model: 'gpt-custom' } } );
+
+    expect( result ).toBe( 'override:gpt-custom' );
+  } );
+
+  it( 'throws when name is empty string', () => {
+    expect( () => registerProvider( '', vi.fn() ) ).toThrow( 'non-empty string' );
+  } );
+
+  it( 'throws when name is not a string', () => {
+    expect( () => registerProvider( 123, vi.fn() ) ).toThrow( 'non-empty string' );
+  } );
+
+  it( 'throws when providerFn is not a function', () => {
+    expect( () => registerProvider( 'bad', 'not-a-function' ) ).toThrow( 'must be a function' );
+  } );
+
+  it( 'throws when providerFn is null', () => {
+    expect( () => registerProvider( 'bad', null ) ).toThrow( 'must be a function' );
+  } );
+} );
+
+describe( 'getRegisteredProviders', () => {
+  afterEach( () => {
+    _resetProviders();
+  } );
+
+  it( 'returns default providers', () => {
+    const providers = getRegisteredProviders();
+
+    expect( providers ).toContain( 'anthropic' );
+    expect( providers ).toContain( 'openai' );
+    expect( providers ).toContain( 'azure' );
+    expect( providers ).toContain( 'vertex' );
+  } );
+
+  it( 'includes dynamically registered providers', () => {
+    registerProvider( 'bedrock', vi.fn() );
+
+    const providers = getRegisteredProviders();
+
+    expect( providers ).toContain( 'bedrock' );
+  } );
+} );
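
The ai_model.js changes above add a small provider registry on top of the built-in azure/anthropic/openai/vertex providers. The sketch below mirrors the new tests: it registers a custom provider function and lets loadModel resolve it from a prompt config. It is an illustration only; the deep import path and the "echo" provider are assumptions, not something this diff ships.

```js
// Illustrative sketch of the new registerProvider/getRegisteredProviders API.
// Assumes src/ai_model.js is reachable from the package (its exports include "./src");
// "echo" and "echo-1" are placeholder names.
import { registerProvider, getRegisteredProviders, loadModel } from '@output.ai/llm/src/ai_model.js';

// A provider is just a function that turns a model id into a model instance.
registerProvider( 'echo', modelId => ( { provider: 'echo', modelId } ) );

console.log( getRegisteredProviders() );
// e.g. [ 'azure', 'anthropic', 'openai', 'vertex', 'echo' ]

// loadModel resolves the factory named by the prompt config's `provider` field.
const model = loadModel( { config: { provider: 'echo', model: 'echo-1' } } );
console.log( model ); // { provider: 'echo', modelId: 'echo-1' }
```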
package/src/ai_sdk.js CHANGED
@@ -5,6 +5,14 @@ import * as AI from 'ai';
 import { validateGenerateTextArgs, validateGenerateObjectArgs, validateGenerateArrayArgs, validateGenerateEnumArgs } from './validations.js';
 import { loadPrompt } from './prompt_loader.js';

+const _deprecationWarned = new Set();
+const warnDeprecated = ( name, message ) => {
+  if ( !_deprecationWarned.has( name ) ) {
+    _deprecationWarned.add( name );
+    process.emitWarning( message, { type: 'DeprecationWarning', code: `OUTPUT_DEP_${name.toUpperCase()}` } );
+  }
+};
+
 const traceWrapper = async ( { traceId, resultProperty, fn } ) => {
   try {
     const response = await fn();
@@ -12,13 +20,11 @@ const traceWrapper = async ( { traceId, resultProperty, fn } ) => {
     const result = response[resultProperty];
     Tracing.addEventEnd( { id: traceId, details: { result, usage, providerMetadata } } );

-    // Use a Proxy to add 'result' as a unified field name without mutating the AI SDK response.
-    // This preserves the original response object (with its getters/prototype) while allowing
-    // developers to use 'result' consistently across all generate* functions.
-    // Note: Don't use spread/rest on response - AI SDK uses getters that won't copy correctly.
+    // Proxy adds unified 'result' alias and backward-compat 'object' alias without mutating the response.
+    // Don't use spread/rest on response - AI SDK uses getters that won't copy correctly.
     return new Proxy( response, {
       get( target, prop, receiver ) {
-        if ( prop === 'result' ) {
+        if ( prop === 'result' || ( prop === 'object' && resultProperty === 'output' ) ) {
           return target[resultProperty];
         }
         return Reflect.get( target, prop, receiver );
@@ -99,35 +105,40 @@ export async function generateText( { prompt, variables, ...extraAiSdkOptions }
 /**
  * Use an LLM model to generate an object with a fixed schema.
  *
- * Accepts additional AI SDK options (maxRetries, seed, etc.) that are passed through
- * to the underlying provider. Options from the prompt file can be overridden at call time.
+ * @deprecated Since v0.3.0. Use generateText() with Output.object({ schema }) instead.
+ * Will be removed in v1.0.0.
+ *
+ * @example Migration:
+ * ```js
+ * // Before (deprecated):
+ * const { object } = await generateObject({ prompt: 'my_prompt', schema: MySchema });
+ *
+ * // After (recommended):
+ * const { output } = await generateText({ prompt: 'my_prompt', output: Output.object({ schema: MySchema }) });
+ * ```
  *
  * @param {object} args - Generation arguments
  * @param {string} args.prompt - Prompt file name
  * @param {Record<string, string | number>} [args.variables] - Variables to interpolate
- * @param {z.ZodObject} args.schema - Output schema
+ * @param {z.ZodType} args.schema - Output schema
  * @param {string} [args.schemaName] - Output schema name
  * @param {string} [args.schemaDescription] - Output schema description
- * @param {number} [args.maxRetries] - Max retry attempts (default: 2)
- * @param {number} [args.seed] - Seed for deterministic output
- * @param {AbortSignal} [args.abortSignal] - Signal to abort the request
- * @throws {ValidationError} If the prompt config is invalid (e.g., snake_case fields)
- * @throws {FatalError} If the prompt file is not found or template rendering fails
+ * @see {@link generateText} for the recommended replacement
  * @returns {Promise<GenerateObjectResult>} AI SDK response with object and metadata
  */
 export async function generateObject( args ) {
+  warnDeprecated( 'generateObject',
+    'generateObject() is deprecated since v0.3.0 and will be removed in v1.0.0. ' +
+    'Use generateText() with Output.object({ schema }) instead.' );
   validateGenerateObjectArgs( args );
   const { prompt, variables, schema, schemaName, schemaDescription, ...extraAiSdkOptions } = args;
   const loadedPrompt = loadPrompt( prompt, variables );
   const traceId = startTrace( 'generateObject', { prompt, variables, schema: z.toJSONSchema( schema ), loadedPrompt } );

   return traceWrapper( {
-    traceId, resultProperty: 'object', fn: async () =>
-      AI.generateObject( {
-        output: 'object',
-        schema,
-        schemaName,
-        schemaDescription,
+    traceId, resultProperty: 'output', fn: async () =>
+      AI.generateText( {
+        output: AI.Output.object( { schema, name: schemaName, description: schemaDescription } ),
         ...aiSdkOptionsFromPrompt( loadedPrompt ),
         ...extraAiSdkOptions
       } )
@@ -137,8 +148,17 @@ export async function generateObject( args ) {
 /**
  * Use an LLM model to generate an array of values with a fixed schema.
  *
- * Accepts additional AI SDK options (maxRetries, seed, etc.) that are passed through
- * to the underlying provider. Options from the prompt file can be overridden at call time.
+ * @deprecated Since v0.3.0. Use generateText() with Output.array({ element }) instead.
+ * Will be removed in v1.0.0.
+ *
+ * @example Migration:
+ * ```js
+ * // Before (deprecated):
+ * const { object } = await generateArray({ prompt: 'my_prompt', schema: ItemSchema });
+ *
+ * // After (recommended):
+ * const { output } = await generateText({ prompt: 'my_prompt', output: Output.array({ element: ItemSchema }) });
+ * ```
  *
  * @param {object} args - Generation arguments
  * @param {string} args.prompt - Prompt file name
@@ -146,26 +166,22 @@ export async function generateObject( args ) {
  * @param {z.ZodType} args.schema - Output schema (array item)
  * @param {string} [args.schemaName] - Output schema name
  * @param {string} [args.schemaDescription] - Output schema description
- * @param {number} [args.maxRetries] - Max retry attempts (default: 2)
- * @param {number} [args.seed] - Seed for deterministic output
- * @param {AbortSignal} [args.abortSignal] - Signal to abort the request
- * @throws {ValidationError} If the prompt config is invalid (e.g., snake_case fields)
- * @throws {FatalError} If the prompt file is not found or template rendering fails
+ * @see {@link generateText} for the recommended replacement
  * @returns {Promise<GenerateObjectResult>} AI SDK response with array and metadata
  */
 export async function generateArray( args ) {
+  warnDeprecated( 'generateArray',
+    'generateArray() is deprecated since v0.3.0 and will be removed in v1.0.0. ' +
+    'Use generateText() with Output.array({ element: schema }) instead.' );
   validateGenerateArrayArgs( args );
   const { prompt, variables, schema, schemaName, schemaDescription, ...extraAiSdkOptions } = args;
   const loadedPrompt = loadPrompt( prompt, variables );
   const traceId = startTrace( 'generateArray', { prompt, variables, schema: z.toJSONSchema( schema ), loadedPrompt } );

   return traceWrapper( {
-    traceId, resultProperty: 'object', fn: async () =>
-      AI.generateObject( {
-        output: 'array',
-        schema,
-        schemaName,
-        schemaDescription,
+    traceId, resultProperty: 'output', fn: async () =>
+      AI.generateText( {
+        output: AI.Output.array( { element: schema, name: schemaName, description: schemaDescription } ),
         ...aiSdkOptionsFromPrompt( loadedPrompt ),
         ...extraAiSdkOptions
       } )
@@ -175,31 +191,38 @@ export async function generateArray( args ) {
 /**
  * Use an LLM model to generate a result from an enum (array of string values).
  *
- * Accepts additional AI SDK options (maxRetries, seed, etc.) that are passed through
- * to the underlying provider. Options from the prompt file can be overridden at call time.
+ * @deprecated Since v0.3.0. Use generateText() with Output.choice({ options }) instead.
+ * Will be removed in v1.0.0.
+ *
+ * @example Migration:
+ * ```js
+ * // Before (deprecated):
+ * const { object } = await generateEnum({ prompt: 'my_prompt', enum: ['yes', 'no', 'maybe'] });
+ *
+ * // After (recommended):
+ * const { output } = await generateText({ prompt: 'my_prompt', output: Output.choice({ options: ['yes', 'no', 'maybe'] }) });
+ * ```
  *
  * @param {object} args - Generation arguments
  * @param {string} args.prompt - Prompt file name
  * @param {Record<string, string | number>} [args.variables] - Variables to interpolate
  * @param {string[]} args.enum - Allowed values for the generation
- * @param {number} [args.maxRetries] - Max retry attempts (default: 2)
- * @param {number} [args.seed] - Seed for deterministic output
- * @param {AbortSignal} [args.abortSignal] - Signal to abort the request
- * @throws {ValidationError} If the prompt config is invalid (e.g., snake_case fields)
- * @throws {FatalError} If the prompt file is not found or template rendering fails
+ * @see {@link generateText} for the recommended replacement
  * @returns {Promise<GenerateObjectResult>} AI SDK response with enum value and metadata
  */
 export async function generateEnum( args ) {
+  warnDeprecated( 'generateEnum',
+    'generateEnum() is deprecated since v0.3.0 and will be removed in v1.0.0. ' +
+    'Use generateText() with Output.choice({ options }) instead.' );
   validateGenerateEnumArgs( args );
   const { prompt, variables, enum: _enum, ...extraAiSdkOptions } = args;
   const loadedPrompt = loadPrompt( prompt, variables );
   const traceId = startTrace( 'generateEnum', { prompt, variables, loadedPrompt } );

   return traceWrapper( {
-    traceId, resultProperty: 'object', fn: async () =>
-      AI.generateObject( {
-        output: 'enum',
-        enum: _enum,
+    traceId, resultProperty: 'output', fn: async () =>
+      AI.generateText( {
+        output: AI.Output.choice( { options: _enum } ),
         ...aiSdkOptionsFromPrompt( loadedPrompt ),
         ...extraAiSdkOptions
       } )
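
For consumers of the deprecated helpers, the Proxy change above keeps the old `object` destructuring working while the updated JSDoc points at the generateText replacement. A rough migration sketch from the caller's side, assuming the package root re-exports generateObject/generateText and that `Output` is imported from the `ai` dependency; the prompt name and schema are hypothetical:

```js
// Migration sketch for the deprecations in this release; import paths and names are assumptions.
import { generateObject, generateText } from '@output.ai/llm';
import { Output } from 'ai';
import { z } from 'zod';

const ContactSchema = z.object( { name: z.string(), email: z.string() } );

// Before: still works in 0.3.x, but emits a one-time DeprecationWarning,
// and `object` is now served by the backward-compat Proxy alias.
const legacy = await generateObject( { prompt: 'extract_contact', schema: ContactSchema } );
console.log( legacy.object, legacy.result );

// After: the recommended call, matching the @example blocks in the updated JSDoc.
const modern = await generateText( {
  prompt: 'extract_contact',
  output: Output.object( { schema: ContactSchema } )
} );
console.log( modern.output );
```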