@output.ai/llm 0.0.10 → 0.0.12

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@output.ai/llm",
-  "version": "0.0.10",
+  "version": "0.0.12",
   "description": "Framework abstraction to interact with LLM models",
   "type": "module",
   "main": "src/index.js",
@@ -8,14 +8,12 @@
   "files": [
     "./src"
   ],
-  "repository": {
-    "type": "git",
-    "url": "git+https://github.com/growthxai/flow-sdk"
-  },
   "dependencies": {
     "@ai-sdk/anthropic": "2.0.4",
+    "@ai-sdk/azure": "2.0.49",
     "@ai-sdk/openai": "2.0.18",
     "@output.ai/trace": "0.0.1",
     "ai": "5.0.48"
-  }
+  },
+  "license": "UNLICENSED"
 }
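
The new @ai-sdk/azure dependency backs the azure provider wired up in src/ai_sdk.js below. A minimal sketch of how the default azure instance is typically configured (the deployment name is hypothetical; per the AI SDK docs the default instance reads its credentials from environment variables):

import { azure } from '@ai-sdk/azure';

// The default `azure` instance reads AZURE_RESOURCE_NAME and AZURE_API_KEY
// from the environment. The argument is the Azure deployment name, not the
// underlying model id.
const model = azure( 'my-gpt-4o-deployment' ); // hypothetical deployment name
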
package/src/ai_sdk.js CHANGED
@@ -4,10 +4,12 @@ import {
 } from 'ai';
 import { anthropic } from '@ai-sdk/anthropic';
 import { openai } from '@ai-sdk/openai';
+import { azure } from '@ai-sdk/azure';
 import { trace } from '@output.ai/trace';
 
 const providers = {
   anthropic,
+  azure,
   openai
 };
 
@@ -17,13 +19,18 @@ export async function generateText( options ) {
   const provider = providers[prompt.config.provider];
   const model = provider( prompt.config.model );
 
-  const result = await aiGenerateText( {
+  const aiOptions = {
     model,
     messages: prompt.messages,
     temperature: prompt.config.temperature,
-    maxOutputTokens: prompt.config.max_tokens ?? 64000,
     ...aiSdkOptions // Spread remaining AI SDK options (thinking, etc.)
-  } );
+  };
+
+  if ( prompt.config.max_tokens ) {
+    aiOptions.maxOutputTokens = prompt.config.max_tokens;
+  }
+
+  const result = await aiGenerateText( aiOptions );
 
   trace( { lib: 'llm', event: 'generateText', input: options, output: result } );
 
@@ -43,7 +50,7 @@ export async function generateObject( options ) {
   const provider = providers[prompt.config.provider];
   const model = provider( prompt.config.model );
 
-  const result = await aiGenerateObject( {
+  const aiOptions = {
     model,
     schema,
     schemaName,
@@ -51,9 +58,14 @@ export async function generateObject( options ) {
     output,
     messages: prompt.messages,
     temperature: prompt.config.temperature,
-    maxOutputTokens: prompt.config.max_tokens ?? 64000,
     ...aiSdkOptions // Spread remaining AI SDK options
-  } );
+  };
+
+  if ( prompt.config.max_tokens ) {
+    aiOptions.maxOutputTokens = prompt.config.max_tokens;
+  }
+
+  const result = await aiGenerateObject( aiOptions );
 
   trace( { lib: 'llm', event: 'generateObject', input: options, output: result } );
 
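Two behavioral changes land here: prompts may now set provider: 'azure', and maxOutputTokens is only forwarded when prompt.config.max_tokens is set, replacing the previous hard-coded 64000 fallback with the provider's own default output limit. A hedged caller sketch (the destructuring of options into prompt and aiSdkOptions happens above these hunks and is not visible in the diff, so the exact options shape is an assumption):

import { generateText } from '@output.ai/llm';

// Sketch only: assumes options carries a `prompt` field plus pass-through
// AI SDK options, as the surrounding (unshown) destructuring suggests.
const result = await generateText( {
  prompt: {
    config: {
      provider: 'azure',              // routed through @ai-sdk/azure
      model: 'my-gpt-4o-deployment',  // hypothetical Azure deployment name
      temperature: 0.2
      // max_tokens omitted: no maxOutputTokens is sent, so the model's
      // default output limit applies (previously this fell back to 64000)
    },
    messages: [ { role: 'user', content: 'Hello' } ]
  }
} );

Note that the truthiness check also skips max_tokens: 0, which is harmless since 0 is not a meaningful output limit.
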
package/src/index.d.ts CHANGED
@@ -5,7 +5,7 @@ import type {
 
 export interface Prompt {
   config: {
-    provider: 'anthropic' | 'openai';
+    provider: 'anthropic' | 'openai' | 'azure';
     model: string;
     temperature?: number;
     max_tokens?: number;
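
For reference, a value satisfying the widened interface (JSDoc-typed sketch; it assumes the package's types resolve from the root, the messages field is used by ai_sdk.js but sits outside this hunk, and the deployment name is hypothetical):

/** @type {import('@output.ai/llm').Prompt} */
const prompt = {
  config: {
    provider: 'azure',               // newly allowed by the widened union
    model: 'my-gpt-4o-deployment',   // hypothetical deployment name
    temperature: 0,
    max_tokens: 1024                 // optional; omit to use the provider default
  },
  messages: [ { role: 'user', content: 'Summarize this.' } ]
};
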
package/src/index.js CHANGED
@@ -1,2 +1 @@
 export { generateText, generateObject } from './ai_sdk.js';
-export { LLMSchema } from './schema.js';
package/src/schema.js DELETED
@@ -1,8 +0,0 @@
-export class LLMSchema {
-  constructor( { name, description, schema, output = 'object' } ) {
-    this.name = name;
-    this.schema = schema;
-    this.description = description;
-    this.output = output;
-  }
-}
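
With LLMSchema removed, its fields map onto options that generateObject already forwards: schema, schemaName, and output appear in the hunks above, though the full destructuring (and any schemaDescription handling) is not visible in this diff. A plausible migration sketch, assuming a Zod schema (which the ai package accepts) and reusing the prompt value from the index.d.ts example above:

import { z } from 'zod';
import { generateObject } from '@output.ai/llm';

// 0.0.10 wrapped these fields in the now-removed LLMSchema class:
//   new LLMSchema( { name, description, schema, output } )
// 0.0.12 sketch: pass the equivalent fields directly. Option names are taken
// from the generateObject hunks above; the full options shape is an assumption.
const result = await generateObject( {
  prompt,                                     // a Prompt as in index.d.ts
  schema: z.object( { title: z.string() } ),  // example schema
  schemaName: 'recipe',
  output: 'object'
} );
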