@output.ai/llm 0.2.4 → 0.2.6

package/README.md CHANGED
@@ -1,99 +1,57 @@
- # LLM Module
+ # @output.ai/llm

- Framework abstraction to interact with LLM models, including prompt management and structured generation.
+ Unified LLM generation API with built-in prompt templating for Output Framework workflows.

- ## Quick Start
+ [![npm version](https://img.shields.io/npm/v/@output.ai/llm)](https://www.npmjs.com/package/@output.ai/llm)
+ [![Documentation](https://img.shields.io/badge/docs-docs.output.ai-blue)](https://docs.output.ai/packages/llm)

- ```js
- import { generateText } from '@output.ai/llm';
+ ## Installation

- const response = await generateText({
-   prompt: 'my_prompt@v1',
-   variables: { topic: 'AI workflows' }
- });
+ ```bash
+ npm install @output.ai/llm
  ```

- ## Features
-
- - **Unified API**: Single import for prompt loading and LLM generation
- - **Multiple Generation Types**: Text, objects, arrays, and enums
- - **Prompt Management**: Load and render `.prompt` files with variable interpolation
- - **Multi-Provider Support**: Anthropic, OpenAI, and Azure
- - **Type Safety**: Full TypeScript support with Zod schemas
-
- ## Generate Text
-
- Generate unstructured text from an LLM:
+ ## Quick Start

- ```js
+ ```typescript
  import { generateText } from '@output.ai/llm';

- const response = await generateText({
-   prompt: 'explain_topic@v1',
-   variables: { topic: 'machine learning' }
+ const result = await generateText({
+   prompt: 'summarize@v1',
+   variables: { text: 'Your content here' }
  });
  ```

- ## Generate Object
+ ## Generation Functions
+
+ | Function | Description |
+ |----------|-------------|
+ | `generateText` | Generate unstructured text |
+ | `generateObject` | Generate a structured object matching a Zod schema |
+ | `generateArray` | Generate an array of structured items |
+ | `generateEnum` | Generate a value from allowed options |

- Generate a structured object matching a Zod schema:
+ ### Example: Structured Output

- ```js
+ ```typescript
  import { generateObject } from '@output.ai/llm';
  import { z } from '@output.ai/core';

- const recipeSchema = z.object({
-   title: z.string(),
-   ingredients: z.array(z.string()),
-   steps: z.array(z.string())
- });
-
  const recipe = await generateObject({
    prompt: 'recipe@v1',
    variables: { dish: 'lasagna' },
-   schema: recipeSchema
- });
- ```
-
- ## Generate Array
-
- Generate an array of structured items:
-
- ```js
- import { generateArray } from '@output.ai/llm';
- import { z } from '@output.ai/core';
-
- const taskSchema = z.object({
-   title: z.string(),
-   priority: z.number()
- });
-
- const tasks = await generateArray({
-   prompt: 'task_list@v1',
-   variables: { project: 'website' },
-   schema: taskSchema
- });
- ```
-
- ## Generate Enum
-
- Generate a value from a list of allowed options:
-
- ```js
- import { generateEnum } from '@output.ai/llm';
-
- const category = await generateEnum({
-   prompt: 'categorize@v1',
-   variables: { text: 'Product announcement' },
-   enum: ['marketing', 'engineering', 'sales', 'support']
+   schema: z.object({
+     title: z.string(),
+     ingredients: z.array(z.string()),
+     steps: z.array(z.string())
+   })
  });
  ```

  ## Prompt Files

- Prompt files use YAML frontmatter for configuration and support LiquidJS templating:
+ Prompt files use YAML frontmatter for configuration and LiquidJS for templating:

- **File: `explain_topic@v1.prompt`**
  ```yaml
  ---
  provider: anthropic
@@ -102,27 +60,23 @@ temperature: 0.7
  ---

  <system>
- You are a concise technical explainer.
+ You are a helpful assistant.
  </system>

  <user>
- Explain {{ topic }} in 3 bullet points.
+ {{ user_message }}
  </user>
  ```

- ## Configuration Options
+ ### Supported Providers

- Prompt files support these configuration fields:
+ - **Anthropic** - Requires `ANTHROPIC_API_KEY`
+ - **OpenAI** - Requires `OPENAI_API_KEY`
+ - **Azure OpenAI** - Requires Azure-specific environment variables

- ```yaml
- ---
- provider: anthropic | openai | azure
- model: model-name
- temperature: 0.0-1.0 (optional)
- maxTokens: number (optional)
- providerOptions: (optional)
-   thinking:
-     type: enabled
-     budgetTokens: number
- ---
- ```
+ ## Documentation
+
+ For comprehensive documentation, visit:
+
+ - [Package Reference](https://docs.output.ai/packages/llm)
+ - [Getting Started](https://docs.output.ai/quickstart)
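The 0.2.6 README summarizes the four generation functions in a table where 0.2.4 had a worked example for each. For reference, a sketch of `generateEnum` usage, reconstructed from the example removed above; the call shape matches the `index.d.ts` declarations later in this diff:

```typescript
import { generateEnum } from '@output.ai/llm';

// Classify free text into one of the allowed categories.
// The return type narrows to the union of the enum values.
const category = await generateEnum({
  prompt: 'categorize@v1',
  variables: { text: 'Product announcement' },
  enum: ['marketing', 'engineering', 'sales', 'support']
});
```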
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@output.ai/llm",
-   "version": "0.2.4",
+   "version": "0.2.6",
    "description": "Framework abstraction to interact with LLM models",
    "type": "module",
    "main": "src/index.js",
@@ -11,11 +11,15 @@
    "dependencies": {
      "@ai-sdk/anthropic": "2.0.28",
      "@ai-sdk/azure": "2.0.53",
+     "@ai-sdk/google-vertex": "3.0.96",
      "@ai-sdk/openai": "2.0.52",
      "@output.ai/core": ">=0.0.1",
-     "ai": "5.0.48",
+     "ai": "5.0.52",
      "gray-matter": "4.0.3",
      "liquidjs": "10.22.0"
    },
-   "license": "UNLICENSED"
+   "license": "Apache-2.0",
+   "publishConfig": {
+     "access": "public"
+   }
  }
package/src/ai_model.js CHANGED
@@ -1,8 +1,9 @@
  import { anthropic } from '@ai-sdk/anthropic';
  import { azure } from '@ai-sdk/azure';
+ import { vertex } from '@ai-sdk/google-vertex';
  import { openai } from '@ai-sdk/openai';

- const providers = { azure, anthropic, openai };
+ const providers = { azure, anthropic, openai, vertex };

  export function loadModel( prompt ) {
    const config = prompt?.config;
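With `vertex` added to the provider map, prompt files can select Google Vertex AI from their frontmatter. A minimal sketch of such a file, assuming the model name below (illustrative, not taken from this diff) and that Google Cloud credentials are configured the way `@ai-sdk/google-vertex` expects:

```yaml
---
provider: vertex
model: gemini-1.5-pro   # illustrative model name, not part of this diff
temperature: 0.7
---

<user>
{{ user_message }}
</user>
```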
package/src/index.d.ts CHANGED
@@ -36,7 +36,7 @@ export type Prompt = {
    /** General configurations for the LLM */
    config: {
      /** LLM Provider */
-     provider: 'anthropic' | 'openai' | 'azure';
+     provider: 'anthropic' | 'openai' | 'azure' | 'vertex';

      /** Model name/identifier */
      model: string;
@@ -59,12 +59,12 @@ export type Prompt = {
   * Load a prompt file and render it with variables.
   *
   * @param {string} name - Name of the prompt file (without .prompt extension)
-  * @param {Record<string, string | number>} [variables] - Variables to interpolate
+  * @param {Record<string, string | number | boolean>} [variables] - Variables to interpolate
   * @returns {Prompt} Loaded and rendered prompt object
   */
  export function loadPrompt(
    name: string,
-   variables?: Record<string, string | number>
+   variables?: Record<string, string | number | boolean>
  ): Prompt;

  /**
@@ -75,13 +75,13 @@ export function loadPrompt(
   *
   * @param {object} args - Generation arguments
   * @param {string} args.prompt - Prompt file name
-  * @param {Record<string, string | number>} args.variables - Variables to interpolate
+  * @param {Record<string, string | number | boolean>} args.variables - Variables to interpolate
   * @returns {Promise<string>} Generated text
   */
  export function generateText(
    args: {
      prompt: string,
-     variables?: Record<string, string | number>
+     variables?: Record<string, string | number | boolean>
    }
  ): Promise<string>;

@@ -93,7 +93,7 @@ export function generateText(
   *
   * @param {object} args - Generation arguments
   * @param {string} args.prompt - Prompt file name
-  * @param {Record<string, string | number>} args.variables - Variables to interpolate
+  * @param {Record<string, string | number | boolean>} args.variables - Variables to interpolate
   * @param {z.ZodObject} args.schema - Output schema
   * @param {string} [args.schemaName] - Output schema name
   * @param {string} [args.schemaDescription] - Output schema description
@@ -102,7 +102,7 @@ export function generateText(
  export function generateObject<TSchema extends z.ZodObject>(
    args: {
      prompt: string,
-     variables?: Record<string, string | number>,
+     variables?: Record<string, string | number | boolean>,
      schema: TSchema,
      schemaName?: string,
      schemaDescription?: string
@@ -117,7 +117,7 @@ export function generateObject<TSchema extends z.ZodObject>(
   *
   * @param {object} args - Generation arguments
   * @param {string} args.prompt - Prompt file name
-  * @param {Record<string, string | number>} args.variables - Variables to interpolate
+  * @param {Record<string, string | number | boolean>} args.variables - Variables to interpolate
   * @param {z.ZodType} args.schema - Output schema (array item)
   * @param {string} [args.schemaName] - Output schema name
   * @param {string} [args.schemaDescription] - Output schema description
@@ -126,7 +126,7 @@ export function generateObject<TSchema extends z.ZodObject>(
  export function generateArray<TSchema extends z.ZodType>(
    args: {
      prompt: string,
-     variables?: Record<string, string | number>,
+     variables?: Record<string, string | number | boolean>,
      schema: TSchema,
      schemaName?: string,
      schemaDescription?: string
@@ -141,14 +141,14 @@ export function generateArray<TSchema extends z.ZodType>(
   *
   * @param {object} args - Generation arguments
   * @param {string} args.prompt - Prompt file name
-  * @param {Record<string, string | number>} args.variables - Variables to interpolate
+  * @param {Record<string, string | number | boolean>} args.variables - Variables to interpolate
   * @param {string[]} args.enum - Allowed values for the generation
   * @returns {Promise<string>} One of the provided enum values
   */
  export function generateEnum<const TEnum extends readonly [string, ...string[]]>(
    args: {
      prompt: string,
-     variables?: Record<string, string | number>,
+     variables?: Record<string, string | number | boolean>,
      enum: TEnum
    }
  ): Promise<TEnum[number]>;
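The widened `variables` type pairs with LiquidJS control flow: a boolean can now drive an `{% if %}` tag directly instead of being stringified first. A minimal sketch, assuming a hypothetical `verbose@v1.prompt` file whose body contains an `{% if verbose %}` block:

```typescript
import { generateText } from '@output.ai/llm';

// 'verbose: true' is now a valid variable value; the 0.2.4
// declarations only admitted string | number here.
const text = await generateText({
  prompt: 'verbose@v1', // hypothetical prompt file
  variables: { topic: 'LLM providers', verbose: true }
});
```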
@@ -18,7 +18,7 @@ const renderPrompt = ( name, content, values ) => {
   * Load a prompt file and render it with variables.
   *
   * @param {string} name - Name of the prompt file (without .prompt extension)
-  * @param {Record<string, string | number>} [values] - Variables to interpolate
+  * @param {Record<string, string | number | boolean>} [values] - Variables to interpolate
   * @returns {Prompt} Loaded and rendered prompt object
   */
  export const loadPrompt = ( name, values = {} ) => {
@@ -146,4 +146,34 @@ temperature: 0.7
      expect( result.config.maxTokens ).toBe( 1024 );
    } );

+   it( 'should render boolean variables correctly', () => {
+     const promptContent = `---
+ provider: anthropic
+ model: claude-3-5-sonnet-20241022
+ ---
+
+ <user>{% if debug %}Debug mode enabled{% else %}Debug mode disabled{% endif %}</user>`;
+
+     loadContent.mockReturnValue( promptContent );
+
+     const result = loadPrompt( 'test', { debug: true } );
+
+     expect( result.messages[0].content ).toBe( 'Debug mode enabled' );
+   } );
+
+   it( 'should render false boolean variables', () => {
+     const promptContent = `---
+ provider: anthropic
+ model: claude-3-5-sonnet-20241022
+ ---
+
+ <user>{% if enabled %}Feature enabled{% else %}Feature disabled{% endif %}</user>`;
+
+     loadContent.mockReturnValue( promptContent );
+
+     const result = loadPrompt( 'test', { enabled: false } );
+
+     expect( result.messages[0].content ).toBe( 'Feature disabled' );
+   } );
+
  } );
@@ -3,7 +3,7 @@ import { ValidationError, z } from '@output.ai/core';
  export const promptSchema = z.object( {
    name: z.string(),
    config: z.object( {
-     provider: z.enum( [ 'anthropic', 'azure', 'openai' ] ),
+     provider: z.enum( [ 'anthropic', 'azure', 'openai', 'vertex' ] ),
      model: z.string(),
      temperature: z.number().optional(),
      maxTokens: z.number().optional(),
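This enum is what lets `provider: vertex` through frontmatter validation; presumably a prompt naming any other provider is rejected when it is loaded. A minimal illustration mirroring the enum from this hunk, using the `z` re-export shown in the import above:

```typescript
import { z } from '@output.ai/core';

// Mirrors the provider enum from promptSchema in this diff.
const provider = z.enum(['anthropic', 'azure', 'openai', 'vertex']);

provider.parse('vertex'); // accepted in 0.2.6
// In 0.2.4 the enum listed only 'anthropic', 'azure', and 'openai',
// so the same value failed validation.
```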