@output.ai/llm 0.2.5 → 0.2.6

package/README.md CHANGED
@@ -1,99 +1,57 @@
- # LLM Module
+ # @output.ai/llm

- Framework abstraction to interact with LLM models, including prompt management and structured generation.
+ Unified LLM generation API with built-in prompt templating for Output Framework workflows.

- ## Quick Start
+ [![npm version](https://img.shields.io/npm/v/@output.ai/llm)](https://www.npmjs.com/package/@output.ai/llm)
+ [![Documentation](https://img.shields.io/badge/docs-docs.output.ai-blue)](https://docs.output.ai/packages/llm)

- ```js
- import { generateText } from '@output.ai/llm';
+ ## Installation

- const response = await generateText({
-   prompt: 'my_prompt@v1',
-   variables: { topic: 'AI workflows' }
- });
+ ```bash
+ npm install @output.ai/llm
  ```

- ## Features
-
- - **Unified API**: Single import for prompt loading and LLM generation
- - **Multiple Generation Types**: Text, objects, arrays, and enums
- - **Prompt Management**: Load and render `.prompt` files with variable interpolation
- - **Multi-Provider Support**: Anthropic, OpenAI, and Azure
- - **Type Safety**: Full TypeScript support with Zod schemas
-
- ## Generate Text
-
- Generate unstructured text from an LLM:
+ ## Quick Start

- ```js
+ ```typescript
  import { generateText } from '@output.ai/llm';

- const response = await generateText({
-   prompt: 'explain_topic@v1',
-   variables: { topic: 'machine learning' }
+ const result = await generateText({
+   prompt: 'summarize@v1',
+   variables: { text: 'Your content here' }
  });
  ```

- ## Generate Object
+ ## Generation Functions
+
+ | Function | Description |
+ |----------|-------------|
+ | `generateText` | Generate unstructured text |
+ | `generateObject` | Generate a structured object matching a Zod schema |
+ | `generateArray` | Generate an array of structured items |
+ | `generateEnum` | Generate a value from allowed options |

- Generate a structured object matching a Zod schema:
+ ### Example: Structured Output

- ```js
+ ```typescript
  import { generateObject } from '@output.ai/llm';
  import { z } from '@output.ai/core';

- const recipeSchema = z.object({
-   title: z.string(),
-   ingredients: z.array(z.string()),
-   steps: z.array(z.string())
- });
-
  const recipe = await generateObject({
    prompt: 'recipe@v1',
    variables: { dish: 'lasagna' },
-   schema: recipeSchema
- });
- ```
-
- ## Generate Array
-
- Generate an array of structured items:
-
- ```js
- import { generateArray } from '@output.ai/llm';
- import { z } from '@output.ai/core';
-
- const taskSchema = z.object({
-   title: z.string(),
-   priority: z.number()
- });
-
- const tasks = await generateArray({
-   prompt: 'task_list@v1',
-   variables: { project: 'website' },
-   schema: taskSchema
- });
- ```
-
- ## Generate Enum
-
- Generate a value from a list of allowed options:
-
- ```js
- import { generateEnum } from '@output.ai/llm';
-
- const category = await generateEnum({
-   prompt: 'categorize@v1',
-   variables: { text: 'Product announcement' },
-   enum: ['marketing', 'engineering', 'sales', 'support']
+   schema: z.object({
+     title: z.string(),
+     ingredients: z.array(z.string()),
+     steps: z.array(z.string())
+   })
  });
  ```

  ## Prompt Files

- Prompt files use YAML frontmatter for configuration and support LiquidJS templating:
+ Prompt files use YAML frontmatter for configuration and LiquidJS for templating:

- **File: `explain_topic@v1.prompt`**
  ```yaml
  ---
  provider: anthropic
@@ -102,27 +60,23 @@ temperature: 0.7
  ---

  <system>
- You are a concise technical explainer.
+ You are a helpful assistant.
  </system>

  <user>
- Explain {{ topic }} in 3 bullet points.
+ {{ user_message }}
  </user>
  ```

- ## Configuration Options
+ ### Supported Providers

- Prompt files support these configuration fields:
+ - **Anthropic** - Requires `ANTHROPIC_API_KEY`
+ - **OpenAI** - Requires `OPENAI_API_KEY`
+ - **Azure OpenAI** - Requires Azure-specific environment variables

- ```yaml
- ---
- provider: anthropic | openai | azure
- model: model-name
- temperature: 0.0-1.0 (optional)
- maxTokens: number (optional)
- providerOptions: (optional)
-   thinking:
-     type: enabled
-     budgetTokens: number
- ---
- ```
+ ## Documentation
+
+ For comprehensive documentation, visit:
+
+ - [Package Reference](https://docs.output.ai/packages/llm)
+ - [Getting Started](https://docs.output.ai/quickstart)
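
Note: the 0.2.6 README keeps `generateArray` and `generateEnum` in its function table but drops their worked examples. Going by the 0.2.5 snippets removed above (call shapes assumed unchanged in this release; prompt names and variables are illustrative), usage would still look like:

```typescript
import { generateArray, generateEnum } from '@output.ai/llm';
import { z } from '@output.ai/core';

// Array of structured items, each validated against the Zod schema.
const tasks = await generateArray({
  prompt: 'task_list@v1',
  variables: { project: 'website' },
  schema: z.object({
    title: z.string(),
    priority: z.number()
  })
});

// Single value constrained to a fixed set of options.
const category = await generateEnum({
  prompt: 'categorize@v1',
  variables: { text: 'Product announcement' },
  enum: ['marketing', 'engineering', 'sales', 'support']
});
```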
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@output.ai/llm",
-   "version": "0.2.5",
+   "version": "0.2.6",
    "description": "Framework abstraction to interact with LLM models",
    "type": "module",
    "main": "src/index.js",
@@ -11,11 +11,15 @@
    "dependencies": {
      "@ai-sdk/anthropic": "2.0.28",
      "@ai-sdk/azure": "2.0.53",
+     "@ai-sdk/google-vertex": "3.0.96",
      "@ai-sdk/openai": "2.0.52",
      "@output.ai/core": ">=0.0.1",
      "ai": "5.0.52",
      "gray-matter": "4.0.3",
      "liquidjs": "10.22.0"
    },
-   "license": "UNLICENSED"
+   "license": "Apache-2.0",
+   "publishConfig": {
+     "access": "public"
+   }
  }
package/src/ai_model.js CHANGED
@@ -1,8 +1,9 @@
  import { anthropic } from '@ai-sdk/anthropic';
  import { azure } from '@ai-sdk/azure';
+ import { vertex } from '@ai-sdk/google-vertex';
  import { openai } from '@ai-sdk/openai';

- const providers = { azure, anthropic, openai };
+ const providers = { azure, anthropic, openai, vertex };

  export function loadModel( prompt ) {
    const config = prompt?.config;
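
The substantive change in 0.2.6 is this new `vertex` provider backed by `@ai-sdk/google-vertex`. The diff ships no Vertex example, so the prompt file below is a sketch that assumes the frontmatter convention shown in the README; the model id is hypothetical, and `@ai-sdk/google-vertex` authenticates via Google Cloud application default credentials rather than a single API key variable.

```yaml
---
# Sketch only: 'vertex' matches the provider key added above;
# the model id is an assumption, not taken from this diff.
provider: vertex
model: gemini-1.5-pro
temperature: 0.7
---

<system>
You are a helpful assistant.
</system>

<user>
{{ user_message }}
</user>
```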
package/src/index.d.ts CHANGED
@@ -36,7 +36,7 @@ export type Prompt = {
    /** General configurations for the LLM */
    config: {
      /** LLM Provider */
-     provider: 'anthropic' | 'openai' | 'azure';
+     provider: 'anthropic' | 'openai' | 'azure' | 'vertex';

      /** Model name/identifier */
      model: string;
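
At the type level, the widened union means a `vertex` config now compiles. A minimal sketch, assuming `Prompt` is exported from the package entry point as this `.d.ts` suggests and that `temperature` and `maxTokens` remain optional (the Zod schema below agrees):

```typescript
import type { Prompt } from '@output.ai/llm';

// Accepted by the 0.2.6 typings; under 0.2.5 the 'vertex' literal
// would fail to compile. The model id is a placeholder.
const config: Prompt['config'] = {
  provider: 'vertex',
  model: 'gemini-1.5-pro'
};
```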
@@ -3,7 +3,7 @@ import { ValidationError, z } from '@output.ai/core';
  export const promptSchema = z.object( {
    name: z.string(),
    config: z.object( {
-     provider: z.enum( [ 'anthropic', 'azure', 'openai' ] ),
+     provider: z.enum( [ 'anthropic', 'azure', 'openai', 'vertex' ] ),
      model: z.string(),
      temperature: z.number().optional(),
      maxTokens: z.number().optional(),