@output.ai/llm 0.2.0 → 0.2.1

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@output.ai/llm",
-  "version": "0.2.0",
+  "version": "0.2.1",
   "description": "Framework abstraction to interact with LLM models",
   "type": "module",
   "main": "src/index.js",
package/src/index.d.ts CHANGED
@@ -1,5 +1,75 @@
 import type { z } from '@output.ai/core';
 
+/**
+ * Represents a single message in a prompt conversation
+ * @example
+ * const msg: PromptMessage = {
+ *   role: 'user',
+ *   content: 'Hello, Claude!'
+ * };
+ */
+export type PromptMessage = {
+  /** The role of the message. Examples: 'system', 'user', 'assistant' */
+  role: string;
+  /** The content of the message */
+  content: string;
+};
+
+/**
+ * Configuration for LLM prompt generation
+ * @example
+ * const prompt: Prompt = {
+ *   name: 'summarizePrompt',
+ *   config: {
+ *     provider: 'anthropic',
+ *     model: 'claude-opus-4-1',
+ *     temperature: 0.7,
+ *     maxTokens: 2048
+ *   },
+ *   messages: [...]
+ * };
+ */
+export type Prompt = {
+  /** Name of the prompt file */
+  name: string;
+
+  /** General configurations for the LLM */
+  config: {
+    /** LLM Provider */
+    provider: 'anthropic' | 'openai' | 'azure';
+
+    /** Model name/identifier */
+    model: string;
+
+    /** Generation temperature (0-2). Lower = more deterministic */
+    temperature?: number;
+
+    /** Maximum tokens in the response */
+    maxTokens?: number;
+
+    /** Additional provider-specific options */
+    options?: Record<string, Record<string, JSONValue>>;
+
+    /** Provider-specific configurations */
+    providerOptions?: Record<string, unknown>;
+  };
+
+  /** Array of messages in the conversation */
+  messages: PromptMessage[];
+};
+
+/**
+ * Load a prompt file and render it with variables.
+ *
+ * @param {string} name - Name of the prompt file (without .prompt extension)
+ * @param {Record<string, string | number>} [variables] - Variables to interpolate
+ * @returns {Prompt} Loaded and rendered prompt object
+ */
+export function loadPrompt(
+  name: string,
+  variables?: Record<string, string | number>
+): Prompt;
+
 /**
  * Use an LLM model to generate text.
  *
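
Taken together, the new declarations describe a small typed API for loading .prompt files. A minimal usage sketch based only on the declared signature; the 'summarize' prompt name and its variables are hypothetical, for illustration:

import { loadPrompt, type Prompt } from '@output.ai/llm';

// Loads summarize.prompt and interpolates the given variables.
const prompt: Prompt = loadPrompt('summarize', {
  article: 'Full text of the article to summarize',
  maxWords: 100
});

// The rendered prompt carries the provider config and the message list.
console.log(prompt.config.provider);           // 'anthropic' | 'openai' | 'azure'
console.log(prompt.messages.map(m => m.role)); // e.g. ['system', 'user']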
package/src/index.js CHANGED
@@ -1,2 +1,3 @@
 export { generateText, generateArray, generateObject, generateEnum } from './ai_sdk.js';
+export { loadPrompt } from './prompt_loader.js';
 export * as ai from 'ai';
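
With the re-export in place, the loader is available from the package's entry point alongside the existing generators. One plausible end-to-end flow; note that this diff does not show how the generate* functions consume a Prompt, so the generateText call below is an assumption:

import { loadPrompt, generateText } from '@output.ai/llm';

const prompt = loadPrompt('summarize', { article: 'Some long text' });

// Assumption: generateText accepts a loaded Prompt. Its actual signature
// is declared further down in index.d.ts but is not part of this diff.
const summary = await generateText(prompt);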