@node-llm/core 0.6.0 → 0.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +89 -458
- package/dist/chat/Chat.d.ts.map +1 -1
- package/dist/chat/Chat.js +5 -3
- package/dist/chat/ChatResponse.d.ts +2 -1
- package/dist/chat/ChatResponse.d.ts.map +1 -1
- package/dist/chat/ChatResponse.js +3 -1
- package/dist/chat/Stream.d.ts.map +1 -1
- package/dist/chat/Stream.js +7 -1
- package/dist/config.d.ts +31 -0
- package/dist/config.d.ts.map +1 -0
- package/dist/config.js +12 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +2 -0
- package/dist/llm.d.ts +16 -9
- package/dist/llm.d.ts.map +1 -1
- package/dist/llm.js +41 -20
- package/dist/models/ModelRegistry.d.ts +0 -3
- package/dist/models/ModelRegistry.d.ts.map +1 -1
- package/dist/models/ModelRegistry.js +0 -3
- package/dist/models/models.d.ts.map +1 -1
- package/dist/models/models.js +386 -0
- package/dist/models/types.d.ts +2 -2
- package/dist/models/types.d.ts.map +1 -1
- package/dist/providers/Provider.d.ts +3 -0
- package/dist/providers/Provider.d.ts.map +1 -1
- package/dist/providers/anthropic/AnthropicProvider.d.ts +1 -0
- package/dist/providers/anthropic/AnthropicProvider.d.ts.map +1 -1
- package/dist/providers/anthropic/AnthropicProvider.js +1 -0
- package/dist/providers/anthropic/index.d.ts.map +1 -1
- package/dist/providers/anthropic/index.js +3 -2
- package/dist/providers/deepseek/Capabilities.d.ts +14 -0
- package/dist/providers/deepseek/Capabilities.d.ts.map +1 -0
- package/dist/providers/deepseek/Capabilities.js +52 -0
- package/dist/providers/deepseek/Chat.d.ts +8 -0
- package/dist/providers/deepseek/Chat.d.ts.map +1 -0
- package/dist/providers/deepseek/Chat.js +89 -0
- package/dist/providers/deepseek/DeepSeekProvider.d.ts +28 -0
- package/dist/providers/deepseek/DeepSeekProvider.d.ts.map +1 -0
- package/dist/providers/deepseek/DeepSeekProvider.js +38 -0
- package/dist/providers/deepseek/Models.d.ts +8 -0
- package/dist/providers/deepseek/Models.d.ts.map +1 -0
- package/dist/providers/deepseek/Models.js +67 -0
- package/dist/providers/deepseek/Streaming.d.ts +8 -0
- package/dist/providers/deepseek/Streaming.d.ts.map +1 -0
- package/dist/providers/deepseek/Streaming.js +74 -0
- package/dist/providers/deepseek/index.d.ts +7 -0
- package/dist/providers/deepseek/index.d.ts.map +1 -0
- package/dist/providers/deepseek/index.js +22 -0
- package/dist/providers/gemini/Capabilities.d.ts.map +1 -1
- package/dist/providers/gemini/GeminiProvider.d.ts +1 -0
- package/dist/providers/gemini/GeminiProvider.d.ts.map +1 -1
- package/dist/providers/gemini/GeminiProvider.js +1 -0
- package/dist/providers/gemini/index.d.ts.map +1 -1
- package/dist/providers/gemini/index.js +3 -2
- package/dist/providers/ollama/Capabilities.d.ts +13 -0
- package/dist/providers/ollama/Capabilities.d.ts.map +1 -0
- package/dist/providers/ollama/Capabilities.js +50 -0
- package/dist/providers/ollama/Embedding.d.ts +6 -0
- package/dist/providers/ollama/Embedding.d.ts.map +1 -0
- package/dist/providers/ollama/Embedding.js +12 -0
- package/dist/providers/ollama/Models.d.ts +8 -0
- package/dist/providers/ollama/Models.d.ts.map +1 -0
- package/dist/providers/ollama/Models.js +31 -0
- package/dist/providers/ollama/OllamaProvider.d.ts +8 -0
- package/dist/providers/ollama/OllamaProvider.d.ts.map +1 -0
- package/dist/providers/ollama/OllamaProvider.js +28 -0
- package/dist/providers/ollama/index.d.ts +9 -0
- package/dist/providers/ollama/index.d.ts.map +1 -0
- package/dist/providers/ollama/index.js +17 -0
- package/dist/providers/openai/Capabilities.d.ts +2 -1
- package/dist/providers/openai/Capabilities.d.ts.map +1 -1
- package/dist/providers/openai/Capabilities.js +10 -2
- package/dist/providers/openai/Embedding.d.ts +4 -2
- package/dist/providers/openai/Embedding.d.ts.map +1 -1
- package/dist/providers/openai/Embedding.js +13 -8
- package/dist/providers/openai/Models.d.ts +12 -2
- package/dist/providers/openai/Models.d.ts.map +1 -1
- package/dist/providers/openai/Models.js +50 -16
- package/dist/providers/openai/OpenAIProvider.d.ts +17 -9
- package/dist/providers/openai/OpenAIProvider.d.ts.map +1 -1
- package/dist/providers/openai/OpenAIProvider.js +2 -1
- package/dist/providers/openai/index.d.ts.map +1 -1
- package/dist/providers/openai/index.js +4 -3
- package/package.json +1 -1
package/README.md
CHANGED
@@ -1,533 +1,164 @@
+<p align="left">
+  <img src="../../docs/assets/images/logo.jpg" alt="node-llm logo" width="300" />
+</p>
+
 # @node-llm/core
 
 [](https://www.npmjs.com/package/@node-llm/core)
 [](https://opensource.org/licenses/MIT)
 [](https://www.typescriptlang.org/)
 
-
-
-`node-llm` focuses on clean abstractions, minimal magic, and a streaming-first design. It provides a unified interface to interact with various LLM providers without being locked into their specific SDKs.
+**One unified interface for OpenAI, Anthropic, Gemini, DeepSeek, and local models.**
 
-
+**node-llm** abstracts away the chaos of vendor-specific SDKs. It gives you a clean, streaming-first API with built-in support for Vision, Tools, and Structured Outputs.
 
-
+<br/>
 
-
-
-
-
-
-
-
-
-
-
+<p align="left">
+  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/openai.svg" height="28" />
+  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/openai-text.svg" height="22" />
+
+  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/anthropic-text.svg" height="18" />
+
+  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/gemini-color.svg" height="28" />
+  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/gemini-text.svg" height="20" />
+
+  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/deepseek-color.svg" height="28" />
+  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/deepseek-text.svg" height="20" />
+
+  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/ollama.svg" height="28" />
+  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/ollama-text.svg" height="18" />
+</p>
 
-
-
-```bash
-npm install @node-llm/core
-# or
-pnpm add @node-llm/core
-```
+<br/>
 
 ---
 
-##
-
-### 1. Configure the Provider
+## ⚡ Quick Example
 
 ```ts
 import { LLM } from "@node-llm/core";
-import "dotenv/config";
-
-LLM.configure({
-  provider: "openai", // or "anthropic", "gemini"
-  retry: { attempts: 3, delayMs: 500 },
-  defaultModerationModel: "text-moderation-latest",
-  defaultTranscriptionModel: "whisper-1",
-  defaultEmbeddingModel: "text-embedding-3-small"
-});
-```
 
-
+// 1. Configure once
+LLM.configure({ provider: "openai" });
 
-
-const chat = LLM.chat("gpt-4o
-
-});
-
-const response = await chat.ask("What is Node.js?");
-
-// Use as a string directly
-console.log(response);
-
-// Or access metadata (RubyLLM style)
+// 2. Basic Chat
+const chat = LLM.chat("gpt-4o");
+const response = await chat.ask("Explain Node.js");
 console.log(response.content);
-console.log(`Model: ${response.model_id}`);
-console.log(`Tokens: ${response.input_tokens} in, ${response.output_tokens} out`);
-console.log(`Cost: $${response.cost}`);
-```
 
-
-
-```ts
-for await (const chunk of chat.stream("Write a poem")) {
+// 3. Streaming
+for await (const chunk of chat.stream("Explain Node.js")) {
   process.stdout.write(chunk.content);
 }
 ```
 
-
-
-Generate images and interact with them using a rich API.
-
-```ts
-const image = await LLM.paint("a sunset over mountains", {
-  model: "dall-e-3"
-});
-
-// Use as a URL string
-console.log(`URL: ${image}`);
-
-// Or use rich methods
-await image.save("sunset.png");
-console.log(`Format: ${image.mimeType}`);
-```
+---
 
-
+## 🔮 Capabilities
 
-
+### 💬 Unified Chat
+Stop rewriting code for every provider. `node-llm` normalizes inputs and outputs.
 
 ```ts
-const
-
-console.log(response.input_tokens); // 10
-console.log(response.output_tokens); // 5
-console.log(response.cost); // 0.000185
-
-// Access aggregated usage for the whole session
-console.log(chat.totalUsage.total_tokens);
-console.log(chat.totalUsage.cost);
+const chat = LLM.chat(); // Defaults to GPT-4o
+await chat.ask("Hello world");
 ```
 
-###
-
-Generate vector representations of text for semantic search, clustering, and similarity comparisons.
+### 👁️ Smart Vision & Files
+Pass images, PDFs, or audio files directly. We handle the base64 encoding and MIME types.
 
 ```ts
-
-
-
-console.log(embedding.vector); // Array of floats (e.g., 1536 dimensions)
-console.log(embedding.dimensions); // 1536
-console.log(embedding.model); // "text-embedding-3-small"
-console.log(embedding.input_tokens); // Token count
-
-// Batch embeddings
-const embeddings = await LLM.embed([
-  "First text",
-  "Second text",
-  "Third text"
-]);
-
-console.log(embeddings.vectors); // Array of vectors
-console.log(embeddings.vectors.length); // 3
-
-// Custom model and dimensions
-const customEmbedding = await LLM.embed("Semantic search text", {
-  model: "text-embedding-3-large",
-  dimensions: 256 // Reduce dimensions for faster processing
+await chat.ask("Analyze this interface", {
+  files: ["./screenshot.png", "./specs.pdf"]
 });
 ```
 
-###
-
-Convert audio files to text using specialized models like Whisper.
-
-```ts
-const text = await LLM.transcribe("meeting.mp3");
-console.log(text);
-```
-
-### 7. Content Moderation (Moderate)
-
-Check if text content violates safety policies.
-
-```ts
-const result = await LLM.moderate("I want to help everyone!");
-if (result.flagged) {
-  console.log(`❌ Flagged for: ${result.flaggedCategories.join(", ")}`);
-} else {
-  console.log("✅ Content appears safe");
-}
-```
-
-Learn how to implement [custom risk thresholds](../../examples/openai/12-risk-assessment.mjs) for more granular control.
-
-### 8. Chat Event Handlers
-
-Hook into the chat lifecycle for logging, UI updates, or auditing.
-
-```ts
-chat
-  .onNewMessage(() => console.log("AI started typing..."))
-  .onToolCall((tool) => console.log(`Calling ${tool.function.name}...`))
-  .onToolResult((result) => console.log(`Tool returned: ${result}`))
-  .onEndMessage((response) => console.log(`Done. Usage: ${response.total_tokens}`));
-
-await chat.ask("What's the weather?");
-```
-
-### 9. System Prompts (Instructions)
-
-Guide the AI's behavior, personality, or constraints.
-
-```ts
-// Set initial instructions
-chat.withInstructions("You are a helpful assistant that explains simply.");
-
-// Update instructions mid-conversation (replace: true removes previous ones)
-chat.withInstructions("Now assume the persona of a pirate.", { replace: true });
-
-await chat.ask("Hello");
-// => "Ahoy matey!"
-```
-
-### 10. Temperature Control (Creativity)
-
-Adjust the randomness of the model's responses.
-
-```ts
-// Factual (0.0 - 0.3)
-const factual = LLM.chat("gpt-4o").withTemperature(0.2);
-
-// Creative (0.7 - 1.0)
-const creative = LLM.chat("gpt-4o").withTemperature(0.9);
-```
-
-### 11. Provider-Specific Parameters
-
-Access unique provider features while maintaining the unified interface. Parameters passed via `withParams()` will override any defaults set by the library.
-
-```ts
-// OpenAI: Set seed for deterministic output
-const chat = LLM.chat("gpt-4o-mini")
-  .withParams({
-    seed: 42,
-    user: "user-123",
-    presence_penalty: 0.5
-  });
-
-// Gemini: Configure safety settings and generation params
-const geminiChat = LLM.chat("gemini-2.0-flash")
-  .withParams({
-    generationConfig: { topP: 0.8, topK: 40 },
-    safetySettings: [
-      { category: "HARM_CATEGORY_HARASSMENT", threshold: "BLOCK_LOW_AND_ABOVE" }
-    ]
-  });
-
-// Anthropic: Custom headers or beta features
-const claudeChat = LLM.chat("claude-3-5-sonnet-20241022")
-  .withParams({
-    top_k: 50,
-    top_p: 0.9
-  });
-```
-
-**⚠️ Important Notes:**
-- Parameters from `withParams()` take precedence over library defaults
-- Always consult the provider's API documentation for supported parameters
-- The library passes these parameters through without validation
-- Enable debug mode to see the exact request: `process.env.NODELLM_DEBUG = "true"`
-
-See examples: [OpenAI](../../examples/openai/chat/params.mjs) | [Gemini](../../examples/gemini/chat/params.mjs)
-
----
-
-## 📚 Examples
-
-Check the [examples](../../examples) directory for focused scripts organized by provider:
-
-### OpenAI Examples
-
-#### 💬 Chat
-| Example | Description |
-| :--- | :--- |
-| [Basic & Streaming](../../examples/openai/chat/basic.mjs) | Standard completions and real-time streaming |
-| [System Instructions](../../examples/openai/chat/instructions.mjs) | Tuning behavior with system prompts and temperature |
-| [Tool Calling](../../examples/openai/chat/tools.mjs) | Automatic execution of model-requested functions |
-| [Parallel Tool Calling](../../examples/openai/chat/parallel-tools.mjs) | Executing multiple tools in a single turn |
-| [Lifecycle Events](../../examples/openai/chat/events.mjs) | Hooks for specific chat events (onNewMessage, onToolCall) |
-| [Token Usage](../../examples/openai/chat/usage.mjs) | Tracking costs and token counts |
-| [Max Tokens](../../examples/openai/chat/max-tokens.mjs) | Limiting response length with `maxTokens` |
-| [Structured Output](../../examples/openai/chat/structured.mjs) | Zod-based JSON schema enforcement |
-
-#### 🖼️ Multimodal
-| Example | Description |
-| :--- | :--- |
-| [Vision Analysis](../../examples/openai/multimodal/vision.mjs) | Analyzing images via URLs |
-| [Multi-Image Analysis](../../examples/openai/multimodal/multi-image.mjs) | Comparing multiple images in one request |
-| [File Context](../../examples/openai/multimodal/files.mjs) | Reading and analyzing local project files |
-| [Audio Transcription](../../examples/openai/multimodal/transcribe.mjs) | Converting audio files to text (Whisper) |
-
-#### 🎨 Images
-| Example | Description |
-| :--- | :--- |
-| [Generate & Save](../../examples/openai/images/generate.mjs) | Creating images with DALL-E 3 and saving to disk |
-
-#### 🛡️ Safety
-| Example | Description |
-| :--- | :--- |
-| [Moderation](../../examples/openai/safety/moderation.mjs) | Content safety checks and risk assessment |
-
-#### 🧠 Discovery
-| Example | Description |
-| :--- | :--- |
-| [Models & Capabilities](../../examples/openai/discovery/models.mjs) | Listing models and inspecting their specs |
-| [Embeddings](../../examples/openai/embeddings/create.mjs) | Generating semantic vector embeddings |
-
-### Gemini Examples
-
-#### 💬 Chat
-| Example | Description |
-| :--- | :--- |
-| [Basic & Streaming](../../examples/gemini/chat/basic.mjs) | Standard completions and real-time streaming |
-| [System Instructions](../../examples/gemini/chat/instructions.mjs) | Behavior tuning and creativity control |
-| [Tool Calling](../../examples/gemini/chat/tools.mjs) | Function calling with automatic execution |
-| [Lifecycle Events](../../examples/gemini/chat/events.mjs) | Event hooks for chat interactions |
-| [Token Usage](../../examples/gemini/chat/usage.mjs) | Tracking conversation costs |
-| [Structured Output](../../examples/gemini/chat/structured.mjs) | Native JSON schema support |
-
-#### 🖼️ Multimodal
-| Example | Description |
-| :--- | :--- |
-| [Vision Analysis](../../examples/gemini/multimodal/vision.mjs) | Understanding images |
-| [File Context](../../examples/gemini/multimodal/files.mjs) | Reading multiple local files |
-| [Audio Transcription](../../examples/gemini/multimodal/transcribe.mjs) | Native audio understanding |
-
-#### 🎨 Images
-| Example | Description |
-| :--- | :--- |
-| [Generate & Save](../../examples/gemini/images/generate.mjs) | Creating images with Imagen |
-
-#### 🧠 Discovery
-| Example | Description |
-| :--- | :--- |
-| [Models & Capabilities](../../examples/gemini/discovery/models.mjs) | Listing models and inspecting their specs |
-| [Embeddings](../../examples/gemini/embeddings/create.mjs) | Generating semantic vector embeddings |
-
-### Anthropic Examples
-
-#### 💬 Chat
-| Example | Description |
-| :--- | :--- |
-| [Basic & Streaming](../../examples/anthropic/chat/basic.mjs) | Chatting with Claude 3.5 Models |
-| [Tool Calling](../../examples/anthropic/chat/tools.mjs) | Native tool use with automatic execution |
-| [Parallel Tools](../../examples/anthropic/chat/parallel-tools.mjs) | Handling multiple tool requests in one turn |
-| [Token Usage](../../examples/anthropic/chat/usage.mjs) | Tracking Claude-specific token metrics |
-| [Structured Output](../../examples/anthropic/chat/structured.mjs) | Prompt-based JSON schema enforcement |
-
-#### 🖼️ Multimodal
-| Example | Description |
-| :--- | :--- |
-| [Vision Analysis](../../examples/anthropic/multimodal/vision.mjs) | Analyzing images with Claude Vision |
-| [PDF Analysis](../../examples/anthropic/multimodal/pdf.mjs) | Native PDF document processing |
-| [File Context](../../examples/anthropic/multimodal/files.mjs) | Passing local file contents to Claude |
-
-
-To run an example:
-```bash
-node examples/openai/01-basic-chat.mjs
-```
-
----
-
-## 🔌 Advanced Usage
-
-### Tool Calling (Function Calling)
-
-Define your tools and let the library handle the execution loop automatically.
+### 🛠️ Auto-Executing Tools
+Define tools once, and the library manages the execution loop for you.
 
 ```ts
-const
+const tools = [{
   type: 'function',
-  function: {
-
-
-      type: 'object',
-      properties: { location: { type: 'string' } }
-    }
-  },
-  handler: async ({ location }) => {
-    return JSON.stringify({ location, temp: 22, unit: 'celsius' });
-  }
-};
-
-// Use the fluent API to add tools on the fly
-const reply = await chat
-  .withTool(weatherTool)
-  .ask("What is the weather in London?");
-```
-
-### Structured Output (Schemas)
+  function: { name: 'get_weather', ... },
+  handler: async ({ loc }) => `Sunny in ${loc}`
+}];
 
-
+await chat.withTools(tools).ask("Weather in Tokyo?");
+```
 
-
+### ✨ Structured Output
+Get type-safe JSON back using **Zod** schemas.
 
 ```ts
-import {
-
-const personSchema = z.object({
-  name: z.string(),
-  age: z.number(),
-  hobbies: z.array(z.string())
-});
+import { z } from "zod";
 
-const
-
-  .ask("Generate a person named Alice who likes hiking");
+const Product = z.object({ name: z.string(), price: z.number() });
+const res = await chat.withSchema(Product).ask("Generate a gadget");
 
-// Type-safe access
-const person = response.parsed;
-console.log(person.name); // "Alice"
+console.log(res.parsed.name); // Type-safe access
 ```
 
-
-
+### 🎨 Image Generation
 ```ts
-
-  type: "object",
-  properties: {
-    name: { type: "string" },
-    age: { type: "integer" }
-  },
-  required: ["name", "age"],
-  additionalProperties: false // Required for strict mode in OpenAI
-};
-
-const response = await chat
-  .withSchema(schema)
-  .ask("Generate a person");
-
-console.log(response.parsed); // { name: "...", age: ... }
+await LLM.paint("A cyberpunk city in rain");
 ```
 
-###
-
-Guarantee valid JSON output without enforcing a strict schema.
-
+### 🎤 Audio Transcription
 ```ts
-
-  responseFormat: { type: "json_object" }
-});
-
-const response = await chat.ask("Generate a JSON object with a greeting");
-console.log(response.parsed); // { greeting: "..." }
+await LLM.transcribe("meeting-recording.wav");
 ```
 
-
-
-
-Pass local paths or URLs directly. The library handles reading, MIME detection, and encoding for a wide variety of file types.
-
-**Supported File Types:**
-- **Images**: `.jpg`, `.jpeg`, `.png`, `.gif`, `.webp`
-- **Videos**: `.mp4`, `.mpeg`, `.mov`
-- **Audio**: `.wav`, `.mp3`
-- **Documents**: `.csv`, `.json`
-- **Code**: `.js`, `.mjs`, `.cjs`, `.ts`, `.py`, `.rb`, `.go`, `.java`, `.c`, `.cpp`, `.rs`, `.swift`, `.kt`
-- **Text**: `.txt`, `.md`, `.html`, `.css`, `.xml`, `.yml`, `.yaml`
+### 🧠 Deep Reasoning
+Access the thought process of models like **DeepSeek R1** or **OpenAI o1/o3** using the `.reasoning` field.
 
 ```ts
-
-await chat.ask("
-  files: ["./screenshot.png"]
-});
-
-// Audio
-await chat.ask("Transcribe this", {
-  files: ["./meeting.mp3"]
-});
+const chat = LLM.chat("deepseek-reasoner");
+const res = await chat.ask("Solve a complex puzzle");
 
-//
-
-  files: ["./app.ts"]
-});
-
-// Multiple files at once
-await chat.ask("Analyze these files", {
-  files: ["diagram.png", "data.json", "notes.txt"]
-});
+console.log(res.reasoning); // Output the model's inner thought process
+console.log(res.content); // Output the final answer
 ```
 
-
-
-Inject custom headers into requests, useful for tools like Helicone or Portkey.
-
-```ts
-chat.withRequestOptions({
-  headers: {
-    "Helicone-Auth": "Bearer my-key",
-    "X-Custom-Trace": "123"
-  }
-});
-```
-
-### Model Capabilities & Pricing
+---
 
-
+## 📋 Supported Providers
 
-
-
-
-
-
-
-
-```
+| Provider | Supported Features |
+| :--- | :--- |
+| <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/openai.svg" height="18"> <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/openai-text.svg" height="18"> | Chat, Streaming, Tools, Vision, Audio, Images, Transcription, **Reasoning** |
+| <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/gemini-color.svg" height="18"> <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/gemini-text.svg" height="14"> | Chat, Streaming, Tools, Vision, Audio, Video, Embeddings |
+| <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/anthropic-text.svg" height="12"> | Chat, Streaming, Tools, Vision, PDF Support, Structured Output |
+| <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/deepseek-color.svg" height="18"> <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/deepseek-text.svg" height="14"> | Chat (V3), **Reasoning (R1)**, Tools, Streaming, Structured Output |
+| <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/ollama.svg" height="18"> <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/ollama-text.svg" height="12"> | **Local Inference**, Chat, Streaming, Tools, Vision, Embeddings |
 
 ---
 
-##
+## 🚀 Why use this over official SDKs?
 
-|
+| Feature | node-llm | Official SDKs |
 | :--- | :--- | :--- |
-| **
-| **
-| **
-| **
+| **API Style** | Consistent across all providers | Different for everyone |
+| **Streaming** | Standard `AsyncIterator` | Callbacks/Events/Streams mixed |
+| **Tools** | Automatic Execution Loop | Manual parsing & recursion |
+| **Files** | Path string or URL | Base64 buffers / distinct types |
+| **Retries** | Built-in & Configurable | Varies by SDK |
 
 ---
 
-##
+## 📚 Documentation & Installation
 
-
-
-
-- **Production Ready**: Built-in retries and strict type checking.
-
----
+```bash
+npm install @node-llm/core
+```
 
-
+**[View Full Documentation ↗](https://node-llm.eshaiju.com/)**
 
-
-```bash
-npm run test:unit
-```
+---
 
-
-- **Replay Mode (Default)**: Runs against recorded cassettes. Fast and requires no API keys.
-```bash
-npm run test:integration
-```
-- **Record Mode**: Update cassettes by hitting real APIs (requires API keys).
-```bash
-VCR_MODE=record npm run test:integration
-```
+## 🫶 Credits
 
-
+Heavily inspired by the elegant design of [RubyLLM](https://rubyllm.com/).
 
 ---
 
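The file list above ships an entire new `dist/providers/ollama` tree alongside the DeepSeek one, and the rewritten README now advertises local models. A minimal sketch of what that would look like through the unified API, assuming the new provider registers under the id `ollama` and follows the same `LLM.configure` pattern the README shows for OpenAI (the model name is illustrative, not taken from this diff):

```ts
import { LLM } from "@node-llm/core";

// Assumption: the Ollama provider added in 0.8.0 registers as "ollama",
// mirroring the LLM.configure({ provider: "openai" }) call in the README diff.
LLM.configure({ provider: "ollama" });

// Illustrative model id; any model served by a local Ollama instance
// would flow through the same chat/stream interface shown above.
const chat = LLM.chat("llama3.2");

for await (const chunk of chat.stream("Explain Node.js")) {
  process.stdout.write(chunk.content);
}
```
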
package/dist/chat/Chat.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"Chat.d.ts","sourceRoot":"","sources":["../../src/chat/Chat.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,OAAO,EAAE,MAAM,cAAc,CAAC;AACvC,OAAO,EAAE,WAAW,EAAE,MAAM,kBAAkB,CAAC;AAC/C,OAAO,EAAE,QAAQ,EAAE,KAAK,EAAE,MAAM,0BAA0B,CAAC;AAI3D,OAAO,EAAE,IAAI,EAAE,MAAM,WAAW,CAAC;AACjC,OAAO,EAAE,MAAM,EAAE,MAAM,qBAAqB,CAAC;AAE7C,OAAO,EAAE,CAAC,EAAE,MAAM,KAAK,CAAC;AAExB,MAAM,WAAW,UAAU;IACzB,MAAM,CAAC,EAAE,MAAM,EAAE,CAAC;IAClB,KAAK,CAAC,EAAE,MAAM,EAAE,CAAC;IACjB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;CAClC;AAED,OAAO,EAAE,kBAAkB,EAAE,MAAM,mBAAmB,CAAC;AAEvD,qBAAa,IAAI;IAKb,OAAO,CAAC,QAAQ,CAAC,QAAQ;IACzB,OAAO,CAAC,KAAK;IACb,OAAO,CAAC,QAAQ,CAAC,OAAO;IAN1B,OAAO,CAAC,QAAQ,CAAiB;IACjC,OAAO,CAAC,QAAQ,CAAW;gBAGR,QAAQ,EAAE,QAAQ,EAC3B,KAAK,EAAE,MAAM,EACJ,OAAO,GAAE,WAAgB;IAmB5C;;OAEG;IACH,IAAI,OAAO,IAAI,SAAS,OAAO,EAAE,CAEhC;IAED;;OAEG;IACH,IAAI,UAAU,IAAI,KAAK,CAetB;IAED;;;OAGG;IACH,QAAQ,CAAC,IAAI,EAAE,GAAG,GAAG,IAAI;IAIzB;;;;;;;OAOG;IACH,SAAS,CAAC,KAAK,EAAE,CAAC,IAAI,GAAG,GAAG,CAAC,EAAE,EAAE,OAAO,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,OAAO,CAAA;KAAE,GAAG,IAAI;IA2BvE;;;;OAIG;IACH,gBAAgB,CAAC,WAAW,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,OAAO,CAAA;KAAE,GAAG,IAAI;IAmB5E;;OAEG;IACH,gBAAgB,CAAC,WAAW,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,OAAO,CAAA;KAAE,GAAG,IAAI;IAI5E;;;OAGG;IACH,eAAe,CAAC,IAAI,EAAE,MAAM,GAAG,IAAI;IAKnC;;OAEG;IACH,SAAS,CAAC,KAAK,EAAE,MAAM,GAAG,IAAI;IAK9B;;;OAGG;IACH,kBAAkB,CAAC,OAAO,EAAE;QAAE,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;QAAC,cAAc,CAAC,EAAE,GAAG,CAAA;KAAE,GAAG,IAAI;IAU7F;;;OAGG;IACH,UAAU,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,GAAG,IAAI;IAK7C;;;OAGG;IACH,UAAU,CAAC,MAAM,EAAE,MAAM,GAAG,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,GAAG,IAAI,GAAG,IAAI;IAkB9E,YAAY,CAAC,OAAO,EAAE,MAAM,IAAI,GAAG,IAAI;IAKvC,YAAY,CAAC,OAAO,EAAE,CAAC,OAAO,EAAE,kBAAkB,KAAK,IAAI,GAAG,IAAI;IAKlE,UAAU,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,GAAG,KAAK,IAAI,GAAG,IAAI;IAKlD,YAAY,CAAC,OAAO,EAAE,CAAC,MAAM,EAAE,GAAG,KAAK,IAAI,GAAG,IAAI;IAKlD;;OAEG;IACG,GAAG,CAAC,OAAO,EAAE,MAAM,GAAG,GAAG,EAAE,EAAE,OAAO,CAAC,EAAE,UAAU,GAAG,OAAO,CAAC,kBAAkB,CAAC;
+{"version":3,"file":"Chat.d.ts","sourceRoot":"","sources":["../../src/chat/Chat.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,OAAO,EAAE,MAAM,cAAc,CAAC;AACvC,OAAO,EAAE,WAAW,EAAE,MAAM,kBAAkB,CAAC;AAC/C,OAAO,EAAE,QAAQ,EAAE,KAAK,EAAE,MAAM,0BAA0B,CAAC;AAI3D,OAAO,EAAE,IAAI,EAAE,MAAM,WAAW,CAAC;AACjC,OAAO,EAAE,MAAM,EAAE,MAAM,qBAAqB,CAAC;AAE7C,OAAO,EAAE,CAAC,EAAE,MAAM,KAAK,CAAC;AAExB,MAAM,WAAW,UAAU;IACzB,MAAM,CAAC,EAAE,MAAM,EAAE,CAAC;IAClB,KAAK,CAAC,EAAE,MAAM,EAAE,CAAC;IACjB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;CAClC;AAED,OAAO,EAAE,kBAAkB,EAAE,MAAM,mBAAmB,CAAC;AAEvD,qBAAa,IAAI;IAKb,OAAO,CAAC,QAAQ,CAAC,QAAQ;IACzB,OAAO,CAAC,KAAK;IACb,OAAO,CAAC,QAAQ,CAAC,OAAO;IAN1B,OAAO,CAAC,QAAQ,CAAiB;IACjC,OAAO,CAAC,QAAQ,CAAW;gBAGR,QAAQ,EAAE,QAAQ,EAC3B,KAAK,EAAE,MAAM,EACJ,OAAO,GAAE,WAAgB;IAmB5C;;OAEG;IACH,IAAI,OAAO,IAAI,SAAS,OAAO,EAAE,CAEhC;IAED;;OAEG;IACH,IAAI,UAAU,IAAI,KAAK,CAetB;IAED;;;OAGG;IACH,QAAQ,CAAC,IAAI,EAAE,GAAG,GAAG,IAAI;IAIzB;;;;;;;OAOG;IACH,SAAS,CAAC,KAAK,EAAE,CAAC,IAAI,GAAG,GAAG,CAAC,EAAE,EAAE,OAAO,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,OAAO,CAAA;KAAE,GAAG,IAAI;IA2BvE;;;;OAIG;IACH,gBAAgB,CAAC,WAAW,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,OAAO,CAAA;KAAE,GAAG,IAAI;IAmB5E;;OAEG;IACH,gBAAgB,CAAC,WAAW,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,OAAO,CAAA;KAAE,GAAG,IAAI;IAI5E;;;OAGG;IACH,eAAe,CAAC,IAAI,EAAE,MAAM,GAAG,IAAI;IAKnC;;OAEG;IACH,SAAS,CAAC,KAAK,EAAE,MAAM,GAAG,IAAI;IAK9B;;;OAGG;IACH,kBAAkB,CAAC,OAAO,EAAE;QAAE,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;QAAC,cAAc,CAAC,EAAE,GAAG,CAAA;KAAE,GAAG,IAAI;IAU7F;;;OAGG;IACH,UAAU,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,GAAG,IAAI;IAK7C;;;OAGG;IACH,UAAU,CAAC,MAAM,EAAE,MAAM,GAAG,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,GAAG,IAAI,GAAG,IAAI;IAkB9E,YAAY,CAAC,OAAO,EAAE,MAAM,IAAI,GAAG,IAAI;IAKvC,YAAY,CAAC,OAAO,EAAE,CAAC,OAAO,EAAE,kBAAkB,KAAK,IAAI,GAAG,IAAI;IAKlE,UAAU,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,GAAG,KAAK,IAAI,GAAG,IAAI;IAKlD,YAAY,CAAC,OAAO,EAAE,CAAC,MAAM,EAAE,GAAG,KAAK,IAAI,GAAG,IAAI;IAKlD;;OAEG;IACG,GAAG,CAAC,OAAO,EAAE,MAAM,GAAG,GAAG,EAAE,EAAE,OAAO,CAAC,EAAE,UAAU,GAAG,OAAO,CAAC,kBAAkB,CAAC;IAgMrF;;OAEG;IACI,MAAM,CAAC,OAAO,EAAE,MAAM;CAI9B"}
package/dist/chat/Chat.js
CHANGED
@@ -287,7 +287,7 @@ export class Chat {
         this.options.onNewMessage();
         let response = await this.executor.executeChat(executeOptions);
         trackUsage(response.usage);
-        const firstAssistantMessage = new ChatResponseString(response.content ?? "", response.usage ?? { input_tokens: 0, output_tokens: 0, total_tokens: 0 }, this.model);
+        const firstAssistantMessage = new ChatResponseString(response.content ?? "", response.usage ?? { input_tokens: 0, output_tokens: 0, total_tokens: 0 }, this.model, response.reasoning);
         this.messages.push({
             role: "assistant",
             content: firstAssistantMessage,
@@ -337,7 +337,7 @@ export class Chat {
             headers: this.options.headers,
         });
         trackUsage(response.usage);
-        const assistantMessage = new ChatResponseString(response.content ?? "", response.usage ?? { input_tokens: 0, output_tokens: 0, total_tokens: 0 }, this.model);
+        const assistantMessage = new ChatResponseString(response.content ?? "", response.usage ?? { input_tokens: 0, output_tokens: 0, total_tokens: 0 }, this.model, response.reasoning);
         this.messages.push({
             role: "assistant",
             content: assistantMessage,
@@ -348,7 +348,9 @@ export class Chat {
                 this.options.onEndMessage(assistantMessage);
             }
         }
-
+        // For the final return, we might want to aggregate reasoning too if it happened in multiple turns?
+        // Usually reasoning only happens once or we just want the last one.
+        return new ChatResponseString(response.content ?? "", totalUsage, this.model, response.reasoning);
     }
     /**
      * Streams the model's response to a user question.
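All three hunks thread a new fourth argument, `response.reasoning`, into `ChatResponseString`; this is how the `.reasoning` field documented in the README's new Deep Reasoning section reaches callers. The real class lives in `package/dist/chat/ChatResponse.js` (also changed in this release); a minimal sketch of a constructor shape consistent with these call sites, where everything beyond the four positional arguments is an assumption:

```ts
// Sketch only: inferred from the ChatResponseString call sites in the hunks
// above, not copied from the package source.
interface Usage {
  input_tokens: number;
  output_tokens: number;
  total_tokens: number;
}

// Extending String matches the README's "use the response as a string" behavior;
// the actual implementation may differ.
class ChatResponseString extends String {
  constructor(
    public readonly content: string,
    public readonly usage: Usage,
    public readonly model: string,
    public readonly reasoning?: string // new in 0.8.0: the model's thinking text
  ) {
    super(content);
  }
}

// Usage mirroring the diff: reasoning is simply undefined for non-reasoning models.
const res = new ChatResponseString(
  "4",
  { input_tokens: 12, output_tokens: 1, total_tokens: 13 },
  "deepseek-reasoner",
  "2 + 2 = 4"
);
console.log(res.reasoning); // "2 + 2 = 4"
console.log(res.content); // "4"
```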
|