@node-llm/core 1.5.1 → 1.5.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. package/README.md +42 -84
  2. package/dist/chat/Chat.d.ts +9 -21
  3. package/dist/chat/Chat.d.ts.map +1 -1
  4. package/dist/chat/Chat.js +83 -122
  5. package/dist/chat/ChatOptions.d.ts +3 -3
  6. package/dist/chat/ChatOptions.d.ts.map +1 -1
  7. package/dist/chat/ChatStream.d.ts +3 -21
  8. package/dist/chat/ChatStream.d.ts.map +1 -1
  9. package/dist/chat/ChatStream.js +96 -97
  10. package/dist/chat/Content.d.ts +10 -0
  11. package/dist/chat/Content.d.ts.map +1 -1
  12. package/dist/chat/Content.js +34 -1
  13. package/dist/chat/Tool.d.ts +6 -0
  14. package/dist/chat/Tool.d.ts.map +1 -1
  15. package/dist/chat/ToolHandler.d.ts +11 -0
  16. package/dist/chat/ToolHandler.d.ts.map +1 -0
  17. package/dist/chat/ToolHandler.js +42 -0
  18. package/dist/chat/Validation.d.ts +10 -0
  19. package/dist/chat/Validation.d.ts.map +1 -0
  20. package/dist/chat/Validation.js +33 -0
  21. package/dist/config.d.ts +43 -14
  22. package/dist/config.d.ts.map +1 -1
  23. package/dist/config.js +58 -13
  24. package/dist/errors/index.d.ts +8 -0
  25. package/dist/errors/index.d.ts.map +1 -1
  26. package/dist/errors/index.js +13 -0
  27. package/dist/llm.d.ts.map +1 -1
  28. package/dist/llm.js +18 -7
  29. package/dist/providers/gemini/Image.js +1 -1
  30. package/dist/providers/openai/Capabilities.d.ts +1 -0
  31. package/dist/providers/openai/Capabilities.d.ts.map +1 -1
  32. package/dist/providers/openai/Capabilities.js +3 -0
  33. package/dist/providers/openai/Chat.d.ts.map +1 -1
  34. package/dist/providers/openai/Chat.js +13 -4
  35. package/dist/providers/openai/Streaming.d.ts.map +1 -1
  36. package/dist/providers/openai/Streaming.js +7 -2
  37. package/dist/providers/openai/types.d.ts +4 -0
  38. package/dist/providers/openai/types.d.ts.map +1 -1
  39. package/dist/utils/Binary.d.ts.map +1 -1
  40. package/dist/utils/Binary.js +10 -11
  41. package/dist/utils/audio.js +1 -1
  42. package/dist/utils/fetch.d.ts.map +1 -1
  43. package/dist/utils/fetch.js +15 -4
  44. package/dist/utils/logger.js +1 -1
  45. package/package.json +1 -1
package/README.md CHANGED
@@ -1,16 +1,16 @@
  <p align="left">
  <a href="https://node-llm.eshaiju.com/">
- <img src="https://github.com/node-llm/node-llm/raw/main/docs/assets/images/logo.jpg" alt="NodeLLM logo" width="300" />
+ <img src="docs/assets/images/logo.jpg" alt="NodeLLM logo" width="300" />
  </a>
  </p>
 
  # NodeLLM
 
- **An opinionated architectural layer for integrating Large Language Models in Node.js.**
+ **An architectural layer for integrating Large Language Models in Node.js.**
 
  **Provider-agnostic by design.**
 
- Most LLM SDKs **tightly couple** your application to vendors, APIs, and churn. NodeLLM provides a unified, production-oriented API for interacting with over 540+ models across multiple providers (OpenAI, Gemini, Anthropic, DeepSeek, OpenRouter, Ollama, etc.) without the SDK fatigue.
+ Integrating multiple LLM providers often means juggling different SDKs, API styles, and update cycles. NodeLLM provides a single, unified, production-oriented API for interacting with 540+ models across multiple providers (OpenAI, Gemini, Anthropic, DeepSeek, OpenRouter, Ollama, etc.) that stays consistent even when providers change.
 
  <p align="left">
  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/openai.svg" height="28" />
@@ -52,14 +52,14 @@ NodeLLM is **NOT**:
 
  ## 🏗️ Why NodeLLM?
 
- Most AI integrations today are provider-specific, SDK-driven, and leaky at abstraction boundaries. This creates long-term architectural risk. **LLMs should be treated as infrastructure**, and NodeLLM exists to help you integrate them without vendor lock-in.
+ Direct integrations often become tightly coupled to specific providers, making it difficult to adapt as models evolve. **LLMs should be treated as infrastructure**, and NodeLLM helps you build a stable foundation that persists regardless of which model is currently "state of the art."
 
- NodeLLM exists to solve **architectural problems**, not just provide API access. It is the core architectural layer for LLMs in the Node.js ecosystem.
+ NodeLLM helps solve **architectural problems**, not just provide API access. It serves as the core integration layer for LLMs in the Node.js ecosystem.
 
  ### Strategic Goals
  - **Provider Isolation**: Decouple your services from vendor SDKs.
- - **Production-Ready**: Native support for streaming, retries, and unified error handling.
- - **Predictable API**: Consistent behavior for Tools, Vision, and Structured Outputs across all models.
+ - **Production-Ready**: Native support for streaming, automatic retries, and unified error handling.
+ - **Predictable API**: Consistent behavior for Tools, Vision, and Structured Outputs across all models, **now including full parity for streaming**.
 
  ---
 
@@ -68,7 +68,7 @@ NodeLLM exists to solve **architectural problems**, not just provide API access.
  ```ts
  import { NodeLLM } from "@node-llm/core";
 
- // 1. Configure once
+ // 1. Configure once (Safe for ESM + dotenv race conditions)
  NodeLLM.configure({ provider: "openai" });
 
  // 2. Chat (High-level request/response)
@@ -77,29 +77,37 @@ const response = await chat.ask("Explain event-driven architecture");
  console.log(response.content);
 
  // 3. Streaming (Standard AsyncIterator)
+ // NOW with full support for Tools, Vision, and Schemas!
  for await (const chunk of chat.stream("Explain event-driven architecture")) {
  process.stdout.write(chunk.content);
  }
  ```
 
+ ### 🎯 Real-World Example: Brand Perception Checker
+
+ Built with NodeLLM - Multi-provider AI analysis, tool calling, and structured outputs working together:
+
+ <p align="center">
+ <img src="assets/brand-perception-checker.png" alt="Brand Perception Checker" width="800" />
+ </p>
+
+ **[View Example →](examples/brand-perception-checker/)**
+
 
  ---
 
  ## 🔧 Strategic Configuration
 
- NodeLLM provides a flexible configuration system designed for enterprise usage:
+ NodeLLM provides a flexible, **lazy-initialized** configuration system designed for enterprise usage. It is safe for ESM and resolved only when your first request is made, eliminating the common `dotenv` race condition.
 
  ```ts
  // Recommended for multi-provider pipelines
- NodeLLM.configure((config) => {
- config.openaiApiKey = process.env.OPENAI_API_KEY;
- config.anthropicApiKey = process.env.ANTHROPIC_API_KEY;
- config.ollamaApiBase = process.env.OLLAMA_API_BASE;
+ NodeLLM.configure({
+ openaiApiKey: process.env.OPENAI_API_KEY,
+ anthropicApiKey: process.env.ANTHROPIC_API_KEY,
+ ollamaApiBase: process.env.OLLAMA_API_BASE,
  });
 
- // Switch providers at the framework level
- NodeLLM.configure({ provider: "anthropic" });
-
  // Support for Custom Endpoints (e.g., Azure or LocalAI)
  NodeLLM.configure({
  openaiApiKey: process.env.AZURE_KEY,
@@ -107,7 +115,7 @@ NodeLLM.configure({
  });
  ```
 
- **[Full Configuration Guide →](https://node-llm.eshaiju.com/getting-started/configuration)**
+ **[Full Configuration Guide →](docs/getting_started/configuration.md)**
 
  ---
 
@@ -123,7 +131,7 @@ await chat.ask("Hello world");
  ```
 
  ### 👁️ Smart Vision & Files
- Pass images, PDFs, or audio files directly. We handle the heavy lifting: fetching remote URLs, base64 encoding, and MIME type mapping.
+ Pass images, PDFs, or audio files directly to **both `ask()` and `stream()`**. We handle the heavy lifting: fetching remote URLs, base64 encoding, and MIME type mapping.
  ```ts
  await chat.ask("Analyze this interface", {
  files: ["./screenshot.png", "https://example.com/spec.pdf"]
@@ -142,42 +150,18 @@ class WeatherTool extends Tool {
  description = "Get current weather";
  schema = z.object({ location: z.string() });
 
- async handler({ location }) {
+ async execute({ location }) {
  return `Sunny in ${location}`;
  }
  }
 
  // Now the model can use it automatically
  await chat.withTool(WeatherTool).ask("What's the weather in Tokyo?");
- ```
 
- ### 🛡️ Loop Protection & Resource Limits
- Prevent runaway costs, infinite loops, and hanging requests with comprehensive protection against resource exhaustion.
-
- NodeLLM provides **defense-in-depth** security that you can configure globally or per-request:
-
- ```ts
- // 1. Global config
- NodeLLM.configure({
- requestTimeout: 30000, // Timeout requests after 30 seconds (default)
- maxToolCalls: 5, // Stop after 5 sequential tool execution turns
- maxRetries: 2, // Retry provider-level errors up to 2 times
- maxTokens: 4096 // Limit output to 4K tokens (default)
- });
-
- // 2. Per request override
- await chat.ask("Deep search task", {
- requestTimeout: 120000, // 2 minutes for this request
- maxToolCalls: 10,
- maxTokens: 8192 // 8K tokens for this request
- });
+ // Lifecycle Hooks for Error & Flow Control
+ chat.onToolCallError((call, err) => "STOP");
  ```
-
- **Security Benefits:**
- - **`requestTimeout`**: Prevents DoS attacks and hanging requests
- - **`maxToolCalls`**: Prevents infinite tool execution loops
- - **`maxRetries`**: Prevents retry storms during outages
- - **`maxTokens`**: Prevents excessive output and cost overruns
+ **[Full Tool Calling Guide →](https://node-llm.eshaiju.com/core-features/tool-calling)**
 
  ### 🔍 Comprehensive Debug Logging
  Enable detailed logging for all API requests and responses across every feature and provider:
@@ -193,42 +177,6 @@ process.env.NODELLM_DEBUG = "true";
  ```
  **Covers:** Chat, Streaming, Images, Embeddings, Transcription, Moderation - across all providers!
 
- ### 🛡️ Content Policy Hooks
- NodeLLM provides pluggable hooks to implement custom security, compliance, and moderation logic. Instead of hard-coded rules, you can inject your own policies at the edge.
-
- - **`beforeRequest()`**: Intercept and modify messages before they hit the LLM (e.g., PII detection/redaction).
- - **`afterResponse()`**: Process the final response before it returns to your code (e.g., output masking or compliance checks).
-
- ```ts
- chat
- .beforeRequest(async (messages) => {
- // Detect PII and redact
- return redactSSN(messages);
- })
- .afterResponse(async (response) => {
- // Ensure output compliance
- return response.withContent(maskSensitiveData(response.content));
- });
- ```
-
- ### 🧱 Smart Context Isolation
- Stop worrying about prompt injection or instruction drift. NodeLLM automatically separates system instructions from the conversation history, providing a higher level of protection and strictness.
-
- - **Zero-Config Security**: Enabled by default for all chats. No special flags required.
- - **Smart Model Mapping**: Automatically uses OpenAI's modern `developer` role for compatible models (GPT-4o, o1, o3) while safely falling back to the standard `system` role for older or local models (Ollama, DeepSeek, etc.).
- - **Universal Context**: Instructions stay separated internally, ensuring they are always prioritized by the model and never accidentally overridden by user messages.
- - **Provider Agnostic**: Write instructions once; NodeLLM handles the specific role requirements for every major provider (OpenAI, Anthropic, Gemini).
-
- ### 🔍 Observability & Tool Auditing
- For enterprise compliance, NodeLLM provides deep visibility into the tool execution lifecycle. You can monitor, log, and audit every step of a tool's execution.
-
- ```ts
- chat
- .onToolCallStart((call) => log(`Starting tool: ${call.function.name}`))
- .onToolCallEnd((call, res) => log(`Tool ${call.id} finished with: ${res}`))
- .onToolCallError((call, err) => alert(`Tool ${call.function.name} failed: ${err.message}`));
- ```
-
  ### ✨ Structured Output
  Get type-safe, validated JSON back using **Zod** schemas.
  ```ts
@@ -285,7 +233,7 @@ console.log(res.reasoning); // Chain-of-thought
 
  | Provider | Supported Features |
  | :--- | :--- |
- | <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/openai.svg" height="18"> **OpenAI** | Chat, **Streaming + Tools**, Vision, Audio, Images, Transcription, **Reasoning**, **Smart Developer Role** |
+ | <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/openai.svg" height="18"> **OpenAI** | Chat, **Streaming + Tools**, Vision, Audio, Images, Transcription, **Reasoning** |
  | <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/gemini-color.svg" height="18"> **Gemini** | Chat, **Streaming + Tools**, Vision, Audio, Video, Embeddings |
  | <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/anthropic-text.svg" height="12"> **Anthropic** | Chat, **Streaming + Tools**, Vision, PDF, Structured Output |
  | <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/deepseek-color.svg" height="18"> **DeepSeek** | Chat (V3), **Reasoning (R1)**, **Streaming + Tools** |
@@ -302,11 +250,21 @@ npm install @node-llm/core
 
  **[View Full Documentation ↗](https://node-llm.eshaiju.com/)**
 
+ ### 🍿 Try the Live Demo
+ Want to see it in action? Run this in your terminal:
+ ```bash
+ git clone https://github.com/node-llm/node-llm.git
+ cd node-llm
+ npm install
+ npm run demo
+ ```
+
+
  ---
 
  ## 🤝 Contributing
 
- We welcome contributions! Please see our **[Contributing Guide](https://github.com/node-llm/node-llm/blob/main/CONTRIBUTING.md)** for more details on how to get started.
+ We welcome contributions! Please see our **[Contributing Guide](CONTRIBUTING.md)** for more details on how to get started.
 
  ---
 
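The README's new one-liner (`chat.onToolCallError((call, err) => "STOP")`) is terse, so here is a slightly fuller sketch of the directive-based hook, based on the typings and the Chat.js tool loop shown later in this diff. It assumes an existing `chat` instance; the rate-limit and tool-name checks are illustrative, not part of the package:

```ts
chat.onToolCallError(async (toolCall, error) => {
  // 'CONTINUE': record the failure as a tool message and keep looping.
  if (error.message.includes("rate limit")) return "CONTINUE";
  // 'STOP': rethrow immediately and abort the tool loop.
  if (toolCall.function.name === "charge_card") return "STOP";
  // Returning nothing falls through to the default logic, which rethrows
  // only fatal errors (error.fatal, HTTP 401/403) and logs the rest.
});
```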
package/dist/chat/Chat.d.ts CHANGED
@@ -1,8 +1,9 @@
  import { Message } from "./Message.js";
+ import { ContentPart } from "./Content.js";
  import { ChatOptions } from "./ChatOptions.js";
  import { Provider, Usage, ChatChunk } from "../providers/Provider.js";
  import { Stream } from "../streaming/Stream.js";
- import { Tool } from "./Tool.js";
+ import { ToolResolvable } from "./Tool.js";
  import { Schema } from "../schema/Schema.js";
  import { z } from "zod";
  import { ToolExecutionMode } from "../constants.js";
@@ -14,6 +15,7 @@ export interface AskOptions {
  headers?: Record<string, string>;
  maxToolCalls?: number;
  requestTimeout?: number;
+ signal?: AbortSignal;
  }
  import { ChatResponseString } from "./ChatResponse.js";
  export declare class Chat {
@@ -40,7 +42,7 @@ export declare class Chat {
  * Add a tool to the chat session (fluent API)
  * Supports passing a tool instance or a tool class (which will be instantiated).
  */
- withTool(tool: any): this;
+ withTool(tool: ToolResolvable): this;
  /**
  * Add multiple tools to the chat session.
  * Supports passing Tool classes (which will be instantiated) or instances.
@@ -49,9 +51,7 @@ export declare class Chat {
  * @example
  * chat.withTools([WeatherTool, new CalculatorTool()], { replace: true });
  */
- withTools(tools: (Tool | {
- new (): Tool;
- } | any)[], options?: {
+ withTools(tools: ToolResolvable[], options?: {
  replace?: boolean;
  }): this;
  /**
@@ -107,10 +107,7 @@ export declare class Chat {
  * Called when a tool call ends successfully.
  */
  onToolCallEnd(handler: (toolCall: any, result: any) => void): this;
- /**
- * Called when a tool call fails.
- */
- onToolCallError(handler: (toolCall: any, error: Error) => void): this;
+ onToolCallError(handler: (toolCall: any, error: Error) => 'STOP' | 'CONTINUE' | void | Promise<'STOP' | 'CONTINUE' | void>): this;
  /**
  * Set the tool execution mode.
  * - "auto": (Default) Automatically execute all tool calls.
@@ -140,19 +137,10 @@ export declare class Chat {
  /**
  * Streams the model's response to a user question.
  */
- stream(content: string): Stream<ChatChunk>;
- /**
- * Check if tool execution should proceed based on the current mode.
- */
- private shouldExecuteTools;
- /**
- * Request user confirmation for a tool call in "confirm" mode.
- * Returns true if approved, false if rejected.
- */
- private requestToolConfirmation;
+ stream(content: string | ContentPart[], options?: AskOptions): Stream<ChatChunk>;
  /**
- * Execute a single tool call and return the result message.
+ * Normalizes a ToolResolvable into a ToolDefinition.
  */
- private executeToolCall;
+ private normalizeTool;
  }
  //# sourceMappingURL=Chat.d.ts.map
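A usage sketch implied by the declarations above (not an excerpt from the package): `AskOptions` now carries an optional `signal`, and `stream()` accepts options alongside multimodal content, so a stream can be cancelled with a standard `AbortController`. The prompt and timeout are illustrative:

```ts
const controller = new AbortController();
const timer = setTimeout(() => controller.abort(), 10_000); // illustrative 10s cap

try {
  for await (const chunk of chat.stream("Summarize this spec", { signal: controller.signal })) {
    process.stdout.write(chunk.content);
  }
} finally {
  clearTimeout(timer);
}
```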
package/dist/chat/Chat.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"Chat.d.ts","sourceRoot":"","sources":["../../src/chat/Chat.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,OAAO,EAAE,MAAM,cAAc,CAAC;AACvC,OAAO,EAAE,WAAW,EAAE,MAAM,kBAAkB,CAAC;AAC/C,OAAO,EAAE,QAAQ,EAAE,KAAK,EAAE,SAAS,EAAE,MAAM,0BAA0B,CAAC;AAGtE,OAAO,EAAE,MAAM,EAAE,MAAM,wBAAwB,CAAC;AAChD,OAAO,EAAE,IAAI,EAAkB,MAAM,WAAW,CAAC;AACjD,OAAO,EAAE,MAAM,EAAE,MAAM,qBAAqB,CAAC;AAE7C,OAAO,EAAE,CAAC,EAAE,MAAM,KAAK,CAAC;AAExB,OAAO,EAAE,iBAAiB,EAAE,MAAM,iBAAiB,CAAC;AAEpD,MAAM,WAAW,UAAU;IACzB,MAAM,CAAC,EAAE,MAAM,EAAE,CAAC;IAClB,KAAK,CAAC,EAAE,MAAM,EAAE,CAAC;IACjB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IACjC,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,cAAc,CAAC,EAAE,MAAM,CAAC;CACzB;AAED,OAAO,EAAE,kBAAkB,EAAE,MAAM,mBAAmB,CAAC;AAEvD,qBAAa,IAAI;IAMb,OAAO,CAAC,QAAQ,CAAC,QAAQ;IACzB,OAAO,CAAC,KAAK;IACb,OAAO,CAAC,QAAQ,CAAC,OAAO;IAP1B,OAAO,CAAC,QAAQ,CAAiB;IACjC,OAAO,CAAC,cAAc,CAAiB;IACvC,OAAO,CAAC,QAAQ,CAAW;gBAGR,QAAQ,EAAE,QAAQ,EAC3B,KAAK,EAAE,MAAM,EACJ,OAAO,GAAE,WAAgB,EAC1C,WAAW,GAAE;QAAE,QAAQ,EAAE,MAAM,CAAC;QAAC,OAAO,EAAE,MAAM,CAAA;KAAgC;IA0BlF;;OAEG;IACH,IAAI,OAAO,IAAI,SAAS,OAAO,EAAE,CAEhC;IAED,IAAI,OAAO,IAAI,MAAM,CAEpB;IAED;;OAEG;IACH,IAAI,UAAU,IAAI,KAAK,CAetB;IAED;;;OAGG;IACH,QAAQ,CAAC,IAAI,EAAE,GAAG,GAAG,IAAI;IAIvB;;;;;;;KAOC;IACH,SAAS,CAAC,KAAK,EAAE,CAAC,IAAI,GAAG;QAAE,QAAO,IAAI,CAAA;KAAE,GAAG,GAAG,CAAC,EAAE,EAAE,OAAO,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,OAAO,CAAA;KAAE,GAAG,IAAI;IAmCzF;;;;OAIG;IACH,gBAAgB,CAAC,WAAW,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,OAAO,CAAA;KAAE,GAAG,IAAI;IAW5E;;OAEG;IACH,gBAAgB,CAAC,WAAW,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,OAAO,CAAA;KAAE,GAAG,IAAI;IAI5E;;;OAGG;IACH,eAAe,CAAC,IAAI,EAAE,MAAM,GAAG,IAAI;IAKnC;;OAEG;IACH,SAAS,CAAC,KAAK,EAAE,MAAM,GAAG,IAAI;IAK9B;;;OAGG;IACH,kBAAkB,CAAC,OAAO,EAAE;QAAE,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;QAAC,cAAc,CAAC,EAAE,GAAG,CAAA;KAAE,GAAG,IAAI;IAU7F;;;OAGG;IACH,UAAU,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,GAAG,IAAI;IAK7C;;;OAGG;IACH,UAAU,CAAC,MAAM,EAAE,MAAM,GAAG,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,GAAG,IAAI,GAAG,IAAI;IAkB9E,YAAY,CAAC,OAAO,EAAE,MAAM,IAAI,GAAG,IAAI;IAKvC,YAAY,CAAC,OAAO,EAAE,CAAC,OAAO,EAAE,kBAAkB,KAAK,IAAI,GAAG,IAAI;IAKlE,UAAU,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,GAAG,KAAK,IAAI,GAAG,IAAI;IAIlD,YAAY,CAAC,OAAO,EAAE,CAAC,MAAM,EAAE,GAAG,KAAK,IAAI,GAAG,IAAI;IAIlD;;OAEG;IACH,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,GAAG,KAAK,IAAI,GAAG,IAAI;IAKvD;;OAEG;IACH,aAAa,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,GAAG,EAAE,MAAM,EAAE,GAAG,KAAK,IAAI,GAAG,IAAI;IAKlE;;OAEG;IACH,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,GAAG,EAAE,KAAK,EAAE,KAAK,KAAK,IAAI,GAAG,IAAI;IAKrE;;;;;OAKG;IACH,iBAAiB,CAAC,IAAI,EAAE,iBAAiB,GAAG,IAAI;IAKhD;;;OAGG;IACH,iBAAiB,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,GAAG,KAAK,OAAO,CAAC,OAAO,CAAC,GAAG,OAAO,GAAG,IAAI;IAK/E;;;OAGG;IACH,aAAa,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,OAAO,EAAE,KAAK,OAAO,CAAC,OAAO,EAAE,GAAG,IAAI,CAAC,GAAG,IAAI;IAKhF;;;OAGG;IACH,aAAa,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,kBAAkB,KAAK,OAAO,CAAC,kBAAkB,GAAG,IAAI,CAAC,GAAG,IAAI;IAKlG;;OAEG;IACG,GAAG,CAAC,OAAO,EAAE,MAAM,GAAG,GAAG,EAAE,EAAE,OAAO,CAAC,EAAE,UAAU,GAAG,OAAO,CAAC,kBAAkB,CAAC;IA0OrF;;OAEG;IACH,MAAM,CAAC,OAAO,EAAE,MAAM,GAAG,MAAM,CAAC,SAAS,CAAC;IAK1C;;OAEG;IACH,OAAO,CAAC,kBAAkB;IAM1B;;;OAGG;YACW,uBAAuB;IASrC;;OAEG;YACW,eAAe;CA2C9B"}
+ {"version":3,"file":"Chat.d.ts","sourceRoot":"","sources":["../../src/chat/Chat.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,OAAO,EAAE,MAAM,cAAc,CAAC;AACvC,OAAO,EAAE,WAAW,EAA4C,MAAM,cAAc,CAAC;AACrF,OAAO,EAAE,WAAW,EAAE,MAAM,kBAAkB,CAAC;AAC/C,OAAO,EAAE,QAAQ,EAAE,KAAK,EAAE,SAAS,EAAE,MAAM,0BAA0B,CAAC;AAGtE,OAAO,EAAE,MAAM,EAAE,MAAM,wBAAwB,CAAC;AAChD,OAAO,EAAwB,cAAc,EAAE,MAAM,WAAW,CAAC;AACjE,OAAO,EAAE,MAAM,EAAE,MAAM,qBAAqB,CAAC;AAE7C,OAAO,EAAE,CAAC,EAAE,MAAM,KAAK,CAAC;AAExB,OAAO,EAAE,iBAAiB,EAAE,MAAM,iBAAiB,CAAC;AAMpD,MAAM,WAAW,UAAU;IACzB,MAAM,CAAC,EAAE,MAAM,EAAE,CAAC;IAClB,KAAK,CAAC,EAAE,MAAM,EAAE,CAAC;IACjB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IACjC,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,MAAM,CAAC,EAAE,WAAW,CAAC;CACtB;AAED,OAAO,EAAE,kBAAkB,EAAE,MAAM,mBAAmB,CAAC;AAEvD,qBAAa,IAAI;IAMb,OAAO,CAAC,QAAQ,CAAC,QAAQ;IACzB,OAAO,CAAC,KAAK;IACb,OAAO,CAAC,QAAQ,CAAC,OAAO;IAP1B,OAAO,CAAC,QAAQ,CAAiB;IACjC,OAAO,CAAC,cAAc,CAAiB;IACvC,OAAO,CAAC,QAAQ,CAAW;gBAGR,QAAQ,EAAE,QAAQ,EAC3B,KAAK,EAAE,MAAM,EACJ,OAAO,GAAE,WAAgB,EAC1C,WAAW,GAAE;QAAE,QAAQ,EAAE,MAAM,CAAC;QAAC,OAAO,EAAE,MAAM,CAAA;KAAgC;IAgClF;;OAEG;IACH,IAAI,OAAO,IAAI,SAAS,OAAO,EAAE,CAEhC;IAED,IAAI,OAAO,IAAI,MAAM,CAEpB;IAED;;OAEG;IACH,IAAI,UAAU,IAAI,KAAK,CAetB;IAED;;;OAGG;IACH,QAAQ,CAAC,IAAI,EAAE,cAAc,GAAG,IAAI;IAIlC;;;;;;;KAOC;IACH,SAAS,CAAC,KAAK,EAAE,cAAc,EAAE,EAAE,OAAO,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,OAAO,CAAA;KAAE,GAAG,IAAI;IAkBzE;;;;OAIG;IACH,gBAAgB,CAAC,WAAW,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,OAAO,CAAA;KAAE,GAAG,IAAI;IAW5E;;OAEG;IACH,gBAAgB,CAAC,WAAW,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,OAAO,CAAA;KAAE,GAAG,IAAI;IAI5E;;;OAGG;IACH,eAAe,CAAC,IAAI,EAAE,MAAM,GAAG,IAAI;IAKnC;;OAEG;IACH,SAAS,CAAC,KAAK,EAAE,MAAM,GAAG,IAAI;IAK9B;;;OAGG;IACH,kBAAkB,CAAC,OAAO,EAAE;QAAE,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;QAAC,cAAc,CAAC,EAAE,GAAG,CAAA;KAAE,GAAG,IAAI;IAU7F;;;OAGG;IACH,UAAU,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,GAAG,IAAI;IAK7C;;;OAGG;IACH,UAAU,CAAC,MAAM,EAAE,MAAM,GAAG,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,GAAG,IAAI,GAAG,IAAI;IAkB9E,YAAY,CAAC,OAAO,EAAE,MAAM,IAAI,GAAG,IAAI;IAKvC,YAAY,CAAC,OAAO,EAAE,CAAC,OAAO,EAAE,kBAAkB,KAAK,IAAI,GAAG,IAAI;IAKlE,UAAU,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,GAAG,KAAK,IAAI,GAAG,IAAI;IAIlD,YAAY,CAAC,OAAO,EAAE,CAAC,MAAM,EAAE,GAAG,KAAK,IAAI,GAAG,IAAI;IAIlD;;OAEG;IACH,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,GAAG,KAAK,IAAI,GAAG,IAAI;IAKvD;;OAEG;IACH,aAAa,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,GAAG,EAAE,MAAM,EAAE,GAAG,KAAK,IAAI,GAAG,IAAI;IAKlE,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,GAAG,EAAE,KAAK,EAAE,KAAK,KAAK,MAAM,GAAG,UAAU,GAAG,IAAI,GAAG,OAAO,CAAC,MAAM,GAAG,UAAU,GAAG,IAAI,CAAC,GAAG,IAAI;IAKjI;;;;;OAKG;IACH,iBAAiB,CAAC,IAAI,EAAE,iBAAiB,GAAG,IAAI;IAKhD;;;OAGG;IACH,iBAAiB,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,GAAG,KAAK,OAAO,CAAC,OAAO,CAAC,GAAG,OAAO,GAAG,IAAI;IAK/E;;;OAGG;IACH,aAAa,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,OAAO,EAAE,KAAK,OAAO,CAAC,OAAO,EAAE,GAAG,IAAI,CAAC,GAAG,IAAI;IAKhF;;;OAGG;IACH,aAAa,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,kBAAkB,KAAK,OAAO,CAAC,kBAAkB,GAAG,IAAI,CAAC,GAAG,IAAI;IAKlG;;OAEG;IACG,GAAG,CAAC,OAAO,EAAE,MAAM,GAAG,GAAG,EAAE,EAAE,OAAO,CAAC,EAAE,UAAU,GAAG,OAAO,CAAC,kBAAkB,CAAC;IAoOrF;;OAEG;IACH,MAAM,CAAC,OAAO,EAAE,MAAM,GAAG,WAAW,EAAE,EAAE,OAAO,GAAE,UAAe,GAAG,MAAM,CAAC,SAAS,CAAC;IAKpF;;OAEG;IACH,OAAO,CAAC,aAAa;CAuCtB"}
package/dist/chat/Chat.js CHANGED
@@ -1,4 +1,5 @@
  import { FileLoader } from "../utils/FileLoader.js";
+ import { isBinaryContent, formatMultimodalContent } from "./Content.js";
  import { Executor } from "../executor/Executor.js";
  import { ChatStream } from "./ChatStream.js";
  import { Schema } from "../schema/Schema.js";
@@ -6,6 +7,10 @@ import { toJsonSchema } from "../schema/to-json-schema.js";
  import { z } from "zod";
  import { config } from "../config.js";
  import { ToolExecutionMode } from "../constants.js";
+ import { ConfigurationError } from "../errors/index.js";
+ import { ChatValidator } from "./Validation.js";
+ import { ToolHandler } from "./ToolHandler.js";
+ import { logger } from "../utils/logger.js";
  import { ChatResponseString } from "./ChatResponse.js";
  export class Chat {
  provider;
@@ -35,6 +40,11 @@ export class Chat {
  if (!this.options.toolExecution) {
  this.options.toolExecution = config.toolExecution || ToolExecutionMode.AUTO;
  }
+ if (options.tools) {
+ const toolList = options.tools;
+ this.options.tools = []; // Clear and re-add via normalized method
+ this.withTools(toolList);
+ }
  }
  /**
  * Read-only access to message history
@@ -84,27 +94,9 @@ export class Chat {
  this.options.tools = [];
  }
  for (const tool of tools) {
- let toolInstance;
- // Handle class constructor
- if (typeof tool === "function") {
- try {
- toolInstance = new tool();
- }
- catch (e) {
- console.error(`[NodeLLM] Failed to instantiate tool class: ${tool.name}`, e);
- continue;
- }
- }
- else {
- toolInstance = tool;
- }
- // Normalized to standard ToolDefinition interface if it's a Tool class instance
- if (toolInstance && typeof toolInstance.toLLMTool === "function") {
- this.options.tools.push(toolInstance.toLLMTool());
- }
- else {
- // Fallback for legacy raw tool objects (defined as objects with type: 'function')
- this.options.tools.push(toolInstance);
+ const normalized = this.normalizeTool(tool);
+ if (normalized) {
+ this.options.tools.push(normalized);
  }
  }
  return this;
@@ -213,9 +205,6 @@ export class Chat {
  this.options.onToolCallEnd = handler;
  return this;
  }
- /**
- * Called when a tool call fails.
- */
  onToolCallError(handler) {
  this.options.onToolCallError = handler;
  return this;
@@ -261,41 +250,13 @@ export class Chat {
  let messageContent = content;
  const files = [...(options?.images ?? []), ...(options?.files ?? [])];
  if (files.length > 0) {
- const processedFiles = await Promise.all(files.map(f => FileLoader.load(f)));
- const hasBinary = processedFiles.some(p => p.type === "image_url" || p.type === "input_audio" || p.type === "video_url");
- if (hasBinary && !this.options.assumeModelExists && this.provider.capabilities && !this.provider.capabilities.supportsVision(this.model)) {
- throw new Error(`Model ${this.model} does not support vision/binary files.`);
- }
- if (hasBinary && this.options.assumeModelExists) {
- console.warn(`[NodeLLM] Skipping vision capability validation for model ${this.model}`);
- }
- // Separate text files from binary files
- const textFiles = processedFiles.filter(p => p.type === "text");
- const binaryFiles = processedFiles.filter(p => p.type !== "text");
- // Concatenate text files into the main content
- let fullText = content;
- if (textFiles.length > 0) {
- fullText += "\n" + textFiles.map(f => f.text).join("\n");
- }
- // If we have binary files, create multimodal content
- if (binaryFiles.length > 0) {
- messageContent = [
- { type: "text", text: fullText },
- ...binaryFiles
- ];
- }
- else {
- // Only text files, keep as string
- messageContent = fullText;
- }
+ const processedFiles = await Promise.all(files.map((f) => FileLoader.load(f)));
+ const hasBinary = processedFiles.some(isBinaryContent);
+ ChatValidator.validateVision(this.provider, this.model, hasBinary, this.options);
+ messageContent = formatMultimodalContent(content, processedFiles);
  }
  if (this.options.tools && this.options.tools.length > 0) {
- if (!this.options.assumeModelExists && this.provider.capabilities && !this.provider.capabilities.supportsTools(this.model)) {
- throw new Error(`Model ${this.model} does not support tool calling.`);
- }
- if (this.options.assumeModelExists) {
- console.warn(`[NodeLLM] Skipping tool capability validation for model ${this.model}`);
- }
+ ChatValidator.validateTools(this.provider, this.model, true, this.options);
  }
  this.messages.push({
  role: "user",
@@ -304,12 +265,7 @@ export class Chat {
  // Process Schema/Structured Output
  let responseFormat = this.options.responseFormat;
  if (this.options.schema) {
- if (!this.options.assumeModelExists && this.provider.capabilities && !this.provider.capabilities.supportsStructuredOutput(this.model)) {
- throw new Error(`Model ${this.model} does not support structured output.`);
- }
- if (this.options.assumeModelExists) {
- console.warn(`[NodeLLM] Skipping structured output capability validation for model ${this.model}`);
- }
+ ChatValidator.validateStructuredOutput(this.provider, this.model, true, this.options);
  const jsonSchema = toJsonSchema(this.options.schema.definition.schema);
  responseFormat = {
  type: "json_schema",
@@ -330,6 +286,7 @@ export class Chat {
  headers: { ...this.options.headers, ...options?.headers },
  response_format: responseFormat, // Pass to provider
  requestTimeout: options?.requestTimeout ?? this.options.requestTimeout ?? config.requestTimeout,
+ signal: options?.signal,
  ...this.options.params,
  };
  // --- Content Policy Hooks (Input) ---
@@ -381,7 +338,7 @@ export class Chat {
  let stepCount = 0;
  while (response.tool_calls && response.tool_calls.length > 0) {
  // Dry-run mode: stop after proposing tools
- if (!this.shouldExecuteTools(response.tool_calls, this.options.toolExecution)) {
+ if (!ToolHandler.shouldExecuteTools(response.tool_calls, this.options.toolExecution)) {
  break;
  }
  stepCount++;
@@ -389,9 +346,9 @@ export class Chat {
  throw new Error(`[NodeLLM] Maximum tool execution calls (${maxToolCalls}) exceeded.`);
  }
  for (const toolCall of response.tool_calls) {
- // Confirm mode: request approval
+ // Human-in-the-loop: check for approval
  if (this.options.toolExecution === ToolExecutionMode.CONFIRM) {
- const approved = await this.requestToolConfirmation(toolCall, this.options.onConfirmToolCall);
+ const approved = await ToolHandler.requestToolConfirmation(toolCall, this.options.onConfirmToolCall);
  if (!approved) {
  this.messages.push({
  role: "tool",
@@ -401,16 +358,42 @@ export class Chat {
  continue;
  }
  }
- // Execute the tool
- const toolResult = await this.executeToolCall(toolCall, this.options.tools, this.options.onToolCallStart, this.options.onToolCallEnd, this.options.onToolCallError);
- this.messages.push(toolResult);
+ try {
+ const toolResult = await ToolHandler.execute(toolCall, this.options.tools, this.options.onToolCallStart, this.options.onToolCallEnd);
+ this.messages.push(toolResult);
+ }
+ catch (error) {
+ const directive = await this.options.onToolCallError?.(toolCall, error);
+ if (directive === 'STOP') {
+ throw error;
+ }
+ this.messages.push({
+ role: "tool",
+ tool_call_id: toolCall.id,
+ content: `Fatal error executing tool '${toolCall.function.name}': ${error.message}`,
+ });
+ if (directive === 'CONTINUE') {
+ continue;
+ }
+ // Default short-circuit logic
+ const isFatal = error.fatal === true || error.status === 401 || error.status === 403;
+ if (isFatal) {
+ throw error;
+ }
+ logger.error(`Tool execution failed for '${toolCall.function.name}':`, error);
+ }
  }
  response = await this.executor.executeChat({
  model: this.model,
  messages: [...this.systemMessages, ...this.messages],
  tools: this.options.tools,
+ temperature: options?.temperature ?? this.options.temperature,
+ max_tokens: options?.maxTokens ?? this.options.maxTokens ?? config.maxTokens,
  headers: this.options.headers,
+ response_format: responseFormat,
  requestTimeout: options?.requestTimeout ?? this.options.requestTimeout ?? config.requestTimeout,
+ signal: options?.signal,
+ ...this.options.params,
  });
  trackUsage(response.usage);
  assistantMessage = new ChatResponseString(response.content ?? "", response.usage ?? { input_tokens: 0, output_tokens: 0, total_tokens: 0 }, this.model, this.provider.id, response.reasoning);
@@ -438,68 +421,46 @@ export class Chat {
  /**
  * Streams the model's response to a user question.
  */
- stream(content) {
+ stream(content, options = {}) {
  const streamer = new ChatStream(this.provider, this.model, this.options, this.messages, this.systemMessages);
- return streamer.create(content);
- }
- /**
- * Check if tool execution should proceed based on the current mode.
- */
- shouldExecuteTools(toolCalls, mode) {
- if (!toolCalls || toolCalls.length === 0)
- return false;
- if (mode === ToolExecutionMode.DRY_RUN)
- return false;
- return true;
+ return streamer.create(content, options);
  }
  /**
- * Request user confirmation for a tool call in "confirm" mode.
- * Returns true if approved, false if rejected.
+ * Normalizes a ToolResolvable into a ToolDefinition.
  */
- async requestToolConfirmation(toolCall, onConfirm) {
- if (!onConfirm)
- return true;
- const confirmed = await onConfirm(toolCall);
- return confirmed !== false;
- }
- /**
- * Execute a single tool call and return the result message.
- */
- async executeToolCall(toolCall, tools, onStart, onEnd, onError) {
- if (onStart)
- onStart(toolCall);
- const tool = tools?.find((t) => t.function.name === toolCall.function.name);
- if (tool?.handler) {
+ normalizeTool(tool) {
+ let toolInstance;
+ // Handle class constructor
+ if (typeof tool === "function") {
  try {
- const args = JSON.parse(toolCall.function.arguments);
- const result = await tool.handler(args);
- if (onEnd)
- onEnd(toolCall, result);
- return {
- role: "tool",
- tool_call_id: toolCall.id,
- content: result,
- };
+ toolInstance = new tool();
  }
- catch (error) {
- if (onError)
- onError(toolCall, error);
- return {
- role: "tool",
- tool_call_id: toolCall.id,
- content: `Error executing tool: ${error.message}`,
- };
+ catch (e) {
+ logger.error(`Failed to instantiate tool class: ${tool.name}`, e);
+ return null;
  }
  }
  else {
- const error = new Error("Tool not found or no handler provided");
- if (onError)
- onError(toolCall, error);
- return {
- role: "tool",
- tool_call_id: toolCall.id,
- content: "Error: Tool not found or no handler provided",
- };
+ toolInstance = tool;
+ }
+ if (!toolInstance)
+ return null;
+ // Normalized to standard ToolDefinition interface if it's a Tool class instance
+ if (typeof toolInstance.toLLMTool === "function") {
+ return toolInstance.toLLMTool();
+ }
+ if (!toolInstance.function || !toolInstance.function.name) {
+ // 1. Validate structure
+ throw new ConfigurationError(`[NodeLLM] Tool validation failed: 'function.name' is required for raw tool objects.`);
+ }
+ if (toolInstance.type !== 'function') {
+ // 2. Ensure 'type: function' exists (standardize for providers)
+ toolInstance.type = 'function';
+ }
+ if (typeof toolInstance.handler !== 'function') {
+ // 3. Validate handler existence
+ throw new ConfigurationError(`[NodeLLM] Tool validation failed: Tool '${toolInstance.function.name}' must have a 'handler' function. (Note: Only Tool subclasses use 'execute()')`);
  }
+ return toolInstance;
  }
  }
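Reading `normalizeTool()` above: a raw tool object (as opposed to a `Tool` subclass, which implements `execute()` and is converted via `toLLMTool()`) must carry `function.name` and a `handler` function, and `type` is coerced to `"function"` if missing. A hedged sketch of such an object follows; the JSON Schema `parameters` shape and the handler body are illustrative assumptions not shown in this diff:

```ts
const weatherTool = {
  type: "function",
  function: {
    name: "get_weather",
    description: "Get the current weather for a location",
    // Assumed OpenAI-style JSON Schema parameters.
    parameters: {
      type: "object",
      properties: { location: { type: "string" } },
      required: ["location"],
    },
  },
  // Receives the parsed tool-call arguments; the returned string becomes
  // the tool message content (per the pre-1.5.4 executeToolCall shown above).
  handler: async ({ location }) => `Sunny in ${location}`,
};

chat.withTool(weatherTool);
```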
package/dist/chat/ChatOptions.d.ts CHANGED
@@ -1,19 +1,19 @@
  import { Message } from "./Message.js";
- import { ToolDefinition } from "./Tool.js";
+ import { ToolResolvable } from "./Tool.js";
  import { Schema } from "../schema/Schema.js";
  import { ChatResponseString } from "./ChatResponse.js";
  import { ToolExecutionMode } from "../constants.js";
  export interface ChatOptions {
  systemPrompt?: string;
  messages?: Message[];
- tools?: ToolDefinition[];
+ tools?: ToolResolvable[];
  temperature?: number;
  maxTokens?: number;
  onNewMessage?: () => void;
  onEndMessage?: (message: any) => void;
  onToolCallStart?: (toolCall: any) => void;
  onToolCallEnd?: (toolCall: any, result: any) => void;
- onToolCallError?: (toolCall: any, error: Error) => void;
+ onToolCallError?: (toolCall: any, error: Error) => 'STOP' | 'CONTINUE' | void | Promise<'STOP' | 'CONTINUE' | void>;
  headers?: Record<string, string>;
  schema?: Schema;
  responseFormat?: {