@node-llm/core 1.4.2 → 1.5.0

Files changed (87)
  1. package/README.md +87 -19
  2. package/dist/chat/Chat.d.ts +51 -0
  3. package/dist/chat/Chat.d.ts.map +1 -1
  4. package/dist/chat/Chat.js +189 -55
  5. package/dist/chat/ChatOptions.d.ts +11 -2
  6. package/dist/chat/ChatOptions.d.ts.map +1 -1
  7. package/dist/chat/ChatResponse.d.ts +7 -1
  8. package/dist/chat/ChatResponse.d.ts.map +1 -1
  9. package/dist/chat/ChatResponse.js +9 -1
  10. package/dist/chat/ChatStream.d.ts +15 -1
  11. package/dist/chat/ChatStream.d.ts.map +1 -1
  12. package/dist/chat/ChatStream.js +122 -36
  13. package/dist/chat/Role.d.ts +1 -1
  14. package/dist/chat/Role.d.ts.map +1 -1
  15. package/dist/config.d.ts +11 -0
  16. package/dist/config.d.ts.map +1 -1
  17. package/dist/config.js +7 -1
  18. package/dist/constants.d.ts +11 -0
  19. package/dist/constants.d.ts.map +1 -1
  20. package/dist/constants.js +12 -0
  21. package/dist/index.d.ts +1 -0
  22. package/dist/index.d.ts.map +1 -1
  23. package/dist/index.js +1 -0
  24. package/dist/llm.d.ts +4 -0
  25. package/dist/llm.d.ts.map +1 -1
  26. package/dist/llm.js +10 -0
  27. package/dist/providers/Provider.d.ts +6 -0
  28. package/dist/providers/Provider.d.ts.map +1 -1
  29. package/dist/providers/anthropic/AnthropicProvider.d.ts +1 -0
  30. package/dist/providers/anthropic/AnthropicProvider.d.ts.map +1 -1
  31. package/dist/providers/anthropic/AnthropicProvider.js +1 -0
  32. package/dist/providers/anthropic/Chat.d.ts.map +1 -1
  33. package/dist/providers/anthropic/Chat.js +4 -3
  34. package/dist/providers/anthropic/Streaming.d.ts.map +1 -1
  35. package/dist/providers/anthropic/Streaming.js +3 -2
  36. package/dist/providers/anthropic/Utils.js +2 -2
  37. package/dist/providers/deepseek/Chat.d.ts.map +1 -1
  38. package/dist/providers/deepseek/Chat.js +8 -5
  39. package/dist/providers/deepseek/DeepSeekProvider.d.ts +1 -0
  40. package/dist/providers/deepseek/DeepSeekProvider.d.ts.map +1 -1
  41. package/dist/providers/deepseek/DeepSeekProvider.js +1 -0
  42. package/dist/providers/deepseek/Streaming.d.ts.map +1 -1
  43. package/dist/providers/deepseek/Streaming.js +7 -4
  44. package/dist/providers/gemini/Chat.d.ts.map +1 -1
  45. package/dist/providers/gemini/Chat.js +4 -3
  46. package/dist/providers/gemini/ChatUtils.js +5 -5
  47. package/dist/providers/gemini/GeminiProvider.d.ts +1 -0
  48. package/dist/providers/gemini/GeminiProvider.d.ts.map +1 -1
  49. package/dist/providers/gemini/GeminiProvider.js +1 -0
  50. package/dist/providers/gemini/Streaming.d.ts.map +1 -1
  51. package/dist/providers/gemini/Streaming.js +3 -2
  52. package/dist/providers/ollama/OllamaProvider.d.ts.map +1 -1
  53. package/dist/providers/ollama/OllamaProvider.js +1 -0
  54. package/dist/providers/openai/Capabilities.d.ts +1 -0
  55. package/dist/providers/openai/Capabilities.d.ts.map +1 -1
  56. package/dist/providers/openai/Capabilities.js +3 -0
  57. package/dist/providers/openai/Chat.d.ts +4 -2
  58. package/dist/providers/openai/Chat.d.ts.map +1 -1
  59. package/dist/providers/openai/Chat.js +16 -7
  60. package/dist/providers/openai/Embedding.d.ts.map +1 -1
  61. package/dist/providers/openai/Embedding.js +3 -2
  62. package/dist/providers/openai/Image.d.ts.map +1 -1
  63. package/dist/providers/openai/Image.js +3 -2
  64. package/dist/providers/openai/Moderation.d.ts.map +1 -1
  65. package/dist/providers/openai/Moderation.js +3 -2
  66. package/dist/providers/openai/OpenAIProvider.d.ts +1 -0
  67. package/dist/providers/openai/OpenAIProvider.d.ts.map +1 -1
  68. package/dist/providers/openai/OpenAIProvider.js +3 -2
  69. package/dist/providers/openai/Streaming.d.ts +4 -2
  70. package/dist/providers/openai/Streaming.d.ts.map +1 -1
  71. package/dist/providers/openai/Streaming.js +15 -6
  72. package/dist/providers/openai/Transcription.d.ts.map +1 -1
  73. package/dist/providers/openai/Transcription.js +5 -4
  74. package/dist/providers/openrouter/OpenRouterProvider.d.ts +1 -0
  75. package/dist/providers/openrouter/OpenRouterProvider.d.ts.map +1 -1
  76. package/dist/providers/openrouter/OpenRouterProvider.js +1 -0
  77. package/dist/providers/utils.d.ts +8 -0
  78. package/dist/providers/utils.d.ts.map +1 -0
  79. package/dist/providers/utils.js +16 -0
  80. package/dist/utils/fetch.d.ts +12 -0
  81. package/dist/utils/fetch.d.ts.map +1 -0
  82. package/dist/utils/fetch.js +34 -0
  83. package/package.json +11 -12
  84. package/dist/aliases.json +0 -132
  85. package/dist/utils/sanitize.d.ts +0 -21
  86. package/dist/utils/sanitize.d.ts.map +0 -1
  87. package/dist/utils/sanitize.js +0 -76
package/README.md CHANGED
@@ -1,18 +1,17 @@
 <p align="left">
-  <img src="https://github.com/eshaiju/node-llm/raw/main/docs/assets/images/logo.jpg" alt="NodeLLMlogo" width="300" />
+  <a href="https://node-llm.eshaiju.com/">
+    <img src="https://github.com/node-llm/node-llm/raw/main/docs/assets/images/logo.jpg" alt="NodeLLM logo" width="300" />
+  </a>
 </p>
 
 # NodeLLM
 
-
 **An opinionated architectural layer for integrating Large Language Models in Node.js.**
 
 **Provider-agnostic by design.**
 
 Most LLM SDKs **tightly couple** your application to vendors, APIs, and churn. NodeLLM provides a unified, production-oriented API for interacting with over 540+ models across multiple providers (OpenAI, Gemini, Anthropic, DeepSeek, OpenRouter, Ollama, etc.) without the SDK fatigue.
 
-<br/>
-
 <p align="left">
   <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/openai.svg" height="28" />
   <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/openai-text.svg" height="22" />
@@ -88,7 +87,7 @@ for await (const chunk of chat.stream("Explain event-driven architecture")) {
 
 ## 🔧 Strategic Configuration
 
-NodeLLMprovides a flexible configuration system designed for enterprise usage:
+NodeLLM provides a flexible configuration system designed for enterprise usage:
 
 ```ts
 // Recommended for multi-provider pipelines
@@ -108,7 +107,7 @@ NodeLLM.configure({
 });
 ```
 
-**[Full Configuration Guide →](docs/getting_started/configuration.md)**
+**[Full Configuration Guide →](https://node-llm.eshaiju.com/getting-started/configuration)**
 
 ---
 
@@ -135,21 +134,50 @@ await chat.ask("Analyze this interface", {
 Define tools once;`NodeLLM` manages the recursive execution loop for you, keeping your controller logic clean. **Works seamlessly with both regular chat and streaming!**
 
 ```ts
+import { Tool, z } from "@node-llm/core";
+
 // Class-based DSL
 class WeatherTool extends Tool {
   name = "get_weather";
  description = "Get current weather";
  schema = z.object({ location: z.string() });
-  async execute({ location }) { return `Sunny in ${location}`; }
-}
 
-// Register tools
-chat.withTools([WeatherTool]);
+  async handler({ location }) {
+    return `Sunny in ${location}`;
+  }
+}
 
 // Now the model can use it automatically
-await chat.ask("What's the weather in Tokyo?");
+await chat.withTool(WeatherTool).ask("What's the weather in Tokyo?");
+```
+
+### 🛡️ Loop Protection & Resource Limits
+Prevent runaway costs, infinite loops, and hanging requests with comprehensive protection against resource exhaustion.
+
+NodeLLM provides **defense-in-depth** security that you can configure globally or per-request:
+
+```ts
+// 1. Global config
+NodeLLM.configure({
+  requestTimeout: 30000, // Timeout requests after 30 seconds (default)
+  maxToolCalls: 5,       // Stop after 5 sequential tool execution turns
+  maxRetries: 2,         // Retry provider-level errors up to 2 times
+  maxTokens: 4096        // Limit output to 4K tokens (default)
+});
+
+// 2. Per request override
+await chat.ask("Deep search task", {
+  requestTimeout: 120000, // 2 minutes for this request
+  maxToolCalls: 10,
+  maxTokens: 8192         // 8K tokens for this request
+});
 ```
-**[Full Tool Calling Guide →](https://node-llm.eshaiju.com/core-features/tool-calling)**
+
+**Security Benefits:**
+- **`requestTimeout`**: Prevents DoS attacks and hanging requests
+- **`maxToolCalls`**: Prevents infinite tool execution loops
+- **`maxRetries`**: Prevents retry storms during outages
+- **`maxTokens`**: Prevents excessive output and cost overruns
 
 ### 🔍 Comprehensive Debug Logging
 Enable detailed logging for all API requests and responses across every feature and provider:
@@ -165,6 +193,42 @@ process.env.NODELLM_DEBUG = "true";
 ```
 **Covers:** Chat, Streaming, Images, Embeddings, Transcription, Moderation - across all providers!
 
+### 🛡️ Content Policy Hooks
+NodeLLM provides pluggable hooks to implement custom security, compliance, and moderation logic. Instead of hard-coded rules, you can inject your own policies at the edge.
+
+- **`beforeRequest()`**: Intercept and modify messages before they hit the LLM (e.g., PII detection/redaction).
+- **`afterResponse()`**: Process the final response before it returns to your code (e.g., output masking or compliance checks).
+
+```ts
+chat
+  .beforeRequest(async (messages) => {
+    // Detect PII and redact
+    return redactSSN(messages);
+  })
+  .afterResponse(async (response) => {
+    // Ensure output compliance
+    return response.withContent(maskSensitiveData(response.content));
+  });
+```
+
+### 🧱 Smart Context Isolation
+Stop worrying about prompt injection or instruction drift. NodeLLM automatically separates system instructions from the conversation history, providing a higher level of protection and strictness.
+
+- **Zero-Config Security**: Enabled by default for all chats. No special flags required.
+- **Smart Model Mapping**: Automatically uses OpenAI's modern `developer` role for compatible models (GPT-4o, o1, o3) while safely falling back to the standard `system` role for older or local models (Ollama, DeepSeek, etc.).
+- **Universal Context**: Instructions stay separated internally, ensuring they are always prioritized by the model and never accidentally overridden by user messages.
+- **Provider Agnostic**: Write instructions once; NodeLLM handles the specific role requirements for every major provider (OpenAI, Anthropic, Gemini).
+
+### 🔍 Observability & Tool Auditing
+For enterprise compliance, NodeLLM provides deep visibility into the tool execution lifecycle. You can monitor, log, and audit every step of a tool's execution.
+
+```ts
+chat
+  .onToolCallStart((call) => log(`Starting tool: ${call.function.name}`))
+  .onToolCallEnd((call, res) => log(`Tool ${call.id} finished with: ${res}`))
+  .onToolCallError((call, err) => alert(`Tool ${call.function.name} failed: ${err.message}`));
+```
+
 ### ✨ Structured Output
 Get type-safe, validated JSON back using **Zod** schemas.
 ```ts
@@ -186,17 +250,15 @@ await NodeLLM.transcribe("meeting-recording.wav");
 ```
 
 ### ⚡ Scoped Parallelism
-Run multiple providers in parallel safely without global configuration side effects using isolated contexts. You can also override credentials (API keys) for specific instances.
-
+Run multiple providers in parallel safely without global configuration side effects using isolated contexts.
 ```ts
 const [gpt, claude] = await Promise.all([
   // Each call branch off into its own isolated context
   NodeLLM.withProvider("openai").chat("gpt-4o").ask(prompt),
-  NodeLLM.withProvider("anthropic", { anthropicApiKey: "..." }).chat("claude-3.5-sonnet").ask(prompt),
+  NodeLLM.withProvider("anthropic").chat("claude-3-5-sonnet").ask(prompt),
 ]);
 ```
 
-
 ### 🧠 Deep Reasoning
 Direct access to the thought process of models like **DeepSeek R1** or **OpenAI o1/o3** using the `.reasoning` field.
 ```ts
@@ -208,7 +270,7 @@ console.log(res.reasoning); // Chain-of-thought
 
 ## 🚀 Why use this over official SDKs?
 
-| Feature |`NodeLLM` | Official SDKs | Architectural Impact |
+| Feature | NodeLLM | Official SDKs | Architectural Impact |
 | :--- | :--- | :--- | :--- |
 | **Provider Logic** | Transparently Handled | Exposed to your code | **Low Coupling** |
 | **Streaming** | Standard `AsyncIterator` | Vendor-specific Events | **Predictable Data Flow** |
@@ -223,7 +285,7 @@ console.log(res.reasoning); // Chain-of-thought
 
 | Provider | Supported Features |
 | :--- | :--- |
-| <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/openai.svg" height="18"> **OpenAI** | Chat, **Streaming + Tools**, Vision, Audio, Images, Transcription, **Reasoning** |
+| <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/openai.svg" height="18"> **OpenAI** | Chat, **Streaming + Tools**, Vision, Audio, Images, Transcription, **Reasoning**, **Smart Developer Role** |
 | <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/gemini-color.svg" height="18"> **Gemini** | Chat, **Streaming + Tools**, Vision, Audio, Video, Embeddings |
 | <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/anthropic-text.svg" height="12"> **Anthropic** | Chat, **Streaming + Tools**, Vision, PDF, Structured Output |
 | <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/deepseek-color.svg" height="18"> **DeepSeek** | Chat (V3), **Reasoning (R1)**, **Streaming + Tools** |
@@ -242,6 +304,12 @@ npm install @node-llm/core
 
 ---
 
+## 🤝 Contributing
+
+We welcome contributions! Please see our **[Contributing Guide](https://github.com/node-llm/node-llm/blob/main/CONTRIBUTING.md)** for more details on how to get started.
+
+---
+
 ## 🫶 Credits
 
 Heavily inspired by the elegant design of [RubyLLM](https://rubyllm.com/).
@@ -250,4 +318,4 @@ Heavily inspired by the elegant design of [RubyLLM](https://rubyllm.com/).
 
 ## 📄 License
 
-MIT © [NodeLLMcontributors]
+MIT © [NodeLLM contributors]
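
Reviewer note: the "Smart Context Isolation" section added above is the one new README feature described without a code sample. Below is a minimal sketch of what that isolation means in practice, based on the `withInstructions` / `systemMessages` behavior visible in the `Chat.js` diff further down; the entry point follows the README's `withProvider(...).chat(...)` examples and the model name is illustrative.

```ts
import { NodeLLM } from "@node-llm/core";

const chat = NodeLLM.withProvider("openai").chat("gpt-4o");

// Instructions land in the isolated systemMessages store, not in the
// regular history array; { replace: true } would clear prior instructions.
chat.withInstructions("You are a terse code reviewer.");

// Every request is sent as [...systemMessages, ...messages], so user turns
// can never displace or override the instructions in the history.
const res = await chat.ask("Review: SELECT * FROM users;");
console.log(res.toString());
```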
package/dist/chat/Chat.d.ts CHANGED
@@ -5,12 +5,15 @@ import { Stream } from "../streaming/Stream.js";
 import { Tool } from "./Tool.js";
 import { Schema } from "../schema/Schema.js";
 import { z } from "zod";
+import { ToolExecutionMode } from "../constants.js";
 export interface AskOptions {
     images?: string[];
     files?: string[];
     temperature?: number;
     maxTokens?: number;
     headers?: Record<string, string>;
+    maxToolCalls?: number;
+    requestTimeout?: number;
 }
 import { ChatResponseString } from "./ChatResponse.js";
 export declare class Chat {
@@ -18,6 +21,7 @@ export declare class Chat {
     private model;
     private readonly options;
     private messages;
+    private systemMessages;
     private executor;
     constructor(provider: Provider, model: string, options?: ChatOptions, retryConfig?: {
         attempts: number;
@@ -95,6 +99,40 @@ export declare class Chat {
     onEndMessage(handler: (message: ChatResponseString) => void): this;
     onToolCall(handler: (toolCall: any) => void): this;
     onToolResult(handler: (result: any) => void): this;
+    /**
+     * Called when a tool call starts.
+     */
+    onToolCallStart(handler: (toolCall: any) => void): this;
+    /**
+     * Called when a tool call ends successfully.
+     */
+    onToolCallEnd(handler: (toolCall: any, result: any) => void): this;
+    /**
+     * Called when a tool call fails.
+     */
+    onToolCallError(handler: (toolCall: any, error: Error) => void): this;
+    /**
+     * Set the tool execution mode.
+     * - "auto": (Default) Automatically execute all tool calls.
+     * - "confirm": Call onConfirmToolCall before executing each tool.
+     * - "dry-run": Propose tool calls but do not execute them.
+     */
+    withToolExecution(mode: ToolExecutionMode): this;
+    /**
+     * Hook for confirming tool execution in "confirm" mode.
+     * Return true to proceed, false to cancel the specific call.
+     */
+    onConfirmToolCall(handler: (toolCall: any) => Promise<boolean> | boolean): this;
+    /**
+     * Add a hook to process messages before sending to the LLM.
+     * Useful for PII detection, redaction, and input moderation.
+     */
+    beforeRequest(handler: (messages: Message[]) => Promise<Message[] | void>): this;
+    /**
+     * Add a hook to process the response before returning it.
+     * Useful for output redaction, compliance, and post-moderation.
+     */
+    afterResponse(handler: (response: ChatResponseString) => Promise<ChatResponseString | void>): this;
     /**
      * Ask the model a question
      */
@@ -103,5 +141,18 @@ export declare class Chat {
      * Streams the model's response to a user question.
      */
     stream(content: string): Stream<ChatChunk>;
+    /**
+     * Check if tool execution should proceed based on the current mode.
+     */
+    private shouldExecuteTools;
+    /**
+     * Request user confirmation for a tool call in "confirm" mode.
+     * Returns true if approved, false if rejected.
+     */
+    private requestToolConfirmation;
+    /**
+     * Execute a single tool call and return the result message.
+     */
+    private executeToolCall;
 }
 //# sourceMappingURL=Chat.d.ts.map
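
Reviewer note: read together, these declarations describe a three-mode tool lifecycle ("auto", "confirm", "dry-run"). A hedged usage sketch follows; it assumes `ToolExecutionMode` and its members (`AUTO`/`CONFIRM`/`DRY_RUN`, as used in the `Chat.js` diff below) are re-exported from the package root, which the one-line `index.js` change in the file list suggests but does not prove.

```ts
import { NodeLLM, ToolExecutionMode } from "@node-llm/core";

const chat = NodeLLM.withProvider("openai").chat("gpt-4o");

chat
  .withToolExecution(ToolExecutionMode.CONFIRM)
  .onConfirmToolCall(async (call) => {
    // Returning false records "Action cancelled by user." as the tool
    // result instead of running the handler (see Chat.js below)
    return call.function.name !== "drop_table";
  })
  .onToolCallStart((call) => console.log("->", call.function.name))
  .onToolCallEnd((call, result) => console.log("<-", call.id, result))
  .onToolCallError((call, err) => console.error("!!", err.message));

// The new AskOptions fields cap this single request
await chat.ask("Tidy up the staging database", {
  maxToolCalls: 3,
  requestTimeout: 60_000,
});
```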
package/dist/chat/Chat.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"Chat.d.ts","sourceRoot":"","sources":["../../src/chat/Chat.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,OAAO,EAAE,MAAM,cAAc,CAAC;AACvC,OAAO,EAAE,WAAW,EAAE,MAAM,kBAAkB,CAAC;AAC/C,OAAO,EAAE,QAAQ,EAAE,KAAK,EAAE,SAAS,EAAE,MAAM,0BAA0B,CAAC;AAGtE,OAAO,EAAE,MAAM,EAAE,MAAM,wBAAwB,CAAC;AAChD,OAAO,EAAE,IAAI,EAAkB,MAAM,WAAW,CAAC;AACjD,OAAO,EAAE,MAAM,EAAE,MAAM,qBAAqB,CAAC;AAE7C,OAAO,EAAE,CAAC,EAAE,MAAM,KAAK,CAAC;AAExB,MAAM,WAAW,UAAU;IACzB,MAAM,CAAC,EAAE,MAAM,EAAE,CAAC;IAClB,KAAK,CAAC,EAAE,MAAM,EAAE,CAAC;IACjB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;CAClC;AAED,OAAO,EAAE,kBAAkB,EAAE,MAAM,mBAAmB,CAAC;AAEvD,qBAAa,IAAI;IAKb,OAAO,CAAC,QAAQ,CAAC,QAAQ;IACzB,OAAO,CAAC,KAAK;IACb,OAAO,CAAC,QAAQ,CAAC,OAAO;IAN1B,OAAO,CAAC,QAAQ,CAAiB;IACjC,OAAO,CAAC,QAAQ,CAAW;gBAGR,QAAQ,EAAE,QAAQ,EAC3B,KAAK,EAAE,MAAM,EACJ,OAAO,GAAE,WAAgB,EAC1C,WAAW,GAAE;QAAE,QAAQ,EAAE,MAAM,CAAC;QAAC,OAAO,EAAE,MAAM,CAAA;KAAgC;IAmBlF;;OAEG;IACH,IAAI,OAAO,IAAI,SAAS,OAAO,EAAE,CAEhC;IAED,IAAI,OAAO,IAAI,MAAM,CAEpB;IAED;;OAEG;IACH,IAAI,UAAU,IAAI,KAAK,CAetB;IAED;;;OAGG;IACH,QAAQ,CAAC,IAAI,EAAE,GAAG,GAAG,IAAI;IAIvB;;;;;;;KAOC;IACH,SAAS,CAAC,KAAK,EAAE,CAAC,IAAI,GAAG;QAAE,QAAO,IAAI,CAAA;KAAE,GAAG,GAAG,CAAC,EAAE,EAAE,OAAO,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,OAAO,CAAA;KAAE,GAAG,IAAI;IAmCzF;;;;OAIG;IACH,gBAAgB,CAAC,WAAW,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,OAAO,CAAA;KAAE,GAAG,IAAI;IAmB5E;;OAEG;IACH,gBAAgB,CAAC,WAAW,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,OAAO,CAAA;KAAE,GAAG,IAAI;IAI5E;;;OAGG;IACH,eAAe,CAAC,IAAI,EAAE,MAAM,GAAG,IAAI;IAKnC;;OAEG;IACH,SAAS,CAAC,KAAK,EAAE,MAAM,GAAG,IAAI;IAK9B;;;OAGG;IACH,kBAAkB,CAAC,OAAO,EAAE;QAAE,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;QAAC,cAAc,CAAC,EAAE,GAAG,CAAA;KAAE,GAAG,IAAI;IAU7F;;;OAGG;IACH,UAAU,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,GAAG,IAAI;IAK7C;;;OAGG;IACH,UAAU,CAAC,MAAM,EAAE,MAAM,GAAG,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,GAAG,IAAI,GAAG,IAAI;IAkB9E,YAAY,CAAC,OAAO,EAAE,MAAM,IAAI,GAAG,IAAI;IAKvC,YAAY,CAAC,OAAO,EAAE,CAAC,OAAO,EAAE,kBAAkB,KAAK,IAAI,GAAG,IAAI;IAKlE,UAAU,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,GAAG,KAAK,IAAI,GAAG,IAAI;IAKlD,YAAY,CAAC,OAAO,EAAE,CAAC,MAAM,EAAE,GAAG,KAAK,IAAI,GAAG,IAAI;IAKlD;;OAEG;IACG,GAAG,CAAC,OAAO,EAAE,MAAM,GAAG,GAAG,EAAE,EAAE,OAAO,CAAC,EAAE,UAAU,GAAG,OAAO,CAAC,kBAAkB,CAAC;IAkMrF;;OAEG;IACH,MAAM,CAAC,OAAO,EAAE,MAAM,GAAG,MAAM,CAAC,SAAS,CAAC;CAI3C"}
+ {"version":3,"file":"Chat.d.ts","sourceRoot":"","sources":["../../src/chat/Chat.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,OAAO,EAAE,MAAM,cAAc,CAAC;AACvC,OAAO,EAAE,WAAW,EAAE,MAAM,kBAAkB,CAAC;AAC/C,OAAO,EAAE,QAAQ,EAAE,KAAK,EAAE,SAAS,EAAE,MAAM,0BAA0B,CAAC;AAGtE,OAAO,EAAE,MAAM,EAAE,MAAM,wBAAwB,CAAC;AAChD,OAAO,EAAE,IAAI,EAAkB,MAAM,WAAW,CAAC;AACjD,OAAO,EAAE,MAAM,EAAE,MAAM,qBAAqB,CAAC;AAE7C,OAAO,EAAE,CAAC,EAAE,MAAM,KAAK,CAAC;AAExB,OAAO,EAAE,iBAAiB,EAAE,MAAM,iBAAiB,CAAC;AAEpD,MAAM,WAAW,UAAU;IACzB,MAAM,CAAC,EAAE,MAAM,EAAE,CAAC;IAClB,KAAK,CAAC,EAAE,MAAM,EAAE,CAAC;IACjB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IACjC,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,cAAc,CAAC,EAAE,MAAM,CAAC;CACzB;AAED,OAAO,EAAE,kBAAkB,EAAE,MAAM,mBAAmB,CAAC;AAEvD,qBAAa,IAAI;IAMb,OAAO,CAAC,QAAQ,CAAC,QAAQ;IACzB,OAAO,CAAC,KAAK;IACb,OAAO,CAAC,QAAQ,CAAC,OAAO;IAP1B,OAAO,CAAC,QAAQ,CAAiB;IACjC,OAAO,CAAC,cAAc,CAAiB;IACvC,OAAO,CAAC,QAAQ,CAAW;gBAGR,QAAQ,EAAE,QAAQ,EAC3B,KAAK,EAAE,MAAM,EACJ,OAAO,GAAE,WAAgB,EAC1C,WAAW,GAAE;QAAE,QAAQ,EAAE,MAAM,CAAC;QAAC,OAAO,EAAE,MAAM,CAAA;KAAgC;IA0BlF;;OAEG;IACH,IAAI,OAAO,IAAI,SAAS,OAAO,EAAE,CAEhC;IAED,IAAI,OAAO,IAAI,MAAM,CAEpB;IAED;;OAEG;IACH,IAAI,UAAU,IAAI,KAAK,CAetB;IAED;;;OAGG;IACH,QAAQ,CAAC,IAAI,EAAE,GAAG,GAAG,IAAI;IAIvB;;;;;;;KAOC;IACH,SAAS,CAAC,KAAK,EAAE,CAAC,IAAI,GAAG;QAAE,QAAO,IAAI,CAAA;KAAE,GAAG,GAAG,CAAC,EAAE,EAAE,OAAO,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,OAAO,CAAA;KAAE,GAAG,IAAI;IAmCzF;;;;OAIG;IACH,gBAAgB,CAAC,WAAW,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,OAAO,CAAA;KAAE,GAAG,IAAI;IAW5E;;OAEG;IACH,gBAAgB,CAAC,WAAW,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,OAAO,CAAA;KAAE,GAAG,IAAI;IAI5E;;;OAGG;IACH,eAAe,CAAC,IAAI,EAAE,MAAM,GAAG,IAAI;IAKnC;;OAEG;IACH,SAAS,CAAC,KAAK,EAAE,MAAM,GAAG,IAAI;IAK9B;;;OAGG;IACH,kBAAkB,CAAC,OAAO,EAAE;QAAE,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;QAAC,cAAc,CAAC,EAAE,GAAG,CAAA;KAAE,GAAG,IAAI;IAU7F;;;OAGG;IACH,UAAU,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,GAAG,IAAI;IAK7C;;;OAGG;IACH,UAAU,CAAC,MAAM,EAAE,MAAM,GAAG,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,GAAG,IAAI,GAAG,IAAI;IAkB9E,YAAY,CAAC,OAAO,EAAE,MAAM,IAAI,GAAG,IAAI;IAKvC,YAAY,CAAC,OAAO,EAAE,CAAC,OAAO,EAAE,kBAAkB,KAAK,IAAI,GAAG,IAAI;IAKlE,UAAU,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,GAAG,KAAK,IAAI,GAAG,IAAI;IAIlD,YAAY,CAAC,OAAO,EAAE,CAAC,MAAM,EAAE,GAAG,KAAK,IAAI,GAAG,IAAI;IAIlD;;OAEG;IACH,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,GAAG,KAAK,IAAI,GAAG,IAAI;IAKvD;;OAEG;IACH,aAAa,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,GAAG,EAAE,MAAM,EAAE,GAAG,KAAK,IAAI,GAAG,IAAI;IAKlE;;OAEG;IACH,eAAe,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,GAAG,EAAE,KAAK,EAAE,KAAK,KAAK,IAAI,GAAG,IAAI;IAKrE;;;;;OAKG;IACH,iBAAiB,CAAC,IAAI,EAAE,iBAAiB,GAAG,IAAI;IAKhD;;;OAGG;IACH,iBAAiB,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,GAAG,KAAK,OAAO,CAAC,OAAO,CAAC,GAAG,OAAO,GAAG,IAAI;IAK/E;;;OAGG;IACH,aAAa,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,OAAO,EAAE,KAAK,OAAO,CAAC,OAAO,EAAE,GAAG,IAAI,CAAC,GAAG,IAAI;IAKhF;;;OAGG;IACH,aAAa,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,kBAAkB,KAAK,OAAO,CAAC,kBAAkB,GAAG,IAAI,CAAC,GAAG,IAAI;IAKlG;;OAEG;IACG,GAAG,CAAC,OAAO,EAAE,MAAM,GAAG,GAAG,EAAE,EAAE,OAAO,CAAC,EAAE,UAAU,GAAG,OAAO,CAAC,kBAAkB,CAAC;IA0OrF;;OAEG;IACH,MAAM,CAAC,OAAO,EAAE,MAAM,GAAG,MAAM,CAAC,SAAS,CAAC;IAK1C;;OAEG;IACH,OAAO,CAAC,kBAAkB;IAM1B;;;OAGG;YACW,uBAAuB;IASrC;;OAEG;YACW,eAAe;CA2C9B"}
package/dist/chat/Chat.js CHANGED
@@ -4,12 +4,15 @@ import { ChatStream } from "./ChatStream.js";
 import { Schema } from "../schema/Schema.js";
 import { toJsonSchema } from "../schema/to-json-schema.js";
 import { z } from "zod";
+import { config } from "../config.js";
+import { ToolExecutionMode } from "../constants.js";
 import { ChatResponseString } from "./ChatResponse.js";
 export class Chat {
     provider;
     model;
     options;
     messages = [];
+    systemMessages = [];
     executor;
     constructor(provider, model, options = {}, retryConfig = { attempts: 1, delayMs: 0 }) {
         this.provider = provider;
@@ -17,20 +20,27 @@ export class Chat {
         this.options = options;
         this.executor = new Executor(provider, retryConfig);
         if (options.systemPrompt) {
-            this.messages.push({
-                role: "system",
-                content: options.systemPrompt,
-            });
+            this.withInstructions(options.systemPrompt);
         }
         if (options.messages) {
-            this.messages.push(...options.messages);
+            for (const msg of options.messages) {
+                if (msg.role === "system" || msg.role === "developer") {
+                    this.systemMessages.push(msg);
+                }
+                else {
+                    this.messages.push(msg);
+                }
+            }
+        }
+        if (!this.options.toolExecution) {
+            this.options.toolExecution = config.toolExecution || ToolExecutionMode.AUTO;
         }
     }
     /**
      * Read-only access to message history
      */
     get history() {
-        return this.messages;
+        return [...this.systemMessages, ...this.messages];
     }
     get modelId() {
         return this.model;
@@ -106,19 +116,10 @@ export class Chat {
      */
     withInstructions(instruction, options) {
         if (options?.replace) {
-            this.messages = this.messages.filter((m) => m.role !== "system");
-        }
-        // System messages usually go first, but for "appending" behavior
-        // mid-conversation, most providers handle them fine in history.
-        // Ideally, if it's "replace", we might want to unshift it to index 0,
-        // but simply pushing a new system message works for "updating" context too.
-        // For consistency with "replace" meaning "this is THE system prompt":
-        if (options?.replace) {
-            this.messages.unshift({ role: "system", content: instruction });
-        }
-        else {
-            this.messages.push({ role: "system", content: instruction });
+            this.systemMessages = [];
         }
+        // Always push to isolated storage
+        this.systemMessages.push({ role: "system", content: instruction });
         return this;
     }
     /**
@@ -193,11 +194,64 @@ export class Chat {
         return this;
     }
     onToolCall(handler) {
-        this.options.onToolCall = handler;
-        return this;
+        return this.onToolCallStart(handler);
     }
     onToolResult(handler) {
-        this.options.onToolResult = handler;
+        return this.onToolCallEnd((_call, result) => handler(result));
+    }
+    /**
+     * Called when a tool call starts.
+     */
+    onToolCallStart(handler) {
+        this.options.onToolCallStart = handler;
+        return this;
+    }
+    /**
+     * Called when a tool call ends successfully.
+     */
+    onToolCallEnd(handler) {
+        this.options.onToolCallEnd = handler;
+        return this;
+    }
+    /**
+     * Called when a tool call fails.
+     */
+    onToolCallError(handler) {
+        this.options.onToolCallError = handler;
+        return this;
+    }
+    /**
+     * Set the tool execution mode.
+     * - "auto": (Default) Automatically execute all tool calls.
+     * - "confirm": Call onConfirmToolCall before executing each tool.
+     * - "dry-run": Propose tool calls but do not execute them.
+     */
+    withToolExecution(mode) {
+        this.options.toolExecution = mode;
+        return this;
+    }
+    /**
+     * Hook for confirming tool execution in "confirm" mode.
+     * Return true to proceed, false to cancel the specific call.
+     */
+    onConfirmToolCall(handler) {
+        this.options.onConfirmToolCall = handler;
+        return this;
+    }
+    /**
+     * Add a hook to process messages before sending to the LLM.
+     * Useful for PII detection, redaction, and input moderation.
+     */
+    beforeRequest(handler) {
+        this.options.onBeforeRequest = handler;
+        return this;
+    }
+    /**
+     * Add a hook to process the response before returning it.
+     * Useful for output redaction, compliance, and post-moderation.
+     */
+    afterResponse(handler) {
+        this.options.onAfterResponse = handler;
         return this;
     }
     /**
@@ -269,14 +323,24 @@ export class Chat {
         }
         const executeOptions = {
             model: this.model,
-            messages: this.messages,
+            messages: [...this.systemMessages, ...this.messages],
             tools: this.options.tools,
             temperature: options?.temperature ?? this.options.temperature,
-            max_tokens: options?.maxTokens ?? this.options.maxTokens,
+            max_tokens: options?.maxTokens ?? this.options.maxTokens ?? config.maxTokens,
             headers: { ...this.options.headers, ...options?.headers },
             response_format: responseFormat, // Pass to provider
+            requestTimeout: options?.requestTimeout ?? this.options.requestTimeout ?? config.requestTimeout,
             ...this.options.params,
         };
+        // --- Content Policy Hooks (Input) ---
+        if (this.options.onBeforeRequest) {
+            const messagesToProcess = [...this.systemMessages, ...this.messages];
+            const result = await this.options.onBeforeRequest(messagesToProcess);
+            if (result) {
+                // If the hook returned modified messages, use them for this request
+                executeOptions.messages = result;
+            }
+        }
         let totalUsage = { input_tokens: 0, output_tokens: 0, total_tokens: 0 };
         const trackUsage = (u) => {
             if (u) {
@@ -296,60 +360,70 @@ export class Chat {
             this.options.onNewMessage();
         let response = await this.executor.executeChat(executeOptions);
         trackUsage(response.usage);
-        const firstAssistantMessage = new ChatResponseString(response.content ?? "", response.usage ?? { input_tokens: 0, output_tokens: 0, total_tokens: 0 }, this.model, this.provider.id, response.reasoning);
+        let assistantMessage = new ChatResponseString(response.content ?? "", response.usage ?? { input_tokens: 0, output_tokens: 0, total_tokens: 0 }, this.model, this.provider.id, response.reasoning, response.tool_calls);
+        // --- Content Policy Hooks (Output - Turn 1) ---
+        if (this.options.onAfterResponse) {
+            const result = await this.options.onAfterResponse(assistantMessage);
+            if (result) {
+                assistantMessage = result;
+            }
+        }
         this.messages.push({
             role: "assistant",
-            content: firstAssistantMessage,
+            content: assistantMessage || null,
             tool_calls: response.tool_calls,
             usage: response.usage,
         });
         if (this.options.onEndMessage && (!response.tool_calls || response.tool_calls.length === 0)) {
-            this.options.onEndMessage(firstAssistantMessage);
+            this.options.onEndMessage(assistantMessage);
         }
+        const maxToolCalls = options?.maxToolCalls ?? this.options.maxToolCalls ?? 5;
+        let stepCount = 0;
         while (response.tool_calls && response.tool_calls.length > 0) {
+            // Dry-run mode: stop after proposing tools
+            if (!this.shouldExecuteTools(response.tool_calls, this.options.toolExecution)) {
+                break;
+            }
+            stepCount++;
+            if (stepCount > maxToolCalls) {
+                throw new Error(`[NodeLLM] Maximum tool execution calls (${maxToolCalls}) exceeded.`);
+            }
             for (const toolCall of response.tool_calls) {
-                if (this.options.onToolCall)
-                    this.options.onToolCall(toolCall);
-                const tool = this.options.tools?.find((t) => t.function.name === toolCall.function.name);
-                if (tool?.handler) {
-                    try {
-                        const args = JSON.parse(toolCall.function.arguments);
-                        const result = await tool.handler(args);
-                        if (this.options.onToolResult)
-                            this.options.onToolResult(result);
-                        this.messages.push({
-                            role: "tool",
-                            tool_call_id: toolCall.id,
-                            content: result,
-                        });
-                    }
-                    catch (error) {
+                // Confirm mode: request approval
+                if (this.options.toolExecution === ToolExecutionMode.CONFIRM) {
+                    const approved = await this.requestToolConfirmation(toolCall, this.options.onConfirmToolCall);
+                    if (!approved) {
                         this.messages.push({
                             role: "tool",
                             tool_call_id: toolCall.id,
-                            content: `Error executing tool: ${error.message}`,
+                            content: "Action cancelled by user.",
                         });
+                        continue;
                     }
                 }
-                else {
-                    this.messages.push({
-                        role: "tool",
-                        tool_call_id: toolCall.id,
-                        content: "Error: Tool not found or no handler provided",
-                    });
-                }
+                // Execute the tool
+                const toolResult = await this.executeToolCall(toolCall, this.options.tools, this.options.onToolCallStart, this.options.onToolCallEnd, this.options.onToolCallError);
+                this.messages.push(toolResult);
            }
            response = await this.executor.executeChat({
                model: this.model,
-                messages: this.messages,
+                messages: [...this.systemMessages, ...this.messages],
                tools: this.options.tools,
                headers: this.options.headers,
+                requestTimeout: options?.requestTimeout ?? this.options.requestTimeout ?? config.requestTimeout,
            });
            trackUsage(response.usage);
-            const assistantMessage = new ChatResponseString(response.content ?? "", response.usage ?? { input_tokens: 0, output_tokens: 0, total_tokens: 0 }, this.model, this.provider.id, response.reasoning);
+            assistantMessage = new ChatResponseString(response.content ?? "", response.usage ?? { input_tokens: 0, output_tokens: 0, total_tokens: 0 }, this.model, this.provider.id, response.reasoning);
+            // --- Content Policy Hooks (Output - Tool Turns) ---
+            if (this.options.onAfterResponse) {
+                const result = await this.options.onAfterResponse(assistantMessage);
+                if (result) {
+                    assistantMessage = result;
+                }
+            }
            this.messages.push({
                role: "assistant",
-                content: assistantMessage,
+                content: assistantMessage || null,
                tool_calls: response.tool_calls,
                usage: response.usage,
            });
@@ -359,13 +433,73 @@ export class Chat {
         }
         // For the final return, we might want to aggregate reasoning too if it happened in multiple turns?
         // Usually reasoning only happens once or we just want the last one.
-        return new ChatResponseString(response.content ?? "", totalUsage, this.model, this.provider.id, response.reasoning);
+        return new ChatResponseString(assistantMessage.toString() || "", totalUsage, this.model, this.provider.id, assistantMessage.reasoning, response.tool_calls);
     }
     /**
      * Streams the model's response to a user question.
      */
     stream(content) {
-        const streamer = new ChatStream(this.provider, this.model, this.options, this.messages);
+        const streamer = new ChatStream(this.provider, this.model, this.options, this.messages, this.systemMessages);
         return streamer.create(content);
     }
+    /**
+     * Check if tool execution should proceed based on the current mode.
+     */
+    shouldExecuteTools(toolCalls, mode) {
+        if (!toolCalls || toolCalls.length === 0)
+            return false;
+        if (mode === ToolExecutionMode.DRY_RUN)
+            return false;
+        return true;
+    }
+    /**
+     * Request user confirmation for a tool call in "confirm" mode.
+     * Returns true if approved, false if rejected.
+     */
+    async requestToolConfirmation(toolCall, onConfirm) {
+        if (!onConfirm)
+            return true;
+        const confirmed = await onConfirm(toolCall);
+        return confirmed !== false;
+    }
+    /**
+     * Execute a single tool call and return the result message.
+     */
+    async executeToolCall(toolCall, tools, onStart, onEnd, onError) {
+        if (onStart)
+            onStart(toolCall);
+        const tool = tools?.find((t) => t.function.name === toolCall.function.name);
+        if (tool?.handler) {
+            try {
+                const args = JSON.parse(toolCall.function.arguments);
+                const result = await tool.handler(args);
+                if (onEnd)
+                    onEnd(toolCall, result);
+                return {
+                    role: "tool",
+                    tool_call_id: toolCall.id,
+                    content: result,
+                };
+            }
+            catch (error) {
+                if (onError)
+                    onError(toolCall, error);
+                return {
+                    role: "tool",
+                    tool_call_id: toolCall.id,
+                    content: `Error executing tool: ${error.message}`,
+                };
+            }
+        }
+        else {
+            const error = new Error("Tool not found or no handler provided");
+            if (onError)
+                onError(toolCall, error);
+            return {
+                role: "tool",
                tool_call_id: toolCall.id,
                content: "Error: Tool not found or no handler provided",
            };
        }
    }
 }
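
Reviewer note: one consequence of the rewritten loop deserves spelling out. In dry-run mode, `shouldExecuteTools` returns false on the first pass, so the loop breaks with nothing executed, and the final `return` now forwards `response.tool_calls`. A sketch of reviewing those proposals offline, assuming `tool_calls` is readable on the returned `ChatResponseString` (per the extra constructor argument above) and the imports from the earlier sketches:

```ts
const res = await chat
  .withToolExecution(ToolExecutionMode.DRY_RUN)
  .ask("Delete temp files older than 7 days");

// No handler ran; inspect what the model *proposed* to do.
for (const call of res.tool_calls ?? []) {
  console.log(call.function.name, JSON.parse(call.function.arguments));
}
```

Note also that the `maxToolCalls` guard counts loop turns (model round-trips), not individual calls within a turn, and that exceeding it throws rather than silently truncating.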
package/dist/chat/ChatOptions.d.ts CHANGED
@@ -1,6 +1,8 @@
 import { Message } from "./Message.js";
 import { ToolDefinition } from "./Tool.js";
 import { Schema } from "../schema/Schema.js";
+import { ChatResponseString } from "./ChatResponse.js";
+import { ToolExecutionMode } from "../constants.js";
 export interface ChatOptions {
     systemPrompt?: string;
     messages?: Message[];
@@ -9,8 +11,9 @@ export interface ChatOptions {
     maxTokens?: number;
     onNewMessage?: () => void;
     onEndMessage?: (message: any) => void;
-    onToolCall?: (toolCall: any) => void;
-    onToolResult?: (result: any) => void;
+    onToolCallStart?: (toolCall: any) => void;
+    onToolCallEnd?: (toolCall: any, result: any) => void;
+    onToolCallError?: (toolCall: any, error: Error) => void;
     headers?: Record<string, string>;
     schema?: Schema;
     responseFormat?: {
@@ -19,5 +22,11 @@ export interface ChatOptions {
     params?: Record<string, any>;
     assumeModelExists?: boolean;
     provider?: string;
+    maxToolCalls?: number;
+    requestTimeout?: number;
+    toolExecution?: ToolExecutionMode;
+    onConfirmToolCall?: (toolCall: any) => Promise<boolean> | boolean;
+    onBeforeRequest?: (messages: Message[]) => Promise<Message[] | void>;
+    onAfterResponse?: (response: ChatResponseString) => Promise<ChatResponseString | void>;
 }
 //# sourceMappingURL=ChatOptions.d.ts.map
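
Reviewer note: the interface shows that everything the fluent setters configure can also be supplied up front, since methods like `beforeRequest` simply write to these fields. A sketch under that assumption; whether the `chat(model, options)` entry point accepts a full `ChatOptions` object is not confirmed by this diff, and `scrubPII` / `audit` are hypothetical helpers:

```ts
const chat = NodeLLM.withProvider("openai").chat("gpt-4o", {
  maxToolCalls: 3,
  requestTimeout: 45_000,
  toolExecution: ToolExecutionMode.AUTO,
  onBeforeRequest: async (messages) =>
    // A returned array replaces the outgoing messages for this request only
    messages.map((m) => ({ ...m, content: scrubPII(m.content) })), // scrubPII: hypothetical
  onAfterResponse: async (response) => {
    audit(response.toString()); // audit: hypothetical
    // Returning nothing keeps the original response
  },
});
```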