@node-llm/core 1.4.3 → 1.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +65 -2
- package/dist/aliases.d.ts +563 -71
- package/dist/aliases.d.ts.map +1 -1
- package/dist/aliases.js +569 -77
- package/dist/chat/Chat.d.ts +51 -0
- package/dist/chat/Chat.d.ts.map +1 -1
- package/dist/chat/Chat.js +189 -55
- package/dist/chat/ChatOptions.d.ts +11 -2
- package/dist/chat/ChatOptions.d.ts.map +1 -1
- package/dist/chat/ChatResponse.d.ts +7 -1
- package/dist/chat/ChatResponse.d.ts.map +1 -1
- package/dist/chat/ChatResponse.js +9 -1
- package/dist/chat/ChatStream.d.ts +15 -1
- package/dist/chat/ChatStream.d.ts.map +1 -1
- package/dist/chat/ChatStream.js +122 -36
- package/dist/chat/Role.d.ts +1 -1
- package/dist/chat/Role.d.ts.map +1 -1
- package/dist/config.d.ts +11 -0
- package/dist/config.d.ts.map +1 -1
- package/dist/config.js +7 -1
- package/dist/constants.d.ts +11 -0
- package/dist/constants.d.ts.map +1 -1
- package/dist/constants.js +12 -0
- package/dist/index.d.ts +1 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +1 -0
- package/dist/llm.d.ts +4 -0
- package/dist/llm.d.ts.map +1 -1
- package/dist/llm.js +10 -0
- package/dist/models/models.d.ts +113 -1427
- package/dist/models/models.d.ts.map +1 -1
- package/dist/models/models.js +5700 -24218
- package/dist/models/types.js +0 -3
- package/dist/providers/Provider.d.ts +6 -0
- package/dist/providers/Provider.d.ts.map +1 -1
- package/dist/providers/anthropic/AnthropicProvider.d.ts +1 -0
- package/dist/providers/anthropic/AnthropicProvider.d.ts.map +1 -1
- package/dist/providers/anthropic/AnthropicProvider.js +1 -0
- package/dist/providers/anthropic/Chat.d.ts.map +1 -1
- package/dist/providers/anthropic/Chat.js +4 -3
- package/dist/providers/anthropic/Streaming.d.ts.map +1 -1
- package/dist/providers/anthropic/Streaming.js +3 -2
- package/dist/providers/anthropic/Utils.js +2 -2
- package/dist/providers/deepseek/Chat.d.ts.map +1 -1
- package/dist/providers/deepseek/Chat.js +8 -5
- package/dist/providers/deepseek/DeepSeekProvider.d.ts +1 -0
- package/dist/providers/deepseek/DeepSeekProvider.d.ts.map +1 -1
- package/dist/providers/deepseek/DeepSeekProvider.js +1 -0
- package/dist/providers/deepseek/Streaming.d.ts.map +1 -1
- package/dist/providers/deepseek/Streaming.js +7 -4
- package/dist/providers/gemini/Chat.d.ts.map +1 -1
- package/dist/providers/gemini/Chat.js +4 -3
- package/dist/providers/gemini/ChatUtils.js +5 -5
- package/dist/providers/gemini/GeminiProvider.d.ts +1 -0
- package/dist/providers/gemini/GeminiProvider.d.ts.map +1 -1
- package/dist/providers/gemini/GeminiProvider.js +1 -0
- package/dist/providers/gemini/Streaming.d.ts.map +1 -1
- package/dist/providers/gemini/Streaming.js +3 -2
- package/dist/providers/ollama/OllamaProvider.d.ts.map +1 -1
- package/dist/providers/ollama/OllamaProvider.js +1 -0
- package/dist/providers/openai/Capabilities.d.ts +1 -0
- package/dist/providers/openai/Capabilities.d.ts.map +1 -1
- package/dist/providers/openai/Capabilities.js +3 -0
- package/dist/providers/openai/Chat.d.ts +4 -2
- package/dist/providers/openai/Chat.d.ts.map +1 -1
- package/dist/providers/openai/Chat.js +16 -7
- package/dist/providers/openai/Embedding.d.ts.map +1 -1
- package/dist/providers/openai/Embedding.js +3 -2
- package/dist/providers/openai/Image.d.ts.map +1 -1
- package/dist/providers/openai/Image.js +3 -2
- package/dist/providers/openai/Moderation.d.ts.map +1 -1
- package/dist/providers/openai/Moderation.js +3 -2
- package/dist/providers/openai/OpenAIProvider.d.ts +1 -0
- package/dist/providers/openai/OpenAIProvider.d.ts.map +1 -1
- package/dist/providers/openai/OpenAIProvider.js +3 -2
- package/dist/providers/openai/Streaming.d.ts +4 -2
- package/dist/providers/openai/Streaming.d.ts.map +1 -1
- package/dist/providers/openai/Streaming.js +15 -6
- package/dist/providers/openai/Transcription.d.ts.map +1 -1
- package/dist/providers/openai/Transcription.js +5 -4
- package/dist/providers/openrouter/OpenRouterProvider.d.ts +1 -0
- package/dist/providers/openrouter/OpenRouterProvider.d.ts.map +1 -1
- package/dist/providers/openrouter/OpenRouterProvider.js +1 -0
- package/dist/providers/utils.d.ts +8 -0
- package/dist/providers/utils.d.ts.map +1 -0
- package/dist/providers/utils.js +16 -0
- package/dist/utils/fetch.d.ts +12 -0
- package/dist/utils/fetch.d.ts.map +1 -0
- package/dist/utils/fetch.js +34 -0
- package/package.json +1 -1
package/README.md
CHANGED
@@ -150,7 +150,34 @@ class WeatherTool extends Tool {
 // Now the model can use it automatically
 await chat.withTool(WeatherTool).ask("What's the weather in Tokyo?");
 ```
-
+
+### 🛡️ Loop Protection & Resource Limits
+Prevent runaway costs, infinite loops, and hanging requests with comprehensive protection against resource exhaustion.
+
+NodeLLM provides **defense-in-depth** security that you can configure globally or per-request:
+
+```ts
+// 1. Global config
+NodeLLM.configure({
+  requestTimeout: 30000, // Timeout requests after 30 seconds (default)
+  maxToolCalls: 5, // Stop after 5 sequential tool execution turns
+  maxRetries: 2, // Retry provider-level errors up to 2 times
+  maxTokens: 4096 // Limit output to 4K tokens (default)
+});
+
+// 2. Per request override
+await chat.ask("Deep search task", {
+  requestTimeout: 120000, // 2 minutes for this request
+  maxToolCalls: 10,
+  maxTokens: 8192 // 8K tokens for this request
+});
+```
+
+**Security Benefits:**
+- **`requestTimeout`**: Prevents DoS attacks and hanging requests
+- **`maxToolCalls`**: Prevents infinite tool execution loops
+- **`maxRetries`**: Prevents retry storms during outages
+- **`maxTokens`**: Prevents excessive output and cost overruns
 
 ### 🔍 Comprehensive Debug Logging
 Enable detailed logging for all API requests and responses across every feature and provider:
@@ -166,6 +193,42 @@ process.env.NODELLM_DEBUG = "true";
 ```
 **Covers:** Chat, Streaming, Images, Embeddings, Transcription, Moderation - across all providers!
 
+### 🛡️ Content Policy Hooks
+NodeLLM provides pluggable hooks to implement custom security, compliance, and moderation logic. Instead of hard-coded rules, you can inject your own policies at the edge.
+
+- **`beforeRequest()`**: Intercept and modify messages before they hit the LLM (e.g., PII detection/redaction).
+- **`afterResponse()`**: Process the final response before it returns to your code (e.g., output masking or compliance checks).
+
+```ts
+chat
+  .beforeRequest(async (messages) => {
+    // Detect PII and redact
+    return redactSSN(messages);
+  })
+  .afterResponse(async (response) => {
+    // Ensure output compliance
+    return response.withContent(maskSensitiveData(response.content));
+  });
+```
+
+### 🧱 Smart Context Isolation
+Stop worrying about prompt injection or instruction drift. NodeLLM automatically separates system instructions from the conversation history, providing a higher level of protection and strictness.
+
+- **Zero-Config Security**: Enabled by default for all chats. No special flags required.
+- **Smart Model Mapping**: Automatically uses OpenAI's modern `developer` role for compatible models (GPT-4o, o1, o3) while safely falling back to the standard `system` role for older or local models (Ollama, DeepSeek, etc.).
+- **Universal Context**: Instructions stay separated internally, ensuring they are always prioritized by the model and never accidentally overridden by user messages.
+- **Provider Agnostic**: Write instructions once; NodeLLM handles the specific role requirements for every major provider (OpenAI, Anthropic, Gemini).
+
+### 🔍 Observability & Tool Auditing
+For enterprise compliance, NodeLLM provides deep visibility into the tool execution lifecycle. You can monitor, log, and audit every step of a tool's execution.
+
+```ts
+chat
+  .onToolCallStart((call) => log(`Starting tool: ${call.function.name}`))
+  .onToolCallEnd((call, res) => log(`Tool ${call.id} finished with: ${res}`))
+  .onToolCallError((call, err) => alert(`Tool ${call.function.name} failed: ${err.message}`));
+```
+
 ### ✨ Structured Output
 Get type-safe, validated JSON back using **Zod** schemas.
 ```ts
@@ -222,7 +285,7 @@ console.log(res.reasoning); // Chain-of-thought
 
 | Provider | Supported Features |
 | :--- | :--- |
-| <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/openai.svg" height="18"> **OpenAI** | Chat, **Streaming + Tools**, Vision, Audio, Images, Transcription, **Reasoning** |
+| <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/openai.svg" height="18"> **OpenAI** | Chat, **Streaming + Tools**, Vision, Audio, Images, Transcription, **Reasoning**, **Smart Developer Role** |
 | <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/gemini-color.svg" height="18"> **Gemini** | Chat, **Streaming + Tools**, Vision, Audio, Video, Embeddings |
 | <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/anthropic-text.svg" height="12"> **Anthropic** | Chat, **Streaming + Tools**, Vision, PDF, Structured Output |
 | <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/deepseek-color.svg" height="18"> **DeepSeek** | Chat (V3), **Reasoning (R1)**, **Streaming + Tools** |
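
The `redactSSN` and `maskSensitiveData` helpers referenced in the new Content Policy Hooks snippet are left undefined in the README. A minimal sketch of what such user-supplied helpers could look like, assuming chat messages expose a plain string `content` field (the names and the message shape are illustrative, not part of the package):

```ts
// Illustrative only: user-supplied policy helpers, not part of @node-llm/core.
interface ChatMessage {
  role: string;
  content: string; // assumption: string content; real messages may be richer
}

const SSN_PATTERN = /\b\d{3}-\d{2}-\d{4}\b/g;

// Redact US social security numbers before messages reach the provider.
function redactSSN(messages: ChatMessage[]): ChatMessage[] {
  return messages.map((m) => ({
    ...m,
    content: m.content.replace(SSN_PATTERN, "[REDACTED-SSN]"),
  }));
}

// Mask the same pattern in model output before it reaches application code.
function maskSensitiveData(text: string): string {
  return text.replace(SSN_PATTERN, "[REDACTED-SSN]");
}
```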
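
The Smart Context Isolation section describes the `developer`/`system` role mapping only in prose. The sketch below shows the kind of decision it implies; the model list and function are assumptions for illustration, not the package's actual implementation:

```ts
// Illustrative sketch of the described mapping, not the library's real code.
// Assumption: model families that accept the `developer` role are matched by prefix.
const DEVELOPER_ROLE_MODELS = [/^gpt-4o/, /^o1\b/, /^o3\b/];

function instructionRole(model: string): "developer" | "system" {
  // Modern OpenAI models get `developer`; older or local models fall back to `system`.
  return DEVELOPER_ROLE_MODELS.some((re) => re.test(model)) ? "developer" : "system";
}

// instructionRole("gpt-4o-mini")   -> "developer"
// instructionRole("deepseek-chat") -> "system"
```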