llmjs2 1.1.0 → 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CONFIG_README.md +98 -0
- package/README.md +382 -357
- package/cli.js +195 -0
- package/config.yaml +149 -0
- package/docs/BASIC_USAGE.md +296 -0
- package/docs/CLI.md +455 -0
- package/docs/GET_STARTED.md +129 -0
- package/docs/GUARDRAILS_GUIDE.md +734 -0
- package/docs/README.md +47 -0
- package/docs/ROUTER_GUIDE.md +397 -0
- package/docs/SERVER_MODE.md +350 -0
- package/index.js +199 -228
- package/package.json +43 -28
- package/providers/ollama.js +120 -88
- package/providers/openai.js +104 -0
- package/providers/openrouter.js +113 -79
- package/router.js +248 -0
- package/server.js +186 -0
- package/test.js +246 -296
- package/validate-config.js +87 -0
- package/example.js +0 -298
|
@@ -0,0 +1,734 @@
|
|
|
1
|
+
# llmjs2 Guardrails Usage Guide
|
|
2
|
+
|
|
3
|
+
Guardrails provide a powerful mechanism to add custom logic before and after LLM calls, enabling content filtering, logging, transformation, and safety checks.
|
|
4
|
+
|
|
5
|
+
## Overview
|
|
6
|
+
|
|
7
|
+
Guardrails are simple function configurations that run before or after LLM calls, allowing you to:
|
|
8
|
+
|
|
9
|
+
- **Content Filtering**: Block or modify inappropriate content
|
|
10
|
+
- **Logging & Monitoring**: Track requests and responses
|
|
11
|
+
- **Transformation**: Modify messages before/after processing
|
|
12
|
+
- **Safety Checks**: Validate inputs and outputs
|
|
13
|
+
- **Rate Limiting**: Implement custom throttling logic
|
|
14
|
+
- **Caching**: Add intelligent response caching
|
|
15
|
+
|
|
16
|
+
## Basic Usage
|
|
17
|
+
|
|
18
|
+
### Router with Guardrails
|
|
19
|
+
|
|
20
|
+
```javascript
|
|
21
|
+
import { router } from 'llmjs2';
|
|
22
|
+
|
|
23
|
+
const modelList = [
|
|
24
|
+
{
|
|
25
|
+
"model_name": "gpt-3.5-turbo",
|
|
26
|
+
"llm_params": {
|
|
27
|
+
"model": "openai/gpt-3.5-turbo",
|
|
28
|
+
"api_key": process.env.OPENAI_API_KEY
|
|
29
|
+
}
|
|
30
|
+
}
|
|
31
|
+
];
|
|
32
|
+
|
|
33
|
+
// Create router with guardrails
|
|
34
|
+
const route = router({
|
|
35
|
+
model_list: modelList,
|
|
36
|
+
routing: 'random',
|
|
37
|
+
guardrails: [
|
|
38
|
+
{
|
|
39
|
+
name: "content_filter",
|
|
40
|
+
mode: "pre_call",
|
|
41
|
+
code: (processId, input) => {
|
|
42
|
+
// Filter inappropriate content before LLM call
|
|
43
|
+
const { model, messages } = input;
|
|
44
|
+
const filteredMessages = messages.map(msg => ({
|
|
45
|
+
...msg,
|
|
46
|
+
content: msg.content.replace(/badword/gi, '****')
|
|
47
|
+
}));
|
|
48
|
+
return { model, messages: filteredMessages };
|
|
49
|
+
}
|
|
50
|
+
},
|
|
51
|
+
{
|
|
52
|
+
name: "response_logger",
|
|
53
|
+
mode: "post_call",
|
|
54
|
+
code: (processId, result) => {
|
|
55
|
+
// Log responses after LLM call
|
|
56
|
+
console.log(`[${processId}] Response:`, result);
|
|
57
|
+
return result;
|
|
58
|
+
}
|
|
59
|
+
}
|
|
60
|
+
]
|
|
61
|
+
});
|
|
62
|
+
|
|
63
|
+
// Use router
|
|
64
|
+
const response = await route.completion({
|
|
65
|
+
messages: [{"role": "user", "content": "Hello!"}]
|
|
66
|
+
});
|
|
67
|
+
```
|
|
68
|
+
|
|
69
|
+
## Guardrail Configuration
|
|
70
|
+
|
|
71
|
+
Guardrails are defined as objects with three properties:
|
|
72
|
+
|
|
73
|
+
```javascript
|
|
74
|
+
{
|
|
75
|
+
name: "string", // Unique identifier for the guardrail
|
|
76
|
+
mode: "pre_call" | "post_call", // When to execute
|
|
77
|
+
code: function // The guardrail function
|
|
78
|
+
}
|
|
79
|
+
```
|
|
80
|
+
|
|
81
|
+
### Pre-call Guardrails
|
|
82
|
+
|
|
83
|
+
Executed before sending to LLM provider:
|
|
84
|
+
|
|
85
|
+
```javascript
|
|
86
|
+
{
|
|
87
|
+
name: "content_filter",
|
|
88
|
+
mode: "pre_call",
|
|
89
|
+
code: (processId, input) => {
|
|
90
|
+
/**
|
|
91
|
+
* @param {string} processId - Unique identifier for this request
|
|
92
|
+
* @param {object} input - Input object containing model and messages
|
|
93
|
+
* @param {string} input.model - Final model selected by router
|
|
94
|
+
* @param {Array} input.messages - Final message array to send to LLM
|
|
95
|
+
* @returns {object} Modified input object or throw error to block
|
|
96
|
+
*/
|
|
97
|
+
const { model, messages } = input;
|
|
98
|
+
|
|
99
|
+
// Your pre-processing logic here
|
|
100
|
+
return { model, messages }; // Return modified input
|
|
101
|
+
}
|
|
102
|
+
}
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
### Post-call Guardrails
|
|
106
|
+
|
|
107
|
+
Executed after receiving response from LLM provider:
|
|
108
|
+
|
|
109
|
+
```javascript
|
|
110
|
+
{
|
|
111
|
+
name: "response_logger",
|
|
112
|
+
mode: "post_call",
|
|
113
|
+
code: (processId, result) => {
|
|
114
|
+
/**
|
|
115
|
+
* @param {string} processId - Unique identifier for this request
|
|
116
|
+
* @param {string|object} result - Normalized LLM response (llmjs2 format)
|
|
117
|
+
* @returns {string|object} Modified result or throw error to block
|
|
118
|
+
*/
|
|
119
|
+
|
|
120
|
+
// Your post-processing logic here
|
|
121
|
+
return result; // Return modified result
|
|
122
|
+
}
|
|
123
|
+
}
|
|
124
|
+
```
|
|
125
|
+
|
|
126
|
+
## Built-in Guardrail Examples
|
|
127
|
+
|
|
128
|
+
### Content Filter Guardrail
|
|
129
|
+
|
|
130
|
+
```javascript
|
|
131
|
+
const contentFilterGuardrail = {
|
|
132
|
+
name: "content_filter",
|
|
133
|
+
mode: "pre_call",
|
|
134
|
+
code: (processId, input) => {
|
|
135
|
+
const { model, messages } = input;
|
|
136
|
+
|
|
137
|
+
// Log which model was selected
|
|
138
|
+
console.log(`[${processId}] Selected model: ${model}`);
|
|
139
|
+
|
|
140
|
+
for (const message of messages) {
|
|
141
|
+
if (message.role === 'user' && message.content) {
|
|
142
|
+
// Check for inappropriate content
|
|
143
|
+
if (containsProfanity(message.content)) {
|
|
144
|
+
throw new Error('Content violates usage policy');
|
|
145
|
+
}
|
|
146
|
+
|
|
147
|
+
// Apply content transformations
|
|
148
|
+
message.content = sanitizeContent(message.content);
|
|
149
|
+
}
|
|
150
|
+
}
|
|
151
|
+
|
|
152
|
+
return { model, messages };
|
|
153
|
+
}
|
|
154
|
+
};
|
|
155
|
+
|
|
156
|
+
const responseFilterGuardrail = {
|
|
157
|
+
name: "response_filter",
|
|
158
|
+
mode: "post_call",
|
|
159
|
+
code: (processId, result) => {
|
|
160
|
+
if (typeof result === 'string') {
|
|
161
|
+
// Filter response content
|
|
162
|
+
return filterResponse(result);
|
|
163
|
+
}
|
|
164
|
+
return result;
|
|
165
|
+
}
|
|
166
|
+
};
|
|
167
|
+
|
|
168
|
+
// Helper functions
|
|
169
|
+
function containsProfanity(text) {
|
|
170
|
+
const badWords = ['badword1', 'badword2'];
|
|
171
|
+
return badWords.some(word => text.toLowerCase().includes(word));
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
function sanitizeContent(text) {
|
|
175
|
+
// Implement content sanitization logic
|
|
176
|
+
return text;
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
function filterResponse(text) {
|
|
180
|
+
// Implement response filtering logic
|
|
181
|
+
return text;
|
|
182
|
+
}
|
|
183
|
+
```
|
|
184
|
+
|
|
185
|
+
### Logging Guardrail
|
|
186
|
+
|
|
187
|
+
```javascript
|
|
188
|
+
const requestLoggerGuardrail = {
|
|
189
|
+
name: "request_logger",
|
|
190
|
+
mode: "pre_call",
|
|
191
|
+
code: (processId, input) => {
|
|
192
|
+
const { model, messages } = input;
|
|
193
|
+
|
|
194
|
+
console.log(`[${new Date().toISOString()}] [${processId}] REQUEST:`, {
|
|
195
|
+
model,
|
|
196
|
+
messageCount: messages.length,
|
|
197
|
+
totalChars: messages.reduce((sum, m) => sum + (m.content?.length || 0), 0),
|
|
198
|
+
userMessages: messages.filter(m => m.role === 'user').length
|
|
199
|
+
});
|
|
200
|
+
|
|
201
|
+
return input;
|
|
202
|
+
}
|
|
203
|
+
};
|
|
204
|
+
|
|
205
|
+
const responseLoggerGuardrail = {
|
|
206
|
+
name: "response_logger",
|
|
207
|
+
mode: "post_call",
|
|
208
|
+
code: (processId, result) => {
|
|
209
|
+
const responseInfo = {
|
|
210
|
+
processId,
|
|
211
|
+
timestamp: new Date().toISOString(),
|
|
212
|
+
responseType: typeof result,
|
|
213
|
+
responseLength: typeof result === 'string' ? result.length : 'object'
|
|
214
|
+
};
|
|
215
|
+
|
|
216
|
+
console.log(`[${responseInfo.timestamp}] [${processId}] RESPONSE:`, responseInfo);
|
|
217
|
+
|
|
218
|
+
// Could also log to external service
|
|
219
|
+
logToExternalService(responseInfo);
|
|
220
|
+
|
|
221
|
+
return result;
|
|
222
|
+
}
|
|
223
|
+
};
|
|
224
|
+
|
|
225
|
+
// Helper function
|
|
226
|
+
function logToExternalService(data) {
|
|
227
|
+
// Implement external logging (e.g., to database, monitoring service)
|
|
228
|
+
}
|
|
229
|
+
```
|
|
230
|
+
|
|
231
|
+
### Rate Limiting Guardrail
|
|
232
|
+
|
|
233
|
+
```javascript
|
|
234
|
+
// Store for rate limiting (in production, use Redis or similar)
|
|
235
|
+
const rateLimitStore = new Map();
|
|
236
|
+
|
|
237
|
+
const rateLimitGuardrail = {
|
|
238
|
+
name: "rate_limiter",
|
|
239
|
+
mode: "pre_call",
|
|
240
|
+
code: (processId, input) => {
|
|
241
|
+
const { model, messages } = input;
|
|
242
|
+
const now = Date.now();
|
|
243
|
+
const userId = getUserFromMessages(messages);
|
|
244
|
+
|
|
245
|
+
if (!userId) {
|
|
246
|
+
return input; // No user context, allow
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
// Clean old requests
|
|
250
|
+
cleanOldRequests(userId, now);
|
|
251
|
+
|
|
252
|
+
// Check rate limit
|
|
253
|
+
const userRequests = rateLimitStore.get(userId) || [];
|
|
254
|
+
if (userRequests.length >= 60) { // 60 requests per minute
|
|
255
|
+
throw new Error(`Rate limit exceeded. Maximum 60 requests per minute.`);
|
|
256
|
+
}
|
|
257
|
+
|
|
258
|
+
// Record this request
|
|
259
|
+
userRequests.push(now);
|
|
260
|
+
rateLimitStore.set(userId, userRequests);
|
|
261
|
+
|
|
262
|
+
return input;
|
|
263
|
+
}
|
|
264
|
+
};
|
|
265
|
+
|
|
266
|
+
// Helper functions
|
|
267
|
+
function getUserFromMessages(messages) {
|
|
268
|
+
// Extract user identifier from messages (e.g., from metadata)
|
|
269
|
+
// This is application-specific
|
|
270
|
+
return null; // Implement based on your needs
|
|
271
|
+
}
|
|
272
|
+
|
|
273
|
+
function cleanOldRequests(userId, now) {
|
|
274
|
+
const userRequests = rateLimitStore.get(userId) || [];
|
|
275
|
+
const oneMinuteAgo = now - 60000;
|
|
276
|
+
|
|
277
|
+
const recentRequests = userRequests.filter(time => time > oneMinuteAgo);
|
|
278
|
+
if (recentRequests.length === 0) {
|
|
279
|
+
rateLimitStore.delete(userId);
|
|
280
|
+
} else {
|
|
281
|
+
rateLimitStore.set(userId, recentRequests);
|
|
282
|
+
}
|
|
283
|
+
}
|
|
284
|
+
```
|
|
285
|
+
|
|
286
|
+
### Response Caching Guardrail
|
|
287
|
+
|
|
288
|
+
```javascript
|
|
289
|
+
// Cache store (in production, use Redis or similar)
|
|
290
|
+
const cacheStore = new Map();
|
|
291
|
+
const CACHE_TTL = 300000; // 5 minutes in milliseconds
|
|
292
|
+
|
|
293
|
+
const cachingGuardrail = {
|
|
294
|
+
name: "response_cache",
|
|
295
|
+
mode: "pre_call",
|
|
296
|
+
code: (processId, input) => {
|
|
297
|
+
const { model, messages } = input;
|
|
298
|
+
|
|
299
|
+
// Create cache key from model and messages
|
|
300
|
+
const cacheKey = createCacheKey(model, messages);
|
|
301
|
+
|
|
302
|
+
// Check cache
|
|
303
|
+
const cached = cacheStore.get(cacheKey);
|
|
304
|
+
if (cached && (Date.now() - cached.timestamp) < CACHE_TTL) {
|
|
305
|
+
console.log(`[${processId}] Cache hit for key: ${cacheKey}`);
|
|
306
|
+
// Return cached result instead of making API call
|
|
307
|
+
// Note: This would need special handling in the router
|
|
308
|
+
}
|
|
309
|
+
|
|
310
|
+
return input;
|
|
311
|
+
}
|
|
312
|
+
};
|
|
313
|
+
|
|
314
|
+
const cacheWriterGuardrail = {
|
|
315
|
+
name: "cache_writer",
|
|
316
|
+
mode: "post_call",
|
|
317
|
+
code: (processId, result) => {
|
|
318
|
+
// Cache the result (this would need the original input to generate key)
|
|
319
|
+
// For now, just return the result
|
|
320
|
+
return result;
|
|
321
|
+
}
|
|
322
|
+
};
|
|
323
|
+
|
|
324
|
+
// Helper functions
|
|
325
|
+
function createCacheKey(model, messages) {
|
|
326
|
+
// Create deterministic key from model and messages
|
|
327
|
+
const content = `${model}|${messages.map(m => `${m.role}:${m.content}`).join('|')}`;
|
|
328
|
+
return hashString(content);
|
|
329
|
+
}
|
|
330
|
+
|
|
331
|
+
function hashString(str) {
|
|
332
|
+
let hash = 0;
|
|
333
|
+
for (let i = 0; i < str.length; i++) {
|
|
334
|
+
const char = str.charCodeAt(i);
|
|
335
|
+
hash = ((hash << 5) - hash) + char;
|
|
336
|
+
hash = hash & hash; // Convert to 32-bit integer
|
|
337
|
+
}
|
|
338
|
+
return hash.toString();
|
|
339
|
+
}
|
|
340
|
+
|
|
341
|
+
function cleanCache() {
|
|
342
|
+
const now = Date.now();
|
|
343
|
+
for (const [key, value] of cacheStore.entries()) {
|
|
344
|
+
if ((now - value.timestamp) > CACHE_TTL) {
|
|
345
|
+
cacheStore.delete(key);
|
|
346
|
+
}
|
|
347
|
+
}
|
|
348
|
+
}
|
|
349
|
+
```
|
|
350
|
+
|
|
351
|
+
## Advanced Guardrail Patterns
|
|
352
|
+
|
|
353
|
+
### Chained Transformations
|
|
354
|
+
|
|
355
|
+
```javascript
|
|
356
|
+
const sanitizationGuardrail = {
|
|
357
|
+
name: "content_sanitizer",
|
|
358
|
+
mode: "pre_call",
|
|
359
|
+
code: (processId, input) => {
|
|
360
|
+
const { model, messages } = input;
|
|
361
|
+
|
|
362
|
+
const sanitizedMessages = messages.map(message => ({
|
|
363
|
+
...message,
|
|
364
|
+
content: sanitize(message.content)
|
|
365
|
+
}));
|
|
366
|
+
|
|
367
|
+
return { model, messages: sanitizedMessages };
|
|
368
|
+
}
|
|
369
|
+
};
|
|
370
|
+
|
|
371
|
+
const responseSanitizationGuardrail = {
|
|
372
|
+
name: "response_sanitizer",
|
|
373
|
+
mode: "post_call",
|
|
374
|
+
code: (processId, result) => {
|
|
375
|
+
if (typeof result === 'string') {
|
|
376
|
+
return finalSanitize(result);
|
|
377
|
+
}
|
|
378
|
+
return result;
|
|
379
|
+
}
|
|
380
|
+
};
|
|
381
|
+
|
|
382
|
+
// Helper functions
|
|
383
|
+
function sanitize(text) {
|
|
384
|
+
// Basic sanitization
|
|
385
|
+
return text.replace(/<script>/gi, '').replace(/javascript:/gi, '');
|
|
386
|
+
}
|
|
387
|
+
|
|
388
|
+
function finalSanitize(text) {
|
|
389
|
+
// Additional sanitization for responses
|
|
390
|
+
return text;
|
|
391
|
+
}
|
|
392
|
+
```
|
|
393
|
+
|
|
394
|
+
### Conditional Processing
|
|
395
|
+
|
|
396
|
+
```javascript
|
|
397
|
+
const conditionalGuardrail = {
|
|
398
|
+
name: "conditional_processor",
|
|
399
|
+
mode: "pre_call",
|
|
400
|
+
code: (processId, input) => {
|
|
401
|
+
const { model, messages } = input;
|
|
402
|
+
|
|
403
|
+
if (shouldProcess(messages)) {
|
|
404
|
+
const processedMessages = processMessages(messages);
|
|
405
|
+
return { model, messages: processedMessages };
|
|
406
|
+
}
|
|
407
|
+
return input;
|
|
408
|
+
}
|
|
409
|
+
};
|
|
410
|
+
|
|
411
|
+
const conditionalResponseGuardrail = {
|
|
412
|
+
name: "conditional_response_processor",
|
|
413
|
+
mode: "post_call",
|
|
414
|
+
code: (processId, result) => {
|
|
415
|
+
if (shouldProcessResult(result)) {
|
|
416
|
+
return processResult(result);
|
|
417
|
+
}
|
|
418
|
+
return result;
|
|
419
|
+
}
|
|
420
|
+
};
|
|
421
|
+
|
|
422
|
+
// Helper functions
|
|
423
|
+
function shouldProcess(messages) {
|
|
424
|
+
// Check conditions for processing
|
|
425
|
+
return messages.some(m => m.content?.length > 100);
|
|
426
|
+
}
|
|
427
|
+
|
|
428
|
+
function shouldProcessResult(result) {
|
|
429
|
+
// Check conditions for result processing
|
|
430
|
+
return typeof result === 'string' && result.length > 50;
|
|
431
|
+
}
|
|
432
|
+
|
|
433
|
+
function processMessages(messages) {
|
|
434
|
+
// Apply processing logic
|
|
435
|
+
return messages;
|
|
436
|
+
}
|
|
437
|
+
|
|
438
|
+
function processResult(result) {
|
|
439
|
+
// Apply result processing
|
|
440
|
+
return result;
|
|
441
|
+
}
|
|
442
|
+
```
|
|
443
|
+
|
|
444
|
+
### Async Guardrails
|
|
445
|
+
|
|
446
|
+
```javascript
|
|
447
|
+
const asyncValidationGuardrail = {
|
|
448
|
+
name: "async_validator",
|
|
449
|
+
mode: "pre_call",
|
|
450
|
+
code: async (processId, input) => {
|
|
451
|
+
const { model, messages } = input;
|
|
452
|
+
|
|
453
|
+
// Async validation
|
|
454
|
+
await validateContent(messages);
|
|
455
|
+
|
|
456
|
+
// Async transformation
|
|
457
|
+
const transformedMessages = await transformMessages(messages);
|
|
458
|
+
|
|
459
|
+
return { model, messages: transformedMessages };
|
|
460
|
+
}
|
|
461
|
+
};
|
|
462
|
+
|
|
463
|
+
const asyncProcessingGuardrail = {
|
|
464
|
+
name: "async_processor",
|
|
465
|
+
mode: "post_call",
|
|
466
|
+
code: async (processId, result) => {
|
|
467
|
+
// Async result processing
|
|
468
|
+
const processed = await processResultAsync(result);
|
|
469
|
+
|
|
470
|
+
// Async logging
|
|
471
|
+
await logResult(processId, processed);
|
|
472
|
+
|
|
473
|
+
return processed;
|
|
474
|
+
}
|
|
475
|
+
};
|
|
476
|
+
|
|
477
|
+
// Helper functions
|
|
478
|
+
async function validateContent(messages) {
|
|
479
|
+
// Simulate async validation
|
|
480
|
+
await new Promise(resolve => setTimeout(resolve, 10));
|
|
481
|
+
// Throw error if validation fails
|
|
482
|
+
}
|
|
483
|
+
|
|
484
|
+
async function transformMessages(messages) {
|
|
485
|
+
// Simulate async transformation
|
|
486
|
+
await new Promise(resolve => setTimeout(resolve, 5));
|
|
487
|
+
return messages;
|
|
488
|
+
}
|
|
489
|
+
|
|
490
|
+
async function processResultAsync(result) {
|
|
491
|
+
// Simulate async processing
|
|
492
|
+
await new Promise(resolve => setTimeout(resolve, 5));
|
|
493
|
+
return result;
|
|
494
|
+
}
|
|
495
|
+
|
|
496
|
+
async function logResult(processId, result) {
|
|
497
|
+
// Simulate async logging
|
|
498
|
+
await new Promise(resolve => setTimeout(resolve, 1));
|
|
499
|
+
}
|
|
500
|
+
```
|
|
501
|
+
|
|
502
|
+
## Configuration Examples
|
|
503
|
+
|
|
504
|
+
### Production Setup with Multiple Guardrails
|
|
505
|
+
|
|
506
|
+
```javascript
|
|
507
|
+
import { router } from 'llmjs2';
|
|
508
|
+
|
|
509
|
+
const route = router({
|
|
510
|
+
model_list: [
|
|
511
|
+
{
|
|
512
|
+
"model_name": "gpt-4",
|
|
513
|
+
"llm_params": {
|
|
514
|
+
"model": "openai/gpt-4",
|
|
515
|
+
"api_key": process.env.OPENAI_API_KEY
|
|
516
|
+
}
|
|
517
|
+
}
|
|
518
|
+
],
|
|
519
|
+
routing: 'random',
|
|
520
|
+
guardrails: [
|
|
521
|
+
contentFilterGuardrail,   // defined in "Content Filter Guardrail" above
requestLoggerGuardrail,   // defined in "Logging Guardrail" above
rateLimitGuardrail,       // 60 requests per minute (see "Rate Limiting Guardrail")
cachingGuardrail          // 5 minute cache TTL (see "Response Caching Guardrail")
|
|
525
|
+
]
|
|
526
|
+
});
|
|
527
|
+
|
|
528
|
+
// Use with comprehensive protection
|
|
529
|
+
const response = await route.completion({
|
|
530
|
+
messages: [{"role": "user", "content": "Hello!"}]
|
|
531
|
+
});
|
|
532
|
+
```
|
|
533
|
+
|
|
534
|
+
### Development Setup with Minimal Guardrails
|
|
535
|
+
|
|
536
|
+
```javascript
|
|
537
|
+
const devRoute = router({
|
|
538
|
+
model_list: [
|
|
539
|
+
{
|
|
540
|
+
"model_name": "test-model",
|
|
541
|
+
"llm_params": {
|
|
542
|
+
"model": "ollama/llama2",
|
|
543
|
+
"api_key": process.env.OLLAMA_API_KEY
|
|
544
|
+
}
|
|
545
|
+
}
|
|
546
|
+
],
|
|
547
|
+
routing: 'sequential',
|
|
548
|
+
guardrails: [
|
|
549
|
+
{
|
|
550
|
+
name: "dev_logger",
|
|
551
|
+
mode: "post_call",
|
|
552
|
+
code: (processId, result) => {
|
|
553
|
+
console.log(`[DEV] ${processId}:`, result);
|
|
554
|
+
return result;
|
|
555
|
+
}
|
|
556
|
+
}
|
|
557
|
+
]
|
|
558
|
+
});
|
|
559
|
+
```
|
|
560
|
+
|
|
561
|
+
## Error Handling
|
|
562
|
+
|
|
563
|
+
Guardrails can throw errors to block requests:
|
|
564
|
+
|
|
565
|
+
```javascript
|
|
566
|
+
const strictContentGuardrail = {
|
|
567
|
+
name: "strict_content_filter",
|
|
568
|
+
mode: "pre_call",
|
|
569
|
+
code: (processId, input) => {
|
|
570
|
+
const { model, messages } = input;
|
|
571
|
+
|
|
572
|
+
for (const message of messages) {
|
|
573
|
+
if (message.content?.includes('blocked_word')) {
|
|
574
|
+
throw new Error('Content blocked by guardrail policy');
|
|
575
|
+
}
|
|
576
|
+
}
|
|
577
|
+
return input;
|
|
578
|
+
}
|
|
579
|
+
};
|
|
580
|
+
|
|
581
|
+
const strictResponseGuardrail = {
  name: "strict_response_filter",
  mode: "post_call",
  code: (processId, result) => {
    if (typeof result === 'string' && result.includes('inappropriate')) {
      throw new Error('Response blocked by guardrail policy');
    }
    return result;
  }
};
|
|
588
|
+
|
|
589
|
+
// Usage
|
|
590
|
+
try {
|
|
591
|
+
const response = await route.completion({
|
|
592
|
+
messages: [{"role": "user", "content": "This contains blocked_word"}]
|
|
593
|
+
});
|
|
594
|
+
} catch (error) {
|
|
595
|
+
if (error.message.includes('blocked by guardrail')) {
|
|
596
|
+
console.log('Request blocked by guardrail');
|
|
597
|
+
}
|
|
598
|
+
}
|
|
599
|
+
```
|
|
600
|
+
|
|
601
|
+
## Best Practices
|
|
602
|
+
|
|
603
|
+
### Guardrail Ordering
|
|
604
|
+
|
|
605
|
+
```javascript
|
|
606
|
+
const route = router({
|
|
607
|
+
model_list: modelList,
|
|
608
|
+
routing: 'random',
|
|
609
|
+
guardrails: [
|
|
610
|
+
// 1. Rate limiting first (fast rejection)
|
|
611
|
+
rateLimitGuardrail,
|
|
612
|
+
|
|
613
|
+
// 2. Content validation
|
|
614
|
+
contentFilterGuardrail,
|
|
615
|
+
|
|
616
|
+
// 3. Logging (should be reliable)
|
|
617
|
+
requestLoggerGuardrail,
|
|
618
|
+
|
|
619
|
+
// 4. Caching (can be complex)
|
|
620
|
+
cachingGuardrail
|
|
621
|
+
]
|
|
622
|
+
});
|
|
623
|
+
```
|
|
624
|
+
|
|
625
|
+
### Performance Considerations
|
|
626
|
+
|
|
627
|
+
- **Fast First**: Put quick checks (rate limiting) before expensive operations
|
|
628
|
+
- **Fail Fast**: Throw errors early for clear violations
|
|
629
|
+
- **Async Wisely**: Use async operations only when necessary
|
|
630
|
+
- **Memory Management**: Clean up caches and state periodically
|
|
631
|
+
|
|
632
|
+
### Security Guidelines
|
|
633
|
+
|
|
634
|
+
- **Input Validation**: Always validate and sanitize inputs
|
|
635
|
+
- **Output Filtering**: Filter responses for sensitive information
|
|
636
|
+
- **Rate Limiting**: Implement appropriate limits for your use case
|
|
637
|
+
- **Logging**: Log security events without exposing sensitive data
|
|
638
|
+
- **Error Messages**: Don't reveal system internals in error messages
|
|
639
|
+
|
|
640
|
+
## Integration with Server Mode
|
|
641
|
+
|
|
642
|
+
Guardrails work seamlessly with server mode:
|
|
643
|
+
|
|
644
|
+
```javascript
|
|
645
|
+
import { router, app } from 'llmjs2';
|
|
646
|
+
|
|
647
|
+
const route = router({
|
|
648
|
+
model_list: modelList,
|
|
649
|
+
routing: 'random',
|
|
650
|
+
guardrails: [
|
|
651
|
+
{
|
|
652
|
+
name: "content_filter",
|
|
653
|
+
mode: "pre_call",
|
|
654
|
+
code: (processId, input) => {
|
|
655
|
+
// Content filtering logic
|
|
656
|
+
return input;
|
|
657
|
+
}
|
|
658
|
+
},
|
|
659
|
+
{
|
|
660
|
+
name: "request_logger",
|
|
661
|
+
mode: "pre_call",
|
|
662
|
+
code: (processId, input) => {
|
|
663
|
+
console.log(`[${processId}] Processing request`);
|
|
664
|
+
return input;
|
|
665
|
+
}
|
|
666
|
+
},
|
|
667
|
+
{
|
|
668
|
+
name: "rate_limiter",
|
|
669
|
+
mode: "pre_call",
|
|
670
|
+
code: (processId, input) => {
|
|
671
|
+
// Rate limiting logic
|
|
672
|
+
return input;
|
|
673
|
+
}
|
|
674
|
+
},
|
|
675
|
+
{
|
|
676
|
+
name: "response_logger",
|
|
677
|
+
mode: "post_call",
|
|
678
|
+
code: (processId, result) => {
|
|
679
|
+
console.log(`[${processId}] Response received`);
|
|
680
|
+
return result;
|
|
681
|
+
}
|
|
682
|
+
}
|
|
683
|
+
]
|
|
684
|
+
});
|
|
685
|
+
|
|
686
|
+
app.use(route);
|
|
687
|
+
app.listen(3000);
|
|
688
|
+
```
|
|
689
|
+
|
|
690
|
+
All API requests will automatically go through the guardrails before routing to models.
|
|
691
|
+
|
|
692
|
+
## Creating Reusable Guardrails
|
|
693
|
+
|
|
694
|
+
### Guardrail Factory Functions
|
|
695
|
+
|
|
696
|
+
```javascript
|
|
697
|
+
// Guardrail factory function for reusable guardrails
|
|
698
|
+
function createLoggingGuardrail(options = {}) {
|
|
699
|
+
const enabled = options.enabled !== false;
|
|
700
|
+
|
|
701
|
+
return {
|
|
702
|
+
name: options.name || "logger",
|
|
703
|
+
mode: "post_call",
|
|
704
|
+
code: (processId, result) => {
|
|
705
|
+
if (!enabled) return result;
|
|
706
|
+
|
|
707
|
+
console.log(`[${new Date().toISOString()}] [${processId}] ${options.prefix || 'LOG'}:`, result);
|
|
708
|
+
return result;
|
|
709
|
+
}
|
|
710
|
+
};
|
|
711
|
+
}
|
|
712
|
+
|
|
713
|
+
// Usage
|
|
714
|
+
const debugLogger = createLoggingGuardrail({ name: "debug_logger", prefix: "DEBUG" });
|
|
715
|
+
const errorLogger = createLoggingGuardrail({ name: "error_logger", prefix: "ERROR" });
|
|
716
|
+
|
|
717
|
+
// Custom guardrail function
|
|
718
|
+
const customGuardrail = {
|
|
719
|
+
name: "custom_processor",
|
|
720
|
+
mode: "pre_call",
|
|
721
|
+
code: (processId, input) => {
|
|
722
|
+
const { model, messages } = input;
|
|
723
|
+
// Custom logic
|
|
724
|
+
return { model, messages };
|
|
725
|
+
}
|
|
726
|
+
|
|
727
|
+
};

// Post-call logic belongs in its own guardrail configuration
const customPostCallGuardrail = {
  name: "custom_post_processor",
  mode: "post_call",
  code: (processId, result) => {
    // Custom logic
    return result;
  }
};
|
|
732
|
+
```
|
|
733
|
+
|
|
734
|
+
Guardrails provide a simple and powerful way to add custom logic around LLM calls using function-based configurations, enabling everything from basic logging to advanced content filtering and security measures.
|