te.js 2.0.3 → 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +10 -1
- package/auto-docs/docs-llm/index.js +7 -0
- package/auto-docs/{llm → docs-llm}/provider.js +12 -67
- package/auto-docs/index.js +3 -3
- package/docs/ammo.md +13 -13
- package/docs/api-reference.md +7 -6
- package/docs/auto-docs.md +1 -0
- package/docs/configuration.md +48 -6
- package/docs/database.md +0 -1
- package/docs/error-handling.md +58 -37
- package/docs/file-uploads.md +0 -1
- package/docs/getting-started.md +0 -1
- package/docs/middleware.md +0 -1
- package/docs/rate-limiting.md +0 -1
- package/package.json +2 -2
- package/server/ammo.js +84 -25
- package/server/errors/code-context.js +125 -0
- package/server/errors/llm-error-service.js +140 -0
- package/server/handler.js +13 -7
- package/te.js +39 -0
- package/utils/errors-llm-config.js +84 -0
- package/auto-docs/llm/index.js +0 -6
- package/auto-docs/llm/parse.js +0 -88
- /package/auto-docs/{llm → docs-llm}/prompts.js +0 -0
package/server/ammo.js
CHANGED
|
@@ -7,6 +7,24 @@ import {
|
|
|
7
7
|
import html from '../utils/tejas-entrypoint-html.js';
|
|
8
8
|
import ammoEnhancer from './ammo/enhancer.js';
|
|
9
9
|
import TejError from './error.js';
|
|
10
|
+
import { getErrorsLlmConfig } from '../utils/errors-llm-config.js';
|
|
11
|
+
import { inferErrorFromContext } from './errors/llm-error-service.js';
|
|
12
|
+
import { captureCodeContext } from './errors/code-context.js';
|
|
13
|
+
|
|
14
|
+
/**
 * Detect if the value is a throw() options object (per-call overrides).
 * @param {unknown} v
 * @returns {v is { useLlm?: boolean, messageType?: 'endUser'|'developer' }}
 */
function isThrowOptions(v) {
  // Only plain (non-array) objects can carry per-call options.
  const isPlainObject = Boolean(v) && typeof v === 'object' && !Array.isArray(v);
  if (!isPlainObject) return false;

  const candidate = /** @type {Record<string, unknown>} */ (v);

  // Presence of `useLlm` alone qualifies, regardless of its value.
  if ('useLlm' in candidate) return true;

  // `messageType` qualifies only with one of the two recognized values.
  if (!('messageType' in candidate)) return false;
  return candidate.messageType === 'endUser' || candidate.messageType === 'developer';
}
|
|
10
28
|
|
|
11
29
|
/**
|
|
12
30
|
* Ammo class for handling HTTP requests and responses.
|
|
@@ -255,8 +273,8 @@ class Ammo {
|
|
|
255
273
|
/**
|
|
256
274
|
* Throws an error response with appropriate status code and message.
|
|
257
275
|
*
|
|
258
|
-
* @param {number|Error|string} [arg1] - Status code, Error object, or
|
|
259
|
-
* @param {string} [arg2] - Error message (
|
|
276
|
+
* @param {number|Error|string|object} [arg1] - Status code, Error object, error message, or (when no code) options
|
|
277
|
+
* @param {string|object} [arg2] - Error message (when arg1 is status code) or options (when arg1 is error/empty)
|
|
260
278
|
*
|
|
261
279
|
* @description
|
|
262
280
|
* The throw method is flexible and can handle different argument patterns:
|
|
@@ -267,8 +285,14 @@ class Ammo {
|
|
|
267
285
|
* 4. Error object: Extracts status code and message from the error
|
|
268
286
|
* 5. String: Treats as error message with 500 status code
|
|
269
287
|
*
|
|
270
|
-
*
|
|
271
|
-
*
|
|
288
|
+
* When errors.llm.enabled is true and no explicit code/message is given (no args,
|
|
289
|
+
* Error, or string/other), an LLM infers statusCode and message from context.
|
|
290
|
+
* In that case throw() returns a Promise; otherwise it returns undefined.
|
|
291
|
+
*
|
|
292
|
+
* Per-call options (last argument, only when no explicit status code): pass an object
|
|
293
|
+
* with `useLlm` (boolean) and/or `messageType` ('endUser' | 'developer'). Use
|
|
294
|
+
* `useLlm: false` to skip the LLM for this call; use `messageType` to override
|
|
295
|
+
* errors.llm.messageType for this call (end-user-friendly vs developer-friendly message).
|
|
272
296
|
*
|
|
273
297
|
* @example
|
|
274
298
|
* // Throw a 404 Not Found error
|
|
@@ -285,18 +309,69 @@ class Ammo {
|
|
|
285
309
|
* @example
|
|
286
310
|
* // Throw an error with a custom message
|
|
287
311
|
* ammo.throw('Something went wrong');
|
|
312
|
+
*
|
|
313
|
+
* @example
|
|
314
|
+
* // Skip LLM for this call; use default 500
|
|
315
|
+
* ammo.throw(err, { useLlm: false });
|
|
316
|
+
*
|
|
317
|
+
* @example
|
|
318
|
+
* // Force developer-friendly message for this call
|
|
319
|
+
* ammo.throw(err, { messageType: 'developer' });
|
|
320
|
+
*
|
|
321
|
+
* @returns {Promise<void>|void} Promise when LLM path is used; otherwise void
|
|
288
322
|
*/
|
|
289
323
|
throw() {
|
|
290
|
-
|
|
291
|
-
const
|
|
324
|
+
let args = Array.from(arguments);
|
|
325
|
+
const { enabled: llmEnabled } = getErrorsLlmConfig();
|
|
326
|
+
|
|
327
|
+
// Per-call options: last arg can be { useLlm?, messageType? } when call is LLM-eligible (no explicit code).
|
|
328
|
+
const llmEligible =
|
|
329
|
+
args.length === 0 ||
|
|
330
|
+
(!isStatusCode(args[0]) && !(args[0] instanceof TejError));
|
|
331
|
+
let throwOpts = /** @type {{ useLlm?: boolean, messageType?: 'endUser'|'developer' } | null} */ (null);
|
|
332
|
+
if (llmEligible && args.length > 0 && isThrowOptions(args[args.length - 1])) {
|
|
333
|
+
throwOpts = /** @type {{ useLlm?: boolean, messageType?: 'endUser'|'developer' } } */ (args.pop());
|
|
334
|
+
}
|
|
335
|
+
|
|
336
|
+
const useLlm =
|
|
337
|
+
llmEnabled &&
|
|
338
|
+
llmEligible &&
|
|
339
|
+
throwOpts?.useLlm !== false;
|
|
340
|
+
|
|
341
|
+
if (useLlm) {
|
|
342
|
+
// Use stack from thrown error when available (e.g. handler caught and called ammo.throw(err)) so we capture user code; else current call site.
|
|
343
|
+
const stack =
|
|
344
|
+
args[0] instanceof Error && args[0].stack
|
|
345
|
+
? args[0].stack
|
|
346
|
+
: new Error().stack;
|
|
347
|
+
return captureCodeContext(stack)
|
|
348
|
+
.then((codeContext) => {
|
|
349
|
+
const context = {
|
|
350
|
+
codeContext,
|
|
351
|
+
method: this.method,
|
|
352
|
+
path: this.path,
|
|
353
|
+
includeDevInsight: true,
|
|
354
|
+
...(throwOpts?.messageType && { messageType: throwOpts.messageType }),
|
|
355
|
+
};
|
|
356
|
+
if (args[0] !== undefined && args[0] !== null) context.error = args[0];
|
|
357
|
+
return inferErrorFromContext(context);
|
|
358
|
+
})
|
|
359
|
+
.then(({ statusCode, message, devInsight }) => {
|
|
360
|
+
const isProduction = process.env.NODE_ENV === 'production';
|
|
361
|
+
const data =
|
|
362
|
+
!isProduction && devInsight
|
|
363
|
+
? { message, _dev: devInsight }
|
|
364
|
+
: message;
|
|
365
|
+
this.fire(statusCode, data);
|
|
366
|
+
});
|
|
367
|
+
}
|
|
292
368
|
|
|
293
|
-
//
|
|
369
|
+
// Sync path: explicit code/message or useLlm: false
|
|
294
370
|
if (args.length === 0) {
|
|
295
371
|
this.fire(500, 'Internal Server Error');
|
|
296
372
|
return;
|
|
297
373
|
}
|
|
298
374
|
|
|
299
|
-
// Case 2: First argument is a status code
|
|
300
375
|
if (isStatusCode(args[0])) {
|
|
301
376
|
const statusCode = args[0];
|
|
302
377
|
const message = args[1] || toStatusMessage(statusCode);
|
|
@@ -304,51 +379,35 @@ class Ammo {
|
|
|
304
379
|
return;
|
|
305
380
|
}
|
|
306
381
|
|
|
307
|
-
// Case 3.1: First argument is an instance of TejError
|
|
308
382
|
if (args[0] instanceof TejError) {
|
|
309
383
|
const error = args[0];
|
|
310
|
-
|
|
311
|
-
const message = error.message;
|
|
312
|
-
|
|
313
|
-
this.fire(statusCode, message);
|
|
384
|
+
this.fire(error.code, error.message);
|
|
314
385
|
return;
|
|
315
386
|
}
|
|
316
387
|
|
|
317
|
-
// Case 3.2: First argument is an Error object
|
|
318
388
|
if (args[0] instanceof Error) {
|
|
319
389
|
const error = args[0];
|
|
320
|
-
|
|
321
|
-
// Check if error message is a numeric status code
|
|
322
390
|
if (!isNaN(parseInt(error.message))) {
|
|
323
391
|
const statusCode = parseInt(error.message);
|
|
324
392
|
const message = toStatusMessage(statusCode) || toStatusMessage(500);
|
|
325
393
|
this.fire(statusCode, message);
|
|
326
394
|
return;
|
|
327
395
|
}
|
|
328
|
-
|
|
329
|
-
// Use error message as status code if it's a valid status code string
|
|
330
396
|
const statusCode = toStatusCode(error.message);
|
|
331
397
|
if (statusCode) {
|
|
332
398
|
this.fire(statusCode, error.message);
|
|
333
399
|
return;
|
|
334
400
|
}
|
|
335
|
-
|
|
336
|
-
// Default error handling
|
|
337
401
|
this.fire(500, error.message);
|
|
338
402
|
return;
|
|
339
403
|
}
|
|
340
404
|
|
|
341
|
-
// Case 4: First argument is a string or other value
|
|
342
405
|
const errorValue = args[0];
|
|
343
|
-
|
|
344
|
-
// Check if the string represents a status code
|
|
345
406
|
const statusCode = toStatusCode(errorValue);
|
|
346
407
|
if (statusCode) {
|
|
347
408
|
this.fire(statusCode, toStatusMessage(statusCode));
|
|
348
409
|
return;
|
|
349
410
|
}
|
|
350
|
-
|
|
351
|
-
// Default case: treat as error message
|
|
352
411
|
this.fire(500, errorValue.toString());
|
|
353
412
|
}
|
|
354
413
|
}
|
|
@@ -0,0 +1,125 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Capture code context from the call stack: surrounding source with line numbers,
|
|
3
|
+
* including upstream callers and downstream code. Used by LLM error inference.
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
import { readFile } from 'node:fs/promises';
|
|
7
|
+
import { fileURLToPath } from 'node:url';
|
|
8
|
+
import path from 'node:path';
|
|
9
|
+
|
|
10
|
+
/** Path segments that identify te.js internals (excluded from "user" stack frames). */
|
|
11
|
+
const INTERNAL_PATTERNS = [
|
|
12
|
+
'server/ammo.js',
|
|
13
|
+
'server/handler.js',
|
|
14
|
+
'server/errors/llm-error-service.js',
|
|
15
|
+
'server/errors/code-context.js',
|
|
16
|
+
'node_modules',
|
|
17
|
+
];
|
|
18
|
+
|
|
19
|
+
const LINES_ABOVE = 25;
|
|
20
|
+
const LINES_BELOW = 25;
|
|
21
|
+
const MAX_FRAMES = 6;
|
|
22
|
+
|
|
23
|
+
/**
 * Parse a single stack frame line to extract file path, line, and column.
 * Handles "at fn (file:///path:line:col)", "at file:///path:line:col",
 * and "at /path:line:col".
 * @param {string} line
 * @returns {{ filePath: string, line: number, column: number } | null}
 */
function parseStackFrame(line) {
  const frame = line.trim();
  if (!frame.startsWith('at ')) return null;

  // The trailing :number:number pair is line:column — the path itself may
  // contain colons (Windows drive letters, file:// URLs), so anchor at the end.
  const tail = frame.match(/:(\d+):(\d+)\s*\)?\s*$/);
  if (tail === null) return null;

  const lineNum = parseInt(tail[1], 10);
  const colNum = parseInt(tail[2], 10);
  const suffix = ':' + tail[1] + ':' + tail[2];
  const head = frame.slice(0, frame.lastIndexOf(suffix));

  // Drop the "at fn (" / "at " prefix and any trailing ")" to isolate the path.
  let filePath = head
    .replace(/^\s*at\s+(?:.*?\s+\()?/, '')
    .replace(/\)?\s*$/, '')
    .trim();

  if (filePath.startsWith('file://')) {
    try {
      filePath = fileURLToPath(filePath);
    } catch {
      // Malformed file URL: treat as an unparseable frame.
      return null;
    }
  }

  if (!filePath || lineNum <= 0) return null;
  return { filePath, line: lineNum, column: colNum };
}
|
|
50
|
+
|
|
51
|
+
/**
 * Return true if this file path is internal (te.js / node_modules) and should
 * be skipped when collecting user-code stack frames.
 * @param {string} filePath - Absolute or relative path
 */
function isInternalFrame(filePath) {
  // Normalize to forward slashes so the patterns match on Windows too.
  const unixStyle = path.normalize(filePath).replace(/\\/g, '/');
  for (const pattern of INTERNAL_PATTERNS) {
    if (unixStyle.includes(pattern)) return true;
  }
  return false;
}
|
|
59
|
+
|
|
60
|
+
/**
 * Read a source file and return the lines in
 * [lineNum - LINES_ABOVE, lineNum + LINES_BELOW], each prefixed with its
 * 1-based line number; the center line is flagged with an arrow marker.
 * @param {string} filePath - Absolute path
 * @param {number} lineNum - Center line (1-based)
 * @returns {Promise<{ file: string, line: number, snippet: string } | null>}
 */
async function readSnippet(filePath, lineNum) {
  let source;
  try {
    source = await readFile(filePath, 'utf-8');
  } catch {
    // Unreadable file (deleted, permissions, virtual path): no snippet.
    return null;
  }

  const allLines = source.split(/\r?\n/);
  const first = Math.max(0, lineNum - 1 - LINES_ABOVE);
  const last = Math.min(allLines.length, lineNum + LINES_BELOW);

  const numbered = [];
  for (let idx = first; idx < last; idx += 1) {
    const displayNum = idx + 1;
    const marker = displayNum === lineNum ? ' →' : ' ';
    numbered.push(`${String(displayNum).padStart(4)}${marker} ${allLines[idx]}`);
  }

  return {
    file: filePath,
    line: lineNum,
    snippet: numbered.join('\n'),
  };
}
|
|
91
|
+
|
|
92
|
+
/**
 * Capture code context from the call stack: parse the stack, filter to user
 * frames (te.js internals and node_modules are excluded), and read surrounding
 * source (with line numbers) for each frame. The first snippet is the throw
 * site; remaining frames are upstream callers. Each snippet includes lines
 * above and below (downstream in the same function).
 *
 * @param {string} [stack] - Stack string (e.g. new Error().stack). If omitted, captures current stack.
 * @param {{ maxFrames?: number, linesAround?: number }} [options]
 * @returns {Promise<{ snippets: Array<{ file: string, line: number, snippet: string }> }>}
 */
export async function captureCodeContext(stack, options = {}) {
  const stackStr = typeof stack === 'string' && stack ? stack : new Error().stack;
  if (!stackStr) return { snippets: [] };

  const maxFrames = options.maxFrames ?? MAX_FRAMES;

  // Collect up to maxFrames user-code frames, preserving stack order
  // (first = throw site, rest = upstream callers).
  const frames = [];
  for (const line of stackStr.split('\n')) {
    const parsed = parseStackFrame(line);
    if (!parsed) continue;
    if (isInternalFrame(parsed.filePath)) continue;
    frames.push(parsed);
    if (frames.length >= maxFrames) break;
  }

  // The file reads are independent of each other — run them in parallel
  // instead of awaiting one at a time. Promise.all preserves input order;
  // frames whose file could not be read resolve to null and are dropped.
  const results = await Promise.all(
    frames.map(({ filePath, line }) => readSnippet(filePath, line)),
  );
  const snippets = results.filter((one) => one !== null);

  return { snippets };
}
|
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LLM-based error inference: given code context (surrounding + upstream/downstream),
|
|
3
|
+
* returns statusCode and message (and optionally devInsight in non-production).
|
|
4
|
+
* Uses shared lib/llm with errors.llm config. Developers do not pass an error object;
|
|
5
|
+
* the LLM infers from the code where ammo.throw() was called.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import { createProvider } from '../../lib/llm/index.js';
|
|
9
|
+
import { extractJSON } from '../../lib/llm/parse.js';
|
|
10
|
+
import { getErrorsLlmConfig } from '../../utils/errors-llm-config.js';
|
|
11
|
+
|
|
12
|
+
const DEFAULT_STATUS = 500;
|
|
13
|
+
const DEFAULT_MESSAGE = 'Internal Server Error';
|
|
14
|
+
|
|
15
|
+
/**
 * Build prompt text from code context (and optional error) for the LLM.
 * @param {object} context
 * @param {{ snippets: Array<{ file: string, line: number, snippet: string }> }} context.codeContext - Source snippets with line numbers (first = throw site, rest = upstream).
 * @param {string} [context.method] - HTTP method.
 * @param {string} [context.path] - Request path.
 * @param {boolean} [context.includeDevInsight] - If true, ask for devInsight.
 * @param {'endUser'|'developer'} [context.messageType] - Message tone.
 * @param {string|Error|undefined} [context.error] - Optional error if one was passed (secondary signal).
 * @returns {string}
 */
function buildPrompt(context) {
  const { codeContext, method, path, includeDevInsight, messageType, error } = context;
  const forDeveloper = messageType === 'developer';

  // "GET /items" style line, omitted entirely when neither piece is present.
  const requestBits = [method, path].filter(Boolean);
  const requestPart = requestBits.length ? `Request: ${requestBits.join(' ')}` : '';

  // Labeled source snippets; the first one is always the throw site.
  let codePart = 'No code context was captured.';
  const snippetList = codeContext?.snippets;
  if (snippetList?.length) {
    const sections = snippetList.map((s, i) => {
      const label = i === 0 ? 'Call site (where ammo.throw() was invoked)' : `Upstream caller ${i}`;
      return `--- ${label}: ${s.file} (line ${s.line}) ---\n${s.snippet}`;
    });
    codePart = sections.join('\n\n');
  }

  let errorPart = '';
  if (error !== undefined && error !== null) {
    errorPart = error instanceof Error
      ? `\nOptional error message (may be empty): ${error.message}`
      : `\nOptional error/message: ${String(error)}`;
  }

  const devPart = includeDevInsight
    ? '\nAlso provide a short "devInsight" string (one or two sentences) for the developer: (a) Is this likely a bug in the code or an environment/setup issue? (b) If the developer can fix it, suggest the fix. Be concise.'
    : '';

  const messageInstruction = forDeveloper
    ? '- "message": string (short message for developers: may include technical detail, error type, or cause; do not include raw stack traces)'
    : '- "message": string (short, end-user-facing message: safe for clients; do not expose stack traces, internal details, or technical jargon)';

  return `You are helping map an application error to an HTTP response. The developer called ammo.throw() (or an error was thrown and caught) at the call site below. Use the surrounding code with line numbers and all upstream/downstream context to infer what went wrong and choose an appropriate HTTP status and message.

Consider:
- The code BEFORE the throw (upstream in the same function and in callers) — what led to this point.
- The code AFTER the throw line (downstream) — what would have run next; this shows intent and expected flow.
- The first snippet is the call site (line marked with →); later snippets are upstream callers.

${requestPart ? requestPart + '\n\n' : ''}Code context (with line numbers; → marks the throw line):

${codePart}${errorPart}
${devPart ? '\n' + devPart : ''}

Respond with only valid JSON, no markdown or explanation. Use this shape:
- "statusCode": number (HTTP status, typically 4xx or 5xx; use 500 for generic/server errors)
${messageInstruction}
${includeDevInsight ? '- "devInsight": string (brief note for the developer only)' : ''}

JSON:`;
}
|
|
80
|
+
|
|
81
|
+
/**
 * Infer HTTP statusCode and message (and optionally devInsight) from code context using the LLM.
 * Uses errors.llm config (getErrorsLlmConfig). Call only when errors.llm.enabled is true and config is valid.
 * The primary input is codeContext (surrounding + upstream/downstream snippets); error is optional.
 *
 * @param {object} context - Context for the prompt.
 * @param {{ snippets: Array<{ file: string, line: number, snippet: string }> }} context.codeContext - Source snippets with line numbers (from captureCodeContext).
 * @param {string} [context.method] - HTTP method.
 * @param {string} [context.path] - Request path.
 * @param {boolean} [context.includeDevInsight] - In non-production, dev insight is included by default; set to false to disable.
 * @param {'endUser'|'developer'} [context.messageType] - Override config: 'endUser' or 'developer'. Default from errors.llm.messageType.
 * @param {string|Error|undefined} [context.error] - Optional error if the caller passed one (secondary signal).
 * @returns {Promise<{ statusCode: number, message: string, devInsight?: string }>}
 */
export async function inferErrorFromContext(context) {
  const { baseURL, apiKey, model, messageType: configMessageType } = getErrorsLlmConfig();
  const provider = createProvider({ baseURL, apiKey, model });

  // Dev insight is opt-out and never included in production responses.
  const isProduction = process.env.NODE_ENV === 'production';
  const includeDevInsight = !isProduction && context.includeDevInsight !== false;
  const messageType = context.messageType ?? configMessageType;

  const prompt = buildPrompt({
    codeContext: context.codeContext,
    method: context.method,
    path: context.path,
    includeDevInsight,
    messageType,
    error: context.error,
  });

  const { content } = await provider.analyze(prompt);
  const parsed = extractJSON(content);

  // Unparseable / non-object responses fall back to a generic 500.
  if (!parsed || typeof parsed !== 'object') {
    const fallback = { statusCode: DEFAULT_STATUS, message: DEFAULT_MESSAGE };
    if (includeDevInsight) fallback.devInsight = 'Could not parse LLM response.';
    return fallback;
  }

  // Clamp status to a valid HTTP range; anything else becomes 500.
  let statusCode = Number(parsed.statusCode);
  const inRange = !Number.isNaN(statusCode) && statusCode >= 100 && statusCode <= 599;
  if (!inRange) statusCode = DEFAULT_STATUS;

  const trimmedMessage =
    typeof parsed.message === 'string' ? parsed.message.trim() : '';
  const message = trimmedMessage || DEFAULT_MESSAGE;

  const result = { statusCode, message };
  const insight =
    typeof parsed.devInsight === 'string' ? parsed.devInsight.trim() : '';
  if (includeDevInsight && insight) {
    result.devInsight = insight;
  }

  return result;
}
|
package/server/handler.js
CHANGED
|
@@ -54,7 +54,7 @@ const executeChain = async (target, ammo) => {
|
|
|
54
54
|
} catch (err) {
|
|
55
55
|
// Only handle error if response hasn't been sent
|
|
56
56
|
if (!ammo.res.headersSent && !ammo.res.writableEnded && !ammo.res.finished) {
|
|
57
|
-
errorHandler(ammo, err);
|
|
57
|
+
await errorHandler(ammo, err);
|
|
58
58
|
}
|
|
59
59
|
}
|
|
60
60
|
};
|
|
@@ -63,16 +63,22 @@ const executeChain = async (target, ammo) => {
|
|
|
63
63
|
};
|
|
64
64
|
|
|
65
65
|
/**
|
|
66
|
-
* Handles errors
|
|
66
|
+
* Handles errors: optional logging (log.exceptions) and sending the response via ammo.throw(err).
|
|
67
|
+
* One mechanism — ammo.throw — takes care of everything (no separate "log then send").
|
|
68
|
+
* When errors.llm.enabled, framework-caught errors get the same LLM-inferred response as explicit ammo.throw().
|
|
69
|
+
* When ammo.throw() returns a Promise (LLM path), waits for it to complete.
|
|
67
70
|
*
|
|
68
71
|
* @param {Ammo} ammo - The Ammo instance containing request and response objects.
|
|
69
72
|
* @param {Error} err - The error object to handle.
|
|
73
|
+
* @returns {Promise<void>}
|
|
70
74
|
*/
|
|
71
|
-
const errorHandler = (ammo, err) => {
|
|
75
|
+
/**
 * Handle an error for a request: optional exception logging, then send the
 * response via ammo.throw(err). When ammo.throw() returns a thenable (the LLM
 * path), wait for it to settle before resolving.
 * @param {Ammo} ammo - The Ammo instance containing request and response objects.
 * @param {Error} err - The error object to handle.
 * @returns {Promise<void>}
 */
const errorHandler = async (ammo, err) => {
  if (env('LOG_EXCEPTIONS')) errorLogger.error(err);

  const maybePromise = ammo.throw(err);
  const isThenable =
    maybePromise != null && typeof maybePromise.then === 'function';
  if (isThenable) {
    await maybePromise;
  }
};
|
|
77
83
|
|
|
78
84
|
/**
|
|
@@ -102,11 +108,11 @@ const handler = async (req, res) => {
|
|
|
102
108
|
if (req.url === '/') {
|
|
103
109
|
ammo.defaultEntry();
|
|
104
110
|
} else {
|
|
105
|
-
errorHandler(ammo, new TejError(404, `URL not found: ${url}`));
|
|
111
|
+
await errorHandler(ammo, new TejError(404, `URL not found: ${url}`));
|
|
106
112
|
}
|
|
107
113
|
}
|
|
108
114
|
} catch (err) {
|
|
109
|
-
errorHandler(ammo, err);
|
|
115
|
+
await errorHandler(ammo, err);
|
|
110
116
|
}
|
|
111
117
|
};
|
|
112
118
|
|
package/te.js
CHANGED
|
@@ -9,6 +9,7 @@ import dbManager from './database/index.js';
|
|
|
9
9
|
import { loadConfigFile, standardizeObj } from './utils/configuration.js';
|
|
10
10
|
|
|
11
11
|
import targetHandler from './server/handler.js';
|
|
12
|
+
import { getErrorsLlmConfig, validateErrorsLlmAtTakeoff } from './utils/errors-llm-config.js';
|
|
12
13
|
import path from 'node:path';
|
|
13
14
|
import { pathToFileURL } from 'node:url';
|
|
14
15
|
import { readFile } from 'node:fs/promises';
|
|
@@ -190,6 +191,13 @@ class Tejas {
|
|
|
190
191
|
* app.takeoff(); // Server starts on default port 1403
|
|
191
192
|
*/
|
|
192
193
|
takeoff({ withRedis, withMongo } = {}) {
|
|
194
|
+
validateErrorsLlmAtTakeoff();
|
|
195
|
+
const errorsLlm = getErrorsLlmConfig();
|
|
196
|
+
if (errorsLlm.enabled) {
|
|
197
|
+
logger.info(
|
|
198
|
+
`errors.llm enabled successfully — baseURL: ${errorsLlm.baseURL}, model: ${errorsLlm.model}, messageType: ${errorsLlm.messageType}, apiKey: ${errorsLlm.apiKey ? '***' : '(missing)'}`,
|
|
199
|
+
);
|
|
200
|
+
}
|
|
193
201
|
this.engine = createServer(targetHandler);
|
|
194
202
|
this.engine.listen(env('PORT'), async () => {
|
|
195
203
|
logger.info(`Took off from port ${env('PORT')}`);
|
|
@@ -294,6 +302,37 @@ class Tejas {
|
|
|
294
302
|
return this;
|
|
295
303
|
}
|
|
296
304
|
|
|
305
|
+
/**
|
|
306
|
+
* Enables LLM-inferred error codes and messages for ammo.throw() and framework-caught errors.
|
|
307
|
+
* Call before takeoff(). Remaining options (baseURL, apiKey, model, messageType) can come from
|
|
308
|
+
* config, or from env/tejas.config.json (LLM_* / ERRORS_LLM_*). Validation runs at takeoff.
|
|
309
|
+
*
|
|
310
|
+
* @param {Object} [config] - Optional errors.llm overrides
|
|
311
|
+
* @param {string} [config.baseURL] - LLM provider endpoint (e.g. https://api.openai.com/v1)
|
|
312
|
+
* @param {string} [config.apiKey] - LLM provider API key
|
|
313
|
+
* @param {string} [config.model] - Model name (e.g. gpt-4o-mini)
|
|
314
|
+
* @param {'endUser'|'developer'} [config.messageType] - Default message tone
|
|
315
|
+
* @returns {Tejas} The Tejas instance for chaining
|
|
316
|
+
*
|
|
317
|
+
* @example
|
|
318
|
+
* app.withLLMErrors();
|
|
319
|
+
* app.takeoff();
|
|
320
|
+
*
|
|
321
|
+
* @example
|
|
322
|
+
* app.withLLMErrors({ baseURL: 'https://api.openai.com/v1', apiKey: process.env.OPENAI_KEY, model: 'gpt-4o-mini' });
|
|
323
|
+
* app.takeoff();
|
|
324
|
+
*/
|
|
325
|
+
withLLMErrors(config) {
|
|
326
|
+
setEnv('ERRORS_LLM_ENABLED', true);
|
|
327
|
+
if (config && typeof config === 'object') {
|
|
328
|
+
if (config.baseURL != null) setEnv('ERRORS_LLM_BASE_URL', config.baseURL);
|
|
329
|
+
if (config.apiKey != null) setEnv('ERRORS_LLM_API_KEY', config.apiKey);
|
|
330
|
+
if (config.model != null) setEnv('ERRORS_LLM_MODEL', config.model);
|
|
331
|
+
if (config.messageType != null) setEnv('ERRORS_LLM_MESSAGE_TYPE', config.messageType);
|
|
332
|
+
}
|
|
333
|
+
return this;
|
|
334
|
+
}
|
|
335
|
+
|
|
297
336
|
/**
|
|
298
337
|
* Adds global rate limiting to all endpoints
|
|
299
338
|
*
|
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Resolve and validate errors.llm configuration (LLM-inferred error codes/messages).
|
|
3
|
+
* Uses ERRORS_LLM_* env vars with fallback to LLM_*.
|
|
4
|
+
* Config file keys (e.g. errors.llm.baseURL) are standardized to ERRORS_LLM_BASEURL etc.
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
import { env } from 'tej-env';
|
|
8
|
+
|
|
9
|
+
const MESSAGE_TYPES = /** @type {const} */ (['endUser', 'developer']);
|
|
10
|
+
|
|
11
|
+
/**
 * Normalize messageType to 'endUser' | 'developer'.
 * Anything other than 'developer'/'dev' (case-insensitive) maps to 'endUser'.
 * @param {string} v
 * @returns {'endUser'|'developer'}
 */
function normalizeMessageType(v) {
  const normalized = String(v ?? '').trim().toLowerCase();
  return normalized === 'developer' || normalized === 'dev'
    ? 'developer'
    : 'endUser';
}
|
|
21
|
+
|
|
22
|
+
/**
 * Resolve errors.llm config from env (feature-specific ERRORS_LLM_* keys
 * first, then LLM_* fallback).
 * @returns {{ enabled: boolean, baseURL: string, apiKey: string, model: string, messageType: 'endUser'|'developer' }}
 */
export function getErrorsLlmConfig() {
  // First env key whose value is neither null nor undefined wins.
  const firstSet = (...keys) => {
    for (const key of keys) {
      const value = env(key);
      if (value != null) return value;
    }
    return '';
  };

  const enabledRaw = env('ERRORS_LLM_ENABLED') ?? '';
  // Accept boolean true, 'true', '1', or numeric 1 as "on".
  const enabled =
    enabledRaw === true ||
    enabledRaw === 'true' ||
    enabledRaw === '1' ||
    enabledRaw === 1;

  const baseURL = firstSet(
    'ERRORS_LLM_BASE_URL',
    'ERRORS_LLM_BASEURL',
    'LLM_BASE_URL',
    'LLM_BASEURL',
  );
  const apiKey = firstSet(
    'ERRORS_LLM_API_KEY',
    'ERRORS_LLM_APIKEY',
    'LLM_API_KEY',
    'LLM_APIKEY',
  );
  const model = firstSet('ERRORS_LLM_MODEL', 'LLM_MODEL');
  const messageTypeRaw = firstSet(
    'ERRORS_LLM_MESSAGE_TYPE',
    'ERRORS_LLM_MESSAGETYPE',
    'LLM_MESSAGE_TYPE',
    'LLM_MESSAGETYPE',
  );

  return {
    enabled: Boolean(enabled),
    baseURL: String(baseURL ?? '').trim(),
    apiKey: String(apiKey ?? '').trim(),
    model: String(model ?? '').trim(),
    messageType: normalizeMessageType(messageTypeRaw || 'endUser'),
  };
}
|
|
62
|
+
|
|
63
|
+
export { MESSAGE_TYPES };
|
|
64
|
+
|
|
65
|
+
/**
 * Validate errors.llm when enabled: require baseURL, apiKey, and model (after
 * LLM_* fallback). Call at takeoff. Throws if enabled but config is invalid;
 * no-op otherwise.
 * @throws {Error} If errors.llm.enabled is true but any of baseURL, apiKey, or model is missing
 */
export function validateErrorsLlmAtTakeoff() {
  const { enabled, baseURL, apiKey, model } = getErrorsLlmConfig();
  if (!enabled) return;

  // Each tuple: resolved value, and the label shown when it is missing.
  const required = [
    [baseURL, 'baseURL (ERRORS_LLM_BASE_URL or LLM_BASE_URL)'],
    [apiKey, 'apiKey (ERRORS_LLM_API_KEY or LLM_API_KEY)'],
    [model, 'model (ERRORS_LLM_MODEL or LLM_MODEL)'],
  ];
  const missing = required
    .filter(([value]) => !value)
    .map(([, label]) => label);

  if (missing.length > 0) {
    throw new Error(
      `errors.llm is enabled but required config is missing: ${missing.join(', ')}. Set these env vars or disable errors.llm.enabled.`,
    );
  }
}
|
package/auto-docs/llm/index.js
DELETED