te.js 2.1.5 → 2.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,64 @@
1
+ /**
2
+ * Console channel: pretty-prints LLM error results to the terminal using ansi-colors.
3
+ */
4
+
5
+ import ansi from 'ansi-colors';
6
+ import { ErrorChannel } from './base.js';
7
+
8
+ const { red, yellow, cyan, white, bold, dim, italic } = ansi;
9
+
10
+ /**
11
+ * Format an HTTP status code with color (red for 5xx, yellow for 4xx, white for others).
12
+ * @param {number} statusCode
13
+ * @returns {string}
14
+ */
15
+ function colorStatus(statusCode) {
16
+ if (statusCode >= 500) return red(bold(String(statusCode)));
17
+ if (statusCode >= 400) return yellow(bold(String(statusCode)));
18
+ return white(bold(String(statusCode)));
19
+ }
20
+
21
/**
 * Channel that writes a human-readable, colorized LLM error summary to stderr.
 */
export class ConsoleChannel extends ErrorChannel {
  /**
   * Pretty-print one LLM error payload to the terminal.
   * @param {object} payload - Standard channel payload (see buildPayload).
   * @returns {Promise<void>}
   */
  async dispatch(payload) {
    const {
      timestamp,
      method,
      path,
      statusCode,
      message,
      devInsight,
      error,
      cached,
      rateLimited,
    } = payload;

    // Optional state flags shown after the status code.
    const flags = [];
    if (cached) flags.push(cyan('[CACHED]'));
    if (rateLimited) flags.push(yellow('[RATE LIMITED]'));
    const flagSuffix = flags.length ? ' ' + flags.join(' ') : '';

    const header = [
      dim(italic(new Date(timestamp).toLocaleTimeString())),
      red('[LLM ERROR]'),
      white(`${method} ${path}`),
      '→',
      colorStatus(statusCode) + flagSuffix,
    ].join(' ');

    const out = ['', header, ` ${white(message)}`];

    if (devInsight) {
      out.push(` ${cyan('⟶')} ${cyan(devInsight)}`);
    }

    // Show the original error only when the LLM actually ran (not rate limited).
    if (error?.message && !rateLimited) {
      const typePrefix = error.type ? `${error.type}: ` : '';
      out.push(` ${dim(`original: ${typePrefix}${error.message}`)}`);
    }

    out.push('');

    process.stderr.write(out.join('\n'));
  }
}
@@ -0,0 +1,111 @@
1
+ /**
2
+ * Channel registry for LLM error output.
3
+ * Maps channel config values ('console' | 'log' | 'both') to channel instances.
4
+ */
5
+
6
+ import { ConsoleChannel } from './console.js';
7
+ import { LogChannel } from './log.js';
8
+
9
+ /** @type {ConsoleChannel|null} */
10
+ let _console = null;
11
+
12
+ /** @type {Map<string, LogChannel>} */
13
+ const _logInstances = new Map();
14
+
15
+ /**
16
+ * Get (or create) the singleton ConsoleChannel.
17
+ * @returns {ConsoleChannel}
18
+ */
19
+ function getConsoleChannel() {
20
+ if (!_console) _console = new ConsoleChannel();
21
+ return _console;
22
+ }
23
+
24
+ /**
25
+ * Get (or create) a LogChannel for the given file path.
26
+ * @param {string} logFile
27
+ * @returns {LogChannel}
28
+ */
29
+ function getLogChannel(logFile) {
30
+ if (!_logInstances.has(logFile)) {
31
+ _logInstances.set(logFile, new LogChannel(logFile));
32
+ }
33
+ return _logInstances.get(logFile);
34
+ }
35
+
36
+ /**
37
+ * Resolve channel instances for the given config value and log file.
38
+ * @param {'console'|'log'|'both'} channel
39
+ * @param {string} logFile
40
+ * @returns {import('./base.js').ErrorChannel[]}
41
+ */
42
+ export function getChannels(channel, logFile) {
43
+ switch (channel) {
44
+ case 'log':
45
+ return [getLogChannel(logFile)];
46
+ case 'both':
47
+ return [getConsoleChannel(), getLogChannel(logFile)];
48
+ case 'console':
49
+ default:
50
+ return [getConsoleChannel()];
51
+ }
52
+ }
53
+
54
+ /**
55
+ * Build the standard channel payload from available context and LLM result.
56
+ * @param {object} opts
57
+ * @param {string} opts.method
58
+ * @param {string} opts.path
59
+ * @param {Error|string|null|undefined} opts.originalError
60
+ * @param {{ snippets: Array<{ file: string, line: number, snippet: string }> }} opts.codeContext
61
+ * @param {number} opts.statusCode
62
+ * @param {string} opts.message
63
+ * @param {string} [opts.devInsight]
64
+ * @param {boolean} [opts.cached]
65
+ * @param {boolean} [opts.rateLimited]
66
+ * @returns {import('./base.js').ChannelPayload}
67
+ */
68
+ export function buildPayload({
69
+ method,
70
+ path,
71
+ originalError,
72
+ codeContext,
73
+ statusCode,
74
+ message,
75
+ devInsight,
76
+ cached,
77
+ rateLimited,
78
+ }) {
79
+ let errorSummary = null;
80
+ if (originalError instanceof Error) {
81
+ errorSummary = {
82
+ type: originalError.constructor?.name ?? 'Error',
83
+ message: originalError.message ?? '',
84
+ };
85
+ } else if (originalError != null) {
86
+ errorSummary = { type: 'string', message: String(originalError) };
87
+ }
88
+
89
+ return {
90
+ timestamp: new Date().toISOString(),
91
+ method: method ?? '',
92
+ path: path ?? '',
93
+ statusCode,
94
+ message,
95
+ ...(devInsight != null && { devInsight }),
96
+ error: errorSummary,
97
+ codeContext: codeContext ?? { snippets: [] },
98
+ ...(cached != null && { cached }),
99
+ ...(rateLimited != null && { rateLimited }),
100
+ };
101
+ }
102
+
103
+ /**
104
+ * Dispatch a payload to all resolved channels, swallowing individual channel errors.
105
+ * @param {import('./base.js').ErrorChannel[]} channels
106
+ * @param {import('./base.js').ChannelPayload} payload
107
+ * @returns {Promise<void>}
108
+ */
109
+ export async function dispatchToChannels(channels, payload) {
110
+ await Promise.allSettled(channels.map((ch) => ch.dispatch(payload)));
111
+ }
@@ -0,0 +1,27 @@
1
+ /**
2
+ * Log channel: appends a full JSONL entry to a log file for each LLM error result.
3
+ * Each line is a self-contained JSON object with all fields for post-mortem debugging.
4
+ */
5
+
6
+ import { appendFile } from 'node:fs/promises';
7
+ import { ErrorChannel } from './base.js';
8
+
9
/**
 * Channel that appends one self-contained JSONL entry per LLM error result,
 * suitable for post-mortem debugging.
 */
export class LogChannel extends ErrorChannel {
  /**
   * @param {string} logFile - Absolute or relative path to the JSONL log file.
   */
  constructor(logFile) {
    super();
    this.logFile = logFile;
  }

  /**
   * Append the payload as a single JSON line.
   * @param {object} payload
   * @returns {Promise<void>}
   */
  async dispatch(payload) {
    try {
      await appendFile(this.logFile, `${JSON.stringify(payload)}\n`, 'utf-8');
    } catch {
      // Deliberately best-effort: write failures (permissions, disk full)
      // must never crash the process or block error handling.
    }
  }
}
@@ -0,0 +1,102 @@
1
+ /**
2
+ * In-memory TTL cache for LLM error inference results.
3
+ * Key: file:line:errorMessage -- deduplicates repeated errors at the same throw site.
4
+ * Shared singleton across the process; configured from errors.llm.cacheTTL.
5
+ */
6
+
7
const SWEEP_INTERVAL_MS = 5 * 60 * 1000; // prune expired entries every 5 minutes

/**
 * In-memory TTL cache for LLM error inference results, keyed by throw site
 * (file:line) plus error text so repeated identical errors reuse one result.
 */
class LLMErrorCache {
  /**
   * @param {number} ttl - Time-to-live in milliseconds for each cached result.
   *   Non-positive (or missing) values fall back to one hour.
   */
  constructor(ttl) {
    this.ttl = ttl > 0 ? ttl : 3_600_000;
    /** @type {Map<string, { statusCode: number, message: string, devInsight?: string, cachedAt: number }>} */
    this._store = new Map();
    // Store the timer handle itself. The previous chained form
    // `setInterval(...).unref?.() ?? null` stored unref()'s return value, so
    // on platforms where timers have no unref() the handle was discarded and
    // the interval could never be cleared.
    this._sweepTimer = setInterval(() => this._sweep(), SWEEP_INTERVAL_MS);
    // Don't keep the process alive just for cache sweeping (Node-only API).
    this._sweepTimer.unref?.();
  }

  /**
   * Build a cache key from code context and error.
   * Uses the first (throw-site) snippet's file and line plus the error text.
   * @param {{ snippets?: Array<{ file: string, line: number }> }} codeContext
   * @param {Error|string|undefined} error
   * @returns {string}
   */
  buildKey(codeContext, error) {
    const snippet = codeContext?.snippets?.[0];
    const location = snippet ? `${snippet.file}:${snippet.line}` : 'unknown';
    let errText = '';
    if (error instanceof Error) {
      errText = error.message ?? '';
    } else if (error != null) {
      errText = String(error);
    }
    return `${location}:${errText}`;
  }

  /**
   * Get a cached result. Returns null if missing or expired (and eagerly
   * prunes the expired entry).
   * @param {string} key
   * @returns {{ statusCode: number, message: string, devInsight?: string } | null}
   */
  get(key) {
    const entry = this._store.get(key);
    if (!entry) return null;
    if (Date.now() - entry.cachedAt > this.ttl) {
      this._store.delete(key);
      return null;
    }
    // Strip the internal bookkeeping field before handing the result out.
    const { cachedAt: _removed, ...result } = entry;
    return result;
  }

  /**
   * Store a result in the cache, stamped with the current time.
   * @param {string} key
   * @param {{ statusCode: number, message: string, devInsight?: string }} result
   */
  set(key, result) {
    this._store.set(key, { ...result, cachedAt: Date.now() });
  }

  /**
   * Remove all expired entries (called periodically by the sweep timer).
   */
  _sweep() {
    const now = Date.now();
    for (const [key, entry] of this._store) {
      if (now - entry.cachedAt > this.ttl) {
        this._store.delete(key);
      }
    }
  }

  /**
   * Stop the background sweep timer and drop all entries. Safe to call more
   * than once. Backward-compatible addition: without it the interval (even
   * unref'd) lingers for the life of the process.
   */
  dispose() {
    if (this._sweepTimer != null) {
      clearInterval(this._sweepTimer);
      this._sweepTimer = null;
    }
    this._store.clear();
  }

  /**
   * Number of entries currently in the cache (including potentially stale
   * ones not yet swept).
   * @returns {number}
   */
  get size() {
    return this._store.size;
  }
}
85
+
86
/** @type {LLMErrorCache|null} */
let _instance = null;

/**
 * Get (or create) the singleton cache.
 * A changed ttl discards the old instance and builds a fresh one.
 * @param {number} ttl
 * @returns {LLMErrorCache}
 */
export function getCache(ttl) {
  const reusable = _instance !== null && _instance.ttl === ttl;
  if (!reusable) {
    _instance = new LLMErrorCache(ttl);
  }
  return _instance;
}
101
+
102
+ export { LLMErrorCache };
@@ -3,11 +3,15 @@
3
3
  * returns statusCode and message (and optionally devInsight in non-production).
4
4
  * Uses shared lib/llm with errors.llm config. Developers do not pass an error object;
5
5
  * the LLM infers from the code where ammo.throw() was called.
6
+ *
7
+ * Flow: cache check -> rate limit check -> LLM call -> record rate -> store cache -> return.
6
8
  */
7
9
 
8
10
  import { createProvider } from '../../lib/llm/index.js';
9
11
  import { extractJSON } from '../../lib/llm/parse.js';
10
12
  import { getErrorsLlmConfig } from '../../utils/errors-llm-config.js';
13
+ import { getRateLimiter } from './llm-rate-limiter.js';
14
+ import { getCache } from './llm-cache.js';
11
15
 
12
16
  const DEFAULT_STATUS = 500;
13
17
  const DEFAULT_MESSAGE = 'Internal Server Error';
@@ -24,7 +28,8 @@ const DEFAULT_MESSAGE = 'Internal Server Error';
24
28
  * @returns {string}
25
29
  */
26
30
  function buildPrompt(context) {
27
- const { codeContext, method, path, includeDevInsight, messageType, error } = context;
31
+ const { codeContext, method, path, includeDevInsight, messageType, error } =
32
+ context;
28
33
  const forDeveloper = messageType === 'developer';
29
34
 
30
35
  const requestPart = [method, path].filter(Boolean).length
@@ -35,7 +40,10 @@ function buildPrompt(context) {
35
40
  if (codeContext?.snippets?.length) {
36
41
  codePart = codeContext.snippets
37
42
  .map((s, i) => {
38
- const label = i === 0 ? 'Call site (where ammo.throw() was invoked)' : `Upstream caller ${i}`;
43
+ const label =
44
+ i === 0
45
+ ? 'Call site (where ammo.throw() was invoked)'
46
+ : `Upstream caller ${i}`;
39
47
  return `--- ${label}: ${s.file} (line ${s.line}) ---\n${s.snippet}`;
40
48
  })
41
49
  .join('\n\n');
@@ -80,27 +88,65 @@ JSON:`;
80
88
 
81
89
  /**
82
90
  * Infer HTTP statusCode and message (and optionally devInsight) from code context using the LLM.
83
- * Uses errors.llm config (getErrorsLlmConfig). Call only when errors.llm.enabled is true and config is valid.
84
- * The primary input is codeContext (surrounding + upstream/downstream snippets); error is optional.
91
+ * Checks cache first, then rate limit. On success stores result in cache.
85
92
  *
86
93
  * @param {object} context - Context for the prompt.
87
- * @param {{ snippets: Array<{ file: string, line: number, snippet: string }> }} context.codeContext - Source snippets with line numbers (from captureCodeContext).
88
- * @param {string} [context.method] - HTTP method.
89
- * @param {string} [context.path] - Request path.
90
- * @param {boolean} [context.includeDevInsight] - In non-production, dev insight is included by default; set to false to disable.
91
- * @param {'endUser'|'developer'} [context.messageType] - Override config: 'endUser' or 'developer'. Default from errors.llm.messageType.
92
- * @param {string|Error|undefined} [context.error] - Optional error if the caller passed one (secondary signal).
93
- * @returns {Promise<{ statusCode: number, message: string, devInsight?: string }>}
94
+ * @param {{ snippets: Array<{ file: string, line: number, snippet: string }> }} context.codeContext
95
+ * @param {string} [context.method]
96
+ * @param {string} [context.path]
97
+ * @param {boolean} [context.includeDevInsight]
98
+ * @param {'endUser'|'developer'} [context.messageType]
99
+ * @param {string|Error|undefined} [context.error]
100
+ * @returns {Promise<{ statusCode: number, message: string, devInsight?: string, cached?: boolean, rateLimited?: boolean }>}
94
101
  */
95
102
  export async function inferErrorFromContext(context) {
96
103
  const config = getErrorsLlmConfig();
97
- const { baseURL, apiKey, model, messageType: configMessageType } = config;
98
- const provider = createProvider({ baseURL, apiKey, model });
104
+ const {
105
+ baseURL,
106
+ apiKey,
107
+ model,
108
+ messageType: configMessageType,
109
+ timeout,
110
+ rateLimit,
111
+ cache: cacheEnabled,
112
+ cacheTTL,
113
+ } = config;
99
114
 
100
115
  const isProduction = process.env.NODE_ENV === 'production';
101
- const includeDevInsight = !isProduction && context.includeDevInsight !== false;
116
+ const includeDevInsight =
117
+ context.includeDevInsight !== false
118
+ ? context.forceDevInsight
119
+ ? true
120
+ : !isProduction
121
+ : false;
102
122
  const messageType = context.messageType ?? configMessageType;
103
123
 
124
+ // 1. Cache check
125
+ if (cacheEnabled) {
126
+ const cache = getCache(cacheTTL);
127
+ const key = cache.buildKey(context.codeContext, context.error);
128
+ const cached = cache.get(key);
129
+ if (cached) {
130
+ return { ...cached, cached: true };
131
+ }
132
+ }
133
+
134
+ // 2. Rate limit check
135
+ const limiter = getRateLimiter(rateLimit);
136
+ if (!limiter.canCall()) {
137
+ return {
138
+ statusCode: DEFAULT_STATUS,
139
+ message: DEFAULT_MESSAGE,
140
+ ...(includeDevInsight && {
141
+ devInsight: 'LLM rate limit exceeded — error was not enhanced.',
142
+ }),
143
+ rateLimited: true,
144
+ };
145
+ }
146
+
147
+ // 3. LLM call
148
+ const provider = createProvider({ baseURL, apiKey, model, timeout });
149
+
104
150
  const prompt = buildPrompt({
105
151
  codeContext: context.codeContext,
106
152
  method: context.method,
@@ -111,6 +157,10 @@ export async function inferErrorFromContext(context) {
111
157
  });
112
158
 
113
159
  const { content } = await provider.analyze(prompt);
160
+
161
+ // 4. Record the call against the rate limit
162
+ limiter.record();
163
+
114
164
  const parsed = extractJSON(content);
115
165
 
116
166
  if (!parsed || typeof parsed !== 'object') {
@@ -132,9 +182,20 @@ export async function inferErrorFromContext(context) {
132
182
  : DEFAULT_MESSAGE;
133
183
 
134
184
  const result = { statusCode, message };
135
- if (includeDevInsight && typeof parsed.devInsight === 'string' && parsed.devInsight.trim()) {
185
+ if (
186
+ includeDevInsight &&
187
+ typeof parsed.devInsight === 'string' &&
188
+ parsed.devInsight.trim()
189
+ ) {
136
190
  result.devInsight = parsed.devInsight.trim();
137
191
  }
138
192
 
193
+ // 5. Store in cache
194
+ if (cacheEnabled) {
195
+ const cache = getCache(cacheTTL);
196
+ const key = cache.buildKey(context.codeContext, context.error);
197
+ cache.set(key, result);
198
+ }
199
+
139
200
  return result;
140
201
  }
@@ -0,0 +1,72 @@
1
+ /**
2
+ * In-memory sliding window rate limiter for LLM error inference calls.
3
+ * Tracks LLM call timestamps in the last 60 seconds.
4
+ * Shared singleton across the process; configured from errors.llm.rateLimit.
5
+ */
6
+
7
/**
 * Sliding-window rate limiter: allows at most maxPerMinute LLM calls within
 * any 60-second window, tracked by call timestamps.
 */
class LLMRateLimiter {
  /**
   * @param {number} maxPerMinute - Maximum LLM calls allowed per 60-second
   *   window. Non-positive values fall back to 10; fractions are floored.
   */
  constructor(maxPerMinute) {
    this.maxPerMinute = maxPerMinute > 0 ? Math.floor(maxPerMinute) : 10;
    /** @type {number[]} timestamps of recent LLM calls (ms since epoch) */
    this._timestamps = [];
  }

  /**
   * Drop timestamps older than the 60-second window. Timestamps are appended
   * in order, so only a leading run can be stale.
   */
  _prune() {
    const cutoff = Date.now() - 60_000;
    const firstLive = this._timestamps.findIndex((t) => t > cutoff);
    if (firstLive === -1) {
      // Everything is stale.
      this._timestamps.length = 0;
    } else if (firstLive > 0) {
      this._timestamps.splice(0, firstLive);
    }
  }

  /**
   * Whether an LLM call is allowed under the current rate.
   * @returns {boolean}
   */
  canCall() {
    this._prune();
    return this._timestamps.length < this.maxPerMinute;
  }

  /**
   * Record that an LLM call was made right now.
   */
  record() {
    this._prune();
    this._timestamps.push(Date.now());
  }

  /**
   * How many calls remain in the current window.
   * @returns {number}
   */
  remaining() {
    this._prune();
    return Math.max(0, this.maxPerMinute - this._timestamps.length);
  }
}
55
+
56
/** @type {LLMRateLimiter|null} */
let _instance = null;

/**
 * Get (or create) the singleton rate limiter.
 * A changed maxPerMinute discards the old instance and builds a fresh one.
 * @param {number} maxPerMinute
 * @returns {LLMRateLimiter}
 */
export function getRateLimiter(maxPerMinute) {
  const reusable = _instance !== null && _instance.maxPerMinute === maxPerMinute;
  if (!reusable) {
    _instance = new LLMRateLimiter(maxPerMinute);
  }
  return _instance;
}
71
+
72
+ export { LLMRateLimiter };
package/server/handler.js CHANGED
@@ -112,7 +112,8 @@ const executeChain = async (target, ammo) => {
112
112
  * @returns {Promise<void>}
113
113
  */
114
114
  const errorHandler = async (ammo, err) => {
115
- if (env('LOG_EXCEPTIONS')) errorLogger.error(err);
115
+ // Pass false as second arg to suppress tej-logger's Console.trace() double-stack output.
116
+ if (env('LOG_EXCEPTIONS')) errorLogger.error(err, false);
116
117
 
117
118
  const result = ammo.throw(err);
118
119
  if (result != null && typeof result.then === 'function') {
@@ -165,7 +166,8 @@ const handler = async (req, res) => {
165
166
  }
166
167
  }
167
168
 
168
- // Add route parameters to ammo.payload
169
+ // Add route parameters to ammo.params and ammo.payload
170
+ ammo.params = match.params || {};
169
171
  if (match.params && Object.keys(match.params).length > 0) {
170
172
  Object.assign(ammo.payload, match.params);
171
173
  }
@@ -94,16 +94,16 @@ class TargetRegistry {
94
94
  const patternSegments = pattern.split('/').filter((s) => s.length > 0);
95
95
  const urlSegments = url.split('/').filter((s) => s.length > 0);
96
96
 
97
- // Must have same number of segments
98
- if (patternSegments.length !== urlSegments.length) {
99
- return null;
100
- }
101
-
102
97
  // If both are empty (root paths), they match
103
98
  if (patternSegments.length === 0 && urlSegments.length === 0) {
104
99
  return {};
105
100
  }
106
101
 
102
+ // Must have same number of segments
103
+ if (patternSegments.length !== urlSegments.length) {
104
+ return null;
105
+ }
106
+
107
107
  const params = {};
108
108
 
109
109
  // Match each segment
@@ -133,7 +133,7 @@ class TargetRegistry {
133
133
  */
134
134
  getAllEndpoints(options = {}) {
135
135
  const grouped =
136
- typeof options === 'boolean' ? options : (options && options.grouped);
136
+ typeof options === 'boolean' ? options : options && options.grouped;
137
137
  const detailed =
138
138
  typeof options === 'object' && options && options.detailed === true;
139
139
 
package/te.js CHANGED
@@ -10,7 +10,10 @@ import dbManager from './database/index.js';
10
10
  import { loadConfigFile, standardizeObj } from './utils/configuration.js';
11
11
 
12
12
  import targetHandler from './server/handler.js';
13
- import { getErrorsLlmConfig, validateErrorsLlmAtTakeoff } from './utils/errors-llm-config.js';
13
+ import {
14
+ getErrorsLlmConfig,
15
+ validateErrorsLlmAtTakeoff,
16
+ } from './utils/errors-llm-config.js';
14
17
  import path from 'node:path';
15
18
  import { pathToFileURL } from 'node:url';
16
19
  import { readFile } from 'node:fs/promises';
@@ -133,10 +136,9 @@ class Tejas {
133
136
  ? path.join(parentPath, file.name)
134
137
  : path.join(baseDir, parentPath, file.name);
135
138
  const relativePath = path.relative(baseDir, fullPath);
136
- const groupId = relativePath
137
- .replace(/\.target\.js$/i, '')
138
- .replace(/\\/g, '/')
139
- || 'index';
139
+ const groupId =
140
+ relativePath.replace(/\.target\.js$/i, '').replace(/\\/g, '/') ||
141
+ 'index';
140
142
  targetRegistry.setCurrentSourceGroup(groupId);
141
143
  try {
142
144
  await import(pathToFileURL(fullPath).href);
@@ -305,14 +307,21 @@ class Tejas {
305
307
 
306
308
  /**
307
309
  * Enables LLM-inferred error codes and messages for ammo.throw() and framework-caught errors.
308
- * Call before takeoff(). Remaining options (baseURL, apiKey, model, messageType) can come from
309
- * config, or from env/tejas.config.json (LLM_* / ERRORS_LLM_*). Validation runs at takeoff.
310
+ * Call before takeoff(). Remaining options can come from env/tejas.config.json (LLM_* / ERRORS_LLM_*).
311
+ * Validation runs at takeoff.
310
312
  *
311
313
  * @param {Object} [config] - Optional errors.llm overrides
312
314
  * @param {string} [config.baseURL] - LLM provider endpoint (e.g. https://api.openai.com/v1)
313
315
  * @param {string} [config.apiKey] - LLM provider API key
314
316
  * @param {string} [config.model] - Model name (e.g. gpt-4o-mini)
315
317
  * @param {'endUser'|'developer'} [config.messageType] - Default message tone
318
+ * @param {'sync'|'async'} [config.mode] - 'sync' blocks the response until LLM returns (default); 'async' responds immediately with 500 and dispatches LLM result to a channel
319
+ * @param {number} [config.timeout] - LLM fetch timeout in milliseconds (default 10000)
320
+ * @param {'console'|'log'|'both'} [config.channel] - Output channel for async mode results (default 'console')
321
+ * @param {string} [config.logFile] - Path to JSONL log file used by 'log' and 'both' channels (default './errors.llm.log')
322
+ * @param {number} [config.rateLimit] - Max LLM calls per minute across all requests (default 10)
323
+ * @param {boolean} [config.cache] - Cache LLM results by throw site + error message to avoid repeated calls (default true)
324
+ * @param {number} [config.cacheTTL] - How long cached results are reused in milliseconds (default 3600000 = 1 hour)
316
325
  * @returns {Tejas} The Tejas instance for chaining
317
326
  *
318
327
  * @example
@@ -322,6 +331,10 @@ class Tejas {
322
331
  * @example
323
332
  * app.withLLMErrors({ baseURL: 'https://api.openai.com/v1', apiKey: process.env.OPENAI_KEY, model: 'gpt-4o-mini' });
324
333
  * app.takeoff();
334
+ *
335
+ * @example
336
+ * app.withLLMErrors({ mode: 'async', channel: 'both', rateLimit: 20 });
337
+ * app.takeoff();
325
338
  */
326
339
  withLLMErrors(config) {
327
340
  setEnv('ERRORS_LLM_ENABLED', true);
@@ -329,7 +342,17 @@ class Tejas {
329
342
  if (config.baseURL != null) setEnv('ERRORS_LLM_BASE_URL', config.baseURL);
330
343
  if (config.apiKey != null) setEnv('ERRORS_LLM_API_KEY', config.apiKey);
331
344
  if (config.model != null) setEnv('ERRORS_LLM_MODEL', config.model);
332
- if (config.messageType != null) setEnv('ERRORS_LLM_MESSAGE_TYPE', config.messageType);
345
+ if (config.messageType != null)
346
+ setEnv('ERRORS_LLM_MESSAGE_TYPE', config.messageType);
347
+ if (config.mode != null) setEnv('ERRORS_LLM_MODE', config.mode);
348
+ if (config.timeout != null) setEnv('ERRORS_LLM_TIMEOUT', config.timeout);
349
+ if (config.channel != null) setEnv('ERRORS_LLM_CHANNEL', config.channel);
350
+ if (config.logFile != null) setEnv('ERRORS_LLM_LOG_FILE', config.logFile);
351
+ if (config.rateLimit != null)
352
+ setEnv('ERRORS_LLM_RATE_LIMIT', config.rateLimit);
353
+ if (config.cache != null) setEnv('ERRORS_LLM_CACHE', config.cache);
354
+ if (config.cacheTTL != null)
355
+ setEnv('ERRORS_LLM_CACHE_TTL', config.cacheTTL);
333
356
  }
334
357
  return this;
335
358
  }
@@ -401,16 +424,21 @@ class Tejas {
401
424
  * app.takeoff();
402
425
  */
403
426
  serveDocs(config = {}) {
404
- const specPath = path.resolve(process.cwd(), config.specPath || './openapi.json');
427
+ const specPath = path.resolve(
428
+ process.cwd(),
429
+ config.specPath || './openapi.json',
430
+ );
405
431
  const { scalarConfig } = config;
406
432
  const getSpec = async () => {
407
433
  const content = await readFile(specPath, 'utf8');
408
434
  return JSON.parse(content);
409
435
  };
410
- registerDocRoutes({ getSpec, specUrl: '/docs/openapi.json', scalarConfig }, targetRegistry);
436
+ registerDocRoutes(
437
+ { getSpec, specUrl: '/docs/openapi.json', scalarConfig },
438
+ targetRegistry,
439
+ );
411
440
  return this;
412
441
  }
413
-
414
442
  }
415
443
 
416
444
  const listAllEndpoints = (grouped = false) => {