te.js 2.1.4 → 2.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,64 @@
1
+ /**
2
+ * Console channel: pretty-prints LLM error results to the terminal using ansi-colors.
3
+ */
4
+
5
+ import ansi from 'ansi-colors';
6
+ import { ErrorChannel } from './base.js';
7
+
8
+ const { red, yellow, cyan, white, bold, dim, italic } = ansi;
9
+
10
+ /**
11
+ * Format an HTTP status code with color (red for 5xx, yellow for 4xx, white for others).
12
+ * @param {number} statusCode
13
+ * @returns {string}
14
+ */
15
/**
 * Colorize an HTTP status code: red for server errors (5xx), yellow for
 * client errors (4xx), white for anything else. The code is always bold.
 * @param {number} statusCode
 * @returns {string}
 */
function colorStatus(statusCode) {
  const text = bold(String(statusCode));
  if (statusCode >= 500) return red(text);
  return statusCode >= 400 ? yellow(text) : white(text);
}
20
+
21
export class ConsoleChannel extends ErrorChannel {
  /**
   * Pretty-print a single LLM error payload to stderr.
   * @param {import('./base.js').ChannelPayload} payload
   * @returns {Promise<void>}
   */
  async dispatch(payload) {
    const when = dim(italic(new Date(payload.timestamp).toLocaleTimeString()));
    const routeLabel = white(`${payload.method} ${payload.path}`);
    const statusLabel = colorStatus(payload.statusCode);

    // Optional badges appended to the headline.
    const badges = [
      payload.cached ? cyan('[CACHED]') : null,
      payload.rateLimited ? yellow('[RATE LIMITED]') : null,
    ].filter(Boolean);
    const badgeSuffix = badges.length ? ' ' + badges.join(' ') : '';

    const out = [
      ``,
      `${when} ${red('[LLM ERROR]')} ${routeLabel} → ${statusLabel}${badgeSuffix}`,
      `  ${white(payload.message)}`,
    ];

    if (payload.devInsight) {
      out.push(`  ${cyan('⟶')} ${cyan(payload.devInsight)}`);
    }

    // Show the raw error only when one exists and the result wasn't a
    // rate-limit fallback (which carries no meaningful original error).
    if (payload.error?.message && !payload.rateLimited) {
      const typePrefix = payload.error.type ? `${payload.error.type}: ` : '';
      out.push(`  ${dim(`original: ${typePrefix}${payload.error.message}`)}`);
    }

    out.push('');
    process.stderr.write(out.join('\n'));
  }
}
@@ -0,0 +1,111 @@
1
+ /**
2
+ * Channel registry for LLM error output.
3
+ * Maps channel config values ('console' | 'log' | 'both') to channel instances.
4
+ */
5
+
6
+ import { ConsoleChannel } from './console.js';
7
+ import { LogChannel } from './log.js';
8
+
9
+ /** @type {ConsoleChannel|null} */
10
+ let _console = null;
11
+
12
+ /** @type {Map<string, LogChannel>} */
13
+ const _logInstances = new Map();
14
+
15
/**
 * Lazily create and return the process-wide ConsoleChannel singleton.
 * @returns {ConsoleChannel}
 */
function getConsoleChannel() {
  _console ??= new ConsoleChannel();
  return _console;
}
23
+
24
/**
 * Return the LogChannel for a given file path, creating and memoizing it on
 * first use so each log file gets exactly one channel instance.
 * @param {string} logFile
 * @returns {LogChannel}
 */
function getLogChannel(logFile) {
  let channel = _logInstances.get(logFile);
  if (!channel) {
    channel = new LogChannel(logFile);
    _logInstances.set(logFile, channel);
  }
  return channel;
}
35
+
36
/**
 * Resolve channel instances for the configured output mode:
 * 'log' -> file only, 'both' -> console + file, 'console' (or anything
 * unrecognized) -> console only.
 * @param {'console'|'log'|'both'} channel
 * @param {string} logFile
 * @returns {import('./base.js').ErrorChannel[]}
 */
export function getChannels(channel, logFile) {
  if (channel === 'log') {
    return [getLogChannel(logFile)];
  }
  if (channel === 'both') {
    return [getConsoleChannel(), getLogChannel(logFile)];
  }
  return [getConsoleChannel()];
}
53
+
54
/**
 * Build the standard channel payload from available context and LLM result.
 * Optional flags (devInsight, cached, rateLimited) are only present on the
 * payload when they were actually supplied (non-nullish).
 * @param {object} opts
 * @param {string} opts.method
 * @param {string} opts.path
 * @param {Error|string|null|undefined} opts.originalError
 * @param {{ snippets: Array<{ file: string, line: number, snippet: string }> }} opts.codeContext
 * @param {number} opts.statusCode
 * @param {string} opts.message
 * @param {string} [opts.devInsight]
 * @param {boolean} [opts.cached]
 * @param {boolean} [opts.rateLimited]
 * @returns {import('./base.js').ChannelPayload}
 */
export function buildPayload({
  method,
  path,
  originalError,
  codeContext,
  statusCode,
  message,
  devInsight,
  cached,
  rateLimited,
}) {
  // Normalize the original error into a plain { type, message } summary,
  // or null when no error was supplied.
  let errorSummary = null;
  if (originalError instanceof Error) {
    errorSummary = {
      type: originalError.constructor?.name ?? 'Error',
      message: originalError.message ?? '',
    };
  } else if (originalError != null) {
    errorSummary = { type: 'string', message: String(originalError) };
  }

  // Assemble keys in a fixed order (matters for stable JSONL log lines).
  const payload = {
    timestamp: new Date().toISOString(),
    method: method ?? '',
    path: path ?? '',
    statusCode,
    message,
  };
  if (devInsight != null) payload.devInsight = devInsight;
  payload.error = errorSummary;
  payload.codeContext = codeContext ?? { snippets: [] };
  if (cached != null) payload.cached = cached;
  if (rateLimited != null) payload.rateLimited = rateLimited;
  return payload;
}
102
+
103
/**
 * Dispatch a payload to every resolved channel concurrently. Individual
 * channel failures are swallowed (allSettled) so one broken channel never
 * prevents the others from receiving the payload.
 * @param {import('./base.js').ErrorChannel[]} channels
 * @param {import('./base.js').ChannelPayload} payload
 * @returns {Promise<void>}
 */
export async function dispatchToChannels(channels, payload) {
  const deliveries = channels.map((channel) => channel.dispatch(payload));
  await Promise.allSettled(deliveries);
}
@@ -0,0 +1,27 @@
1
+ /**
2
+ * Log channel: appends a full JSONL entry to a log file for each LLM error result.
3
+ * Each line is a self-contained JSON object with all fields for post-mortem debugging.
4
+ */
5
+
6
+ import { appendFile } from 'node:fs/promises';
7
+ import { ErrorChannel } from './base.js';
8
+
9
export class LogChannel extends ErrorChannel {
  /**
   * @param {string} logFile - Absolute or relative path to the JSONL log file.
   */
  constructor(logFile) {
    super();
    this.logFile = logFile;
  }

  /**
   * Append the payload as one self-contained JSON line. Write failures
   * (permissions, disk full, missing directory) are deliberately ignored so
   * logging can never crash the process or block error handling.
   * @param {import('./base.js').ChannelPayload} payload
   * @returns {Promise<void>}
   */
  async dispatch(payload) {
    const entry = `${JSON.stringify(payload)}\n`;
    try {
      await appendFile(this.logFile, entry, 'utf-8');
    } catch {
      // Best-effort logging: swallow write failures.
    }
  }
}
@@ -0,0 +1,102 @@
1
+ /**
2
+ * In-memory TTL cache for LLM error inference results.
3
+ * Key: file:line:errorMessage -- deduplicates repeated errors at the same throw site.
4
+ * Shared singleton across the process; configured from errors.llm.cacheTTL.
5
+ */
6
+
7
const SWEEP_INTERVAL_MS = 5 * 60 * 1000; // prune expired entries every 5 minutes

class LLMErrorCache {
  /**
   * @param {number} ttl - Time-to-live in milliseconds for each cached result.
   *   Values that are not > 0 fall back to one hour.
   */
  constructor(ttl) {
    this.ttl = ttl > 0 ? ttl : 3_600_000;
    /** @type {Map<string, { statusCode: number, message: string, devInsight?: string, cachedAt: number }>} */
    this._store = new Map();
    // Fix: keep the actual interval handle so the sweep can be cancelled.
    // The previous expression (`setInterval(...).unref?.() ?? null`) stored
    // `unref()`'s return value, which is undefined outside Node, leaving the
    // timer uncancelable.
    this._sweepTimer = setInterval(() => this._sweep(), SWEEP_INTERVAL_MS);
    // unref (Node-only) lets the process exit while a sweep is scheduled.
    this._sweepTimer.unref?.();
  }

  /**
   * Build a cache key from code context and error.
   * Uses the first (throw-site) snippet's file and line + error text.
   * @param {{ snippets?: Array<{ file: string, line: number }> }} codeContext
   * @param {Error|string|undefined} error
   * @returns {string}
   */
  buildKey(codeContext, error) {
    const snippet = codeContext?.snippets?.[0];
    const location = snippet ? `${snippet.file}:${snippet.line}` : 'unknown';
    let errText = '';
    if (error instanceof Error) {
      errText = error.message ?? '';
    } else if (error != null) {
      errText = String(error);
    }
    return `${location}:${errText}`;
  }

  /**
   * Get a cached result. Returns null if missing or expired (and prunes the
   * expired entry eagerly so stale data never lingers on a hot key).
   * @param {string} key
   * @returns {{ statusCode: number, message: string, devInsight?: string } | null}
   */
  get(key) {
    const entry = this._store.get(key);
    if (!entry) return null;
    if (Date.now() - entry.cachedAt > this.ttl) {
      this._store.delete(key);
      return null;
    }
    // Strip the internal bookkeeping field before returning.
    const { cachedAt: _removed, ...result } = entry;
    return result;
  }

  /**
   * Store a result in the cache, stamped with the current time.
   * @param {string} key
   * @param {{ statusCode: number, message: string, devInsight?: string }} result
   */
  set(key, result) {
    this._store.set(key, { ...result, cachedAt: Date.now() });
  }

  /**
   * Remove all expired entries (called periodically by the sweep timer).
   */
  _sweep() {
    const now = Date.now();
    for (const [key, entry] of this._store) {
      if (now - entry.cachedAt > this.ttl) {
        this._store.delete(key);
      }
    }
  }

  /**
   * Stop the periodic sweep. Safe to call more than once.
   */
  dispose() {
    if (this._sweepTimer != null) {
      clearInterval(this._sweepTimer);
      this._sweepTimer = null;
    }
  }

  /**
   * Number of entries currently in the cache (including potentially stale
   * ones not yet swept).
   * @returns {number}
   */
  get size() {
    return this._store.size;
  }
}
85
+
86
+ /** @type {LLMErrorCache|null} */
87
+ let _instance = null;
88
+
89
/**
 * Get (or create) the process-wide singleton cache.
 * Re-initializes when ttl changes; the previous instance's sweep interval is
 * cleared first so re-configuration does not leak a timer sweeping a dead Map.
 * @param {number} ttl
 * @returns {LLMErrorCache}
 */
export function getCache(ttl) {
  if (!_instance || _instance.ttl !== ttl) {
    // Stop the old instance's periodic sweep before dropping the reference.
    if (_instance?._sweepTimer != null) clearInterval(_instance._sweepTimer);
    _instance = new LLMErrorCache(ttl);
  }
  return _instance;
}
101
+
102
+ export { LLMErrorCache };
@@ -3,11 +3,15 @@
3
3
  * returns statusCode and message (and optionally devInsight in non-production).
4
4
  * Uses shared lib/llm with errors.llm config. Developers do not pass an error object;
5
5
  * the LLM infers from the code where ammo.throw() was called.
6
+ *
7
+ * Flow: cache check -> rate limit check -> LLM call -> record rate -> store cache -> return.
6
8
  */
7
9
 
8
10
  import { createProvider } from '../../lib/llm/index.js';
9
11
  import { extractJSON } from '../../lib/llm/parse.js';
10
12
  import { getErrorsLlmConfig } from '../../utils/errors-llm-config.js';
13
+ import { getRateLimiter } from './llm-rate-limiter.js';
14
+ import { getCache } from './llm-cache.js';
11
15
 
12
16
  const DEFAULT_STATUS = 500;
13
17
  const DEFAULT_MESSAGE = 'Internal Server Error';
@@ -24,7 +28,8 @@ const DEFAULT_MESSAGE = 'Internal Server Error';
24
28
  * @returns {string}
25
29
  */
26
30
  function buildPrompt(context) {
27
- const { codeContext, method, path, includeDevInsight, messageType, error } = context;
31
+ const { codeContext, method, path, includeDevInsight, messageType, error } =
32
+ context;
28
33
  const forDeveloper = messageType === 'developer';
29
34
 
30
35
  const requestPart = [method, path].filter(Boolean).length
@@ -35,7 +40,10 @@ function buildPrompt(context) {
35
40
  if (codeContext?.snippets?.length) {
36
41
  codePart = codeContext.snippets
37
42
  .map((s, i) => {
38
- const label = i === 0 ? 'Call site (where ammo.throw() was invoked)' : `Upstream caller ${i}`;
43
+ const label =
44
+ i === 0
45
+ ? 'Call site (where ammo.throw() was invoked)'
46
+ : `Upstream caller ${i}`;
39
47
  return `--- ${label}: ${s.file} (line ${s.line}) ---\n${s.snippet}`;
40
48
  })
41
49
  .join('\n\n');
@@ -80,27 +88,65 @@ JSON:`;
80
88
 
81
89
  /**
82
90
  * Infer HTTP statusCode and message (and optionally devInsight) from code context using the LLM.
83
- * Uses errors.llm config (getErrorsLlmConfig). Call only when errors.llm.enabled is true and config is valid.
84
- * The primary input is codeContext (surrounding + upstream/downstream snippets); error is optional.
91
+ * Checks cache first, then rate limit. On success stores result in cache.
85
92
  *
86
93
  * @param {object} context - Context for the prompt.
87
- * @param {{ snippets: Array<{ file: string, line: number, snippet: string }> }} context.codeContext - Source snippets with line numbers (from captureCodeContext).
88
- * @param {string} [context.method] - HTTP method.
89
- * @param {string} [context.path] - Request path.
90
- * @param {boolean} [context.includeDevInsight] - In non-production, dev insight is included by default; set to false to disable.
91
- * @param {'endUser'|'developer'} [context.messageType] - Override config: 'endUser' or 'developer'. Default from errors.llm.messageType.
92
- * @param {string|Error|undefined} [context.error] - Optional error if the caller passed one (secondary signal).
93
- * @returns {Promise<{ statusCode: number, message: string, devInsight?: string }>}
94
+ * @param {{ snippets: Array<{ file: string, line: number, snippet: string }> }} context.codeContext
95
+ * @param {string} [context.method]
96
+ * @param {string} [context.path]
97
+ * @param {boolean} [context.includeDevInsight]
98
+ * @param {'endUser'|'developer'} [context.messageType]
99
+ * @param {string|Error|undefined} [context.error]
100
+ * @returns {Promise<{ statusCode: number, message: string, devInsight?: string, cached?: boolean, rateLimited?: boolean }>}
94
101
  */
95
102
  export async function inferErrorFromContext(context) {
96
103
  const config = getErrorsLlmConfig();
97
- const { baseURL, apiKey, model, messageType: configMessageType } = config;
98
- const provider = createProvider({ baseURL, apiKey, model });
104
+ const {
105
+ baseURL,
106
+ apiKey,
107
+ model,
108
+ messageType: configMessageType,
109
+ timeout,
110
+ rateLimit,
111
+ cache: cacheEnabled,
112
+ cacheTTL,
113
+ } = config;
99
114
 
100
115
  const isProduction = process.env.NODE_ENV === 'production';
101
- const includeDevInsight = !isProduction && context.includeDevInsight !== false;
116
+ const includeDevInsight =
117
+ context.includeDevInsight !== false
118
+ ? context.forceDevInsight
119
+ ? true
120
+ : !isProduction
121
+ : false;
102
122
  const messageType = context.messageType ?? configMessageType;
103
123
 
124
+ // 1. Cache check
125
+ if (cacheEnabled) {
126
+ const cache = getCache(cacheTTL);
127
+ const key = cache.buildKey(context.codeContext, context.error);
128
+ const cached = cache.get(key);
129
+ if (cached) {
130
+ return { ...cached, cached: true };
131
+ }
132
+ }
133
+
134
+ // 2. Rate limit check
135
+ const limiter = getRateLimiter(rateLimit);
136
+ if (!limiter.canCall()) {
137
+ return {
138
+ statusCode: DEFAULT_STATUS,
139
+ message: DEFAULT_MESSAGE,
140
+ ...(includeDevInsight && {
141
+ devInsight: 'LLM rate limit exceeded — error was not enhanced.',
142
+ }),
143
+ rateLimited: true,
144
+ };
145
+ }
146
+
147
+ // 3. LLM call
148
+ const provider = createProvider({ baseURL, apiKey, model, timeout });
149
+
104
150
  const prompt = buildPrompt({
105
151
  codeContext: context.codeContext,
106
152
  method: context.method,
@@ -111,6 +157,10 @@ export async function inferErrorFromContext(context) {
111
157
  });
112
158
 
113
159
  const { content } = await provider.analyze(prompt);
160
+
161
+ // 4. Record the call against the rate limit
162
+ limiter.record();
163
+
114
164
  const parsed = extractJSON(content);
115
165
 
116
166
  if (!parsed || typeof parsed !== 'object') {
@@ -132,9 +182,20 @@ export async function inferErrorFromContext(context) {
132
182
  : DEFAULT_MESSAGE;
133
183
 
134
184
  const result = { statusCode, message };
135
- if (includeDevInsight && typeof parsed.devInsight === 'string' && parsed.devInsight.trim()) {
185
+ if (
186
+ includeDevInsight &&
187
+ typeof parsed.devInsight === 'string' &&
188
+ parsed.devInsight.trim()
189
+ ) {
136
190
  result.devInsight = parsed.devInsight.trim();
137
191
  }
138
192
 
193
+ // 5. Store in cache
194
+ if (cacheEnabled) {
195
+ const cache = getCache(cacheTTL);
196
+ const key = cache.buildKey(context.codeContext, context.error);
197
+ cache.set(key, result);
198
+ }
199
+
139
200
  return result;
140
201
  }
@@ -0,0 +1,72 @@
1
+ /**
2
+ * In-memory sliding window rate limiter for LLM error inference calls.
3
+ * Tracks LLM call timestamps in the last 60 seconds.
4
+ * Shared singleton across the process; configured from errors.llm.rateLimit.
5
+ */
6
+
7
class LLMRateLimiter {
  /**
   * @param {number} maxPerMinute - Maximum LLM calls allowed per 60-second
   *   window. Values that don't floor to a positive finite integer (0,
   *   negatives, NaN, Infinity, fractions below 1) fall back to 10.
   */
  constructor(maxPerMinute) {
    const limit = Math.floor(maxPerMinute);
    // Fix: the previous guard (`maxPerMinute > 0 ? Math.floor(...) : 10`)
    // turned inputs like 0.5 into a limit of 0, permanently blocking all
    // calls instead of applying the default.
    this.maxPerMinute = Number.isFinite(limit) && limit > 0 ? limit : 10;
    /** @type {number[]} timestamps of recent LLM calls (ms since epoch) */
    this._timestamps = [];
  }

  /**
   * Drop timestamps older than the 60-second sliding window. Timestamps are
   * appended in order, so the array stays sorted and we only trim the front.
   */
  _prune() {
    const cutoff = Date.now() - 60_000;
    let stale = 0;
    while (stale < this._timestamps.length && this._timestamps[stale] <= cutoff) {
      stale++;
    }
    if (stale > 0) this._timestamps.splice(0, stale);
  }

  /**
   * Returns true if an LLM call is allowed under the current rate.
   * @returns {boolean}
   */
  canCall() {
    this._prune();
    return this._timestamps.length < this.maxPerMinute;
  }

  /**
   * Record that an LLM call was made right now.
   */
  record() {
    this._prune();
    this._timestamps.push(Date.now());
  }

  /**
   * Returns how many calls remain in the current window.
   * @returns {number}
   */
  remaining() {
    this._prune();
    return Math.max(0, this.maxPerMinute - this._timestamps.length);
  }
}
55
+
56
+ /** @type {LLMRateLimiter|null} */
57
+ let _instance = null;
58
+
59
+ /**
60
+ * Get (or create) the singleton rate limiter.
61
+ * Re-initializes if maxPerMinute changes.
62
+ * @param {number} maxPerMinute
63
+ * @returns {LLMRateLimiter}
64
+ */
65
+ export function getRateLimiter(maxPerMinute) {
66
+ if (!_instance || _instance.maxPerMinute !== maxPerMinute) {
67
+ _instance = new LLMRateLimiter(maxPerMinute);
68
+ }
69
+ return _instance;
70
+ }
71
+
72
+ export { LLMRateLimiter };
package/server/handler.js CHANGED
@@ -12,7 +12,13 @@ const logger = new TejLogger('Tejas');
12
12
  const warnedPaths = new Set();
13
13
 
14
14
  const DEFAULT_ALLOWED_METHODS = [
15
- 'GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'HEAD', 'OPTIONS',
15
+ 'GET',
16
+ 'POST',
17
+ 'PUT',
18
+ 'DELETE',
19
+ 'PATCH',
20
+ 'HEAD',
21
+ 'OPTIONS',
16
22
  ];
17
23
 
18
24
  /**
@@ -24,9 +30,13 @@ const getAllowedMethods = () => {
24
30
  if (raw == null) return new Set(DEFAULT_ALLOWED_METHODS);
25
31
  const arr = Array.isArray(raw)
26
32
  ? raw
27
- : (typeof raw === 'string' ? raw.split(',').map((s) => s.trim()) : []);
33
+ : typeof raw === 'string'
34
+ ? raw.split(',').map((s) => s.trim())
35
+ : [];
28
36
  const normalized = arr.map((m) => String(m).toUpperCase()).filter(Boolean);
29
- return normalized.length > 0 ? new Set(normalized) : new Set(DEFAULT_ALLOWED_METHODS);
37
+ return normalized.length > 0
38
+ ? new Set(normalized)
39
+ : new Set(DEFAULT_ALLOWED_METHODS);
30
40
  };
31
41
 
32
42
  /**
@@ -58,23 +68,31 @@ const executeChain = async (target, ammo) => {
58
68
 
59
69
  try {
60
70
  const result = await middleware(...args);
61
-
71
+
62
72
  // Check again after middleware execution (passport might have redirected)
63
73
  if (ammo.res.headersSent || ammo.res.writableEnded || ammo.res.finished) {
64
74
  return;
65
75
  }
66
-
76
+
67
77
  // If middleware returned a promise that resolved, continue chain
68
78
  if (result && typeof result.then === 'function') {
69
79
  await result;
70
80
  // Check one more time after promise resolution
71
- if (ammo.res.headersSent || ammo.res.writableEnded || ammo.res.finished) {
81
+ if (
82
+ ammo.res.headersSent ||
83
+ ammo.res.writableEnded ||
84
+ ammo.res.finished
85
+ ) {
72
86
  return;
73
87
  }
74
88
  }
75
89
  } catch (err) {
76
90
  // Only handle error if response hasn't been sent
77
- if (!ammo.res.headersSent && !ammo.res.writableEnded && !ammo.res.finished) {
91
+ if (
92
+ !ammo.res.headersSent &&
93
+ !ammo.res.writableEnded &&
94
+ !ammo.res.finished
95
+ ) {
78
96
  await errorHandler(ammo, err);
79
97
  }
80
98
  }
@@ -94,7 +112,8 @@ const executeChain = async (target, ammo) => {
94
112
  * @returns {Promise<void>}
95
113
  */
96
114
  const errorHandler = async (ammo, err) => {
97
- if (env('LOG_EXCEPTIONS')) errorLogger.error(err);
115
+ // Pass false as second arg to suppress tej-logger's Console.trace() double-stack output.
116
+ if (env('LOG_EXCEPTIONS')) errorLogger.error(err, false);
98
117
 
99
118
  const result = ammo.throw(err);
100
119
  if (result != null && typeof result.then === 'function') {
@@ -126,9 +145,11 @@ const handler = async (req, res) => {
126
145
  const ammo = new Ammo(req, res);
127
146
 
128
147
  try {
129
- if (match && match.target) {
130
- await ammo.enhance();
148
+ // Enhance ammo for all requests (matched or not) so global middlewares
149
+ // always receive a fully-populated ammo (method flags, headers, payload, etc.).
150
+ await ammo.enhance();
131
151
 
152
+ if (match && match.target) {
132
153
  const allowedMethods = match.target.getMethods();
133
154
  if (allowedMethods != null && allowedMethods.length > 0) {
134
155
  const method = ammo.method && String(ammo.method).toUpperCase();
@@ -145,7 +166,8 @@ const handler = async (req, res) => {
145
166
  }
146
167
  }
147
168
 
148
- // Add route parameters to ammo.payload
169
+ // Add route parameters to ammo.params and ammo.payload
170
+ ammo.params = match.params || {};
149
171
  if (match.params && Object.keys(match.params).length > 0) {
150
172
  Object.assign(ammo.payload, match.params);
151
173
  }
@@ -156,7 +178,23 @@ const handler = async (req, res) => {
156
178
  if (req.url === '/') {
157
179
  ammo.defaultEntry();
158
180
  } else {
159
- await errorHandler(ammo, new TejError(404, `URL not found: ${url}`));
181
+ // Run global middlewares (CORS preflight, auth, logging, etc.) even for
182
+ // unmatched routes. A pseudo-target with no route-specific middlewares
183
+ // is used so the 404 response is sent at the end of the global chain.
184
+ await executeChain(
185
+ {
186
+ getMiddlewares: () => [],
187
+ getHandler: () => async () => {
188
+ if (!ammo.res.headersSent) {
189
+ await errorHandler(
190
+ ammo,
191
+ new TejError(404, `URL not found: ${url}`),
192
+ );
193
+ }
194
+ },
195
+ },
196
+ ammo,
197
+ );
160
198
  }
161
199
  }
162
200
  } catch (err) {
@@ -94,16 +94,16 @@ class TargetRegistry {
94
94
  const patternSegments = pattern.split('/').filter((s) => s.length > 0);
95
95
  const urlSegments = url.split('/').filter((s) => s.length > 0);
96
96
 
97
- // Must have same number of segments
98
- if (patternSegments.length !== urlSegments.length) {
99
- return null;
100
- }
101
-
102
97
  // If both are empty (root paths), they match
103
98
  if (patternSegments.length === 0 && urlSegments.length === 0) {
104
99
  return {};
105
100
  }
106
101
 
102
+ // Must have same number of segments
103
+ if (patternSegments.length !== urlSegments.length) {
104
+ return null;
105
+ }
106
+
107
107
  const params = {};
108
108
 
109
109
  // Match each segment
@@ -133,7 +133,7 @@ class TargetRegistry {
133
133
  */
134
134
  getAllEndpoints(options = {}) {
135
135
  const grouped =
136
- typeof options === 'boolean' ? options : (options && options.grouped);
136
+ typeof options === 'boolean' ? options : options && options.grouped;
137
137
  const detailed =
138
138
  typeof options === 'object' && options && options.detailed === true;
139
139