te.js 2.1.5 → 2.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/auto-docs/analysis/handler-analyzer.test.js +106 -0
  2. package/auto-docs/analysis/source-resolver.test.js +58 -0
  3. package/auto-docs/constants.js +13 -2
  4. package/auto-docs/openapi/generator.js +7 -5
  5. package/auto-docs/openapi/generator.test.js +132 -0
  6. package/auto-docs/openapi/spec-builders.js +39 -19
  7. package/cli/docs-command.js +44 -36
  8. package/cors/index.test.js +82 -0
  9. package/database/index.js +3 -1
  10. package/database/mongodb.js +17 -11
  11. package/database/redis.js +53 -44
  12. package/docs/configuration.md +24 -10
  13. package/docs/error-handling.md +134 -50
  14. package/lib/llm/client.js +40 -10
  15. package/lib/llm/index.js +14 -1
  16. package/lib/llm/parse.test.js +60 -0
  17. package/package.json +3 -1
  18. package/radar/index.js +281 -0
  19. package/rate-limit/index.js +8 -11
  20. package/rate-limit/index.test.js +64 -0
  21. package/server/ammo/body-parser.js +156 -152
  22. package/server/ammo/body-parser.test.js +79 -0
  23. package/server/ammo/enhancer.js +8 -4
  24. package/server/ammo.js +216 -17
  25. package/server/context/request-context.js +51 -0
  26. package/server/context/request-context.test.js +53 -0
  27. package/server/endpoint.js +15 -0
  28. package/server/error.js +56 -3
  29. package/server/error.test.js +45 -0
  30. package/server/errors/channels/base.js +31 -0
  31. package/server/errors/channels/channels.test.js +148 -0
  32. package/server/errors/channels/console.js +64 -0
  33. package/server/errors/channels/index.js +111 -0
  34. package/server/errors/channels/log.js +27 -0
  35. package/server/errors/llm-cache.js +102 -0
  36. package/server/errors/llm-cache.test.js +160 -0
  37. package/server/errors/llm-error-service.js +77 -16
  38. package/server/errors/llm-rate-limiter.js +72 -0
  39. package/server/errors/llm-rate-limiter.test.js +105 -0
  40. package/server/files/uploader.js +38 -26
  41. package/server/handler.js +5 -3
  42. package/server/targets/registry.js +9 -9
  43. package/server/targets/registry.test.js +108 -0
  44. package/te.js +214 -57
  45. package/utils/auto-register.js +1 -1
  46. package/utils/configuration.js +23 -9
  47. package/utils/configuration.test.js +58 -0
  48. package/utils/errors-llm-config.js +142 -9
  49. package/utils/request-logger.js +49 -3
@@ -15,8 +15,8 @@ Tejas wraps all middleware and route handlers with built-in error catching. Any
15
15
  ```javascript
16
16
  // ✅ No try-catch needed — Tejas handles errors automatically
17
17
  target.register('/users/:id', async (ammo) => {
18
- const user = await database.findUser(ammo.payload.id); // If this throws, Tejas catches it
19
- const posts = await database.getUserPosts(user.id); // Same here
18
+ const user = await database.findUser(ammo.payload.id); // If this throws, Tejas catches it
19
+ const posts = await database.getUserPosts(user.id); // Same here
20
20
  ammo.fire({ user, posts });
21
21
  });
22
22
  ```
@@ -30,8 +30,8 @@ app.get('/users/:id', async (req, res) => {
30
30
  const user = await database.findUser(req.params.id);
31
31
  res.json(user);
32
32
  } catch (error) {
33
- console.error(error); // 1. log
34
- res.status(500).json({ error: 'Internal Server Error' }); // 2. send response
33
+ console.error(error); // 1. log
34
+ res.status(500).json({ error: 'Internal Server Error' }); // 2. send response
35
35
  }
36
36
  });
37
37
  ```
@@ -47,8 +47,8 @@ To see caught exceptions in your logs, enable exception logging:
47
47
  ```javascript
48
48
  const app = new Tejas({
49
49
  log: {
50
- exceptions: true // Log all caught exceptions
51
- }
50
+ exceptions: true, // Log all caught exceptions
51
+ },
52
52
  });
53
53
  ```
54
54
 
@@ -89,6 +89,87 @@ ammo.throw({ messageType: 'developer' });
89
89
  ammo.throw(caughtErr, { useLlm: false });
90
90
  ```
91
91
 
92
+ ### Async mode
93
+
94
+ By default (`errors.llm.mode: 'sync'`), `ammo.throw()` blocks the HTTP response until the LLM returns. This adds LLM latency (typically 1–3 seconds) to every error response.
95
+
96
+ Set `errors.llm.mode` to `'async'` to respond immediately with a generic `500 Internal Server Error` and run the LLM inference in the background. The result is dispatched to the configured **channel** once ready — the client never waits.
97
+
98
+ ```bash
99
+ # .env
100
+ ERRORS_LLM_MODE=async
101
+ ERRORS_LLM_CHANNEL=both # console + log file
102
+ ```
103
+
104
+ ```javascript
105
+ // tejas.config.json
106
+ {
107
+ "errors": {
108
+ "llm": {
109
+ "enabled": true,
110
+ "mode": "async",
111
+ "channel": "both"
112
+ }
113
+ }
114
+ }
115
+ ```
116
+
117
+ In async mode:
118
+
119
+ - The HTTP response is always `500 Internal Server Error` regardless of what the LLM would infer. The LLM-inferred status and message are only visible in the channel.
120
+ - Developer insight (`devInsight`) is **always** included in the channel output, even in production — it never reaches the HTTP response.
121
+ - If the LLM call fails or times out in the background, it is silently swallowed. The HTTP response has already been sent.
122
+
123
+ ### Output channels (async mode)
124
+
125
+ When `mode` is `async`, the LLM result is sent to the configured channel after the response. Set `errors.llm.channel`:
126
+
127
+ | Channel | Output |
128
+ | --------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
129
+ | `"console"` (default) | Pretty-printed colored block in the terminal: timestamp, method+path, inferred status, message, dev insight. Shows `[CACHED]` or `[RATE LIMITED]` flags. |
130
+ | `"log"` | Appends a JSON line to `errors.llm.logFile` (default `./errors.llm.log`). Each entry contains all fields: timestamp, method, path, statusCode, message, devInsight, original error, code context snippets, cached, rateLimited. |
131
+ | `"both"` | Both console and log file. |
132
+
133
+ The log file uses **JSONL format** (one JSON object per line), so it can be read by log analysis tools or Radar.
134
+
135
+ ```bash
136
+ ERRORS_LLM_CHANNEL=log
137
+ ERRORS_LLM_LOG_FILE=./logs/llm-errors.log
138
+ ```
139
+
140
+ ### Rate limiting
141
+
142
+ Set `errors.llm.rateLimit` (default `10`) to cap the number of LLM calls per minute across all requests. This prevents a burst of errors from exhausting your API quota.
143
+
144
+ ```bash
145
+ ERRORS_LLM_RATE_LIMIT=20
146
+ ```
147
+
148
+ When the rate limit is exceeded:
149
+
150
+ - **Sync mode**: responds immediately with `500 Internal Server Error` (no LLM call).
151
+ - **Async mode**: the channel still receives a dispatch with `rateLimited: true` so the error occurrence is recorded even though LLM enhancement was skipped.
152
+
153
+ Cached results do **not** count against the rate limit.
154
+
155
+ ### Error caching
156
+
157
+ By default (`errors.llm.cache: true`), Tejas caches LLM results by throw site and error message. If the same error is thrown at the same file and line, the cached result is reused without making another LLM call.
158
+
159
+ ```bash
160
+ ERRORS_LLM_CACHE=true
161
+ ERRORS_LLM_CACHE_TTL=3600000 # 1 hour (default)
162
+ ```
163
+
164
+ The cache key is: `file:line:errorMessage`. After the TTL expires, the next occurrence triggers a fresh LLM call.
165
+
166
+ To enhance **only new errors**, keep caching enabled with a long TTL. To re-evaluate errors more frequently, reduce the TTL.
167
+
168
+ ```javascript
169
+ // Only enhance errors once per 24 hours
170
+ app.withLLMErrors({ cache: true, cacheTTL: 86400000 });
171
+ ```
172
+
92
173
  ---
93
174
 
94
175
  ## TejError Class
@@ -113,6 +194,7 @@ throw new TejError(404, 'Resource not found');
113
194
  ```
114
195
 
115
196
  **Response:**
197
+
116
198
  ```
117
199
  HTTP/1.1 404 Not Found
118
200
  Content-Type: text/plain
@@ -148,7 +230,7 @@ ammo.throw(new TejError(400, 'Bad request'));
148
230
  // When errors.llm.enabled: LLM infers code and message from context
149
231
  ammo.throw(new Error('Something went wrong'));
150
232
  ammo.throw('Validation failed');
151
- ammo.throw(); // context still used when available
233
+ ammo.throw(); // context still used when available
152
234
  ```
153
235
 
154
236
  See [Ammo — throw()](./ammo.md#throw--send-error-response) for all signatures and the LLM-inferred row.
@@ -160,13 +242,13 @@ See [Ammo — throw()](./ammo.md#throw--send-error-response) for all signatures
160
242
  ```javascript
161
243
  target.register('/users/:id', async (ammo) => {
162
244
  const { id } = ammo.payload;
163
-
245
+
164
246
  const user = await findUser(id);
165
-
247
+
166
248
  if (!user) {
167
249
  throw new TejError(404, 'User not found');
168
250
  }
169
-
251
+
170
252
  ammo.fire(user);
171
253
  });
172
254
  ```
@@ -194,8 +276,8 @@ Errors are automatically caught by Tejas's handler. Enable logging:
194
276
  ```javascript
195
277
  const app = new Tejas({
196
278
  log: {
197
- exceptions: true // Log all exceptions
198
- }
279
+ exceptions: true, // Log all exceptions
280
+ },
199
281
  });
200
282
  ```
201
283
 
@@ -207,18 +289,18 @@ Create middleware to customize error handling:
207
289
  // middleware/error-handler.js
208
290
  export const errorHandler = (ammo, next) => {
209
291
  const originalThrow = ammo.throw.bind(ammo);
210
-
292
+
211
293
  ammo.throw = (...args) => {
212
294
  // Log errors
213
295
  console.error('Error:', args);
214
-
296
+
215
297
  // Send to error tracking service
216
298
  errorTracker.capture(args[0]);
217
-
299
+
218
300
  // Call original throw
219
301
  originalThrow(...args);
220
302
  };
221
-
303
+
222
304
  next();
223
305
  };
224
306
 
@@ -234,12 +316,12 @@ For APIs, return structured error objects:
234
316
  // middleware/api-errors.js
235
317
  export const apiErrorHandler = (ammo, next) => {
236
318
  const originalThrow = ammo.throw.bind(ammo);
237
-
319
+
238
320
  ammo.throw = (statusOrError, message) => {
239
321
  let status = 500;
240
322
  let errorMessage = 'Internal Server Error';
241
323
  let errorCode = 'INTERNAL_ERROR';
242
-
324
+
243
325
  if (typeof statusOrError === 'number') {
244
326
  status = statusOrError;
245
327
  errorMessage = message || getDefaultMessage(status);
@@ -249,16 +331,16 @@ export const apiErrorHandler = (ammo, next) => {
249
331
  errorMessage = statusOrError.message;
250
332
  errorCode = getErrorCode(status);
251
333
  }
252
-
334
+
253
335
  ammo.fire(status, {
254
336
  error: {
255
337
  code: errorCode,
256
338
  message: errorMessage,
257
- status
258
- }
339
+ status,
340
+ },
259
341
  });
260
342
  };
261
-
343
+
262
344
  next();
263
345
  };
264
346
 
@@ -270,7 +352,7 @@ function getDefaultMessage(status) {
270
352
  404: 'Not Found',
271
353
  405: 'Method Not Allowed',
272
354
  429: 'Too Many Requests',
273
- 500: 'Internal Server Error'
355
+ 500: 'Internal Server Error',
274
356
  };
275
357
  return messages[status] || 'Unknown Error';
276
358
  }
@@ -283,13 +365,14 @@ function getErrorCode(status) {
283
365
  404: 'NOT_FOUND',
284
366
  405: 'METHOD_NOT_ALLOWED',
285
367
  429: 'RATE_LIMITED',
286
- 500: 'INTERNAL_ERROR'
368
+ 500: 'INTERNAL_ERROR',
287
369
  };
288
370
  return codes[status] || 'UNKNOWN_ERROR';
289
371
  }
290
372
  ```
291
373
 
292
374
  **Response:**
375
+
293
376
  ```json
294
377
  {
295
378
  "error": {
@@ -307,10 +390,10 @@ For input validation, return detailed errors:
307
390
  ```javascript
308
391
  target.register('/users', (ammo) => {
309
392
  if (!ammo.POST) return ammo.notAllowed();
310
-
393
+
311
394
  const { name, email, age } = ammo.payload;
312
395
  const errors = [];
313
-
396
+
314
397
  if (!name) errors.push({ field: 'name', message: 'Name is required' });
315
398
  if (!email) errors.push({ field: 'email', message: 'Email is required' });
316
399
  if (email && !isValidEmail(email)) {
@@ -319,17 +402,17 @@ target.register('/users', (ammo) => {
319
402
  if (age && (isNaN(age) || age < 0)) {
320
403
  errors.push({ field: 'age', message: 'Age must be a positive number' });
321
404
  }
322
-
405
+
323
406
  if (errors.length > 0) {
324
407
  return ammo.fire(400, {
325
408
  error: {
326
409
  code: 'VALIDATION_ERROR',
327
410
  message: 'Validation failed',
328
- details: errors
329
- }
411
+ details: errors,
412
+ },
330
413
  });
331
414
  }
332
-
415
+
333
416
  // Process valid data...
334
417
  });
335
418
  ```
@@ -380,16 +463,17 @@ While Tejas catches all errors automatically, you may want try-catch for:
380
463
 
381
464
  `BodyParserError` is a subclass of `TejError` thrown automatically during request body parsing. You do not need to handle these yourself — they are caught by the framework and converted to appropriate HTTP responses.
382
465
 
383
- | Status | Condition |
384
- |--------|-----------|
466
+ | Status | Condition |
467
+ | ------- | -------------------------------------------------------------------------- |
385
468
  | **400** | Malformed JSON, invalid URL-encoded data, or corrupted multipart form data |
386
- | **408** | Body parsing timed out (exceeds `body.timeout`, default 30 seconds) |
387
- | **413** | Request body exceeds `body.max_size` (default 10 MB) |
388
- | **415** | Unsupported content type (not JSON, URL-encoded, or multipart) |
469
+ | **408** | Body parsing timed out (exceeds `body.timeout`, default 30 seconds) |
470
+ | **413** | Request body exceeds `body.max_size` (default 10 MB) |
471
+ | **415** | Unsupported content type (not JSON, URL-encoded, or multipart) |
389
472
 
390
473
  These limits are configured via [Configuration](./configuration.md) (`body.max_size`, `body.timeout`).
391
474
 
392
475
  Supported content types:
476
+
393
477
  - `application/json`
394
478
  - `application/x-www-form-urlencoded`
395
479
  - `multipart/form-data`
@@ -410,21 +494,21 @@ Once a response has been sent (`res.headersSent` is true), no further middleware
410
494
 
411
495
  ## Error Codes Reference
412
496
 
413
- | Status | Name | When to Use |
414
- |--------|------|-------------|
415
- | 400 | Bad Request | Invalid input, malformed request |
416
- | 401 | Unauthorized | Missing or invalid authentication |
417
- | 403 | Forbidden | Authenticated but not authorized |
418
- | 404 | Not Found | Resource doesn't exist |
419
- | 405 | Method Not Allowed | HTTP method not supported |
420
- | 409 | Conflict | Resource conflict (duplicate) |
421
- | 413 | Payload Too Large | Request body too large |
422
- | 422 | Unprocessable Entity | Valid syntax but semantic errors |
423
- | 429 | Too Many Requests | Rate limit exceeded |
424
- | 500 | Internal Server Error | Unexpected server errors |
425
- | 502 | Bad Gateway | Upstream server error |
426
- | 503 | Service Unavailable | Server temporarily unavailable |
427
- | 504 | Gateway Timeout | Upstream server timeout |
497
+ | Status | Name | When to Use |
498
+ | ------ | --------------------- | --------------------------------- |
499
+ | 400 | Bad Request | Invalid input, malformed request |
500
+ | 401 | Unauthorized | Missing or invalid authentication |
501
+ | 403 | Forbidden | Authenticated but not authorized |
502
+ | 404 | Not Found | Resource doesn't exist |
503
+ | 405 | Method Not Allowed | HTTP method not supported |
504
+ | 409 | Conflict | Resource conflict (duplicate) |
505
+ | 413 | Payload Too Large | Request body too large |
506
+ | 422 | Unprocessable Entity | Valid syntax but semantic errors |
507
+ | 429 | Too Many Requests | Rate limit exceeded |
508
+ | 500 | Internal Server Error | Unexpected server errors |
509
+ | 502 | Bad Gateway | Upstream server error |
510
+ | 503 | Service Unavailable | Server temporarily unavailable |
511
+ | 504 | Gateway Timeout | Upstream server timeout |
428
512
 
429
513
  ## Best Practices
430
514
 
package/lib/llm/client.js CHANGED
@@ -6,6 +6,7 @@
6
6
 
7
7
  const DEFAULT_BASE_URL = 'https://api.openai.com/v1';
8
8
  const DEFAULT_MODEL = 'gpt-4o-mini';
9
+ const DEFAULT_TIMEOUT = 10000;
9
10
 
10
11
  /**
11
12
  * OpenAI-compatible LLM provider. Exposes only constructor and analyze(prompt).
@@ -15,15 +16,25 @@ class LLMProvider {
15
16
  this.baseURL = (options.baseURL ?? DEFAULT_BASE_URL).replace(/\/$/, '');
16
17
  this.model = options.model ?? DEFAULT_MODEL;
17
18
  this.apiKey = options.apiKey ?? process.env.OPENAI_API_KEY;
18
- this.options = options;
19
+ this.timeout =
20
+ typeof options.timeout === 'number' && options.timeout > 0
21
+ ? options.timeout
22
+ : DEFAULT_TIMEOUT;
23
+ this.options = Object.freeze({ ...options });
19
24
  }
20
25
 
21
26
  /**
22
27
  * Send a prompt to the LLM and return the raw text response and usage.
28
+ * Aborts after this.timeout milliseconds and throws a clean error.
23
29
  * @param {string} prompt
24
30
  * @returns {Promise<{ content: string, usage: { prompt_tokens: number, completion_tokens: number, total_tokens: number } }>}
25
31
  */
26
32
  async analyze(prompt) {
33
+ if (!prompt || typeof prompt !== 'string') {
34
+ throw new TypeError(
35
+ 'LLMProvider.analyze: prompt must be a non-empty string',
36
+ );
37
+ }
27
38
  const url = `${this.baseURL}/chat/completions`;
28
39
  const headers = {
29
40
  'Content-Type': 'application/json',
@@ -34,25 +45,44 @@ class LLMProvider {
34
45
  messages: [{ role: 'user', content: prompt }],
35
46
  };
36
47
 
37
- const res = await fetch(url, {
38
- method: 'POST',
39
- headers,
40
- body: JSON.stringify(body),
41
- });
48
+ const controller = new AbortController();
49
+ const timeoutId = setTimeout(() => controller.abort(), this.timeout);
50
+
51
+ let res;
52
+ try {
53
+ res = await fetch(url, {
54
+ method: 'POST',
55
+ headers,
56
+ body: JSON.stringify(body),
57
+ signal: controller.signal,
58
+ });
59
+ } catch (err) {
60
+ if (err.name === 'AbortError') {
61
+ throw new Error(`LLM request timed out after ${this.timeout}ms`);
62
+ }
63
+ throw err;
64
+ } finally {
65
+ clearTimeout(timeoutId);
66
+ }
42
67
 
43
68
  if (!res.ok) {
44
69
  const text = await res.text();
45
- throw new Error(`LLM request failed (${res.status}): ${text.slice(0, 300)}`);
70
+ throw new Error(
71
+ `LLM request failed (${res.status}): ${text.slice(0, 300)}`,
72
+ );
46
73
  }
47
74
 
48
75
  const data = await res.json();
49
76
  const content = data.choices?.[0]?.message?.content ?? '';
50
- const text = typeof content === 'string' ? content : JSON.stringify(content);
77
+ const text =
78
+ typeof content === 'string' ? content : JSON.stringify(content);
51
79
  const rawUsage = data.usage;
52
80
  const usage = {
53
81
  prompt_tokens: rawUsage?.prompt_tokens ?? 0,
54
82
  completion_tokens: rawUsage?.completion_tokens ?? 0,
55
- total_tokens: rawUsage?.total_tokens ?? (rawUsage?.prompt_tokens ?? 0) + (rawUsage?.completion_tokens ?? 0),
83
+ total_tokens:
84
+ rawUsage?.total_tokens ??
85
+ (rawUsage?.prompt_tokens ?? 0) + (rawUsage?.completion_tokens ?? 0),
56
86
  };
57
87
  return { content: text, usage };
58
88
  }
@@ -60,7 +90,7 @@ class LLMProvider {
60
90
 
61
91
  /**
62
92
  * Create an LLM provider from config.
63
- * @param {object} config - { baseURL?, apiKey?, model? }
93
+ * @param {object} config - { baseURL?, apiKey?, model?, timeout? }
64
94
  * @returns {LLMProvider}
65
95
  */
66
96
  function createProvider(config) {
package/lib/llm/index.js CHANGED
@@ -3,5 +3,18 @@
3
3
  * Used by auto-docs, error-inference, and future LLM features.
4
4
  */
5
5
 
6
+ /**
7
+ * OpenAI-compatible LLM client.
8
+ * @see {@link ./client.js}
9
+ */
6
10
  export { LLMProvider, createProvider } from './client.js';
7
- export { extractJSON, extractJSONArray, reconcileOrderedTags } from './parse.js';
11
+
12
+ /**
13
+ * JSON parsing utilities for LLM responses.
14
+ * @see {@link ./parse.js}
15
+ */
16
+ export {
17
+ extractJSON,
18
+ extractJSONArray,
19
+ reconcileOrderedTags,
20
+ } from './parse.js';
@@ -0,0 +1,60 @@
1
+ /**
2
+ * Unit tests for lib/llm parse utilities (extractJSON, extractJSONArray, reconcileOrderedTags).
3
+ */
4
+ import { describe, it, expect } from 'vitest';
5
+ import {
6
+ extractJSON,
7
+ extractJSONArray,
8
+ reconcileOrderedTags,
9
+ } from './index.js';
10
+
11
+ describe('llm/parse', () => {
12
+ describe('extractJSON', () => {
13
+ it('extracts object from plain JSON string', () => {
14
+ const str = '{"name":"Users","description":"CRUD"}';
15
+ expect(extractJSON(str)).toEqual({ name: 'Users', description: 'CRUD' });
16
+ });
17
+ it('extracts first object from text with markdown', () => {
18
+ const str = 'Here is the result:\n```json\n{"summary":"Get item"}\n```';
19
+ expect(extractJSON(str)).toEqual({ summary: 'Get item' });
20
+ });
21
+ it('returns null for empty or no object', () => {
22
+ expect(extractJSON('')).toBeNull();
23
+ expect(extractJSON('no brace here')).toBeNull();
24
+ });
25
+ });
26
+
27
+ describe('extractJSONArray', () => {
28
+ it('extracts array from string', () => {
29
+ const str = '["Users", "Auth", "Health"]';
30
+ expect(extractJSONArray(str)).toEqual(['Users', 'Auth', 'Health']);
31
+ });
32
+ it('returns null when no array', () => {
33
+ expect(extractJSONArray('')).toBeNull();
34
+ expect(extractJSONArray('nothing')).toBeNull();
35
+ });
36
+ });
37
+
38
+ describe('reconcileOrderedTags', () => {
39
+ it('reorders tags by orderedTagNames', () => {
40
+ const tags = [
41
+ { name: 'Health', description: '...' },
42
+ { name: 'Users', description: '...' },
43
+ { name: 'Auth', description: '...' },
44
+ ];
45
+ const ordered = reconcileOrderedTags(['Users', 'Auth', 'Health'], tags);
46
+ expect(ordered.map((t) => t.name)).toEqual(['Users', 'Auth', 'Health']);
47
+ });
48
+ it('appends tags not in orderedTagNames', () => {
49
+ const tags = [{ name: 'Users' }, { name: 'Other' }];
50
+ const ordered = reconcileOrderedTags(['Users'], tags);
51
+ expect(ordered.map((t) => t.name)).toEqual(['Users', 'Other']);
52
+ });
53
+ it('returns copy of tags when orderedTagNames empty', () => {
54
+ const tags = [{ name: 'A' }];
55
+ const ordered = reconcileOrderedTags([], tags);
56
+ expect(ordered).toEqual([{ name: 'A' }]);
57
+ expect(ordered).not.toBe(tags);
58
+ });
59
+ });
60
+ });
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "te.js",
3
- "version": "2.1.5",
3
+ "version": "2.2.0",
4
4
  "description": "AI Native Node.js Framework",
5
5
  "type": "module",
6
6
  "main": "te.js",
@@ -18,6 +18,7 @@
18
18
  "license": "ISC",
19
19
  "devDependencies": {
20
20
  "@types/node": "^20.12.5",
21
+ "@vitest/coverage-v8": "^4.0.18",
21
22
  "husky": "^9.0.11",
22
23
  "lint-staged": "^15.2.2",
23
24
  "prettier": "3.2.5",
@@ -31,6 +32,7 @@
31
32
  "te.js",
32
33
  "cli",
33
34
  "cors",
35
+ "radar",
34
36
  "server",
35
37
  "database",
36
38
  "rate-limit",