te.js 2.0.1 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52) hide show
  1. package/README.md +10 -1
  2. package/auto-docs/docs-llm/index.js +7 -0
  3. package/auto-docs/{llm → docs-llm}/provider.js +12 -67
  4. package/auto-docs/index.js +3 -3
  5. package/docs/ammo.md +13 -13
  6. package/docs/api-reference.md +7 -6
  7. package/docs/auto-docs.md +1 -0
  8. package/docs/configuration.md +48 -6
  9. package/docs/database.md +0 -1
  10. package/docs/error-handling.md +58 -37
  11. package/docs/file-uploads.md +0 -1
  12. package/docs/getting-started.md +0 -1
  13. package/docs/middleware.md +0 -1
  14. package/docs/rate-limiting.md +0 -1
  15. package/package.json +14 -3
  16. package/server/ammo.js +84 -25
  17. package/server/errors/code-context.js +125 -0
  18. package/server/errors/llm-error-service.js +140 -0
  19. package/server/handler.js +13 -7
  20. package/te.js +39 -0
  21. package/utils/errors-llm-config.js +84 -0
  22. package/.cursor/plans/ai_native_framework_features_5bb1a20a.plan.md +0 -234
  23. package/.cursor/plans/auto_error_fix_agent_e68979c5.plan.md +0 -356
  24. package/.cursor/plans/tejas_framework_test_suite_5e3c6fad.plan.md +0 -168
  25. package/.prettierignore +0 -31
  26. package/.prettierrc +0 -5
  27. package/auto-docs/llm/index.js +0 -6
  28. package/auto-docs/llm/parse.js +0 -88
  29. package/example/API_OVERVIEW.md +0 -77
  30. package/example/README.md +0 -155
  31. package/example/index.js +0 -29
  32. package/example/middlewares/auth.js +0 -9
  33. package/example/middlewares/global.midair.js +0 -6
  34. package/example/openapi.json +0 -390
  35. package/example/package.json +0 -23
  36. package/example/services/cache.service.js +0 -25
  37. package/example/services/user.service.js +0 -42
  38. package/example/start-redis.js +0 -2
  39. package/example/targets/cache.target.js +0 -35
  40. package/example/targets/index.target.js +0 -16
  41. package/example/targets/users.target.js +0 -60
  42. package/example/tejas.config.json +0 -22
  43. package/tests/auto-docs/handler-analyzer.test.js +0 -44
  44. package/tests/auto-docs/openapi-generator.test.js +0 -103
  45. package/tests/auto-docs/parse.test.js +0 -63
  46. package/tests/auto-docs/source-resolver.test.js +0 -58
  47. package/tests/helpers/index.js +0 -37
  48. package/tests/helpers/mock-http.js +0 -342
  49. package/tests/helpers/test-utils.js +0 -446
  50. package/tests/setup.test.js +0 -148
  51. package/vitest.config.js +0 -54
  52. /package/auto-docs/{llm → docs-llm}/prompts.js +0 -0
package/README.md CHANGED
@@ -41,12 +41,13 @@ api.register('/hello/:name', (ammo) => {
41
41
  app.takeoff();
42
42
  ```
43
43
 
44
+
44
45
  ## Features
45
46
 
46
47
  - **AI-Native (MCP)** — Ship with an MCP server so AI assistants can scaffold projects, generate routes, and write correct code with full framework knowledge
47
48
  - **Simple Routing** — Clean, method-agnostic URL structures with parameterized routes
48
49
  - **Express Compatible** — Use existing Express middleware alongside Tejas middleware
49
- - **Zero-Config Error Handling** — No try-catch needed! Tejas catches all errors automatically
50
+ - **Zero-Config Error Handling** — No try-catch needed! Tejas catches all errors automatically. Opt in to have an LLM determine error code and message when you don't specify them (see [Error Handling](./docs/error-handling.md))
50
51
  - **Built-in Rate Limiting** — Three algorithms (Token Bucket, Sliding Window, Fixed Window) with memory or Redis storage
51
52
  - **Database Ready** — First-class Redis and MongoDB support with auto-install of drivers
52
53
  - **File Uploads** — Easy file handling with size limits and type validation
@@ -55,6 +56,7 @@ app.takeoff();
55
56
  - **Auto-Discovery** — Automatic route registration from `.target.js` files
56
57
  - **Request Logging** — Built-in HTTP request and exception logging
57
58
 
59
+
58
60
  ## AI-Assisted Setup (MCP)
59
61
 
60
62
  > **Recommended** — The best way to get started with Tejas in the age of AI.
@@ -78,6 +80,7 @@ The [Tejas MCP server](https://www.npmjs.com/package/tejas-mcp) gives your IDE's
78
80
 
79
81
  Once connected, prompt your AI with things like *"Scaffold a new te.js project called my-api"* or *"Create a REST API with user CRUD routes"* — the assistant will generate framework-correct code using real te.js patterns.
80
82
 
83
+
81
84
  ## Quick Start
82
85
 
83
86
  ### Install
@@ -128,6 +131,7 @@ node index.js
128
131
  # Server running at http://localhost:3000
129
132
  ```
130
133
 
134
+
131
135
  ## Core Concepts
132
136
 
133
137
  | Tejas Term | Purpose | Express Equivalent |
@@ -139,6 +143,7 @@ node index.js
139
143
  | `midair()` | Register middleware | `use()` |
140
144
  | `takeoff()` | Start server | `listen()` |
141
145
 
146
+
142
147
  ## CLI
143
148
 
144
149
  ```bash
@@ -147,6 +152,7 @@ tejas generate:docs [--ci] # Generate OpenAPI docs (interactive or CI mode)
147
152
  tejas docs:on-push # Auto-generate docs when pushing to production branch
148
153
  ```
149
154
 
155
+
150
156
  ## API Documentation
151
157
 
152
158
  Generate and serve interactive API docs:
@@ -161,6 +167,7 @@ app.takeoff();
161
167
  // Visit http://localhost:1403/docs
162
168
  ```
163
169
 
170
+
164
171
  ## Documentation
165
172
 
166
173
  For comprehensive documentation, see the [docs folder](./docs) or visit [tejas-documentation.vercel.app](https://tejas-documentation.vercel.app).
@@ -178,10 +185,12 @@ For comprehensive documentation, see the [docs folder](./docs) or visit [tejas-d
178
185
  - [Auto-Documentation](./docs/auto-docs.md) — OpenAPI generation
179
186
  - [API Reference](./docs/api-reference.md) — Complete API docs
180
187
 
188
+
181
189
  ## Contributing
182
190
 
183
191
  Contributions are welcome! Please feel free to submit a Pull Request.
184
192
 
193
+
185
194
  ## License
186
195
 
187
196
  ISC © [Hirak Chhatbar](https://github.com/hirakchhatbar)
@@ -0,0 +1,7 @@
1
+ /**
2
+ * LLM provider for auto-documentation (docs-specific).
3
+ * Use createProvider(config) with baseURL, apiKey, model.
4
+ * For the generic LLM client only, use lib/llm.
5
+ */
6
+
7
+ export { LLMProvider, createProvider, extractJSON, extractJSONArray, reconcileOrderedTags } from './provider.js';
@@ -1,10 +1,9 @@
1
1
  /**
2
- * LLM provider for auto-documentation: single OpenAI-compatible implementation.
3
- * Works with OpenAI, OpenRouter, Ollama (OpenAI-compatible endpoint), Azure, etc.
4
- * Uses fetch() only — no provider-specific npm dependencies.
2
+ * LLM provider for auto-documentation: extends shared lib/llm client with doc-specific methods.
3
+ * Single OpenAI-compatible implementation; works with OpenAI, OpenRouter, Ollama, Azure, etc.
5
4
  */
6
5
 
7
- import { extractJSON, extractJSONArray, reconcileOrderedTags } from './parse.js';
6
+ import { LLMProvider as BaseLLMProvider, extractJSON, extractJSONArray, reconcileOrderedTags } from '../../lib/llm/index.js';
8
7
  import {
9
8
  buildSummarizeGroupPrompt,
10
9
  buildEnhanceEndpointPrompt,
@@ -13,59 +12,10 @@ import {
13
12
  buildOverviewPrompt,
14
13
  } from './prompts.js';
15
14
 
16
- const DEFAULT_BASE_URL = 'https://api.openai.com/v1';
17
- const DEFAULT_MODEL = 'gpt-4o-mini';
18
-
19
15
  /**
20
- * OpenAI-compatible LLM provider. POSTs to {baseURL}/chat/completions.
16
+ * Docs-specific LLM provider: base analyze() from lib/llm plus summarizeTargetGroup, enhanceEndpointDocs, etc.
21
17
  */
22
- class LLMProvider {
23
- constructor(options = {}) {
24
- this.baseURL = (options.baseURL ?? DEFAULT_BASE_URL).replace(/\/$/, '');
25
- this.model = options.model ?? DEFAULT_MODEL;
26
- this.apiKey = options.apiKey ?? process.env.OPENAI_API_KEY;
27
- this.options = options;
28
- }
29
-
30
- /**
31
- * Send a prompt to the LLM and return the raw text response.
32
- * @param {string} prompt
33
- * @returns {Promise<string>}
34
- */
35
- async analyze(prompt) {
36
- const url = `${this.baseURL}/chat/completions`;
37
- const headers = {
38
- 'Content-Type': 'application/json',
39
- ...(this.apiKey && { Authorization: `Bearer ${this.apiKey}` }),
40
- };
41
- const body = {
42
- model: this.model,
43
- messages: [{ role: 'user', content: prompt }],
44
- };
45
-
46
- const res = await fetch(url, {
47
- method: 'POST',
48
- headers,
49
- body: JSON.stringify(body),
50
- });
51
-
52
- if (!res.ok) {
53
- const text = await res.text();
54
- throw new Error(`LLM request failed (${res.status}): ${text.slice(0, 300)}`);
55
- }
56
-
57
- const data = await res.json();
58
- const content = data.choices?.[0]?.message?.content ?? '';
59
- const text = typeof content === 'string' ? content : JSON.stringify(content);
60
- const rawUsage = data.usage;
61
- const usage = {
62
- prompt_tokens: rawUsage?.prompt_tokens ?? 0,
63
- completion_tokens: rawUsage?.completion_tokens ?? 0,
64
- total_tokens: rawUsage?.total_tokens ?? (rawUsage?.prompt_tokens ?? 0) + (rawUsage?.completion_tokens ?? 0),
65
- };
66
- return { content: text, usage };
67
- }
68
-
18
+ class DocsLLMProvider extends BaseLLMProvider {
69
19
  /**
70
20
  * Summarize what a target file (group) does from its endpoints and handler context.
71
21
  * @param {string} groupId - Group id (e.g. target file path without .target.js)
@@ -166,22 +116,17 @@ class LLMProvider {
166
116
  }
167
117
 
168
118
  /**
169
- * Create an LLM provider from config.
170
- * Single OpenAI-compatible setup: works with OpenAI, OpenRouter, Ollama (compat), Azure, etc.
171
- *
119
+ * Create a docs-specific LLM provider from config (same config shape as lib/llm).
172
120
  * @param {object} config - { baseURL?, apiKey?, model? }
173
- * - baseURL: e.g. 'https://api.openai.com/v1' | 'https://openrouter.ai/api/v1' | 'http://localhost:11434/v1'
174
- * - apiKey: optional for local (e.g. Ollama); use OPENAI_API_KEY or OPENROUTER_API_KEY
175
- * - model: e.g. 'gpt-4o-mini' | 'openai/gpt-4o-mini' (OpenRouter)
176
- * @returns {LLMProvider}
121
+ * @returns {DocsLLMProvider}
177
122
  */
178
123
  function createProvider(config) {
179
124
  if (!config || typeof config !== 'object') {
180
- return new LLMProvider({});
125
+ return new DocsLLMProvider({});
181
126
  }
182
- return new LLMProvider(config);
127
+ return new DocsLLMProvider(config);
183
128
  }
184
129
 
185
- export { LLMProvider, createProvider };
186
- export { extractJSON, extractJSONArray } from './parse.js';
187
- export default LLMProvider;
130
+ export { DocsLLMProvider as LLMProvider, createProvider };
131
+ export { extractJSON, extractJSONArray, reconcileOrderedTags } from '../../lib/llm/index.js';
132
+ export default DocsLLMProvider;
@@ -7,13 +7,13 @@
7
7
  * - openapi/level3.js — level-3 pipeline: reorder tags by importance, generate and write overview page
8
8
  * - analysis/handler-analyzer.js — detect HTTP methods from handler source
9
9
  * - analysis/source-resolver.js — resolve target file and dependencies (for level 2 context)
10
- * - llm/ — LLM provider (enhanceEndpointDocs, summarizeTargetGroup, reorderTagsByImportance, generateOverviewPage)
10
+ * - docs-llm/ — Docs-specific LLM provider (enhanceEndpointDocs, summarizeTargetGroup, reorderTagsByImportance, generateOverviewPage)
11
11
  * - ui/docs-ui.js — build Scalar docs HTML, registerDocRoutes
12
12
  */
13
13
 
14
14
  import { writeFile } from 'node:fs/promises';
15
15
  import TejLogger from 'tej-logger';
16
- import { createProvider } from './llm/index.js';
16
+ import { createProvider } from './docs-llm/index.js';
17
17
  import { generateOpenAPISpec } from './openapi/generator.js';
18
18
  import { runLevel3 } from './openapi/level3.js';
19
19
  import targetRegistry from '../server/targets/registry.js';
@@ -141,6 +141,6 @@ export async function generateDocs(registry = targetRegistry, options = {}) {
141
141
  }
142
142
 
143
143
  export { generateOpenAPISpec } from './openapi/generator.js';
144
- export { createProvider } from './llm/index.js';
144
+ export { createProvider } from './docs-llm/index.js';
145
145
  export { buildDocsPage } from './ui/docs-ui.js';
146
146
  export { analyzeHandler, detectMethods } from './analysis/handler-analyzer.js';
package/docs/ammo.md CHANGED
@@ -138,33 +138,34 @@ After `fire()` is called, the sent data is available as `ammo.dispatchedData`.
138
138
 
139
139
  ### throw() — Send Error Response
140
140
 
141
- For intentional error responses:
141
+ **One mechanism** for error responses: you don't log the error and send the response separately — `ammo.throw()` takes care of everything. The framework uses the same `ammo.throw()` when it catches an error, so one config, one behaviour. For intentional errors, call `ammo.throw()` (or pass an error); when [LLM-inferred errors](./error-handling.md#llm-inferred-errors) are enabled, call with no arguments and an LLM infers status and message from code context. Explicit code/message always override. See [Error Handling](./error-handling.md) and per-call options (e.g. `messageType`).
142
142
 
143
143
  ```javascript
144
- // Send 500 Internal Server Error
145
- ammo.throw();
146
-
147
- // Send specific error code
144
+ // Explicit: status code and/or message
148
145
  ammo.throw(404);
149
146
  ammo.throw(404, 'User not found');
147
+ ammo.throw(new TejError(400, 'Invalid input'));
150
148
 
151
- // Throw from Error object
152
- ammo.throw(new Error('Something went wrong'));
149
+ // When errors.llm.enabled: no args — LLM infers from code context (surrounding + upstream/downstream)
150
+ ammo.throw();
153
151
 
154
- // Throw TejError
155
- import { TejError } from 'te.js';
156
- throw new TejError(400, 'Invalid input');
152
+ // Optional: pass caught error for secondary signal; LLM still uses code context (error stack) as primary
153
+ ammo.throw(caughtErr);
154
+
155
+ // Per-call: skip LLM or override message type
156
+ ammo.throw({ useLlm: false });
157
+ ammo.throw({ messageType: 'developer' });
157
158
  ```
158
159
 
159
160
  **All `throw()` signatures:**
160
161
 
161
162
  | Call | Status | Message |
162
163
  |------|--------|---------|
163
- | `throw()` | 500 | `"Internal Server Error"` |
164
+ | `throw()` | 500 or LLM-inferred | Default or LLM-derived from **code context** (see [LLM-inferred errors](./error-handling.md#llm-inferred-errors)) |
164
165
  | `throw(404)` | 404 | Default message for that status code |
165
166
  | `throw(404, "msg")` | 404 | `"msg"` |
166
167
  | `throw(new TejError(code, msg))` | `code` | `msg` |
167
- | `throw(new Error("msg"))` | 500 | `"msg"` (or parses numeric messages as status codes) |
168
+ | `throw(error)` (optional) | LLM-inferred | LLM-derived from code context (error stack used to find call site) |
168
169
 
169
170
  > **Note:** You don't need try-catch blocks in your handlers! Tejas automatically catches all errors and converts them to appropriate HTTP responses. Use `throw()` or `TejError` only for intentional, expected error conditions. See [Error Handling](./error-handling.md) for details.
170
171
 
@@ -359,4 +360,3 @@ target.register('/profile', authMiddleware, (ammo) => {
359
360
  });
360
361
  });
361
362
  ```
362
-
@@ -263,15 +263,16 @@ Send a response to the client.
263
263
 
264
264
  #### throw()
265
265
 
266
- Send an error response.
266
+ Send an error response. When [LLM-inferred errors](./error-handling.md#llm-inferred-errors) are enabled (`errors.llm.enabled`), calls without explicit status code or message use an LLM to infer code and message; explicit code/message always override.
267
267
 
268
268
  | Signature | Behavior |
269
269
  |-----------|----------|
270
- | `throw()` | 500 "Internal Server Error" |
270
+ | `throw()` | 500 "Internal Server Error" (or LLM-inferred when `errors.llm.enabled`) |
271
271
  | `throw(404)` | 404 with default status message |
272
272
  | `throw(404, "msg")` | 404 with custom message |
273
273
  | `throw(new TejError(code, msg))` | Uses TejError's code and message |
274
- | `throw(new Error("msg"))` | 500 with error message |
274
+ | `throw(new Error("msg"))` | 500 with error message, or LLM-inferred when `errors.llm.enabled` |
275
+ | LLM-inferred (no explicit code/message, `errors.llm.enabled`) | Status and message derived by LLM from context |
275
276
 
276
277
  #### redirect(url, statusCode)
277
278
 
@@ -307,7 +308,7 @@ Sends the default Tejas HTML entry page. Used internally for the root `/` route
307
308
 
308
309
  ## TejError Class
309
310
 
310
- Custom error class for HTTP errors.
311
+ Custom error class for HTTP errors. Use it when you want to set the response explicitly; both status code and message are optional when errors are passed through `ammo.throw()` with [LLM-inferred errors](./error-handling.md#llm-inferred-errors) enabled (the LLM can infer them).
311
312
 
312
313
  ```javascript
313
314
  import { TejError } from 'te.js';
@@ -317,8 +318,8 @@ throw new TejError(statusCode, message);
317
318
 
318
319
  | Parameter | Type | Description |
319
320
  |-----------|------|-------------|
320
- | `statusCode` | number | HTTP status code |
321
- | `message` | string | Error message |
321
+ | `statusCode` | number | HTTP status code (optional when LLM infers; otherwise use for override) |
322
+ | `message` | string | Error message (optional when LLM infers; otherwise use for override) |
322
323
 
323
324
  | Property | Type | Description |
324
325
  |----------|------|-------------|
package/docs/auto-docs.md CHANGED
@@ -213,3 +213,4 @@ npx tejas generate:docs
213
213
  - [CLI Reference](./cli.md) — Detailed CLI command documentation
214
214
  - [Configuration](./configuration.md) — Full framework configuration reference
215
215
  - [Routing](./routing.md) — Learn about endpoint metadata
216
+
@@ -66,9 +66,13 @@ app.takeoff();
66
66
  | `body.max_size` | `BODY_MAX_SIZE` | number | `10485760` (10 MB) | Maximum request body size in bytes. Requests exceeding this receive a 413 error |
67
67
  | `body.timeout` | `BODY_TIMEOUT` | number | `30000` (30 s) | Body parsing timeout in milliseconds. Requests exceeding this receive a 408 error |
68
68
 
69
+ ### LLM configuration (feature as parent, LLM inside each feature)
70
+
71
+ Tejas uses a **feature-as-parent** pattern: each feature that needs an LLM has its own `*.llm` block (`docs.llm` for auto-documentation, `errors.llm` for LLM-inferred errors). **Inheritance from `LLM_*`:** unset feature-specific values fall back to `LLM_BASE_URL`, `LLM_API_KEY`, and `LLM_MODEL`. One set of `LLM_*` env vars can serve both features when you don't override with `DOCS_LLM_*` or `ERRORS_LLM_*`. You can also use different LLMs per feature (e.g. a lighter model for errors, a stronger one for docs).
72
+
69
73
  ### Auto-Documentation
70
74
 
71
- These options configure the `tejas generate:docs` CLI command and the auto-documentation system. See [Auto-Documentation](./auto-docs.md) for full details.
75
+ These options configure the `tejas generate:docs` CLI command and the auto-documentation system. The **`docs.llm`** block is the LLM configuration for this feature. See [Auto-Documentation](./auto-docs.md) for full details.
72
76
 
73
77
  | Config Key | Env Variable | Type | Default | Description |
74
78
  |------------|-------------|------|---------|-------------|
@@ -78,12 +82,26 @@ These options configure the `tejas generate:docs` CLI command and the auto-docum
78
82
  | `docs.version` | — | string | `"1.0.0"` | API version in the OpenAPI `info` block |
79
83
  | `docs.description` | — | string | `""` | API description |
80
84
  | `docs.level` | — | number | `1` | LLM enhancement level (1–3). Higher = better docs, more tokens |
81
- | `docs.llm.baseURL` | `LLM_BASE_URL` | string | `"https://api.openai.com/v1"` | LLM provider endpoint |
82
- | `docs.llm.apiKey` | `LLM_API_KEY` | string | — | LLM provider API key |
83
- | `docs.llm.model` | `LLM_MODEL` | string | `"gpt-4o-mini"` | LLM model name |
85
+ | `docs.llm.baseURL` | `DOCS_LLM_BASE_URL` or `LLM_BASE_URL` | string | `"https://api.openai.com/v1"` | LLM provider endpoint for auto-docs |
86
+ | `docs.llm.apiKey` | `DOCS_LLM_API_KEY` or `LLM_API_KEY` | string | — | LLM provider API key for auto-docs |
87
+ | `docs.llm.model` | `DOCS_LLM_MODEL` or `LLM_MODEL` | string | `"gpt-4o-mini"` | LLM model for auto-docs |
84
88
  | `docs.overviewPath` | — | string | `"./API_OVERVIEW.md"` | Path for the generated overview page (level 3 only) |
85
89
  | `docs.productionBranch` | `DOCS_PRODUCTION_BRANCH` | string | `"main"` | Git branch that triggers `docs:on-push` |
86
90
 
91
+ ### Error handling (LLM-inferred errors)
92
+
93
+ When [LLM-inferred error codes and messages](./error-handling.md#llm-inferred-errors) are enabled, the **`errors.llm`** block configures the LLM used for inferring status code and message when you call `ammo.throw()` without explicit code or message. Unset values fall back to `LLM_BASE_URL`, `LLM_API_KEY`, `LLM_MODEL`. You can also enable (and optionally set connection options) by calling **`app.withLLMErrors(config?)`** before `takeoff()` — e.g. `app.withLLMErrors()` to use env/config for baseURL, apiKey, model, or `app.withLLMErrors({ baseURL, apiKey, model, messageType })` to override in code.
94
+
95
+ | Config Key | Env Variable | Type | Default | Description |
96
+ |------------|-------------|------|---------|-------------|
97
+ | `errors.llm.enabled` | `ERRORS_LLM_ENABLED` or `LLM_*` (for connection) | boolean | `false` | Enable LLM-inferred error code and message for `ammo.throw()` |
98
+ | `errors.llm.baseURL` | `ERRORS_LLM_BASE_URL` or `LLM_BASE_URL` | string | `"https://api.openai.com/v1"` | LLM provider endpoint for error inference |
99
+ | `errors.llm.apiKey` | `ERRORS_LLM_API_KEY` or `LLM_API_KEY` | string | — | LLM provider API key for error inference |
100
+ | `errors.llm.model` | `ERRORS_LLM_MODEL` or `LLM_MODEL` | string | `"gpt-4o-mini"` | LLM model for error inference |
101
+ | `errors.llm.messageType` | `ERRORS_LLM_MESSAGE_TYPE` or `LLM_MESSAGE_TYPE` | `"endUser"` \| `"developer"` | `"endUser"` | Default tone for LLM-generated message: `endUser` (safe for clients) or `developer` (technical detail). Overridable per `ammo.throw()` call. |
102
+
103
+ When enabled, the same behaviour applies whether you call `ammo.throw()` or the framework calls it when it catches an error — one mechanism, no separate config.
104
+
87
105
  ## Configuration File
88
106
 
89
107
  Create a `tejas.config.json` in your project root:
@@ -108,7 +126,19 @@ Create a `tejas.config.json` in your project root:
108
126
  "title": "My API",
109
127
  "version": "1.0.0",
110
128
  "level": 2,
111
- "productionBranch": "main"
129
+ "productionBranch": "main",
130
+ "llm": {
131
+ "baseURL": "https://api.openai.com/v1",
132
+ "model": "gpt-4o-mini"
133
+ }
134
+ },
135
+ "errors": {
136
+ "llm": {
137
+ "enabled": true,
138
+ "baseURL": "https://api.openai.com/v1",
139
+ "model": "gpt-4o-mini",
140
+ "messageType": "endUser"
141
+ }
112
142
  }
113
143
  }
114
144
  ```
@@ -132,10 +162,22 @@ BODY_TIMEOUT=15000
132
162
  # Target directory
133
163
  DIR_TARGETS=targets
134
164
 
135
- # LLM (for tejas generate:docs)
165
+ # LLM — shared fallback for docs.llm and errors.llm when feature-specific vars are unset
136
166
  LLM_BASE_URL=https://api.openai.com/v1
137
167
  LLM_API_KEY=sk-...
138
168
  LLM_MODEL=gpt-4o-mini
169
+
170
+ # Optional: override per feature (docs.llm)
171
+ # DOCS_LLM_BASE_URL=...
172
+ # DOCS_LLM_API_KEY=...
173
+ # DOCS_LLM_MODEL=...
174
+
175
+ # Optional: override for error-inference (errors.llm)
176
+ # ERRORS_LLM_ENABLED=true
177
+ # ERRORS_LLM_BASE_URL=...
178
+ # ERRORS_LLM_API_KEY=...
179
+ # ERRORS_LLM_MODEL=...
180
+ # ERRORS_LLM_MESSAGE_TYPE=endUser # or "developer" for technical messages
139
181
  ```
140
182
 
141
183
  ## Constructor Options
package/docs/database.md CHANGED
@@ -388,4 +388,3 @@ process.on('SIGTERM', async () => {
388
388
  process.exit(0);
389
389
  });
390
390
  ```
391
-
@@ -1,6 +1,6 @@
1
1
  # Error Handling
2
2
 
3
- Tejas provides robust error handling to keep your application running even when unexpected errors occur.
3
+ Tejas keeps your application from crashing on unhandled errors. You don't log the error and send the response separately — **`ammo.throw()` is the single mechanism**: it sends the appropriate HTTP response (logging is optional via `log.exceptions`). Whether you call `ammo.throw()` or the framework calls it when it catches an error, the same behaviour applies. When LLM-inferred errors are enabled, call `ammo.throw()` with no arguments and an LLM infers status and message from code context; explicit code and message always override.
4
4
 
5
5
  ## Zero-Config Error Handling
6
6
 
@@ -8,13 +8,7 @@ Tejas provides robust error handling to keep your application running even when
8
8
 
9
9
  ### How It Works
10
10
 
11
- Tejas wraps all middleware and route handlers with built-in error catching. Any error thrown in your code is automatically:
12
-
13
- 1. **Caught** by the framework's error handler
14
- 2. **Logged** (if exception logging is enabled)
15
- 3. **Converted** to an appropriate HTTP error response
16
-
17
- This means your application **never crashes** from unhandled exceptions, and clients always receive proper error responses.
11
+ Tejas wraps all middleware and route handlers with built-in error catching. Any error thrown in your code is automatically passed to `ammo.throw(err)` — the same mechanism you use for intentional errors. So: one place handles everything (response + optional logging via `log.exceptions`). No separate "log then send response"; your app never crashes and clients always receive a proper response.
18
12
 
19
13
  ### Write Clean Code Without Try-Catch
20
14
 
@@ -27,25 +21,24 @@ target.register('/users/:id', async (ammo) => {
27
21
  });
28
22
  ```
29
23
 
30
- Compare this to traditional frameworks where you'd need:
24
+ In other frameworks you typically **log the error and then send the response** (two separate steps). With Tejas, **`ammo.throw()` does both** — and when the framework catches an error it uses the same `ammo.throw()`, so you never define them separately:
31
25
 
32
26
  ```javascript
33
- // ❌ Traditional approach requires manual error handling
27
+ // ❌ Traditional: log then send response (two separate things)
34
28
  app.get('/users/:id', async (req, res) => {
35
29
  try {
36
30
  const user = await database.findUser(req.params.id);
37
- const posts = await database.getUserPosts(user.id);
38
- res.json({ user, posts });
31
+ res.json(user);
39
32
  } catch (error) {
40
- console.error(error);
41
- res.status(500).json({ error: 'Internal Server Error' });
33
+ console.error(error); // 1. log
34
+ res.status(500).json({ error: 'Internal Server Error' }); // 2. send response
42
35
  }
43
36
  });
44
37
  ```
45
38
 
46
39
  ### Automatic Error Responses
47
40
 
48
- When an unhandled error occurs, Tejas automatically sends a `500 Internal Server Error` response. For intentional errors using `TejError`, the appropriate status code is used.
41
+ When an unhandled error occurs, the framework calls `ammo.throw(err)` — the same method you use for intentional errors. So one mechanism: explicit `ammo.throw()` or framework-caught, both go through `ammo.throw()`. When [LLM-inferred errors](#llm-inferred-errors) are enabled, status and message are inferred from code context; otherwise, or when you pass explicit code/message, those are used.
49
42
 
50
43
  ### Enable Error Logging
51
44
 
@@ -60,26 +53,54 @@ const app = new Tejas({
60
53
  ```
61
54
 
62
55
  Or via environment variable:
56
+
63
57
  ```bash
64
58
  LOG_EXCEPTIONS=true
65
59
  ```
66
60
 
67
61
  ---
68
62
 
63
+ ## LLM-Inferred Errors
64
+
65
+ When **`errors.llm.enabled`** is true and you call `ammo.throw()` without an explicit status code or message, Tejas uses an LLM to infer an appropriate HTTP status code and message from **code context** — you do not pass an error object. The framework captures the code surrounding the `ammo.throw()` call (with line numbers) and all **upstream** (callers) and **downstream** (code that would have run next) context, and the LLM infers what went wrong from that. Explicit code and message always override.
66
+
67
+ - **No error object required:** Call `ammo.throw()` with no arguments (or only options). The LLM receives the source code around the call site and upstream call stacks so it can infer status and message from control flow and intent.
68
+ - **Opt-in:** Enable via config: `errors.llm.enabled: true` and configure `errors.llm` (baseURL, apiKey, model), or call **`app.withLLMErrors()`** / **`app.withLLMErrors({ baseURL, apiKey, model, messageType })`** before `takeoff()`. See [Configuration](./configuration.md#error-handling-llm-inferred-errors).
69
+ - **Framework-caught errors:** When the framework catches an unhandled error (in a handler or middleware), it uses the same `ammo.throw(err)` — so the same `errors.llm` config applies. No separate "log then send response"; one mechanism handles everything.
70
+ - **Override:** Whenever you pass a status code or message (e.g. `ammo.throw(404, 'User not found')` or `throw new TejError(404, 'User not found')`), that value is used; the LLM is not called.
71
+ - **Message type:** Configure whether the LLM generates **end-user-friendly** or **developer-friendly** messages via `errors.llm.messageType`; override per call (see [Per-call overrides](#per-call-overrides)).
72
+ - **Non-production:** In non-production, the LLM can also provide developer insight (e.g. bug vs environment, suggested fix), attached to the response as `_dev` or in logs only — never in production.
73
+
74
+ ### Per-call overrides
75
+
76
+ For any LLM-eligible `ammo.throw()` call (no explicit status code), you can pass an options object as the last argument to override behaviour for that call only:
77
+
78
+ - **`useLlm`** (boolean): Set to `false` to skip the LLM for this call and respond with a default 500 / "Internal Server Error" (or the error's message when you pass an Error/string). Set to `true` to force using the LLM (same as default when eligible).
79
+ - **`messageType`** (`"endUser"` | `"developer"`): Override the configured default for this call — request an end-user-friendly or developer-friendly message.
80
+
81
+ ```javascript
82
+ // Skip LLM for this call; send 500
83
+ ammo.throw({ useLlm: false });
84
+
85
+ // Request a developer-friendly message for this call only
86
+ ammo.throw({ messageType: 'developer' });
87
+
88
+ // When an error was caught and passed in, you can still pass options
89
+ ammo.throw(caughtErr, { useLlm: false });
90
+ ```
91
+
92
+ ---
93
+
69
94
  ## TejError Class
70
95
 
71
- Use `TejError` for throwing HTTP errors with status codes:
96
+ Use `TejError` for throwing HTTP errors with status codes. Both status code and message are **optional** when [LLM-inferred errors](#llm-inferred-errors) are enabled and the error is passed through `ammo.throw()`; otherwise, supply them to set the response explicitly.
72
97
 
73
98
  ```javascript
74
99
  import { TejError } from 'te.js';
75
100
 
76
- // Throw a 404 error
101
+ // Explicit code and message (always used as override)
77
102
  throw new TejError(404, 'User not found');
78
-
79
- // Throw a 400 error
80
103
  throw new TejError(400, 'Invalid email format');
81
-
82
- // Throw a 500 error
83
104
  throw new TejError(500, 'Database connection failed');
84
105
  ```
85
106
 
@@ -116,22 +137,22 @@ ammo.unauthorized();
116
137
 
117
138
  ## Using ammo.throw()
118
139
 
119
- For more control, use `ammo.throw()`:
140
+ For more control, use `ammo.throw()`. When [LLM-inferred errors](#llm-inferred-errors) are enabled, you can omit code and message and the LLM will infer them; otherwise, or when you want to override, pass them explicitly.
120
141
 
121
142
  ```javascript
122
- // Just status code (uses default message)
143
+ // Explicit: status code and/or message
123
144
  ammo.throw(404);
124
-
125
- // Status code with message
126
145
  ammo.throw(404, 'User not found');
146
+ ammo.throw(new TejError(400, 'Bad request'));
127
147
 
128
- // Error object
148
+ // When errors.llm.enabled: LLM infers code and message from context
129
149
  ammo.throw(new Error('Something went wrong'));
130
-
131
- // TejError
132
- ammo.throw(new TejError(400, 'Bad request'));
150
+ ammo.throw('Validation failed');
151
+ ammo.throw(); // context still used when available
133
152
  ```
134
153
 
154
+ See [Ammo — throw()](./ammo.md#throw--send-error-response) for all signatures and the LLM-inferred row.
155
+
135
156
  ## Error Handling in Routes
136
157
 
137
158
  ### Basic Pattern
@@ -375,15 +396,15 @@ Supported content types:
375
396
 
376
397
  ## Error Flow
377
398
 
378
- When any error occurs in your handler or middleware, this is what happens internally:
399
+ When any error occurs in your handler or middleware, the framework uses the **same** `ammo.throw(err)` you use for intentional errors — one mechanism:
379
400
 
380
401
  1. The framework's `executeChain()` catches the error
381
402
  2. If `LOG_EXCEPTIONS` is enabled, the error is logged
382
- 3. The error is passed to `ammo.throw()`:
383
- - **TejError** — uses the error's `code` and `message` directly
384
- - **Standard Error** — returns 500 with the error message
385
- - **Anything else** — returns 500 with a string representation
386
- 4. `ammo.throw()` calls `ammo.fire(statusCode, message)` to send the HTTP response
403
+ 3. The error is passed to `ammo.throw(err)` (no separate "send response" step — `ammo.throw()` does it)
404
+ 4. **TejError** — uses the error's `code` and `message` directly
405
+ 5. **When errors.llm.enabled** — LLM infers status and message from code context (same as explicit `ammo.throw()`)
406
+ 6. **Otherwise** — 500 with the error message or string representation
407
+ 7. `ammo.throw()` sends the HTTP response via `ammo.fire(statusCode, message)`
387
408
 
388
409
  Once a response has been sent (`res.headersSent` is true), no further middleware or handlers execute.
389
410
 
@@ -413,5 +434,5 @@ Once a response has been sent (`res.headersSent` is true), no further middleware
413
434
  4. **Log errors** — Enable exception logging for debugging
414
435
  5. **Be consistent** — Use the same error format throughout your API
415
436
  6. **Validate early** — Check input before processing
416
- 7. **Use TejError** — For HTTP-specific errors with status codes
417
-
437
+ 7. **Use TejError or ammo.throw(code, message)** — For HTTP-specific errors when you want explicit control
438
+ 8. **Opt in to LLM-inferred errors when helpful** — Enable via `errors.llm.enabled` in config or **`app.withLLMErrors(config?)`** before `takeoff()`, then configure baseURL, apiKey, and model so you can throw without specifying code or message and let the LLM infer them; see [Configuration](./configuration.md#error-handling-llm-inferred-errors)
@@ -331,4 +331,3 @@ target.register('/images/:filename', (ammo) => {
331
331
  4. **Store outside web root** — For sensitive files, store in private directories
332
332
  5. **Clean up on errors** — Delete uploaded files if validation fails
333
333
  6. **Scan for malware** — For production systems, integrate virus scanning
334
-
@@ -212,4 +212,3 @@ api.register('/echo', (ammo) => {
212
212
  }
213
213
  });
214
214
  ```
215
-
@@ -353,4 +353,3 @@ target.register('/expensive', cache(300), (ammo) => {
353
353
  4. **Use factories** — For configurable middleware
354
354
  5. **Order matters** — Place authentication before authorization
355
355
  6. **Don't mutate payload directly** — Add new properties instead
356
-
@@ -391,4 +391,3 @@ The built-in backends (`MemoryStorage` and `RedisStorage`) both extend this base
391
391
  5. **Provide clear error messages** — Tell users when they can retry
392
392
  6. **Consider user tiers** — Premium users may need higher limits
393
393
  7. **Monitor and adjust** — Track rate limit hits and adjust accordingly
394
-
package/package.json CHANGED
@@ -1,11 +1,11 @@
1
1
  {
2
2
  "name": "te.js",
3
- "version": "2.0.1",
3
+ "version": "2.1.0",
4
4
  "description": "A nodejs framework",
5
5
  "type": "module",
6
6
  "main": "te.js",
7
7
  "bin": {
8
- "tejas": "./cli/index.js"
8
+ "tejas": "cli/index.js"
9
9
  },
10
10
  "scripts": {
11
11
  "start": "node te.js",
@@ -25,8 +25,19 @@
25
25
  },
26
26
  "repository": {
27
27
  "type": "git",
28
- "url": "git@github.com:hirakchhatbar/te.js.git"
28
+ "url": "git+ssh://git@github.com/hirakchhatbar/te.js.git"
29
29
  },
30
+ "files": [
31
+ "te.js",
32
+ "cli",
33
+ "server",
34
+ "database",
35
+ "rate-limit",
36
+ "utils",
37
+ "auto-docs",
38
+ "README.md",
39
+ "docs"
40
+ ],
30
41
  "dependencies": {
31
42
  "ansi-colors": "^4.1.3",
32
43
  "filesize": "^10.1.1",