te.js 2.1.0 → 2.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66):
  1. package/README.md +197 -196
  2. package/auto-docs/analysis/handler-analyzer.js +58 -58
  3. package/auto-docs/analysis/source-resolver.js +101 -101
  4. package/auto-docs/constants.js +37 -37
  5. package/auto-docs/docs-llm/index.js +7 -7
  6. package/auto-docs/docs-llm/prompts.js +222 -222
  7. package/auto-docs/docs-llm/provider.js +132 -132
  8. package/auto-docs/index.js +146 -146
  9. package/auto-docs/openapi/endpoint-processor.js +277 -277
  10. package/auto-docs/openapi/generator.js +107 -107
  11. package/auto-docs/openapi/level3.js +131 -131
  12. package/auto-docs/openapi/spec-builders.js +244 -244
  13. package/auto-docs/ui/docs-ui.js +186 -186
  14. package/auto-docs/utils/logger.js +17 -17
  15. package/auto-docs/utils/strip-usage.js +10 -10
  16. package/cli/docs-command.js +315 -315
  17. package/cli/fly-command.js +71 -71
  18. package/cli/index.js +56 -56
  19. package/database/index.js +165 -165
  20. package/database/mongodb.js +146 -146
  21. package/database/redis.js +201 -201
  22. package/docs/README.md +36 -36
  23. package/docs/ammo.md +362 -362
  24. package/docs/api-reference.md +490 -490
  25. package/docs/auto-docs.md +216 -216
  26. package/docs/cli.md +152 -152
  27. package/docs/configuration.md +275 -275
  28. package/docs/database.md +390 -390
  29. package/docs/error-handling.md +438 -438
  30. package/docs/file-uploads.md +333 -333
  31. package/docs/getting-started.md +214 -214
  32. package/docs/middleware.md +355 -355
  33. package/docs/rate-limiting.md +393 -393
  34. package/docs/routing.md +302 -302
  35. package/package.json +62 -62
  36. package/rate-limit/algorithms/fixed-window.js +141 -141
  37. package/rate-limit/algorithms/sliding-window.js +147 -147
  38. package/rate-limit/algorithms/token-bucket.js +115 -115
  39. package/rate-limit/base.js +165 -165
  40. package/rate-limit/index.js +147 -147
  41. package/rate-limit/storage/base.js +104 -104
  42. package/rate-limit/storage/memory.js +101 -101
  43. package/rate-limit/storage/redis.js +88 -88
  44. package/server/ammo/body-parser.js +220 -220
  45. package/server/ammo/dispatch-helper.js +103 -103
  46. package/server/ammo/enhancer.js +57 -57
  47. package/server/ammo.js +454 -415
  48. package/server/endpoint.js +97 -74
  49. package/server/error.js +9 -9
  50. package/server/errors/code-context.js +125 -125
  51. package/server/errors/llm-error-service.js +140 -140
  52. package/server/files/helper.js +33 -33
  53. package/server/files/uploader.js +143 -143
  54. package/server/handler.js +158 -119
  55. package/server/target.js +185 -175
  56. package/server/targets/middleware-validator.js +22 -22
  57. package/server/targets/path-validator.js +21 -21
  58. package/server/targets/registry.js +160 -160
  59. package/server/targets/shoot-validator.js +21 -21
  60. package/te.js +428 -402
  61. package/utils/auto-register.js +17 -17
  62. package/utils/configuration.js +64 -64
  63. package/utils/errors-llm-config.js +84 -84
  64. package/utils/request-logger.js +43 -43
  65. package/utils/status-codes.js +82 -82
  66. package/utils/tejas-entrypoint-html.js +18 -18
@@ -1,132 +1,132 @@
/**
 * LLM provider for auto-documentation: extends shared lib/llm client with doc-specific methods.
 * Single OpenAI-compatible implementation; works with OpenAI, OpenRouter, Ollama, Azure, etc.
 */

import { LLMProvider as BaseLLMProvider, extractJSON, extractJSONArray, reconcileOrderedTags } from '../../lib/llm/index.js';
import {
  buildSummarizeGroupPrompt,
  buildEnhanceEndpointPrompt,
  buildEnhanceEndpointPerMethodPrompt,
  buildReorderTagsPrompt,
  buildOverviewPrompt,
} from './prompts.js';

/**
 * Docs-specific LLM provider: base analyze() from lib/llm plus summarizeTargetGroup, enhanceEndpointDocs, etc.
 */
class DocsLLMProvider extends BaseLLMProvider {
  /**
   * Summarize what a target file (group) does from its endpoints and handler context.
   * @param {string} groupId - Group id (e.g. target file path without .target.js)
   * @param {Array<object>} endpointsInfo - List of { path, methods, summary?, description?, handlerSource?, dependencySources? }
   * @param {string} [dependencySources] - Optional full context (target + dependencies) for Level 3
   * @returns {Promise<{ name: string, description: string }>} Tag name and description for OpenAPI
   */
  async summarizeTargetGroup(groupId, endpointsInfo, dependencySources = '') {
    if (!endpointsInfo?.length) {
      return { name: groupId, description: '', _usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 } };
    }
    const prompt = buildSummarizeGroupPrompt(groupId, endpointsInfo, dependencySources);
    const { content: raw, usage } = await this.analyze(prompt);
    const json = extractJSON(raw);
    if (!json)
      return {
        name: groupId.split('/').pop() || groupId,
        description: '',
        _usage: usage,
      };
    return {
      name: json.name || groupId.split('/').pop() || groupId,
      description: (json.description || '').trim(),
      _usage: usage,
    };
  }

  /**
   * Build enhanced OpenAPI-style metadata for an endpoint from handler info.
   * @param {object} endpointInfo - { path, methods, metadata?, handlerSource?, dependencySources? }
   * @returns {Promise<object>} Enhanced metadata (summary, description, request?, response?)
   */
  async enhanceEndpointDocs(endpointInfo) {
    const { path } = endpointInfo;
    const prompt = buildEnhanceEndpointPrompt(endpointInfo);
    const { content: raw, usage } = await this.analyze(prompt);
    const json = extractJSON(raw);
    if (!json) return { summary: path, description: '', _usage: usage };
    return {
      summary: json.summary || path,
      description: json.description || '',
      ...(json.request && { request: json.request }),
      ...(json.response && { response: json.response }),
      _usage: usage,
    };
  }

  /**
   * Build method-specific OpenAPI metadata so each HTTP method gets its own summary, description, request, and response.
   * Returns an object keyed by lowercase method (get, put, post, delete, patch, head, options). If the LLM
   * returns a flat shape or omits methods, the caller should fall back to shared metadata.
   *
   * @param {object} endpointInfo - { path, methods, metadata?, handlerSource?, dependencySources? }
   * @returns {Promise<object>} Method-keyed metadata, e.g. { get: { summary, description?, response }, put: { summary, request?, response }, ... }
   */
  async enhanceEndpointDocsPerMethod(endpointInfo) {
    const prompt = buildEnhanceEndpointPerMethodPrompt(endpointInfo);
    const { content: raw, usage } = await this.analyze(prompt);
    const json = extractJSON(raw);
    if (!json || typeof json !== 'object') return { _usage: usage, _fallback: true };
    return { ...json, _usage: usage };
  }

  /**
   * Return tag names ordered by importance (most important first) for use in OpenAPI spec.tags.
   * @param {object} spec - OpenAPI 3 spec with spec.tags (array of { name, description? })
   * @returns {Promise<{ orderedTagNames: string[], _usage?: object }>}
   */
  async reorderTagsByImportance(spec) {
    const tags = spec?.tags ?? [];
    if (!tags.length) return { orderedTagNames: [], _usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 } };
    const prompt = buildReorderTagsPrompt(spec);
    const { content: raw, usage } = await this.analyze(prompt);
    const parsed = extractJSON(raw) ?? extractJSONArray(raw);
    const orderedTagNames = Array.isArray(parsed)
      ? parsed.filter((n) => typeof n === 'string').map((n) => String(n).trim()).filter(Boolean)
      : tags.map((t) => t.name);
    const orderedTags = reconcileOrderedTags(orderedTagNames, tags);
    return {
      orderedTagNames: orderedTags.map((t) => t.name),
      _usage: usage,
      _orderedTags: orderedTags,
    };
  }

  /**
   * Generate a comprehensive project/API overview page in Markdown.
   * @param {object} spec - OpenAPI 3 spec (after reorder) with info, tags, paths
   * @param {object} [options] - { title?, version?, description? } (defaults from spec.info)
   * @returns {Promise<{ markdown: string, _usage?: object }>}
   */
  async generateOverviewPage(spec, options = {}) {
    const prompt = buildOverviewPrompt(spec, options);
    const { content: raw, usage } = await this.analyze(prompt);
    const markdown = typeof raw === 'string' ? raw.trim() : '';
    return { markdown, _usage: usage };
  }
}

/**
 * Create a docs-specific LLM provider from config (same config shape as lib/llm).
 * @param {object} config - { baseURL?, apiKey?, model? }
 * @returns {DocsLLMProvider}
 */
function createProvider(config) {
  if (!config || typeof config !== 'object') {
    return new DocsLLMProvider({});
  }
  return new DocsLLMProvider(config);
}

export { DocsLLMProvider as LLMProvider, createProvider };
export { extractJSON, extractJSONArray, reconcileOrderedTags } from '../../lib/llm/index.js';
export default DocsLLMProvider;
/**
 * LLM provider for auto-documentation: extends shared lib/llm client with doc-specific methods.
 * Single OpenAI-compatible implementation; works with OpenAI, OpenRouter, Ollama, Azure, etc.
 */

import { LLMProvider as BaseLLMProvider, extractJSON, extractJSONArray, reconcileOrderedTags } from '../../lib/llm/index.js';
import {
  buildSummarizeGroupPrompt,
  buildEnhanceEndpointPrompt,
  buildEnhanceEndpointPerMethodPrompt,
  buildReorderTagsPrompt,
  buildOverviewPrompt,
} from './prompts.js';

/**
 * Docs-specific LLM provider: base analyze() from lib/llm plus summarizeTargetGroup, enhanceEndpointDocs, etc.
 */
class DocsLLMProvider extends BaseLLMProvider {
  /**
   * Summarize what a target file (group) does from its endpoints and handler context.
   * @param {string} groupId - Group id (e.g. target file path without .target.js)
   * @param {Array<object>} endpointsInfo - List of { path, methods, summary?, description?, handlerSource?, dependencySources? }
   * @param {string} [dependencySources] - Optional full context (target + dependencies) for Level 3
   * @returns {Promise<{ name: string, description: string }>} Tag name and description for OpenAPI
   */
  async summarizeTargetGroup(groupId, endpointsInfo, dependencySources = '') {
    if (!endpointsInfo?.length) {
      return { name: groupId, description: '', _usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 } };
    }
    const prompt = buildSummarizeGroupPrompt(groupId, endpointsInfo, dependencySources);
    const { content: raw, usage } = await this.analyze(prompt);
    const json = extractJSON(raw);
    if (!json)
      return {
        name: groupId.split('/').pop() || groupId,
        description: '',
        _usage: usage,
      };
    return {
      name: json.name || groupId.split('/').pop() || groupId,
      description: (json.description || '').trim(),
      _usage: usage,
    };
  }

  /**
   * Build enhanced OpenAPI-style metadata for an endpoint from handler info.
   * @param {object} endpointInfo - { path, methods, metadata?, handlerSource?, dependencySources? }
   * @returns {Promise<object>} Enhanced metadata (summary, description, request?, response?)
   */
  async enhanceEndpointDocs(endpointInfo) {
    const { path } = endpointInfo;
    const prompt = buildEnhanceEndpointPrompt(endpointInfo);
    const { content: raw, usage } = await this.analyze(prompt);
    const json = extractJSON(raw);
    if (!json) return { summary: path, description: '', _usage: usage };
    return {
      summary: json.summary || path,
      description: json.description || '',
      ...(json.request && { request: json.request }),
      ...(json.response && { response: json.response }),
      _usage: usage,
    };
  }

  /**
   * Build method-specific OpenAPI metadata so each HTTP method gets its own summary, description, request, and response.
   * Returns an object keyed by lowercase method (get, put, post, delete, patch, head, options). If the LLM
   * returns a flat shape or omits methods, the caller should fall back to shared metadata.
   *
   * @param {object} endpointInfo - { path, methods, metadata?, handlerSource?, dependencySources? }
   * @returns {Promise<object>} Method-keyed metadata, e.g. { get: { summary, description?, response }, put: { summary, request?, response }, ... }
   */
  async enhanceEndpointDocsPerMethod(endpointInfo) {
    const prompt = buildEnhanceEndpointPerMethodPrompt(endpointInfo);
    const { content: raw, usage } = await this.analyze(prompt);
    const json = extractJSON(raw);
    if (!json || typeof json !== 'object') return { _usage: usage, _fallback: true };
    return { ...json, _usage: usage };
  }

  /**
   * Return tag names ordered by importance (most important first) for use in OpenAPI spec.tags.
   * @param {object} spec - OpenAPI 3 spec with spec.tags (array of { name, description? })
   * @returns {Promise<{ orderedTagNames: string[], _usage?: object }>}
   */
  async reorderTagsByImportance(spec) {
    const tags = spec?.tags ?? [];
    if (!tags.length) return { orderedTagNames: [], _usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 } };
    const prompt = buildReorderTagsPrompt(spec);
    const { content: raw, usage } = await this.analyze(prompt);
    const parsed = extractJSON(raw) ?? extractJSONArray(raw);
    const orderedTagNames = Array.isArray(parsed)
      ? parsed.filter((n) => typeof n === 'string').map((n) => String(n).trim()).filter(Boolean)
      : tags.map((t) => t.name);
    const orderedTags = reconcileOrderedTags(orderedTagNames, tags);
    return {
      orderedTagNames: orderedTags.map((t) => t.name),
      _usage: usage,
      _orderedTags: orderedTags,
    };
  }

  /**
   * Generate a comprehensive project/API overview page in Markdown.
   * @param {object} spec - OpenAPI 3 spec (after reorder) with info, tags, paths
   * @param {object} [options] - { title?, version?, description? } (defaults from spec.info)
   * @returns {Promise<{ markdown: string, _usage?: object }>}
   */
  async generateOverviewPage(spec, options = {}) {
    const prompt = buildOverviewPrompt(spec, options);
    const { content: raw, usage } = await this.analyze(prompt);
    const markdown = typeof raw === 'string' ? raw.trim() : '';
    return { markdown, _usage: usage };
  }
}

/**
 * Create a docs-specific LLM provider from config (same config shape as lib/llm).
 * @param {object} config - { baseURL?, apiKey?, model? }
 * @returns {DocsLLMProvider}
 */
function createProvider(config) {
  if (!config || typeof config !== 'object') {
    return new DocsLLMProvider({});
  }
  return new DocsLLMProvider(config);
}

export { DocsLLMProvider as LLMProvider, createProvider };
export { extractJSON, extractJSONArray, reconcileOrderedTags } from '../../lib/llm/index.js';
export default DocsLLMProvider;
@@ -1,146 +1,146 @@
1
- /**
2
- * Auto-docs orchestrator.
3
- *
4
- * Module layout:
5
- * - index.js (this file) — generateDocs(registry, options): orchestration, config, spec write, level-3 dispatch
6
- * - openapi/generator.js — build OpenAPI spec from registry (levels 1–2: handler analysis, LLM enhancement)
7
- * - openapi/level3.js — level-3 pipeline: reorder tags by importance, generate and write overview page
8
- * - analysis/handler-analyzer.js — detect HTTP methods from handler source
9
- * - analysis/source-resolver.js — resolve target file and dependencies (for level 2 context)
10
- * - docs-llm/ — Docs-specific LLM provider (enhanceEndpointDocs, summarizeTargetGroup, reorderTagsByImportance, generateOverviewPage)
11
- * - ui/docs-ui.js — build Scalar docs HTML, registerDocRoutes
12
- */
13
-
14
- import { writeFile } from 'node:fs/promises';
15
- import TejLogger from 'tej-logger';
16
- import { createProvider } from './docs-llm/index.js';
17
- import { generateOpenAPISpec } from './openapi/generator.js';
18
- import { runLevel3 } from './openapi/level3.js';
19
- import targetRegistry from '../server/targets/registry.js';
20
-
21
- const logger = new TejLogger('Tejas.AutoDocs');
22
-
23
- /**
24
- * Validate llm config, warn if no API key, create and return LLM provider.
25
- * @param {object} llmConfig - options.llm
26
- * @returns {object} LLM provider instance
27
- */
28
- function validateAndCreateLlm(llmConfig) {
29
- if (!llmConfig || typeof llmConfig !== 'object') {
30
- throw new Error(
31
- 'Documentation generation requires an LLM. Provide options.llm with { baseURL?, apiKey?, model? }.',
32
- );
33
- }
34
- const hasApiKey = llmConfig.apiKey || process.env.OPENAI_API_KEY;
35
- if (!hasApiKey) {
36
- logger.warn(
37
- 'No API key set. Provide options.llm.apiKey or OPENAI_API_KEY. Local providers (e.g. Ollama) may work without a key.',
38
- );
39
- }
40
- return createProvider(llmConfig);
41
- }
42
-
43
- /**
44
- * Log start summary when verbose (endpoints count, model, title, output file, building message).
45
- * @param {object} options - { info?, outputPath? }
46
- * @param {number} targetCount
47
- * @param {object} log - logger
48
- * @param {boolean} verbose
49
- */
50
- function logStartSummary(options, targetCount, log, verbose) {
51
- if (!verbose) return;
52
- const { info, outputPath } = options;
53
- log.info('OpenAPI documentation generation started.');
54
- log.info(` Endpoints in registry: ${targetCount}`);
55
- log.info(` LLM model: ${options.llm?.model ?? 'default'}`);
56
- if (info?.title) log.info(` API title: ${info.title}`);
57
- if (outputPath) log.info(` Output file: ${outputPath}`);
58
- if (targetCount === 0) {
59
- log.warn('No endpoints in registry; OpenAPI spec will be minimal.');
60
- } else {
61
- log.info('Building OpenAPI spec (analyzing handlers, LLM enhancement)...');
62
- }
63
- }
64
-
65
- /**
66
- * Log result summary when verbose (paths count, tags).
67
- * @param {object} spec - OpenAPI spec
68
- * @param {object} log - logger
69
- * @param {boolean} verbose
70
- */
71
- function logResultSummary(spec, log, verbose) {
72
- if (!verbose) return;
73
- const pathCount = spec.paths ? Object.keys(spec.paths).length : 0;
74
- const tagList = Array.isArray(spec.tags) ? spec.tags.map((t) => t.name).join(', ') : '';
75
- log.info(` Paths: ${pathCount}`);
76
- log.info(` Tags (groups): ${spec.tags?.length ?? 0} [ ${tagList || '—'}]`);
77
- }
78
-
79
- /**
80
- * Write spec JSON to outputPath if path is set. Logs when verbose.
81
- * @param {object} spec - OpenAPI spec
82
- * @param {string|undefined} outputPath
83
- * @param {object} log - logger
84
- * @param {boolean} verbose
85
- */
86
- async function writeSpecIfNeeded(spec, outputPath, log, verbose) {
87
- if (!outputPath || typeof outputPath !== 'string') return;
88
- if (verbose) log.info(`Writing spec to ${outputPath}...`);
89
- await writeFile(outputPath, JSON.stringify(spec, null, 2), 'utf8');
90
- if (verbose) log.info(`OpenAPI spec written to ${outputPath}.`);
91
- }
92
-
93
- /**
94
- * Generate OpenAPI 3.0 spec from the target registry using an LLM.
95
- *
96
- * @param {object} [registry] - Target registry with .targets (default: app registry)
97
- * @param {object} [options] - llm (required), info, servers, outputPath, level, dirTargets, overviewPath, verbose
98
- * @returns {Promise<object>} OpenAPI 3.0 spec object
99
- */
100
- export async function generateDocs(registry = targetRegistry, options = {}) {
101
- const {
102
- llm: llmConfig,
103
- info,
104
- servers,
105
- outputPath,
106
- level,
107
- dirTargets,
108
- overviewPath: overviewPathOption,
109
- verbose = false,
110
- } = options;
111
- const targets = registry?.targets ?? [];
112
-
113
- const llm = validateAndCreateLlm(llmConfig);
114
- logStartSummary({ ...options, llm: llmConfig }, targets.length, logger, verbose);
115
-
116
- const spec = await generateOpenAPISpec(registry, {
117
- llm,
118
- info,
119
- servers,
120
- level,
121
- dirTargets,
122
- verbose,
123
- logger,
124
- });
125
-
126
- logResultSummary(spec, logger, verbose);
127
- await writeSpecIfNeeded(spec, outputPath, logger, verbose);
128
-
129
- if (level === 3 && llm) {
130
- await runLevel3(spec, {
131
- outputPath,
132
- overviewPath: overviewPathOption,
133
- info,
134
- verbose,
135
- logger,
136
- }, llm);
137
- }
138
-
139
- if (verbose) logger.info('OpenAPI documentation generation completed.');
140
- return spec;
141
- }
142
-
143
- export { generateOpenAPISpec } from './openapi/generator.js';
144
- export { createProvider } from './docs-llm/index.js';
145
- export { buildDocsPage } from './ui/docs-ui.js';
146
- export { analyzeHandler, detectMethods } from './analysis/handler-analyzer.js';
1
+ /**
2
+ * Auto-docs orchestrator.
3
+ *
4
+ * Module layout:
5
+ * - index.js (this file) — generateDocs(registry, options): orchestration, config, spec write, level-3 dispatch
6
+ * - openapi/generator.js — build OpenAPI spec from registry (levels 1–2: handler analysis, LLM enhancement)
7
+ * - openapi/level3.js — level-3 pipeline: reorder tags by importance, generate and write overview page
8
+ * - analysis/handler-analyzer.js — detect HTTP methods from handler source
9
+ * - analysis/source-resolver.js — resolve target file and dependencies (for level 2 context)
10
+ * - docs-llm/ — Docs-specific LLM provider (enhanceEndpointDocs, summarizeTargetGroup, reorderTagsByImportance, generateOverviewPage)
11
+ * - ui/docs-ui.js — build Scalar docs HTML, registerDocRoutes
12
+ */
13
+
14
+ import { writeFile } from 'node:fs/promises';
15
+ import TejLogger from 'tej-logger';
16
+ import { createProvider } from './docs-llm/index.js';
17
+ import { generateOpenAPISpec } from './openapi/generator.js';
18
+ import { runLevel3 } from './openapi/level3.js';
19
+ import targetRegistry from '../server/targets/registry.js';
20
+
21
+ const logger = new TejLogger('Tejas.AutoDocs');
22
+
23
+ /**
24
+ * Validate llm config, warn if no API key, create and return LLM provider.
25
+ * @param {object} llmConfig - options.llm
26
+ * @returns {object} LLM provider instance
27
+ */
28
+ function validateAndCreateLlm(llmConfig) {
29
+ if (!llmConfig || typeof llmConfig !== 'object') {
30
+ throw new Error(
31
+ 'Documentation generation requires an LLM. Provide options.llm with { baseURL?, apiKey?, model? }.',
32
+ );
33
+ }
34
+ const hasApiKey = llmConfig.apiKey || process.env.OPENAI_API_KEY;
35
+ if (!hasApiKey) {
36
+ logger.warn(
37
+ 'No API key set. Provide options.llm.apiKey or OPENAI_API_KEY. Local providers (e.g. Ollama) may work without a key.',
38
+ );
39
+ }
40
+ return createProvider(llmConfig);
41
+ }
42
+
43
+ /**
44
+ * Log start summary when verbose (endpoints count, model, title, output file, building message).
45
+ * @param {object} options - { info?, outputPath? }
46
+ * @param {number} targetCount
47
+ * @param {object} log - logger
48
+ * @param {boolean} verbose
49
+ */
50
+ function logStartSummary(options, targetCount, log, verbose) {
51
+ if (!verbose) return;
52
+ const { info, outputPath } = options;
53
+ log.info('OpenAPI documentation generation started.');
54
+ log.info(` Endpoints in registry: ${targetCount}`);
55
+ log.info(` LLM model: ${options.llm?.model ?? 'default'}`);
56
+ if (info?.title) log.info(` API title: ${info.title}`);
57
+ if (outputPath) log.info(` Output file: ${outputPath}`);
58
+ if (targetCount === 0) {
59
+ log.warn('No endpoints in registry; OpenAPI spec will be minimal.');
60
+ } else {
61
+ log.info('Building OpenAPI spec (analyzing handlers, LLM enhancement)...');
62
+ }
63
+ }
64
+
65
+ /**
66
+ * Log result summary when verbose (paths count, tags).
67
+ * @param {object} spec - OpenAPI spec
68
+ * @param {object} log - logger
69
+ * @param {boolean} verbose
70
+ */
71
+ function logResultSummary(spec, log, verbose) {
72
+ if (!verbose) return;
73
+ const pathCount = spec.paths ? Object.keys(spec.paths).length : 0;
74
+ const tagList = Array.isArray(spec.tags) ? spec.tags.map((t) => t.name).join(', ') : '';
75
+ log.info(` Paths: ${pathCount}`);
76
+ log.info(` Tags (groups): ${spec.tags?.length ?? 0} [ ${tagList || '—'}]`);
77
+ }
78
+
79
+ /**
80
+ * Write spec JSON to outputPath if path is set. Logs when verbose.
81
+ * @param {object} spec - OpenAPI spec
82
+ * @param {string|undefined} outputPath
83
+ * @param {object} log - logger
84
+ * @param {boolean} verbose
85
+ */
86
+ async function writeSpecIfNeeded(spec, outputPath, log, verbose) {
87
+ if (!outputPath || typeof outputPath !== 'string') return;
88
+ if (verbose) log.info(`Writing spec to ${outputPath}...`);
89
+ await writeFile(outputPath, JSON.stringify(spec, null, 2), 'utf8');
90
+ if (verbose) log.info(`OpenAPI spec written to ${outputPath}.`);
91
+ }
92
+
93
+ /**
94
+ * Generate OpenAPI 3.0 spec from the target registry using an LLM.
95
+ *
96
+ * @param {object} [registry] - Target registry with .targets (default: app registry)
97
+ * @param {object} [options] - llm (required), info, servers, outputPath, level, dirTargets, overviewPath, verbose
98
+ * @returns {Promise<object>} OpenAPI 3.0 spec object
99
+ */
100
+ export async function generateDocs(registry = targetRegistry, options = {}) {
101
+ const {
102
+ llm: llmConfig,
103
+ info,
104
+ servers,
105
+ outputPath,
106
+ level,
107
+ dirTargets,
108
+ overviewPath: overviewPathOption,
109
+ verbose = false,
110
+ } = options;
111
+ const targets = registry?.targets ?? [];
112
+
113
+ const llm = validateAndCreateLlm(llmConfig);
114
+ logStartSummary({ ...options, llm: llmConfig }, targets.length, logger, verbose);
115
+
116
+ const spec = await generateOpenAPISpec(registry, {
117
+ llm,
118
+ info,
119
+ servers,
120
+ level,
121
+ dirTargets,
122
+ verbose,
123
+ logger,
124
+ });
125
+
126
+ logResultSummary(spec, logger, verbose);
127
+ await writeSpecIfNeeded(spec, outputPath, logger, verbose);
128
+
129
+ if (level === 3 && llm) {
130
+ await runLevel3(spec, {
131
+ outputPath,
132
+ overviewPath: overviewPathOption,
133
+ info,
134
+ verbose,
135
+ logger,
136
+ }, llm);
137
+ }
138
+
139
+ if (verbose) logger.info('OpenAPI documentation generation completed.');
140
+ return spec;
141
+ }
142
+
143
+ export { generateOpenAPISpec } from './openapi/generator.js';
144
+ export { createProvider } from './docs-llm/index.js';
145
+ export { buildDocsPage } from './ui/docs-ui.js';
146
+ export { analyzeHandler, detectMethods } from './analysis/handler-analyzer.js';