te.js 2.1.1 → 2.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/cors/index.js +71 -0
- package/lib/llm/client.js +73 -0
- package/lib/llm/index.js +7 -0
- package/lib/llm/parse.js +89 -0
- package/package.json +4 -2
package/cors/index.js
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
/**
 * CORS middleware factory. Handles OPTIONS preflight with 204 and sets CORS response headers.
 *
 * @param {Object} config - CORS configuration
 * @param {string|string[]|((origin: string) => boolean)} [config.origin='*'] - Allowed origin(s): '*' or array of origins or function
 * @param {string[]} [config.methods=['GET','POST','PUT','DELETE','PATCH','HEAD','OPTIONS']] - Allowed methods for Access-Control-Allow-Methods
 * @param {string[]} [config.allowedHeaders=['Content-Type','Authorization']] - Allowed request headers for Access-Control-Allow-Headers
 * @param {boolean} [config.credentials=false] - Access-Control-Allow-Credentials (use with specific origin, not '*')
 * @param {number} [config.maxAge] - Access-Control-Max-Age in seconds for preflight cache
 * @returns {Function} Middleware (ammo, next)
 */
function corsMiddleware(config = {}) {
  const {
    origin = '*',
    methods = ['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'HEAD', 'OPTIONS'],
    allowedHeaders = ['Content-Type', 'Authorization'],
    credentials = false,
    maxAge,
  } = config;

  const methodsList = Array.isArray(methods)
    ? methods.map((m) => String(m).toUpperCase()).join(', ')
    : String(methods);
  const headersList = Array.isArray(allowedHeaders)
    ? allowedHeaders.join(', ')
    : String(allowedHeaders);

  // Whenever the Access-Control-Allow-Origin value can differ per request
  // (origin function, origin list, a single specific origin, or '*' with
  // credentials — which echoes the request origin), responses must be cached
  // keyed on the Origin header. Without `Vary: Origin`, a shared cache could
  // replay one origin's CORS headers to a different origin.
  const originVaries =
    typeof origin === 'function' || Array.isArray(origin) || origin !== '*' || credentials;

  // Map the request's Origin header to the Access-Control-Allow-Origin value,
  // or null when the origin is not allowed (header is then omitted).
  const resolveOrigin = (requestOrigin) => {
    if (typeof origin === 'function') {
      return origin(requestOrigin) ? requestOrigin || '*' : null;
    }
    if (origin === '*') {
      // '*' is invalid alongside credentials; echo the concrete origin instead.
      return credentials ? (requestOrigin || '*') : '*';
    }
    if (Array.isArray(origin)) {
      const normalized = (requestOrigin || '').toLowerCase();
      const allowed = origin.some(
        (o) => String(o).toLowerCase() === normalized,
      );
      return allowed ? requestOrigin : null;
    }
    return String(origin) === (requestOrigin || '') ? requestOrigin : null;
  };

  return async (ammo, next) => {
    const requestOrigin = ammo.req.headers.origin;

    const allowOrigin = resolveOrigin(requestOrigin);
    if (allowOrigin != null) {
      ammo.res.setHeader('Access-Control-Allow-Origin', allowOrigin);
    }
    if (originVaries) {
      // NOTE(review): setHeader replaces any prior Vary value; fine as long as
      // no earlier middleware sets Vary — confirm if middleware order changes.
      ammo.res.setHeader('Vary', 'Origin');
    }
    ammo.res.setHeader('Access-Control-Allow-Methods', methodsList);
    ammo.res.setHeader('Access-Control-Allow-Headers', headersList);
    if (credentials) {
      ammo.res.setHeader('Access-Control-Allow-Credentials', 'true');
    }
    if (maxAge != null && Number.isFinite(maxAge)) {
      ammo.res.setHeader('Access-Control-Max-Age', String(maxAge));
    }

    // Preflight: answer 204 immediately; the actual handler never runs.
    if (ammo.req.method === 'OPTIONS') {
      ammo.res.writeHead(204);
      ammo.res.end();
      return;
    }

    await next();
  };
}

export default corsMiddleware;
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
/**
 * Generic OpenAI-compatible LLM client for te.js.
 * POSTs to {baseURL}/chat/completions; used by auto-docs, error-inference, and future LLM features.
 * No provider-specific npm dependencies — uses fetch() only.
 */

const DEFAULT_BASE_URL = 'https://api.openai.com/v1';
const DEFAULT_MODEL = 'gpt-4o-mini';

/**
 * OpenAI-compatible LLM provider. Exposes only constructor and analyze(prompt).
 */
class LLMProvider {
  constructor(options = {}) {
    // Normalize the base URL so endpoint joins never produce a double slash.
    const base = options.baseURL ?? DEFAULT_BASE_URL;
    this.baseURL = base.endsWith('/') ? base.slice(0, -1) : base;
    this.model = options.model ?? DEFAULT_MODEL;
    this.apiKey = options.apiKey ?? process.env.OPENAI_API_KEY;
    this.options = options;
  }

  /**
   * Send a prompt to the LLM and return the raw text response and usage.
   * @param {string} prompt
   * @returns {Promise<{ content: string, usage: { prompt_tokens: number, completion_tokens: number, total_tokens: number } }>}
   */
  async analyze(prompt) {
    const endpoint = `${this.baseURL}/chat/completions`;

    const requestHeaders = { 'Content-Type': 'application/json' };
    if (this.apiKey) {
      requestHeaders.Authorization = `Bearer ${this.apiKey}`;
    }

    const response = await fetch(endpoint, {
      method: 'POST',
      headers: requestHeaders,
      body: JSON.stringify({
        model: this.model,
        messages: [{ role: 'user', content: prompt }],
      }),
    });

    if (!response.ok) {
      // Cap the upstream error body so failures stay log-friendly.
      const text = await response.text();
      throw new Error(`LLM request failed (${response.status}): ${text.slice(0, 300)}`);
    }

    const data = await response.json();
    const raw = data.choices?.[0]?.message?.content ?? '';
    const promptTokens = data.usage?.prompt_tokens ?? 0;
    const completionTokens = data.usage?.completion_tokens ?? 0;

    return {
      // Some providers return structured content; serialize anything non-string.
      content: typeof raw === 'string' ? raw : JSON.stringify(raw),
      usage: {
        prompt_tokens: promptTokens,
        completion_tokens: completionTokens,
        total_tokens: data.usage?.total_tokens ?? promptTokens + completionTokens,
      },
    };
  }
}
|
|
60
|
+
|
|
61
|
+
/**
|
|
62
|
+
* Create an LLM provider from config.
|
|
63
|
+
* @param {object} config - { baseURL?, apiKey?, model? }
|
|
64
|
+
* @returns {LLMProvider}
|
|
65
|
+
*/
|
|
66
|
+
function createProvider(config) {
|
|
67
|
+
if (!config || typeof config !== 'object') {
|
|
68
|
+
return new LLMProvider({});
|
|
69
|
+
}
|
|
70
|
+
return new LLMProvider(config);
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
export { LLMProvider, createProvider };
|
package/lib/llm/index.js
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
/**
 * Shared LLM module for te.js: generic client and parse utilities.
 * Used by auto-docs, error-inference, and future LLM features.
 */

// OpenAI-compatible client: LLMProvider class and createProvider factory.
export { LLMProvider, createProvider } from './client.js';
// Response parsing helpers: JSON/array extraction and tag-order reconciliation.
export { extractJSON, extractJSONArray, reconcileOrderedTags } from './parse.js';
|
package/lib/llm/parse.js
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
/**
 * Parse JSON from LLM response text (handles markdown code blocks).
 * Shared by auto-docs, error-inference, and other LLM features.
 */

/**
 * Extract the first JSON object from a string.
 * Brace matching is string-literal aware, so braces inside JSON strings
 * (e.g. {"a": "}"}) do not terminate the scan early.
 * @param {string} str - Raw LLM response
 * @returns {object|null}
 */
export function extractJSON(str) {
  if (!str || typeof str !== 'string') return null;
  const trimmed = str.trim();
  const open = trimmed.indexOf('{');
  if (open === -1) return null;
  let depth = 0;
  let end = -1;
  let inString = false;
  let escaped = false;
  for (let i = open; i < trimmed.length; i++) {
    const ch = trimmed[i];
    if (inString) {
      // Inside a JSON string: only an unescaped quote ends it; a backslash
      // escapes the next character (so \" does not close the string).
      if (escaped) escaped = false;
      else if (ch === '\\') escaped = true;
      else if (ch === '"') inString = false;
      continue;
    }
    if (ch === '"') {
      inString = true;
    } else if (ch === '{') {
      depth++;
    } else if (ch === '}') {
      depth--;
      if (depth === 0) {
        end = i + 1;
        break;
      }
    }
  }
  if (end === -1) return null;
  try {
    return JSON.parse(trimmed.slice(open, end));
  } catch {
    // Balanced braces but not valid JSON (e.g. trailing comma): treat as absent.
    return null;
  }
}
|
|
35
|
+
|
|
36
|
+
/**
 * Extract the first JSON array from a string.
 * Bracket matching is string-literal aware, so brackets inside JSON strings
 * (e.g. ["a]b"]) do not terminate the scan early.
 * @param {string} str - Raw LLM response
 * @returns {Array|null}
 */
export function extractJSONArray(str) {
  if (!str || typeof str !== 'string') return null;
  const trimmed = str.trim();
  const open = trimmed.indexOf('[');
  if (open === -1) return null;
  let depth = 0;
  let end = -1;
  let inString = false;
  let escaped = false;
  for (let i = open; i < trimmed.length; i++) {
    const ch = trimmed[i];
    if (inString) {
      // Inside a JSON string: only an unescaped quote ends it; a backslash
      // escapes the next character (so \" does not close the string).
      if (escaped) escaped = false;
      else if (ch === '\\') escaped = true;
      else if (ch === '"') inString = false;
      continue;
    }
    if (ch === '"') {
      inString = true;
    } else if (ch === '[') {
      depth++;
    } else if (ch === ']') {
      depth--;
      if (depth === 0) {
        end = i + 1;
        break;
      }
    }
  }
  if (end === -1) return null;
  try {
    return JSON.parse(trimmed.slice(open, end));
  } catch {
    // Balanced brackets but not valid JSON: treat as absent.
    return null;
  }
}
|
|
65
|
+
|
|
66
|
+
/**
 * Reconcile LLM-ordered tag names with actual tag objects. Returns tags in desired order;
 * any tag not in orderedTagNames is appended at the end.
 * @param {string[]} orderedTagNames - Tag names in desired order (from LLM)
 * @param {Array<{ name: string, description?: string }>} tags - Current spec.tags
 * @returns {Array<{ name: string, description?: string }>} Tags reordered
 */
export function reconcileOrderedTags(orderedTagNames, tags) {
  if (!Array.isArray(tags) || tags.length === 0) return [];
  if (!Array.isArray(orderedTagNames) || orderedTagNames.length === 0) return [...tags];

  // Tags not yet placed, keyed by name; entries are removed as they are consumed
  // so duplicate names in orderedTagNames cannot place a tag twice.
  const remaining = new Map(tags.map((tag) => [tag.name, tag]));
  const result = [];

  for (const wanted of orderedTagNames) {
    if (remaining.has(wanted)) {
      result.push(remaining.get(wanted));
      remaining.delete(wanted);
    }
  }

  // Tags the LLM did not mention keep their original relative order at the end.
  result.push(...remaining.values());
  return result;
}
|
package/package.json
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "te.js",
|
|
3
|
-
"version": "2.1.
|
|
4
|
-
"description": "
|
|
3
|
+
"version": "2.1.2",
|
|
4
|
+
"description": "AI Native Node.js Framework",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "te.js",
|
|
7
7
|
"bin": {
|
|
@@ -30,10 +30,12 @@
|
|
|
30
30
|
"files": [
|
|
31
31
|
"te.js",
|
|
32
32
|
"cli",
|
|
33
|
+
"cors",
|
|
33
34
|
"server",
|
|
34
35
|
"database",
|
|
35
36
|
"rate-limit",
|
|
36
37
|
"utils",
|
|
38
|
+
"lib",
|
|
37
39
|
"auto-docs",
|
|
38
40
|
"README.md",
|
|
39
41
|
"docs"
|