cto-ai-cli 5.0.0 → 5.2.0
This diff shows the changes between package versions publicly released to a supported registry, as they appear in that registry. It is provided for informational purposes only.
- package/README.md +106 -4
- package/dist/action/index.js +10 -6
- package/dist/api/dashboard.js +10 -6
- package/dist/api/dashboard.js.map +1 -1
- package/dist/api/server.js +86 -29
- package/dist/api/server.js.map +1 -1
- package/dist/cli/gateway.js +60 -46
- package/dist/cli/score.js +2338 -2292
- package/dist/engine/index.d.ts +30 -1
- package/dist/engine/index.js +96 -8
- package/dist/engine/index.js.map +1 -1
- package/dist/gateway/index.d.ts +2 -2
- package/dist/gateway/index.js +60 -46
- package/dist/gateway/index.js.map +1 -1
- package/dist/mcp/v2.js +10 -6
- package/dist/mcp/v2.js.map +1 -1
- package/package.json +1 -1
- package/dist/core/index.d.ts +0 -717
- package/dist/core/index.js +0 -4446
- package/dist/core/index.js.map +0 -1
package/dist/cli/gateway.js
CHANGED
```diff
@@ -97,7 +97,8 @@ var GOOGLE_MODELS = [
   { id: "gemini-1.5-pro", contextWindow: 2e6, costPerMInput: 1.25, costPerMOutput: 5, maxOutput: 8192 }
 ];
 function parseOpenAIRequest(body) {
-  const
+  const rawMessages = Array.isArray(body.messages) ? body.messages : [];
+  const messages = rawMessages.map((m) => ({
     role: m.role || "user",
     content: typeof m.content === "string" ? m.content : JSON.stringify(m.content)
   }));
@@ -109,34 +110,29 @@ function parseOpenAIRequest(body) {
     temperature: body.temperature
   };
 }
-function parseOpenAIResponse(body,
-
-
-
-
-    outputTokens: body.usage?.completion_tokens || 0,
-    content: body.choices?.[0]?.message?.content || "",
-    finishReason: body.choices?.[0]?.finish_reason || "stop"
-  };
-}
+function parseOpenAIResponse(body, _streaming) {
+  const usage = body.usage;
+  const choices = Array.isArray(body.choices) ? body.choices : [];
+  const first = choices[0];
+  const msg = first?.message;
   return {
     model: body.model || "unknown",
-    inputTokens:
-    outputTokens:
-    content:
-    finishReason:
+    inputTokens: usage?.prompt_tokens || 0,
+    outputTokens: usage?.completion_tokens || 0,
+    content: msg?.content || "",
+    finishReason: first?.finish_reason || "stop"
   };
 }
 function parseAnthropicRequest(body) {
   const messages = [];
   if (body.system) {
-    messages.push({ role: "system", content: body.system });
+    messages.push({ role: "system", content: String(body.system) });
   }
-
-
-
-
-  });
+  const rawMessages = Array.isArray(body.messages) ? body.messages : [];
+  for (const m of rawMessages) {
+    const content = typeof m.content === "string" ? m.content : Array.isArray(m.content) ? m.content.map((b) => String(b.text ?? "")).join("\n") : "";
+    const role = m.role || "user";
+    messages.push({ role, content });
   }
   return {
     model: body.model || "unknown",
@@ -147,43 +143,53 @@ function parseAnthropicRequest(body) {
   };
 }
 function parseAnthropicResponse(body, _streaming) {
+  const usage = body.usage;
+  const contentBlocks = Array.isArray(body.content) ? body.content : [];
   return {
     model: body.model || "unknown",
-    inputTokens:
-    outputTokens:
-    content:
+    inputTokens: usage?.input_tokens || 0,
+    outputTokens: usage?.output_tokens || 0,
+    content: contentBlocks.map((b) => String(b.text ?? "")).join("\n"),
     finishReason: body.stop_reason || "end_turn"
   };
 }
 function parseGoogleRequest(body) {
   const messages = [];
-
+  const sysInst = body.systemInstruction;
+  if (sysInst && Array.isArray(sysInst.parts)) {
     messages.push({
       role: "system",
-      content:
+      content: sysInst.parts.map((p) => String(p.text ?? "")).join("\n")
     });
   }
-
+  const contents = Array.isArray(body.contents) ? body.contents : [];
+  for (const item of contents) {
     const role = item.role === "model" ? "assistant" : "user";
-    const
+    const parts = Array.isArray(item.parts) ? item.parts : [];
+    const content = parts.map((p) => String(p.text ?? "")).join("\n");
     messages.push({ role, content });
   }
   const model = body.model || body.modelId || "gemini-2.0-flash";
+  const genConfig = body.generationConfig;
   return {
     model,
     messages,
     stream: body.stream === true,
-    maxTokens:
-    temperature:
+    maxTokens: genConfig?.maxOutputTokens,
+    temperature: genConfig?.temperature
   };
 }
 function parseGoogleResponse(body, _streaming) {
-  const
+  const candidates = Array.isArray(body.candidates) ? body.candidates : [];
+  const candidate = candidates[0];
+  const usage = body.usageMetadata;
+  const candidateContent = candidate?.content;
+  const parts = Array.isArray(candidateContent?.parts) ? candidateContent.parts : [];
   return {
     model: body.modelVersion || body.model || "gemini-2.0-flash",
-    inputTokens:
-    outputTokens:
-    content:
+    inputTokens: usage?.promptTokenCount || 0,
+    outputTokens: usage?.candidatesTokenCount || 0,
+    content: parts.map((p) => String(p.text ?? "")).join("\n"),
     finishReason: candidate?.finishReason || "STOP"
   };
 }
```
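
The bulk of these hunks hardens the three provider parsers: every array access now goes through an `Array.isArray` guard, and every nested field through optional chaining with a default, so a malformed upstream payload degrades instead of throwing. (Several of the removed 5.0.0 lines are truncated in the registry's diff view.) A minimal sketch of the pattern, following the new `parseOpenAIResponse` shape; the empty input is illustrative:

```js
// Defensive-parsing pattern from 5.2.0: guard arrays, optional-chain fields,
// and fall back to safe defaults instead of throwing on malformed bodies.
function parseOpenAIResponse(body) {
  const usage = body.usage;
  const choices = Array.isArray(body.choices) ? body.choices : [];
  const first = choices[0];
  return {
    model: body.model || "unknown",
    inputTokens: usage?.prompt_tokens || 0,
    outputTokens: usage?.completion_tokens || 0,
    content: first?.message?.content || "",
    finishReason: first?.finish_reason || "stop"
  };
}

// An empty or garbled upstream body degrades to defaults rather than crashing:
console.log(parseOpenAIResponse({}));
// { model: 'unknown', inputTokens: 0, outputTokens: 0, content: '', finishReason: 'stop' }
```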
```diff
@@ -1401,7 +1407,8 @@ async function optimizeContext(messages, analysis, config) {
     );
     return { messages: optimizedMessages, injected: true, optimizeDecisions };
   } catch (err) {
-
+    const errMsg = err instanceof Error ? err.message : String(err);
+    optimizeDecisions.push(`Context optimization failed: ${errMsg}`);
     return { messages, injected: false, optimizeDecisions };
   }
 }
```
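
The catch path now pushes a normalized failure message into `optimizeDecisions`, using an idiom that recurs throughout this release: convert the unknown thrown value to a string before interpolating it. A small sketch of why that guard matters (the thrown values are hypothetical):

```js
// JavaScript can throw non-Error values, so 5.2.0 normalizes before formatting.
function errorMessage(err) {
  return err instanceof Error ? err.message : String(err);
}

console.log(errorMessage(new Error("boom"))); // "boom"
console.log(errorMessage("body-too-large"));  // "body-too-large" (err.message would be undefined here)
```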
```diff
@@ -2186,8 +2193,8 @@ async function analyzeProject(projectPath, config) {
     maxDepth: mergedConfig.analysis.maxDepth
   });
   const tokenMethod = mergedConfig.tokens.method;
-  const
-
+  const BATCH_SIZE = 50;
+  async function estimateFileTokens(entry) {
     let tokens;
     if (tokenMethod === "tiktoken") {
       try {
@@ -2199,7 +2206,7 @@ async function analyzeProject(projectPath, config) {
     } else {
      tokens = countTokensChars4(entry.size);
     }
-
+    return {
      path: entry.path,
      relativePath: entry.relativePath,
      extension: entry.extension,
@@ -2208,16 +2215,20 @@ async function analyzeProject(projectPath, config) {
      lines: entry.lines,
      lastModified: entry.lastModified,
      kind: classifyFileKind(entry.relativePath),
-      // Graph data — populated by graph analysis
      imports: [],
      importedBy: [],
      isHub: false,
      complexity: 0,
-      // Risk data — populated by risk analysis
      riskScore: 0,
      riskFactors: [],
      exclusionImpact: "none"
-    }
+    };
+  }
+  const files = [];
+  for (let i = 0; i < walkEntries.length; i += BATCH_SIZE) {
+    const batch = walkEntries.slice(i, i + BATCH_SIZE);
+    const results = await Promise.all(batch.map(estimateFileTokens));
+    files.push(...results);
   }
   const graph = buildProjectGraph(absPath, files);
   for (const file of files) {
```
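
Per-file token estimation is factored into an `estimateFileTokens` helper and driven in fixed batches of 50 through `Promise.all`, which bounds how much async tokenizer and file work is in flight at once (the pre-5.2.0 loop shape is truncated above). A generic sketch of the batching loop; `mapInBatches` and `estimate` are illustrative names, not package exports:

```js
// Bounded concurrency as in analyzeProject: at most BATCH_SIZE promises
// run at a time, instead of one-by-one awaits or one unbounded Promise.all.
const BATCH_SIZE = 50;

async function mapInBatches(items, worker) {
  const out = [];
  for (let i = 0; i < items.length; i += BATCH_SIZE) {
    const batch = items.slice(i, i + BATCH_SIZE);
    out.push(...await Promise.all(batch.map(worker)));
  }
  return out;
}

// Stand-in async worker:
const estimate = async (n) => n * 2;
mapInBatches([1, 2, 3], estimate).then(console.log); // [ 2, 4, 6 ]
```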
```diff
@@ -2363,7 +2374,8 @@ var ContextGateway = class {
       this.analysis = analysis;
       return analysis;
     } catch (err) {
-
+      const message = err instanceof Error ? err.message : String(err);
+      this.emit({ type: "error", message: `Analysis failed: ${message}`, error: err instanceof Error ? err : void 0 });
       throw err;
     }
   }
@@ -2400,7 +2412,8 @@ var ContextGateway = class {
     try {
       body = await readBody(req, this.config.maxBodyBytes);
     } catch (err) {
-      const
+      const errMsg = err instanceof Error ? err.message : String(err);
+      const status = errMsg === "body-too-large" ? 413 : 400;
       res.writeHead(status, { "Content-Type": "application/json" });
       res.end(JSON.stringify({ error: status === 413 ? `Request body too large. Max: ${Math.round(this.config.maxBodyBytes / 1024 / 1024)}MB` : "Failed to read request body" }));
       return;
@@ -2511,12 +2524,13 @@ var ContextGateway = class {
     try {
       await this.proxyRequest(targetUrl, req, res, modifiedBody, provider, parsed, interceptResult, startTime);
     } catch (err) {
+      const errMsg = err instanceof Error ? err.message : String(err);
       if (!res.headersSent) {
-        const status =
+        const status = errMsg === "upstream-timeout" ? 504 : 502;
         res.writeHead(status, { "Content-Type": "application/json" });
-        res.end(JSON.stringify({ error: status === 504 ? "Upstream provider timeout" : `Proxy error: ${
+        res.end(JSON.stringify({ error: status === 504 ? "Upstream provider timeout" : `Proxy error: ${errMsg}` }));
       }
-      this.emit({ type: "error", message: `Proxy error: ${
+      this.emit({ type: "error", message: `Proxy error: ${errMsg}`, error: err instanceof Error ? err : void 0 });
     }
   }
   // ===== PROXY =====
```
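
Both HTTP error paths now branch on sentinel error messages to choose a status code: `"body-too-large"` maps to 413 (otherwise 400) while reading the request body, and `"upstream-timeout"` maps to 504 (otherwise 502) while proxying. A condensed composite sketch of that dispatch, not a function that exists in the package; `res` is a Node `http.ServerResponse`:

```js
// Sentinel-message to HTTP-status dispatch, condensed from the two catch blocks above.
function writeGatewayError(res, err, phase /* "read" | "proxy" */) {
  const errMsg = err instanceof Error ? err.message : String(err);
  const status = phase === "read"
    ? (errMsg === "body-too-large" ? 413 : 400)
    : (errMsg === "upstream-timeout" ? 504 : 502);
  if (!res.headersSent) {
    res.writeHead(status, { "Content-Type": "application/json" });
    res.end(JSON.stringify({ error: `Gateway error (${phase}): ${errMsg}` }));
  }
  return status;
}
```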