cto-ai-cli 5.0.0 → 5.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,8 +6,8 @@ interface ProviderConfig {
   authHeader: string;
   chatPath: string;
   models: ModelConfig[];
-  parseRequest: (body: any) => ParsedRequest;
-  parseResponse: (body: any, streaming: boolean) => ParsedResponse;
+  parseRequest: (body: Record<string, unknown>) => ParsedRequest;
+  parseResponse: (body: Record<string, unknown>, streaming: boolean) => ParsedResponse;
   detectProvider: (url: string, headers: Record<string, string>) => boolean;
 }
 interface ModelConfig {
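
Moving the parser signatures from `any` to `Record<string, unknown>` means every property read now yields `unknown`, so callers must narrow before use instead of compiling silently against missing fields. A minimal sketch of the effect at a call site (the function below is illustrative, not code from the package):

```ts
// Sketch only: a hypothetical caller, not package code.
function readModel(body: Record<string, unknown>): string {
  // Under `any`, `body.model.trim()` compiled even when `model` was absent
  // and failed at runtime; with `unknown` values, narrowing comes first.
  return typeof body.model === "string" ? body.model : "unknown";
}
```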
@@ -92,7 +92,8 @@ var GOOGLE_MODELS = [
   { id: "gemini-1.5-pro", contextWindow: 2e6, costPerMInput: 1.25, costPerMOutput: 5, maxOutput: 8192 }
 ];
 function parseOpenAIRequest(body) {
-  const messages = (body.messages || []).map((m) => ({
+  const rawMessages = Array.isArray(body.messages) ? body.messages : [];
+  const messages = rawMessages.map((m) => ({
     role: m.role || "user",
     content: typeof m.content === "string" ? m.content : JSON.stringify(m.content)
   }));
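
The `Array.isArray` guard is strictly stronger than the old `body.messages || []` fallback: `||` only replaces falsy values, so a truthy non-array (a string, an object) would still reach `.map()` and throw at runtime. A minimal sketch of the pattern, using a hypothetical `toArray` helper where the package inlines the ternary:

```ts
// Hypothetical helper: the package inlines this ternary at each site.
function toArray(value: unknown): unknown[] {
  return Array.isArray(value) ? value : [];
}

toArray([1, 2]);    // [1, 2]
toArray(undefined); // []
toArray("oops");    // [], whereas ("oops" || []) === "oops",
                    // and "oops".map is not a function
```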
@@ -104,34 +105,29 @@ function parseOpenAIRequest(body) {
     temperature: body.temperature
   };
 }
-function parseOpenAIResponse(body, streaming) {
-  if (streaming) {
-    return {
-      model: body.model || "unknown",
-      inputTokens: body.usage?.prompt_tokens || 0,
-      outputTokens: body.usage?.completion_tokens || 0,
-      content: body.choices?.[0]?.message?.content || "",
-      finishReason: body.choices?.[0]?.finish_reason || "stop"
-    };
-  }
+function parseOpenAIResponse(body, _streaming) {
+  const usage = body.usage;
+  const choices = Array.isArray(body.choices) ? body.choices : [];
+  const first = choices[0];
+  const msg = first?.message;
   return {
     model: body.model || "unknown",
-    inputTokens: body.usage?.prompt_tokens || 0,
-    outputTokens: body.usage?.completion_tokens || 0,
-    content: body.choices?.[0]?.message?.content || "",
-    finishReason: body.choices?.[0]?.finish_reason || "stop"
+    inputTokens: usage?.prompt_tokens || 0,
+    outputTokens: usage?.completion_tokens || 0,
+    content: msg?.content || "",
+    finishReason: first?.finish_reason || "stop"
   };
 }
 function parseAnthropicRequest(body) {
   const messages = [];
   if (body.system) {
-    messages.push({ role: "system", content: body.system });
+    messages.push({ role: "system", content: String(body.system) });
   }
-  for (const m of body.messages || []) {
-    messages.push({
-      role: m.role || "user",
-      content: typeof m.content === "string" ? m.content : m.content?.map((b) => b.text || "").join("\n") || ""
-    });
+  const rawMessages = Array.isArray(body.messages) ? body.messages : [];
+  for (const m of rawMessages) {
+    const content = typeof m.content === "string" ? m.content : Array.isArray(m.content) ? m.content.map((b) => String(b.text ?? "")).join("\n") : "";
+    const role = m.role || "user";
+    messages.push({ role, content });
   }
   return {
     model: body.model || "unknown",
@@ -142,43 +138,53 @@ function parseAnthropicRequest(body) {
   };
 }
 function parseAnthropicResponse(body, _streaming) {
+  const usage = body.usage;
+  const contentBlocks = Array.isArray(body.content) ? body.content : [];
   return {
     model: body.model || "unknown",
-    inputTokens: body.usage?.input_tokens || 0,
-    outputTokens: body.usage?.output_tokens || 0,
-    content: body.content?.map((b) => b.text || "").join("\n") || "",
+    inputTokens: usage?.input_tokens || 0,
+    outputTokens: usage?.output_tokens || 0,
+    content: contentBlocks.map((b) => String(b.text ?? "")).join("\n"),
     finishReason: body.stop_reason || "end_turn"
   };
 }
 function parseGoogleRequest(body) {
   const messages = [];
-  if (body.systemInstruction?.parts) {
+  const sysInst = body.systemInstruction;
+  if (sysInst && Array.isArray(sysInst.parts)) {
     messages.push({
       role: "system",
-      content: body.systemInstruction.parts.map((p) => p.text || "").join("\n")
+      content: sysInst.parts.map((p) => String(p.text ?? "")).join("\n")
     });
   }
-  for (const item of body.contents || []) {
+  const contents = Array.isArray(body.contents) ? body.contents : [];
+  for (const item of contents) {
     const role = item.role === "model" ? "assistant" : "user";
-    const content = item.parts?.map((p) => p.text || "").join("\n") || "";
+    const parts = Array.isArray(item.parts) ? item.parts : [];
+    const content = parts.map((p) => String(p.text ?? "")).join("\n");
     messages.push({ role, content });
   }
   const model = body.model || body.modelId || "gemini-2.0-flash";
+  const genConfig = body.generationConfig;
   return {
     model,
     messages,
     stream: body.stream === true,
-    maxTokens: body.generationConfig?.maxOutputTokens,
-    temperature: body.generationConfig?.temperature
+    maxTokens: genConfig?.maxOutputTokens,
+    temperature: genConfig?.temperature
   };
 }
 function parseGoogleResponse(body, _streaming) {
-  const candidate = body.candidates?.[0];
+  const candidates = Array.isArray(body.candidates) ? body.candidates : [];
+  const candidate = candidates[0];
+  const usage = body.usageMetadata;
+  const candidateContent = candidate?.content;
+  const parts = Array.isArray(candidateContent?.parts) ? candidateContent.parts : [];
   return {
     model: body.modelVersion || body.model || "gemini-2.0-flash",
-    inputTokens: body.usageMetadata?.promptTokenCount || 0,
-    outputTokens: body.usageMetadata?.candidatesTokenCount || 0,
-    content: candidate?.content?.parts?.map((p) => p.text || "").join("\n") || "",
+    inputTokens: usage?.promptTokenCount || 0,
+    outputTokens: usage?.candidatesTokenCount || 0,
+    content: parts.map((p) => String(p.text ?? "")).join("\n"),
     finishReason: candidate?.finishReason || "STOP"
   };
 }
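
The text-joining code in these parsers also swaps `b.text || ""` for `String(b.text ?? "")`: `??` treats only `null` and `undefined` as missing, so falsy-but-present values survive, and `String()` coerces any non-string `text` rather than letting it leak into the join. A small sketch of the difference, assuming blocks with edge-case values:

```ts
// Sketch: contrasting the two fallbacks on edge-case content blocks.
const blocks: Array<{ text?: unknown }> = [{ text: "hi" }, { text: 0 }, {}];

blocks.map((b) => String(b.text ?? "")).join("\n"); // "hi\n0\n" (the 0 survives)
blocks.map((b) => String(b.text || "")).join("\n"); // "hi\n\n"  (the 0 is lost)
```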
@@ -1396,7 +1402,8 @@ async function optimizeContext(messages, analysis, config) {
     );
     return { messages: optimizedMessages, injected: true, optimizeDecisions };
   } catch (err) {
-    optimizeDecisions.push(`Context optimization failed: ${err.message}`);
+    const errMsg = err instanceof Error ? err.message : String(err);
+    optimizeDecisions.push(`Context optimization failed: ${errMsg}`);
     return { messages, injected: false, optimizeDecisions };
   }
 }
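
Throughout this release, `err.message` reads inside `catch` blocks are replaced with an `instanceof` narrowing. A TypeScript `catch` binding is typed `unknown` under `useUnknownInCatchVariables`, and JavaScript permits throwing non-`Error` values, so the bare property access was unsound. The same ternary recurs inline in the gateway hunks below; a sketch of the pattern as a hypothetical shared helper:

```ts
// Hypothetical helper: the release repeats this ternary inline instead.
function errorMessage(err: unknown): string {
  return err instanceof Error ? err.message : String(err);
}

try {
  throw "disk full"; // non-Error throws are legal in JavaScript
} catch (err) {
  console.log(errorMessage(err)); // "disk full", not a TypeError
}
```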
@@ -2181,8 +2188,8 @@ async function analyzeProject(projectPath, config) {
     maxDepth: mergedConfig.analysis.maxDepth
   });
   const tokenMethod = mergedConfig.tokens.method;
-  const files = [];
-  for (const entry of walkEntries) {
+  const BATCH_SIZE = 50;
+  async function estimateFileTokens(entry) {
     let tokens;
     if (tokenMethod === "tiktoken") {
       try {
@@ -2194,7 +2201,7 @@ async function analyzeProject(projectPath, config) {
     } else {
       tokens = countTokensChars4(entry.size);
     }
-    files.push({
+    return {
       path: entry.path,
       relativePath: entry.relativePath,
       extension: entry.extension,
@@ -2203,16 +2210,20 @@ async function analyzeProject(projectPath, config) {
       lines: entry.lines,
       lastModified: entry.lastModified,
       kind: classifyFileKind(entry.relativePath),
-      // Graph data — populated by graph analysis
       imports: [],
       importedBy: [],
       isHub: false,
       complexity: 0,
-      // Risk data — populated by risk analysis
       riskScore: 0,
       riskFactors: [],
       exclusionImpact: "none"
-    });
+    };
+  }
+  const files = [];
+  for (let i = 0; i < walkEntries.length; i += BATCH_SIZE) {
+    const batch = walkEntries.slice(i, i + BATCH_SIZE);
+    const results = await Promise.all(batch.map(estimateFileTokens));
+    files.push(...results);
   }
   const graph = buildProjectGraph(absPath, files);
   for (const file of files) {
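
This refactor replaces a sequential per-file loop with fixed-size batches awaited via `Promise.all`, so up to `BATCH_SIZE` (50) token estimations run concurrently while results keep their input order. A generalized sketch of the same shape (`mapInBatches` is a hypothetical name; the diff inlines the loop):

```ts
// Hypothetical generalization of the inlined batching loop above.
async function mapInBatches<T, R>(
  items: T[],
  batchSize: number,
  fn: (item: T) => Promise<R>
): Promise<R[]> {
  const out: R[] = [];
  for (let i = 0; i < items.length; i += batchSize) {
    // At most `batchSize` promises are in flight at once, and results
    // are appended in input order, matching the sequential version.
    const batch = items.slice(i, i + batchSize);
    out.push(...(await Promise.all(batch.map(fn))));
  }
  return out;
}
```

One slow file stalls only its own batch rather than the whole walk, and the preserved ordering means the downstream graph and risk passes see the same file sequence as before.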
@@ -2358,7 +2369,8 @@ var ContextGateway = class {
       this.analysis = analysis;
       return analysis;
     } catch (err) {
-      this.emit({ type: "error", message: `Analysis failed: ${err.message}`, error: err });
+      const message = err instanceof Error ? err.message : String(err);
+      this.emit({ type: "error", message: `Analysis failed: ${message}`, error: err instanceof Error ? err : void 0 });
       throw err;
     }
   }
@@ -2395,7 +2407,8 @@ var ContextGateway = class {
     try {
       body = await readBody(req, this.config.maxBodyBytes);
     } catch (err) {
-      const status = err.message === "body-too-large" ? 413 : 400;
+      const errMsg = err instanceof Error ? err.message : String(err);
+      const status = errMsg === "body-too-large" ? 413 : 400;
       res.writeHead(status, { "Content-Type": "application/json" });
       res.end(JSON.stringify({ error: status === 413 ? `Request body too large. Max: ${Math.round(this.config.maxBodyBytes / 1024 / 1024)}MB` : "Failed to read request body" }));
       return;
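
The 413 branch keys off a sentinel message, `body-too-large`, that `readBody` is expected to throw when the configured size cap is exceeded. The package's `readBody` is not part of this diff; the following is only a sketch of what such a reader plausibly looks like in Node, assuming that sentinel:

```ts
import type { IncomingMessage } from "node:http";

// Sketch only: the package's actual readBody is not shown in this diff.
function readBody(req: IncomingMessage, maxBytes: number): Promise<string> {
  return new Promise((resolve, reject) => {
    const chunks: Buffer[] = [];
    let size = 0;
    req.on("data", (chunk: Buffer) => {
      size += chunk.length;
      if (size > maxBytes) {
        req.destroy(); // stop consuming the oversized stream
        reject(new Error("body-too-large")); // sentinel matched by the handler
        return;
      }
      chunks.push(chunk);
    });
    req.on("end", () => resolve(Buffer.concat(chunks).toString("utf8")));
    req.on("error", reject);
  });
}
```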
@@ -2506,12 +2519,13 @@ var ContextGateway = class {
     try {
       await this.proxyRequest(targetUrl, req, res, modifiedBody, provider, parsed, interceptResult, startTime);
     } catch (err) {
+      const errMsg = err instanceof Error ? err.message : String(err);
       if (!res.headersSent) {
-        const status = err.message === "upstream-timeout" ? 504 : 502;
+        const status = errMsg === "upstream-timeout" ? 504 : 502;
         res.writeHead(status, { "Content-Type": "application/json" });
-        res.end(JSON.stringify({ error: status === 504 ? "Upstream provider timeout" : `Proxy error: ${err.message}` }));
+        res.end(JSON.stringify({ error: status === 504 ? "Upstream provider timeout" : `Proxy error: ${errMsg}` }));
       }
-      this.emit({ type: "error", message: `Proxy error: ${err.message}`, error: err });
+      this.emit({ type: "error", message: `Proxy error: ${errMsg}`, error: err instanceof Error ? err : void 0 });
     }
   }
   // ===== PROXY =====