cto-ai-cli 5.0.0 → 5.2.0

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
@@ -97,7 +97,8 @@ var GOOGLE_MODELS = [
  { id: "gemini-1.5-pro", contextWindow: 2e6, costPerMInput: 1.25, costPerMOutput: 5, maxOutput: 8192 }
  ];
  function parseOpenAIRequest(body) {
- const messages = (body.messages || []).map((m) => ({
+ const rawMessages = Array.isArray(body.messages) ? body.messages : [];
+ const messages = rawMessages.map((m) => ({
  role: m.role || "user",
  content: typeof m.content === "string" ? m.content : JSON.stringify(m.content)
  }));
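
Why this change: `(body.messages || []).map(...)` only guards against null and undefined. Any other non-array value (a string, a number, an object) passes the `||` check and then throws when `.map` is called. With the `Array.isArray` normalization, malformed input degrades to an empty message list instead of crashing the gateway. A minimal sketch (the sample body is hypothetical):

    // Hypothetical malformed request body, for illustration only.
    const body = { messages: "hello" };
    // Old: (body.messages || []) evaluates to "hello"; "hello".map is
    // undefined, so calling it throws a TypeError.
    // New: normalize to an array before mapping.
    const rawMessages = Array.isArray(body.messages) ? body.messages : [];
    const messages = rawMessages.map((m) => ({ role: m.role || "user" })); // []
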
@@ -109,34 +110,29 @@ function parseOpenAIRequest(body) {
  temperature: body.temperature
  };
  }
- function parseOpenAIResponse(body, streaming) {
- if (streaming) {
- return {
- model: body.model || "unknown",
- inputTokens: body.usage?.prompt_tokens || 0,
- outputTokens: body.usage?.completion_tokens || 0,
- content: body.choices?.[0]?.message?.content || "",
- finishReason: body.choices?.[0]?.finish_reason || "stop"
- };
- }
+ function parseOpenAIResponse(body, _streaming) {
+ const usage = body.usage;
+ const choices = Array.isArray(body.choices) ? body.choices : [];
+ const first = choices[0];
+ const msg = first?.message;
  return {
  model: body.model || "unknown",
- inputTokens: body.usage?.prompt_tokens || 0,
- outputTokens: body.usage?.completion_tokens || 0,
- content: body.choices?.[0]?.message?.content || "",
- finishReason: body.choices?.[0]?.finish_reason || "stop"
+ inputTokens: usage?.prompt_tokens || 0,
+ outputTokens: usage?.completion_tokens || 0,
+ content: msg?.content || "",
+ finishReason: first?.finish_reason || "stop"
  };
  }
  function parseAnthropicRequest(body) {
  const messages = [];
  if (body.system) {
- messages.push({ role: "system", content: body.system });
+ messages.push({ role: "system", content: String(body.system) });
  }
- for (const m of body.messages || []) {
- messages.push({
- role: m.role || "user",
- content: typeof m.content === "string" ? m.content : m.content?.map((b) => b.text || "").join("\n") || ""
- });
+ const rawMessages = Array.isArray(body.messages) ? body.messages : [];
+ for (const m of rawMessages) {
+ const content = typeof m.content === "string" ? m.content : Array.isArray(m.content) ? m.content.map((b) => String(b.text ?? "")).join("\n") : "";
+ const role = m.role || "user";
+ messages.push({ role, content });
  }
  return {
  model: body.model || "unknown",
@@ -147,43 +143,53 @@ function parseAnthropicRequest(body) {
  };
  }
  function parseAnthropicResponse(body, _streaming) {
+ const usage = body.usage;
+ const contentBlocks = Array.isArray(body.content) ? body.content : [];
  return {
  model: body.model || "unknown",
- inputTokens: body.usage?.input_tokens || 0,
- outputTokens: body.usage?.output_tokens || 0,
- content: body.content?.map((b) => b.text || "").join("\n") || "",
+ inputTokens: usage?.input_tokens || 0,
+ outputTokens: usage?.output_tokens || 0,
+ content: contentBlocks.map((b) => String(b.text ?? "")).join("\n"),
  finishReason: body.stop_reason || "end_turn"
  };
  }
  function parseGoogleRequest(body) {
  const messages = [];
- if (body.systemInstruction?.parts) {
+ const sysInst = body.systemInstruction;
+ if (sysInst && Array.isArray(sysInst.parts)) {
  messages.push({
  role: "system",
- content: body.systemInstruction.parts.map((p) => p.text || "").join("\n")
+ content: sysInst.parts.map((p) => String(p.text ?? "")).join("\n")
  });
  }
- for (const item of body.contents || []) {
+ const contents = Array.isArray(body.contents) ? body.contents : [];
+ for (const item of contents) {
  const role = item.role === "model" ? "assistant" : "user";
- const content = item.parts?.map((p) => p.text || "").join("\n") || "";
+ const parts = Array.isArray(item.parts) ? item.parts : [];
+ const content = parts.map((p) => String(p.text ?? "")).join("\n");
  messages.push({ role, content });
  }
  const model = body.model || body.modelId || "gemini-2.0-flash";
+ const genConfig = body.generationConfig;
  return {
  model,
  messages,
  stream: body.stream === true,
- maxTokens: body.generationConfig?.maxOutputTokens,
- temperature: body.generationConfig?.temperature
+ maxTokens: genConfig?.maxOutputTokens,
+ temperature: genConfig?.temperature
  };
  }
  function parseGoogleResponse(body, _streaming) {
- const candidate = body.candidates?.[0];
+ const candidates = Array.isArray(body.candidates) ? body.candidates : [];
+ const candidate = candidates[0];
+ const usage = body.usageMetadata;
+ const candidateContent = candidate?.content;
+ const parts = Array.isArray(candidateContent?.parts) ? candidateContent.parts : [];
  return {
  model: body.modelVersion || body.model || "gemini-2.0-flash",
- inputTokens: body.usageMetadata?.promptTokenCount || 0,
- outputTokens: body.usageMetadata?.candidatesTokenCount || 0,
- content: candidate?.content?.parts?.map((p) => p.text || "").join("\n") || "",
+ inputTokens: usage?.promptTokenCount || 0,
+ outputTokens: usage?.candidatesTokenCount || 0,
+ content: parts.map((p) => String(p.text ?? "")).join("\n"),
  finishReason: candidate?.finishReason || "STOP"
  };
  }
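
Across providers the hardened parsers share one pattern: normalize each possibly-absent array up front with `Array.isArray(...) ? ... : []`, coerce block text via `String(b.text ?? "")`, and build the return value from those locals. (The parseOpenAIResponse rewrite above also deletes a streaming branch that returned an object identical to the non-streaming path; the unused parameter is renamed `_streaming`.) As a worked example, the new parseGoogleResponse on an empty and on a well-formed payload (sample payloads are hypothetical):

    parseGoogleResponse({}, false);
    // -> { model: "gemini-2.0-flash", inputTokens: 0, outputTokens: 0,
    //      content: "", finishReason: "STOP" }
    parseGoogleResponse({
      modelVersion: "gemini-2.0-flash-001",
      usageMetadata: { promptTokenCount: 12, candidatesTokenCount: 34 },
      candidates: [{ content: { parts: [{ text: "hi" }] }, finishReason: "STOP" }]
    }, false);
    // -> { model: "gemini-2.0-flash-001", inputTokens: 12, outputTokens: 34,
    //      content: "hi", finishReason: "STOP" }
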
@@ -1401,7 +1407,8 @@ async function optimizeContext(messages, analysis, config) {
  );
  return { messages: optimizedMessages, injected: true, optimizeDecisions };
  } catch (err) {
- optimizeDecisions.push(`Context optimization failed: ${err.message}`);
+ const errMsg = err instanceof Error ? err.message : String(err);
+ optimizeDecisions.push(`Context optimization failed: ${errMsg}`);
  return { messages, injected: false, optimizeDecisions };
  }
  }
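
The `err instanceof Error ? err.message : String(err)` narrowing recurs throughout this release. JavaScript permits throwing any value, so in a bare `catch (err)` the expression `err.message` can be undefined, or, for a thrown null or undefined, a TypeError inside the error handler itself. The pattern in isolation:

    // Anything can be thrown in JavaScript, not just Error instances.
    function describe(err) {
      return err instanceof Error ? err.message : String(err);
    }
    describe(new Error("boom")); // "boom"
    describe("raw string");      // "raw string"
    describe(null);              // "null" (err.message would have thrown here)
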
@@ -2186,8 +2193,8 @@ async function analyzeProject(projectPath, config) {
  maxDepth: mergedConfig.analysis.maxDepth
  });
  const tokenMethod = mergedConfig.tokens.method;
- const files = [];
- for (const entry of walkEntries) {
+ const BATCH_SIZE = 50;
+ async function estimateFileTokens(entry) {
  let tokens;
  if (tokenMethod === "tiktoken") {
  try {
@@ -2199,7 +2206,7 @@
  } else {
  tokens = countTokensChars4(entry.size);
  }
- files.push({
+ return {
  path: entry.path,
  relativePath: entry.relativePath,
  extension: entry.extension,
@@ -2208,16 +2215,20 @@
  lines: entry.lines,
  lastModified: entry.lastModified,
  kind: classifyFileKind(entry.relativePath),
- // Graph data — populated by graph analysis
  imports: [],
  importedBy: [],
  isHub: false,
  complexity: 0,
- // Risk data — populated by risk analysis
  riskScore: 0,
  riskFactors: [],
  exclusionImpact: "none"
- });
+ };
+ }
+ const files = [];
+ for (let i = 0; i < walkEntries.length; i += BATCH_SIZE) {
+ const batch = walkEntries.slice(i, i + BATCH_SIZE);
+ const results = await Promise.all(batch.map(estimateFileTokens));
+ files.push(...results);
  }
  const graph = buildProjectGraph(absPath, files);
  for (const file of files) {
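
The token-estimation loop is now batched: the per-file logic moves into `estimateFileTokens`, and the driver runs up to BATCH_SIZE (50) estimates concurrently per `Promise.all`, bounding the number of in-flight operations rather than either serializing every file or launching one promise per file. The same pattern, extracted as a generic sketch (`mapInBatches` and `worker` are illustrative names, not from the package):

    // Generic bounded-concurrency map over an array.
    async function mapInBatches(items, batchSize, worker) {
      const out = [];
      for (let i = 0; i < items.length; i += batchSize) {
        const batch = items.slice(i, i + batchSize);
        // At most batchSize calls to worker are in flight at once.
        out.push(...await Promise.all(batch.map(worker)));
      }
      return out;
    }

Promise.all preserves input order, so `files` ends up in the same order the old sequential loop produced.
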
@@ -2363,7 +2374,8 @@ var ContextGateway = class {
  this.analysis = analysis;
  return analysis;
  } catch (err) {
- this.emit({ type: "error", message: `Analysis failed: ${err.message}`, error: err });
+ const message = err instanceof Error ? err.message : String(err);
+ this.emit({ type: "error", message: `Analysis failed: ${message}`, error: err instanceof Error ? err : void 0 });
  throw err;
  }
  }
@@ -2400,7 +2412,8 @@ var ContextGateway = class {
  try {
  body = await readBody(req, this.config.maxBodyBytes);
  } catch (err) {
- const status = err.message === "body-too-large" ? 413 : 400;
+ const errMsg = err instanceof Error ? err.message : String(err);
+ const status = errMsg === "body-too-large" ? 413 : 400;
  res.writeHead(status, { "Content-Type": "application/json" });
  res.end(JSON.stringify({ error: status === 413 ? `Request body too large. Max: ${Math.round(this.config.maxBodyBytes / 1024 / 1024)}MB` : "Failed to read request body" }));
  return;
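
The 413 branch depends on readBody rejecting with the sentinel message "body-too-large"; normalizing the message first means a non-Error throw can no longer crash the handler before the status is chosen. readBody itself is outside this diff; a plausible shape, sketched purely as an assumption, would be:

    // Assumed implementation sketch; the package's actual readBody is not shown here.
    function readBody(req, maxBytes) {
      return new Promise((resolve, reject) => {
        const chunks = [];
        let size = 0;
        req.on("data", (chunk) => {
          size += chunk.length;
          if (size > maxBytes) {
            req.destroy();
            reject(new Error("body-too-large")); // sentinel the caller maps to 413
            return;
          }
          chunks.push(chunk);
        });
        req.on("end", () => resolve(Buffer.concat(chunks).toString("utf8")));
        req.on("error", reject);
      });
    }
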
@@ -2511,12 +2524,13 @@ var ContextGateway = class {
  try {
  await this.proxyRequest(targetUrl, req, res, modifiedBody, provider, parsed, interceptResult, startTime);
  } catch (err) {
+ const errMsg = err instanceof Error ? err.message : String(err);
  if (!res.headersSent) {
- const status = err.message === "upstream-timeout" ? 504 : 502;
+ const status = errMsg === "upstream-timeout" ? 504 : 502;
  res.writeHead(status, { "Content-Type": "application/json" });
- res.end(JSON.stringify({ error: status === 504 ? "Upstream provider timeout" : `Proxy error: ${err.message}` }));
+ res.end(JSON.stringify({ error: status === 504 ? "Upstream provider timeout" : `Proxy error: ${errMsg}` }));
  }
- this.emit({ type: "error", message: `Proxy error: ${err.message}`, error: err });
+ this.emit({ type: "error", message: `Proxy error: ${errMsg}`, error: err instanceof Error ? err : void 0 });
  }
  }
  // ===== PROXY =====
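
Same sentinel-error convention here: a rejection whose message is "upstream-timeout" maps to 504, and every other failure becomes a 502 carrying the normalized message. The emit call now also forwards `error` only when it is a real Error (`void 0` otherwise), so listeners see a consistent type. A timeout wrapper that would produce the sentinel, as an illustrative sketch (not the package's actual proxy code):

    // Illustrative only; proxyRequest's real implementation is not in this diff.
    function withUpstreamTimeout(promise, ms) {
      return new Promise((resolve, reject) => {
        const timer = setTimeout(() => reject(new Error("upstream-timeout")), ms);
        promise.then(
          (value) => { clearTimeout(timer); resolve(value); },
          (err) => { clearTimeout(timer); reject(err); }
        );
      });
    }
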