openreport 0.1.0 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +12 -11
- package/src/agents/orchestrator.ts +2 -2
- package/src/app/App.tsx +3 -3
- package/src/components/generation/AgentStatusItem.tsx +12 -10
- package/src/config/cli-model.ts +78 -52
- package/src/config/cli-prompt-formatter.ts +13 -9
- package/src/config/providers.ts +12 -12
- package/src/config/resolve-provider.ts +3 -3
- package/src/config/schema.ts +24 -5
- package/src/hooks/useReportGeneration.ts +2 -2
- package/src/pipeline/agent-runner.ts +36 -15
- package/src/pipeline/extraction.ts +3 -3
- package/src/pipeline/progress.ts +8 -1
- package/src/pipeline/runner.ts +8 -10
- package/src/schemas/report.ts +5 -1
- package/src/screens/GenerationScreen.tsx +2 -2
- package/src/tools/get-file-tree.ts +1 -1
- package/src/tools/get-git-info.ts +1 -1
- package/src/tools/glob.ts +1 -1
- package/src/tools/grep.ts +1 -1
- package/src/tools/list-directory.ts +1 -1
- package/src/tools/read-file.ts +1 -1
- package/src/tools/read-package-json.ts +1 -1
- package/src/tools/run-command.ts +1 -1
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "openreport",
-  "version": "0.1.
+  "version": "0.1.2",
   "packageManager": "bun@1.3.9",
   "description": "A modern TUI to generate detailed AI-powered reports on software projects",
   "type": "module",
@@ -37,25 +37,26 @@
     "LICENSE"
   ],
   "dependencies": {
-    "@ai-sdk/anthropic": "^
-    "@ai-sdk/google": "^
-    "@ai-sdk/mistral": "^
-    "@ai-sdk/openai": "^
-    "ai": "^
+    "@ai-sdk/anthropic": "^3.0.43",
+    "@ai-sdk/google": "^3.0.29",
+    "@ai-sdk/mistral": "^3.0.20",
+    "@ai-sdk/openai": "^3.0.28",
+    "@ai-sdk/provider": "^3.0.8",
+    "ai": "^6.0.85",
     "chalk": "^5.4.0",
     "clipanion": "^3.2.0",
     "ignore": "^7.0.0",
-    "ink": "^
+    "ink": "^6.7.0",
     "ink-spinner": "^5.0.0",
-    "marked": "^
+    "marked": "^17.0.2",
     "marked-terminal": "^7.3.0",
-    "react": "^
-    "zod": "^3.
+    "react": "^19.2.4",
+    "zod": "^4.3.6"
   },
   "devDependencies": {
     "@biomejs/biome": "^2.3.15",
     "@types/bun": "^1.3.9",
-    "@types/react": "^
+    "@types/react": "^19.2.14",
     "typescript": "^5.7.0"
   }
 }

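Taken together, these are major-version bumps: `ai` moves to v6 and gains the new `@ai-sdk/provider` dependency (home of the `LanguageModelV2` interface), `zod` moves from v3 to v4, and `ink`, `react`, and `marked` move to their current majors. The source changes in the remainder of this diff are the mechanical fallout of those upgrades. (Removed lines shown bare or cut short below are truncated in the registry's diff output; the old text is not recoverable and is left as-is.)
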
package/src/agents/orchestrator.ts
CHANGED

@@ -1,4 +1,4 @@
-import type {
+import type { LanguageModelV2 } from "@ai-sdk/provider";
 import type { OpenReportConfig } from "../config/schema.js";
 import type { FullReport } from "../types/index.js";
 import { ProgressTracker } from "../pipeline/progress.js";
@@ -7,7 +7,7 @@ import { runPipeline } from "../pipeline/runner.js";
 export interface OrchestratorOptions {
   projectRoot: string;
   reportType: string;
-  model:
+  model: LanguageModelV2;
   modelId: string;
   config: OpenReportConfig;
   onProgress?: (progress: ProgressTracker) => void;

package/src/app/App.tsx
CHANGED

@@ -8,13 +8,13 @@ import { ConfigScreen, type ConfigChanges } from "../screens/ConfigScreen.js";
 import { createProviderRegistry } from "../config/providers.js";
 import { saveProjectConfig } from "../config/saver.js";
 import { debugLog } from "../utils/debug.js";
-import type {
+import type { LanguageModelV2 } from "@ai-sdk/provider";
 import type { Screen, NavigationState } from "../types/index.js";
 import type { OpenReportConfig } from "../config/schema.js";

 interface AppProps {
   projectRoot: string;
-  initialModel:
+  initialModel: LanguageModelV2;
   initialModelId: string;
   initialConfig: OpenReportConfig;
 }
@@ -28,7 +28,7 @@ export function App({
   const { exit } = useApp();
   const [nav, setNav] = useState<NavigationState>({ screen: "home" });
   const [config, setConfig] = useState<OpenReportConfig>(initialConfig);
-  const [model, setModel] = useState<
+  const [model, setModel] = useState<LanguageModelV2>(initialModel);
   const [modelId, setModelId] = useState<string>(initialModelId);

   const navigate = useCallback(

package/src/components/generation/AgentStatusItem.tsx
CHANGED

@@ -73,21 +73,23 @@ export const AgentStatusItem = React.memo(function AgentStatusItem({
       </Text>
     </Box>

-      {/* Duration */}
-      {
-
+      {/* Duration — always rendered to keep columns aligned with header */}
+      <Box width={timeWidth} justifyContent="flex-end">
+        {duration > 0 && (
           <Text color="gray" dimColor>
             {formatDuration(duration)}
           </Text>
-
-
+        )}
+      </Box>

-      {/* Tokens (
-      {
+      {/* Tokens — always rendered (when not compact) to keep columns aligned */}
+      {!isCompact && (
         <Box width={tokenWidth} justifyContent="flex-end">
-
-
-
+          {tokenStr ? (
+            <Text color="gray" dimColor>
+              {tokenStr}
+            </Text>
+          ) : null}
         </Box>
       )}
     </Box>

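The pattern in this change is fixed-width placeholder columns: the duration `<Box>` is now rendered even when there is no value, and the token `<Box>` whenever the view is not compact, so every row stays aligned with the header instead of collapsing when a cell is empty.
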
package/src/config/cli-model.ts
CHANGED

@@ -2,10 +2,11 @@ import { spawn as spawnChild, execFile } from "child_process";
 import { promisify } from "util";
 import * as os from "os";
 import type {
-
-
-
-
+  LanguageModelV2,
+  LanguageModelV2CallOptions,
+  LanguageModelV2StreamPart,
+  LanguageModelV2Content,
+} from "@ai-sdk/provider";
 import { estimateTokens } from "../ingestion/token-budget.js";
 import { debugLog } from "../utils/debug.js";
 import {
@@ -40,10 +41,10 @@ async function resolveWindowsCommand(command: string): Promise<{ cmd: string; ar

 // ── CLI model marker for provider detection ─────────────────────────────

-const cliModels = new WeakSet<
+const cliModels = new WeakSet<LanguageModelV2>();

-/** Check whether a
-export function isCliModel(model:
+/** Check whether a LanguageModelV2 was created by `createCliModel`. */
+export function isCliModel(model: LanguageModelV2): boolean {
   return cliModels.has(model);
 }

@@ -146,7 +147,7 @@ function parseStreamEvent(line: string): ParsedStreamEvent | null {
 function processRemainingBuffer(
   buffer: string,
   textChunks: string[],
-  controller: ReadableStreamDefaultController<
+  controller: ReadableStreamDefaultController<LanguageModelV2StreamPart>,
   usage: { inputTokens: number; outputTokens: number },
 ): void {
   if (!buffer.trim()) return;
@@ -156,7 +157,7 @@ function processRemainingBuffer(

   if (parsed.type === "text" && parsed.text) {
     textChunks.push(parsed.text);
-    controller.enqueue({ type: "text-delta",
+    controller.enqueue({ type: "text-delta", id: "0", delta: parsed.text });
   }
   if (parsed.usage) {
     if (parsed.usage.input_tokens) usage.inputTokens = parsed.usage.input_tokens;
@@ -168,19 +169,18 @@ function buildStreamFinishResult(
   textChunks: string[],
   usage: { inputTokens: number; outputTokens: number },
   fullPrompt: string,
-): { finishParts:
+): { finishParts: LanguageModelV2StreamPart[] } {
   const totalText = textChunks.join("");
   const toolCalls = parseToolCalls(totalText);
-  const parts:
+  const parts: LanguageModelV2StreamPart[] = [];

   if (toolCalls) {
     for (const tc of toolCalls) {
       parts.push({
         type: "tool-call",
-        toolCallType: "function",
         toolCallId: tc.toolCallId,
         toolName: tc.toolName,
-
+        input: tc.args,
       });
     }
   }
@@ -189,8 +189,9 @@ function buildStreamFinishResult(
     type: "finish",
     finishReason: toolCalls ? "tool-calls" : "stop",
     usage: {
-
-
+      inputTokens: usage.inputTokens || estimateTokens(fullPrompt),
+      outputTokens: usage.outputTokens || estimateTokens(totalText),
+      totalTokens: undefined,
     },
   });

@@ -204,14 +205,14 @@ async function streamClaude(
   fullPrompt: string,
   model: string | null,
   signal?: AbortSignal
-): Promise<ReadableStream<
+): Promise<ReadableStream<LanguageModelV2StreamPart>> {
   const command = getCliCommand(tool.id);
   validateCliCommand(command);
   const args = tool.buildStreamArgs(model);

   const resolved = await resolveWindowsCommand(command);

-  return new ReadableStream<
+  return new ReadableStream<LanguageModelV2StreamPart>({
     start(controller) {
       const proc = spawnChild(resolved.cmd, [...resolved.args, ...args], {
         stdio: ["pipe", "pipe", "pipe"],
@@ -234,6 +235,7 @@ async function streamClaude(
       let buffer = "";
       const textChunks: string[] = [];
       const usage = { inputTokens: 0, outputTokens: 0 };
+      let textStarted = false;

       proc.stdout.on("data", (chunk: Buffer) => {
         buffer += chunk.toString("utf-8");
@@ -245,8 +247,12 @@ async function streamClaude(
           if (!parsed) continue;

           if (parsed.type === "text" && parsed.text) {
+            if (!textStarted) {
+              controller.enqueue({ type: "text-start", id: "0" });
+              textStarted = true;
+            }
             textChunks.push(parsed.text);
-            controller.enqueue({ type: "text-delta",
+            controller.enqueue({ type: "text-delta", id: "0", delta: parsed.text });
           }
           if (parsed.usage) {
             if (parsed.usage.input_tokens) usage.inputTokens = parsed.usage.input_tokens;
@@ -268,6 +274,10 @@ async function streamClaude(

         processRemainingBuffer(buffer, textChunks, controller, usage);

+        if (textStarted) {
+          controller.enqueue({ type: "text-end", id: "0" });
+        }
+
         const { finishParts } = buildStreamFinishResult(textChunks, usage, fullPrompt);
         for (const part of finishParts) {
           controller.enqueue(part);
@@ -283,24 +293,24 @@ async function streamClaude(
   });
 }

-// ──
+// ── LanguageModelV2 implementation ──────────────────────────────────────

 export function createCliModel(
   toolId: string,
   modelOverride?: string
-):
+): LanguageModelV2 {
   const tool = CLI_TOOLS.find((t) => t.id === toolId);
   if (!tool) throw new Error(`Unknown CLI tool: ${toolId}`);

   const modelId = modelOverride || tool.defaultModel;

-  const model = {
-    specificationVersion: "
+  const model: LanguageModelV2 = {
+    specificationVersion: "v2" as const,
     provider: tool.id,
     modelId,
-
+    supportedUrls: {},

-    async doGenerate(options:
+    async doGenerate(options: LanguageModelV2CallOptions) {
       const fullPrompt = buildFullPrompt(options);

       // Use async spawn to avoid blocking the event loop
@@ -308,26 +318,40 @@ export function createCliModel(

       const { cleanText, toolCalls } = processCliOutput(output);

+      const content: LanguageModelV2Content[] = [];
+      if (cleanText) {
+        content.push({ type: "text", text: cleanText });
+      }
+      if (toolCalls) {
+        for (const tc of toolCalls) {
+          content.push({
+            type: "tool-call",
+            toolCallId: tc.toolCallId,
+            toolName: tc.toolName,
+            input: tc.args,
+          });
+        }
+      }
+
       return {
-
-        toolCalls,
-        finishReason: toolCalls
-          ? ("tool-calls" as const)
-          : ("stop" as const),
+        content,
+        finishReason: toolCalls ? "tool-calls" as const : "stop" as const,
         usage: {
-
-
+          inputTokens: estimateTokens(fullPrompt),
+          outputTokens: estimateTokens(output),
+          totalTokens: undefined,
+        },
+        request: {
+          body: fullPrompt,
         },
-
-
-        rawSettings: { toolId, modelId },
+        response: {
+          headers: {},
         },
-        rawResponse: { headers: {} },
         warnings: [],
       };
     },

-    async doStream(options:
+    async doStream(options: LanguageModelV2CallOptions) {
       const fullPrompt = buildFullPrompt(options);

       // Use native streaming for Claude Code
@@ -336,12 +360,12 @@ export function createCliModel(

       return {
         stream,
-
-
-
+        request: {
+          body: fullPrompt,
+        },
+        response: {
+          headers: {},
         },
-        rawResponse: { headers: {} },
-        warnings: [],
       };
     }

@@ -350,19 +374,20 @@ export function createCliModel(

       const { cleanText, toolCalls } = processCliOutput(output);

-      const stream = new ReadableStream<
+      const stream = new ReadableStream<LanguageModelV2StreamPart>({
         start(controller) {
           if (cleanText) {
-            controller.enqueue({ type: "text-
+            controller.enqueue({ type: "text-start", id: "0" });
+            controller.enqueue({ type: "text-delta", id: "0", delta: cleanText });
+            controller.enqueue({ type: "text-end", id: "0" });
           }
           if (toolCalls) {
             for (const tc of toolCalls) {
               controller.enqueue({
                 type: "tool-call",
-                toolCallType: "function",
                 toolCallId: tc.toolCallId,
                 toolName: tc.toolName,
-
+                input: tc.args,
               });
             }
           }
@@ -370,8 +395,9 @@ export function createCliModel(
             type: "finish",
             finishReason: toolCalls ? "tool-calls" : "stop",
             usage: {
-
-
+              inputTokens: estimateTokens(fullPrompt),
+              outputTokens: estimateTokens(output),
+              totalTokens: undefined,
             },
           });
           controller.close();
@@ -380,12 +406,12 @@ export function createCliModel(

       return {
         stream,
-
-
-
+        request: {
+          body: fullPrompt,
+        },
+        response: {
+          headers: {},
         },
-        rawResponse: { headers: {} },
-        warnings: [],
       };
     },
   };

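The stream changes above follow the V2 part protocol: each text block is bracketed by `text-start`/`text-end` parts that share an `id`, `text-delta` parts carry the payload in a `delta` field in between, and exactly one `finish` part reports usage. A minimal sketch of a conforming stream, with the part shapes narrowed to the fields this file uses (the full types live in `@ai-sdk/provider`):

```ts
// Narrowed stand-in for LanguageModelV2StreamPart; only the variants used here.
type StreamPart =
  | { type: "text-start"; id: string }
  | { type: "text-delta"; id: string; delta: string }
  | { type: "text-end"; id: string }
  | {
      type: "finish";
      finishReason: "stop" | "tool-calls";
      usage: { inputTokens: number; outputTokens: number; totalTokens?: number };
    };

function textToStream(
  chunks: string[],
  usage: { inputTokens: number; outputTokens: number },
): ReadableStream<StreamPart> {
  return new ReadableStream<StreamPart>({
    start(controller) {
      // One text block: start, then deltas, then end, all sharing id "0".
      controller.enqueue({ type: "text-start", id: "0" });
      for (const delta of chunks) {
        controller.enqueue({ type: "text-delta", id: "0", delta });
      }
      controller.enqueue({ type: "text-end", id: "0" });
      // A single finish part closes out the stream with usage totals.
      controller.enqueue({ type: "finish", finishReason: "stop", usage });
      controller.close();
    },
  });
}
```
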
package/src/config/cli-prompt-formatter.ts
CHANGED

@@ -1,10 +1,10 @@
-import type {
+import type { LanguageModelV2CallOptions } from "@ai-sdk/provider";
 import { debugLog } from "../utils/debug.js";

 // ── Prompt formatting ───────────────────────────────────────────────────

 export function formatUserPrompt(
-  prompt:
+  prompt: LanguageModelV2CallOptions["prompt"]
 ): string {
   const parts: string[] = [];

@@ -30,7 +30,11 @@ export function formatUserPrompt(
       case "tool":
         for (const part of msg.content) {
           parts.push(
-            `[Tool result for ${part.toolCallId}]\n${JSON.stringify(
+            `[Tool result for ${part.toolCallId}]\n${JSON.stringify(
+              part.output && typeof part.output === "object" && "value" in part.output
+                ? part.output.value
+                : part.output
+            )}`
           );
         }
         break;
@@ -41,9 +45,9 @@ export function formatUserPrompt(
 }

 export function formatToolsForPrompt(
-
+  tools: LanguageModelV2CallOptions["tools"]
 ): string {
-  if (
+  if (!tools || tools.length === 0) {
     return "";
   }

@@ -53,9 +57,9 @@ export function formatToolsForPrompt(
   text +=
     '```json\n{"tool_calls": [{"id": "call_1", "name": "toolName", "arguments": {...}}]}\n```\n\n';

-  for (const tool of
+  for (const tool of tools) {
     if (tool.type === "function") {
-      text += `### ${tool.name}\n${tool.description || ""}\nParameters: ${JSON.stringify(tool.
+      text += `### ${tool.name}\n${tool.description || ""}\nParameters: ${JSON.stringify(tool.inputSchema)}\n\n`;
     }
   }

@@ -102,8 +106,8 @@ export function parseToolCalls(text: string): ParsedToolCall[] | undefined {
  * Build the full prompt from SDK call options.
  * Shared between doGenerate and doStream to avoid duplication.
  */
-export function buildFullPrompt(options:
-  const toolsText = formatToolsForPrompt(options.
+export function buildFullPrompt(options: LanguageModelV2CallOptions): string {
+  const toolsText = formatToolsForPrompt(options.tools);
   return formatUserPrompt(options.prompt) + toolsText;
 }

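The new ternary exists because in V2 call options a tool-result part's `output` is a wrapper object rather than a bare value, so the formatter unwraps `output.value` before stringifying. A sketch of the unwrapping rule, assuming a simplified wrapper shape (the real `output` union in `@ai-sdk/provider` has more variants):

```ts
// Simplified: the real tool-result output type is a larger discriminated union.
type ToolOutput = { type: "json" | "text"; value: unknown } | unknown;

function unwrapToolOutput(output: ToolOutput): unknown {
  // Mirror the formatter's check: unwrap { value } wrappers, pass anything else through.
  if (output && typeof output === "object" && "value" in output) {
    return (output as { value: unknown }).value;
  }
  return output;
}
```
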
package/src/config/providers.ts
CHANGED

@@ -1,4 +1,4 @@
-import type {
+import type { LanguageModelV2 } from "@ai-sdk/provider";
 import type { OpenReportConfig } from "./schema.js";
 import { createCliModel } from "./cli-model.js";
 import { CLI_TOOLS } from "./cli-detection.js";
@@ -6,7 +6,7 @@ import { ProviderError } from "../errors.js";

 export interface ProviderEntry {
   id: string;
-  createModel: (modelId: string) =>
+  createModel: (modelId: string) => LanguageModelV2;
   defaultModel: string;
   type: "api" | "local" | "cli";
 }
@@ -21,7 +21,7 @@ const PROVIDER_DEFAULTS: Record<string, string> = {

 // ── Lazy-loaded SDK factory ──────────────────────────────────────────────

-type SdkFactory = (modelId: string) =>
+type SdkFactory = (modelId: string) => LanguageModelV2;
 type SdkCreator<T> = (opts: Record<string, unknown>) => T;

 function createSdkFactory<T>(
@@ -132,7 +132,7 @@ export function createProviderRegistry(config: OpenReportConfig) {
       return providers.get(id);
     },

-    getModel(config: OpenReportConfig):
+    getModel(config: OpenReportConfig): LanguageModelV2 {
       const provider = providers.get(config.defaultProvider);
       if (!provider) {
         throw new ProviderError(`Unknown provider: ${config.defaultProvider}`);
@@ -153,28 +153,28 @@ export function createProviderRegistry(config: OpenReportConfig) {
 export type ProviderRegistry = ReturnType<typeof createProviderRegistry>;

 // ── Lazy model proxy ────────────────────────────────────────────────────
-// Creates a
+// Creates a LanguageModelV2 that only loads the SDK when doGenerate/doStream is called.

 function createLazyModel(
   getSdk: () => Promise<SdkFactory>,
   modelId: string,
   providerId: string,
-):
-  let _resolved:
+): LanguageModelV2 {
+  let _resolved: LanguageModelV2 | null = null;

-  async function resolve(): Promise<
+  async function resolve(): Promise<LanguageModelV2> {
     if (!_resolved) {
       const sdk = await getSdk();
-      _resolved = sdk(modelId) as
+      _resolved = sdk(modelId) as LanguageModelV2;
     }
     return _resolved;
   }

-  const base:
-    specificationVersion: "
+  const base: LanguageModelV2 = {
+    specificationVersion: "v2",
     provider: providerId,
     modelId,
-
+    supportedUrls: {},

     async doGenerate(options) {
       const model = await resolve();

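The lazy proxy keeps startup fast: the object satisfies the `LanguageModelV2` shape up front (static `specificationVersion`, `provider`, `modelId`, `supportedUrls`) but defers the SDK import until the first `doGenerate`/`doStream` call. A reduced sketch of the same pattern, where `loadSdk` is a hypothetical stand-in for the registry's dynamic SDK import:

```ts
import type { LanguageModelV2 } from "@ai-sdk/provider";

function lazyModel(
  loadSdk: () => Promise<(id: string) => LanguageModelV2>, // hypothetical loader
  modelId: string,
  providerId: string,
): LanguageModelV2 {
  let resolved: LanguageModelV2 | null = null;

  const resolve = async (): Promise<LanguageModelV2> => {
    // The SDK is imported at most once, on first use.
    if (!resolved) resolved = (await loadSdk())(modelId);
    return resolved;
  };

  return {
    specificationVersion: "v2",
    provider: providerId,
    modelId,
    supportedUrls: {},
    // Calls are forwarded to the real model once it has been resolved.
    doGenerate: async (options) => (await resolve()).doGenerate(options),
    doStream: async (options) => (await resolve()).doStream(options),
  };
}
```
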
package/src/config/resolve-provider.ts
CHANGED

@@ -1,4 +1,4 @@
-import type {
+import type { LanguageModelV2 } from "@ai-sdk/provider";
 import type { OpenReportConfig } from "./schema.js";
 import type { ProviderRegistry } from "./providers.js";
 import { detectInstalledClis } from "./cli-detection.js";
@@ -6,7 +6,7 @@ import { ProviderError } from "../errors.js";
 import { debugLog } from "../utils/debug.js";

 export interface ResolvedProvider {
-  model:
+  model: LanguageModelV2;
   effectiveProvider: string;
   effectiveModel: string;
   warnings: string[];
@@ -33,7 +33,7 @@ export async function resolveProvider(
     effectiveModel = detectedClis[0].defaultModel;
   }

-  let model:
+  let model: LanguageModelV2;
   try {
     const provider = registry.getProvider(effectiveProvider);
     if (provider) {

package/src/config/schema.ts
CHANGED

@@ -17,7 +17,11 @@ export const OpenReportConfigSchema = z.object({
       format: z.enum(["markdown", "json"]).default("markdown"),
       includeMetadata: z.boolean().default(true),
     })
-    .default({
+    .default(() => ({
+      directory: ".openreport/reports",
+      format: "markdown" as const,
+      includeMetadata: true,
+    })),
   agents: z
     .object({
       maxConcurrency: z.number().min(1).max(10).default(3),
@@ -25,7 +29,12 @@ export const OpenReportConfigSchema = z.object({
       temperature: z.number().min(0).max(1).default(0.3),
       maxTokens: z.number().default(8192),
     })
-    .default({
+    .default(() => ({
+      maxConcurrency: 3,
+      maxStepsOverride: {} as Record<string, number>,
+      temperature: 0.3,
+      maxTokens: 8192,
+    })),
   modelTokenLimits: z.record(z.string(), z.number()).default({}).optional(),
   scan: z
     .object({
@@ -33,19 +42,29 @@ export const OpenReportConfigSchema = z.object({
       maxFileSize: z.number().default(50000),
       maxDepth: z.number().default(10),
     })
-    .default({
+    .default(() => ({
+      exclude: [] as string[],
+      maxFileSize: 50000,
+      maxDepth: 10,
+    })),
   ui: z
     .object({
       theme: z.enum(["auto", "dark", "light"]).default("auto"),
       showTokenCount: z.boolean().default(true),
       streamOutput: z.boolean().default(true),
     })
-    .default({
+    .default(() => ({
+      theme: "auto" as const,
+      showTokenCount: true,
+      streamOutput: true,
+    })),
   features: z
     .object({
       todoList: z.boolean().default(false),
     })
-    .default({
+    .default(() => ({
+      todoList: false,
+    })),
 });

 export type OpenReportConfig = z.infer<typeof OpenReportConfigSchema>;

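Every `.default({...})` in this schema becomes `.default(() => ({...}))`. Zod v4 types object defaults as the fully populated output shape, and the function form additionally gives every parse a fresh object instead of one shared mutable default. A minimal sketch of the pattern, assuming zod@4:

```ts
import { z } from "zod"; // zod v4

const Scan = z
  .object({
    exclude: z.array(z.string()).default([]),
    maxFileSize: z.number().default(50000),
  })
  // Function form: each parse gets its own default object, never a shared reference.
  .default(() => ({ exclude: [] as string[], maxFileSize: 50000 }));

const Config = z.object({ scan: Scan });

console.log(Config.parse({}));
// -> { scan: { exclude: [], maxFileSize: 50000 } }
```
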
package/src/hooks/useReportGeneration.ts
CHANGED

@@ -2,7 +2,7 @@ import { useState, useEffect, useCallback, useRef, useMemo } from "react";
 import { orchestrate } from "../agents/orchestrator.js";
 import { saveReport } from "../storage/report-store.js";
 import { getErrorMessage } from "../utils/format.js";
-import type {
+import type { LanguageModelV2 } from "@ai-sdk/provider";
 import type {
   AgentStatus,
   PipelinePhase,
@@ -16,7 +16,7 @@ import type { ProgressTracker } from "../pipeline/progress.js";
 export interface UseReportGenerationOptions {
   projectRoot: string;
   reportType: string;
-  model:
+  model: LanguageModelV2;
   modelId: string;
   config: OpenReportConfig;
   generateTodoList?: boolean;

package/src/pipeline/agent-runner.ts
CHANGED

@@ -1,5 +1,5 @@
-import { generateText, streamText } from "ai";
-import type {
+import { generateText, streamText, stepCountIs } from "ai";
+import type { LanguageModelV2 } from "@ai-sdk/provider";
 import { ProgressTracker } from "./progress.js";
 import { extractSubReport } from "./extraction.js";
 import { buildFileContentBlock, type SharedContext } from "./context.js";
@@ -8,6 +8,7 @@ import { createBaseTools, createExtendedTools, type BaseTools, type ExtendedTool
 import { getRelevantFilePaths } from "../ingestion/context-selector.js";
 import { debugLog } from "../utils/debug.js";
 import { getErrorMessage } from "../utils/format.js";
+import { estimateTokens } from "../ingestion/token-budget.js";
 import type { AgentId, AgentDefinition, SubReport } from "../types/index.js";
 import type { OpenReportConfig } from "../config/schema.js";

@@ -169,7 +170,7 @@ This overview will be provided to specialized agents (security, performance, cod
 Output your analysis as plain markdown (no JSON, no tags).`;

 export async function runOrchestratorAgent(
-  model:
+  model: LanguageModelV2,
   sharedContext: SharedContext,
   config: OpenReportConfig,
   progress: ProgressTracker,
@@ -187,24 +188,38 @@ ${fileContentBlock}
 Produce your project overview now.`;

   try {
+    const estimatedInputTokens = estimateTokens(prompt);
+
     const fullText = await withRetry(async () => {
       const result = await streamText({
         model,
         prompt,
-
-        maxTokens: config.agents.maxTokens,
+        maxOutputTokens: config.agents.maxTokens,
         temperature: 0.3,
         abortSignal: signal,
       });

+      let outputCharCount = 0;
       const throttle = createStreamThrottle((preview) => {
         progress.setPhaseDetail(preview);
+        progress.setBaseTokens({
+          input: estimatedInputTokens,
+          output: Math.ceil(outputCharCount / 4),
+        });
       });

       for await (const chunk of result.textStream) {
+        outputCharCount += chunk.length;
         throttle.push(chunk);
       }

+      // Set final token count from actual usage
+      const usage = await result.usage;
+      progress.setBaseTokens({
+        input: usage.inputTokens || estimatedInputTokens,
+        output: usage.outputTokens || Math.ceil(outputCharCount / 4),
+      });
+
       return throttle.getFullText();
     });

@@ -221,7 +236,7 @@ Produce your project overview now.`;

 export async function runAgentWithTools(
   agentId: AgentId,
-  model:
+  model: LanguageModelV2,
   projectRoot: string,
   sharedContext: SharedContext,
   config: OpenReportConfig,
@@ -247,9 +262,9 @@ export async function runAgentWithTools(
     system: agentDef.systemPrompt,
     prompt: buildAnalysisPrompt(agentId, agentDef.name, contextWithFiles),
     tools,
-    maxSteps,
+    stopWhen: stepCountIs(maxSteps),
     temperature: config.agents.temperature,
-
+    maxOutputTokens: config.agents.maxTokens,
     abortSignal: signal,
     onStepFinish: (step) => {
       if (step.text) {
@@ -258,8 +273,8 @@ export async function runAgentWithTools(
       }
       if (step.usage) {
         progress.updateAgentTokens(agentId, {
-          input: step.usage.
-          output: step.usage.
+          input: step.usage.inputTokens ?? 0,
+          output: step.usage.outputTokens ?? 0,
         });
       }
     },
@@ -285,7 +300,7 @@ export async function runAgentWithTools(

 export async function runAgentWithCli(
   agentId: AgentId,
-  model:
+  model: LanguageModelV2,
   projectRoot: string,
   sharedContext: SharedContext,
   config: OpenReportConfig,
@@ -315,20 +330,26 @@ ${buildAnalysisPrompt(agentId, agentDef.name, "")}`;
   const result = await streamText({
     model,
     prompt,
-    maxSteps: 1,
     temperature: config.agents.temperature,
-
+    maxOutputTokens: config.agents.maxTokens,
     abortSignal: signal,
   });

   const pendingChunks: string[] = [];
+  const estimatedInputTokens = estimateTokens(prompt);
+  let outputCharCount = 0;
   const throttle = createStreamThrottle((preview) => {
     progress.updateAgentStatus(agentId, "running", preview);
     progress.addStreamText(pendingChunks.join(""), agentId);
+    progress.updateAgentTokens(agentId, {
+      input: estimatedInputTokens,
+      output: Math.ceil(outputCharCount / 4),
+    });
     pendingChunks.length = 0;
   });

   for await (const chunk of result.textStream) {
+    outputCharCount += chunk.length;
     pendingChunks.push(chunk);
     throttle.push(chunk);
   }
@@ -339,8 +360,8 @@ ${buildAnalysisPrompt(agentId, agentDef.name, "")}`;
   });

   progress.updateAgentTokens(agentId, {
-    input: usage.
-    output: usage.
+    input: usage.inputTokens ?? 0,
+    output: usage.outputTokens ?? 0,
   });

   const subReport = extractSubReport(

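These call-site edits track the AI SDK v6 renames: `maxTokens` becomes `maxOutputTokens`, per-call step limits move from `maxSteps` to a `stopWhen` predicate such as `stepCountIs(n)`, and usage fields become `inputTokens`/`outputTokens`. A hedged sketch of an equivalent multi-step call under ai@6, with a hypothetical `listFiles` tool and placeholder prompt:

```ts
import { generateText, stepCountIs, tool } from "ai";
import { z } from "zod";

declare const model: import("@ai-sdk/provider").LanguageModelV2; // any v2 model

const result = await generateText({
  model,
  prompt: "Summarize the repo layout.", // placeholder prompt
  tools: {
    // hypothetical tool for illustration only
    listFiles: tool({
      description: "List project files",
      inputSchema: z.object({ dir: z.string() }),
      execute: async ({ dir }) => `contents of ${dir}`,
    }),
  },
  stopWhen: stepCountIs(5),   // was: maxSteps: 5
  maxOutputTokens: 8192,      // was: maxTokens: 8192
});

// was: result.usage.promptTokens / result.usage.completionTokens
console.log(result.usage.inputTokens, result.usage.outputTokens);
```
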
package/src/pipeline/extraction.ts
CHANGED

@@ -65,13 +65,13 @@ export function extractSubReport(
   agentName: string,
   text: string,
   filesAnalyzed: number,
-  usage?: { promptTokens
+  usage?: { inputTokens?: number; outputTokens?: number; promptTokens?: number; completionTokens?: number }
 ): SubReport {
   const makeMeta = () => ({
     filesAnalyzed,
     tokensUsed: {
-      input: usage?.promptTokens || 0,
-      output: usage?.completionTokens || 0,
+      input: usage?.inputTokens || usage?.promptTokens || 0,
+      output: usage?.outputTokens || usage?.completionTokens || 0,
     },
     duration: 0,
   });

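The widened `usage` parameter makes `extractSubReport` tolerant of both naming schemes: it prefers the v6-style `inputTokens`/`outputTokens` and falls back to the older `promptTokens`/`completionTokens` when a caller still passes those.
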
package/src/pipeline/progress.ts
CHANGED

@@ -72,7 +72,7 @@ export class ProgressTracker extends TypedEventEmitter<ProgressEventMap> {
       status: "pending",
     };
     this.agents.set(agentId, status);
-    this.emit("agentStatusChange", status);
+    this.emit("agentStatusChange", { ...status });
   }

   updateAgentStatus(
@@ -125,6 +125,13 @@ export class ProgressTracker extends TypedEventEmitter<ProgressEventMap> {
     this.previousTokens.set(agentId, { input: tokens.input, output: tokens.output });

     this.emit("tokenUpdate", { ...this.totalTokens });
+    this.emit("agentStatusChange", { ...agent });
+  }
+
+  /** Set base token count (e.g. from orchestrator) before agents run. Agent deltas add on top. */
+  setBaseTokens(tokens: { input: number; output: number }): void {
+    this.totalTokens = { ...tokens };
+    this.emit("tokenUpdate", { ...this.totalTokens });
   }

   addStreamText(delta: string, agentId: AgentId): void {

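Emitting `{ ...status }` rather than the tracked object matters for the Ink/React layer: listeners receive a fresh object identity on each event, so `useState` setters and memoized components see the change. A minimal sketch of the failure mode, assuming a listener that stores the event payload in React state:

```ts
import { EventEmitter } from "node:events";

const emitter = new EventEmitter();
const status = { agentId: "security", tokens: 0 };

// If the emitter reuses one mutable object, a React listener doing
// setState(payload) receives the same reference twice and may skip a re-render.
emitter.emit("agentStatusChange", status);
status.tokens = 42;
emitter.emit("agentStatusChange", status); // same object identity as before

// Emitting a shallow copy gives each event a distinct identity:
emitter.emit("agentStatusChange", { ...status });
```
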
package/src/pipeline/runner.ts
CHANGED

@@ -1,5 +1,5 @@
 import { streamText } from "ai";
-import type {
+import type { LanguageModelV2 } from "@ai-sdk/provider";
 import { ProgressTracker } from "./progress.js";
 import { combineReport, countFindings, type CombineOptions } from "./combiner.js";
 import { getAgentById, getAgentsForReportType } from "../agents/registry.js";
@@ -31,7 +31,7 @@ import { isCliModel } from "../config/cli-model.js";
 export interface RunPipelineOptions {
   projectRoot: string;
   reportType: string;
-  model:
+  model: LanguageModelV2;
   modelId: string;
   config: OpenReportConfig;
   progress: ProgressTracker;
@@ -152,7 +152,7 @@ async function preReadFiles(
 // ── Phase 4: Orchestrator global analysis ─────────────────────────────

 async function runOrchestrator(
-  model:
+  model: LanguageModelV2,
   sharedContext: SharedContext,
   config: OpenReportConfig,
   progress: ProgressTracker,
@@ -179,7 +179,7 @@ async function runOrchestrator(

 async function executeAgents(
   applicableAgents: AgentId[],
-  model:
+  model: LanguageModelV2,
   projectRoot: string,
   sharedContext: SharedContext,
   config: OpenReportConfig,
@@ -249,7 +249,7 @@ async function executeAgents(
 // ── Phase 5.5: Generate todo list from findings ───────────────────────

 async function executeTodoAgent(
-  model:
+  model: LanguageModelV2,
   subReports: SubReport[],
   config: OpenReportConfig,
   progress: ProgressTracker,
@@ -303,8 +303,7 @@ Generate the prioritized todo list based on these findings. Wrap your entire res
   const response = await streamText({
     model,
     prompt,
-
-    maxTokens: config.agents.maxTokens,
+    maxOutputTokens: config.agents.maxTokens,
     temperature: config.agents.temperature,
     abortSignal: signal,
   });
@@ -330,7 +329,7 @@ Generate the prioritized todo list based on these findings. Wrap your entire res
 // ── Phase 6: Generate executive summary ───────────────────────────────

 async function generateExecutiveSummary(
-  model:
+  model: LanguageModelV2,
   subReports: SubReport[],
   projectName: string,
   classification: ProjectClassification,
@@ -379,8 +378,7 @@ Be specific, reference actual findings, and provide actionable insights. Write 4
   const result = await streamText({
     model,
     prompt: summaryPrompt,
-
-    maxTokens: 2000,
+    maxOutputTokens: 2000,
     temperature: 0.3,
     abortSignal: signal,
   });

package/src/schemas/report.ts
CHANGED

@@ -23,7 +23,11 @@ export const SubReportSchema = z.object({
       .default({ input: 0, output: 0 }),
     duration: z.number().default(0),
   })
-    .default({
+    .default(() => ({
+      filesAnalyzed: 0,
+      tokensUsed: { input: 0, output: 0 },
+      duration: 0,
+    })),
 });

 export const FindingSummarySchema = z.object({

package/src/screens/GenerationScreen.tsx
CHANGED

@@ -12,14 +12,14 @@ import { openInBrowser } from "../report/open-browser.js";
 import { REPORT_TYPES } from "../config/defaults.js";
 import { formatDuration, formatTokens } from "../utils/format.js";
 import { getGradeTerminalColor } from "../utils/grade-colors.js";
-import type {
+import type { LanguageModelV2 } from "@ai-sdk/provider";
 import type { Screen } from "../types/index.js";
 import type { OpenReportConfig } from "../config/schema.js";

 interface GenerationScreenProps {
   projectRoot: string;
   reportType: string;
-  model:
+  model: LanguageModelV2;
   modelId: string;
   config: OpenReportConfig;
   onNavigate: (screen: Screen, params?: Record<string, string>) => void;

package/src/tools/get-file-tree.ts
CHANGED

@@ -123,7 +123,7 @@ export function createGetFileTreeTool(projectRoot: string) {
   return tool({
     description:
       "Get the complete file tree of the project. Excludes node_modules, .git, dist, and binary files. Shows file sizes for large files.",
-
+    inputSchema: z.object({
       maxDepth: z
         .number()
         .optional()

package/src/tools/get-git-info.ts
CHANGED

@@ -19,7 +19,7 @@ export function createGetGitInfoTool(projectRoot: string) {
   return tool({
     description:
       "Get git information about the project: recent commits, branches, most-changed files, and contributors.",
-
+    inputSchema: z.object({
       type: z
         .enum(["summary", "recent-commits", "file-frequency", "contributors"])
         .describe("Type of git information to retrieve"),

package/src/tools/glob.ts
CHANGED

@@ -6,7 +6,7 @@ export function createGlobTool(projectRoot: string) {
   return tool({
     description:
       "Find files matching a glob pattern within the project. Returns up to 200 matching file paths.",
-
+    inputSchema: z.object({
       pattern: z
         .string()
         .describe(

package/src/tools/grep.ts
CHANGED

@@ -44,7 +44,7 @@ export function createGrepTool(projectRoot: string) {
   return tool({
     description:
       "Search for a regex pattern in project files. Returns matching lines with file paths and line numbers.",
-
+    inputSchema: z.object({
       pattern: z.string().describe("Regex pattern to search for"),
       filePattern: z
         .string()

package/src/tools/list-directory.ts
CHANGED

@@ -9,7 +9,7 @@ export function createListDirectoryTool(projectRoot: string) {
   return tool({
     description:
       "List the contents of a directory relative to the project root. Shows files and subdirectories with basic metadata.",
-
+    inputSchema: z.object({
       path: z
         .string()
         .default(".")

package/src/tools/read-file.ts
CHANGED

@@ -7,7 +7,7 @@ export function createReadFileTool(projectRoot: string) {
   return tool({
     description:
       "Read the contents of a file. Path must be relative to the project root. Can optionally read a specific range of lines.",
-
+    inputSchema: z.object({
       path: z.string().describe("Relative file path from project root"),
       maxLines: z
         .number()

package/src/tools/read-package-json.ts
CHANGED

@@ -7,7 +7,7 @@ export function createReadPackageJsonTool(projectRoot: string) {
   return tool({
     description:
       "Read and parse a package.json (or similar manifest) file. Returns structured data about dependencies, scripts, and metadata.",
-
+    inputSchema: z.object({
       path: z
         .string()
         .default("package.json")

package/src/tools/run-command.ts
CHANGED

@@ -87,7 +87,7 @@ export function createRunCommandTool(projectRoot: string) {
   return tool({
     description:
       "Run an allowed shell command in the project directory. Only safe, read-only commands are permitted (eslint, tsc --noEmit, npm audit, etc.).",
-
+    inputSchema: z.object({
       command: z.string().describe("The shell command to execute"),
     }),
     execute: async ({ command }) => {
