@brainst0rm/core 0.13.0 → 0.14.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51) hide show
  1. package/dist/chunk-M7BBX56R.js +340 -0
  2. package/dist/chunk-M7BBX56R.js.map +1 -0
  3. package/dist/{chunk-SWXTFHC7.js → chunk-Z5D2QZY6.js} +3 -3
  4. package/dist/chunk-Z5D2QZY6.js.map +1 -0
  5. package/dist/chunk-Z6ZWNWWR.js +34 -0
  6. package/dist/index.d.ts +2717 -188
  7. package/dist/index.js +16178 -7949
  8. package/dist/index.js.map +1 -1
  9. package/dist/self-extend-47LWSK3E.js +52 -0
  10. package/dist/self-extend-47LWSK3E.js.map +1 -0
  11. package/dist/skills/builtin/api-and-interface-design/SKILL.md +300 -0
  12. package/dist/skills/builtin/browser-testing-with-devtools/SKILL.md +307 -0
  13. package/dist/skills/builtin/ci-cd-and-automation/SKILL.md +391 -0
  14. package/dist/skills/builtin/code-review-and-quality/SKILL.md +353 -0
  15. package/dist/skills/builtin/code-simplification/SKILL.md +340 -0
  16. package/dist/skills/builtin/context-engineering/SKILL.md +301 -0
  17. package/dist/skills/builtin/daemon-operations/SKILL.md +55 -0
  18. package/dist/skills/builtin/debugging-and-error-recovery/SKILL.md +306 -0
  19. package/dist/skills/builtin/deprecation-and-migration/SKILL.md +207 -0
  20. package/dist/skills/builtin/documentation-and-adrs/SKILL.md +295 -0
  21. package/dist/skills/builtin/frontend-ui-engineering/SKILL.md +333 -0
  22. package/dist/skills/builtin/git-workflow-and-versioning/SKILL.md +303 -0
  23. package/dist/skills/builtin/github-collaboration/SKILL.md +215 -0
  24. package/dist/skills/builtin/godmode-operations/SKILL.md +68 -0
  25. package/dist/skills/builtin/idea-refine/SKILL.md +186 -0
  26. package/dist/skills/builtin/idea-refine/examples.md +244 -0
  27. package/dist/skills/builtin/idea-refine/frameworks.md +101 -0
  28. package/dist/skills/builtin/idea-refine/refinement-criteria.md +126 -0
  29. package/dist/skills/builtin/idea-refine/scripts/idea-refine.sh +15 -0
  30. package/dist/skills/builtin/incremental-implementation/SKILL.md +243 -0
  31. package/dist/skills/builtin/memory-init/SKILL.md +54 -0
  32. package/dist/skills/builtin/memory-reflection/SKILL.md +59 -0
  33. package/dist/skills/builtin/multi-model-routing/SKILL.md +56 -0
  34. package/dist/skills/builtin/performance-optimization/SKILL.md +291 -0
  35. package/dist/skills/builtin/planning-and-task-breakdown/SKILL.md +240 -0
  36. package/dist/skills/builtin/security-and-hardening/SKILL.md +368 -0
  37. package/dist/skills/builtin/shipping-and-launch/SKILL.md +310 -0
  38. package/dist/skills/builtin/spec-driven-development/SKILL.md +212 -0
  39. package/dist/skills/builtin/test-driven-development/SKILL.md +376 -0
  40. package/dist/skills/builtin/using-agent-skills/SKILL.md +173 -0
  41. package/dist/trajectory-analyzer-ZAI2XUAI.js +14 -0
  42. package/dist/{trajectory-capture-RF7TUN6I.js → trajectory-capture-ERPIVYQJ.js} +3 -3
  43. package/package.json +14 -11
  44. package/dist/chunk-OU3NPQBH.js +0 -87
  45. package/dist/chunk-OU3NPQBH.js.map +0 -1
  46. package/dist/chunk-PZ5AY32C.js +0 -10
  47. package/dist/chunk-SWXTFHC7.js.map +0 -1
  48. package/dist/trajectory-MOCIJBV6.js +0 -8
  49. /package/dist/{chunk-PZ5AY32C.js.map → chunk-Z6ZWNWWR.js.map} +0 -0
  50. /package/dist/{trajectory-MOCIJBV6.js.map → trajectory-analyzer-ZAI2XUAI.js.map} +0 -0
  51. /package/dist/{trajectory-capture-RF7TUN6I.js.map → trajectory-capture-ERPIVYQJ.js.map} +0 -0
@@ -0,0 +1,340 @@
1
// src/session/trajectory-analyzer.ts
import {
  readdirSync,
  readFileSync,
  writeFileSync,
  renameSync,
  existsSync,
  mkdirSync,
  statSync
} from "fs";
import { join, dirname } from "path";
import { homedir } from "os";
import { createLogger } from "@brainst0rm/shared";
14
// Namespaced structured logger shared by every function in this chunk.
var log = createLogger("trajectory-analyzer");
15
/**
 * Lower bound of the Wilson score confidence interval for a binomial
 * proportion. Penalizes small samples: a perfect record over 3 trials
 * scores far below a near-perfect record over 1000 trials.
 *
 * @param {number} successes - count of successful trials
 * @param {number} total - total trial count
 * @param {number} [z=1.96] - z-score for the confidence level (~95%)
 * @returns {number} lower bound in [0, 1]; 0 when total is 0
 */
function wilsonLowerBound(successes, total, z = 1.96) {
  if (total === 0) return 0;
  const zSq = z * z;
  const phat = successes / total;
  const adjustedCenter = phat + zSq / (2 * total);
  const halfWidth = z * Math.sqrt((phat * (1 - phat) + zSq / (4 * total)) / total);
  const bound = (adjustedCenter - halfWidth) / (1 + zSq / total);
  return Math.max(0, bound);
}
23
// Tool names treated as read-only inspections; used to count `summary.reads`
// when computing the Read:Edit ratio per session.
var READ_TOOLS = /* @__PURE__ */ new Set([
  "file_read",
  "glob",
  "grep",
  "list_dir",
  "git_status",
  "git_diff",
  "git_log",
  "memory"
]);
// Tool names treated as workspace mutations; "shell" is in this set,
// presumably because arbitrary commands can modify files — confirm intent.
var WRITE_TOOLS = /* @__PURE__ */ new Set([
  "file_write",
  "file_edit",
  "multi_edit",
  "batch_edit",
  "shell"
]);
40
/**
 * Parse a single trajectory JSONL file into a per-session summary.
 *
 * Every non-empty line is parsed as an independent JSON event; malformed
 * lines are silently skipped. Returns null for an empty file or when the
 * file cannot be read at all (the failure is logged).
 *
 * @param {string} filePath - path to a .jsonl trajectory file
 * @returns {object|null} session summary, or null when unusable
 */
function analyzeTrajectoryFile(filePath) {
  try {
    const entries = readFileSync(filePath, "utf-8")
      .split("\n")
      .filter((line) => line.trim());
    if (entries.length === 0) return null;
    const summary = {
      sessionId: "",
      projectPath: "",
      model: "",
      provider: "",
      taskType: "unknown",
      complexity: "unknown",
      totalCost: 0,
      totalLLMCalls: 0,
      totalInputTokens: 0,
      totalOutputTokens: 0,
      toolCalls: 0,
      toolSuccesses: 0,
      toolFailures: 0,
      reads: 0,
      writes: 0,
      hadErrors: false,
      duration: 0,
      success: false
    };
    let startedAtMs = 0;
    let endedAtMs = 0;
    for (const entry of entries) {
      let event;
      try {
        event = JSON.parse(entry);
      } catch {
        continue; // Malformed line — skip it.
      }
      // First event carrying a sessionId wins.
      if (!summary.sessionId && event.sessionId) {
        summary.sessionId = event.sessionId;
      }
      const kind = event.type;
      if (kind === "session-start") {
        startedAtMs = new Date(event.timestamp).getTime();
        summary.projectPath = event.data?.projectPath ?? "";
      } else if (kind === "session-end") {
        endedAtMs = new Date(event.timestamp).getTime();
        // Success = no errors so far AND at least one LLM call that
        // actually produced output tokens. Tool use is not required.
        summary.success = !summary.hadErrors && summary.totalLLMCalls > 0 && summary.totalOutputTokens > 0;
      } else if (kind === "llm-call") {
        summary.totalLLMCalls++;
        summary.totalCost += event.data?.cost ?? 0;
        summary.totalInputTokens += event.data?.inputTokens ?? 0;
        summary.totalOutputTokens += event.data?.outputTokens ?? 0;
        // First call with a model attribute pins model/provider.
        if (!summary.model && event.data?.model) {
          summary.model = event.data.model;
          summary.provider = event.data.provider ?? "";
        }
      } else if (kind === "routing-decision") {
        if (event.data?.taskType) summary.taskType = event.data.taskType;
        if (event.data?.complexity) summary.complexity = event.data.complexity;
      } else if (kind === "tool-call") {
        summary.toolCalls++;
        const toolName = event.data?.name;
        if (toolName) {
          // A tool may be counted as both a read and a write if it
          // appears in both sets (none currently overlap).
          if (READ_TOOLS.has(toolName)) summary.reads++;
          if (WRITE_TOOLS.has(toolName)) summary.writes++;
        }
      } else if (kind === "tool-result") {
        if (event.data?.ok === true) summary.toolSuccesses++;
        else if (event.data?.ok === false) summary.toolFailures++;
      } else if (kind === "error") {
        summary.hadErrors = true;
      }
    }
    // Negative or zero when session-start/session-end events are missing.
    summary.duration = endedAtMs - startedAtMs;
    return summary;
  } catch (e) {
    log.warn({ err: e, filePath }, "Failed to analyze trajectory file");
    return null;
  }
}
125
/**
 * Aggregate per-session summaries into the routing-intelligence structure:
 * per-model totals, per-(model, taskType) buckets enriched with Wilson
 * lower bounds and value-per-dollar, per-taskType best/worst model
 * rankings, and per-project model preferences.
 *
 * NOTE: order of `summaries` matters only for the running avgCost mean's
 * floating-point rounding; totals and counts are order-independent.
 */
function aggregate(summaries) {
  const models = {};
  const projectStats = {};
  for (const s of summaries) {
    // Sessions that never recorded an llm-call carry no model — skip.
    if (!s.model) continue;
    if (!models[s.model]) {
      models[s.model] = {
        totalSessions: 0,
        successCount: 0,
        failureCount: 0,
        totalCost: 0,
        totalReads: 0,
        totalWrites: 0,
        totalToolCalls: 0,
        totalToolSuccesses: 0,
        byTaskType: {}
      };
    }
    const m = models[s.model];
    m.totalSessions++;
    if (s.success) m.successCount++;
    else m.failureCount++;
    m.totalCost += s.totalCost;
    m.totalReads += s.reads;
    m.totalWrites += s.writes;
    m.totalToolCalls += s.toolCalls;
    m.totalToolSuccesses += s.toolSuccesses;
    if (!m.byTaskType[s.taskType]) {
      m.byTaskType[s.taskType] = { successes: 0, failures: 0, avgCost: 0 };
    }
    const t = m.byTaskType[s.taskType];
    if (s.success) t.successes++;
    else t.failures++;
    // Incremental mean: (successes + failures) is the count AFTER the
    // increment above, so (count - 1) weights the previous average.
    t.avgCost = (t.avgCost * (t.successes + t.failures - 1) + s.totalCost) / (t.successes + t.failures);
    // Per-project win/loss tallies for the preference table below.
    if (s.projectPath) {
      if (!projectStats[s.projectPath]) projectStats[s.projectPath] = {};
      if (!projectStats[s.projectPath][s.model]) {
        projectStats[s.projectPath][s.model] = { successes: 0, failures: 0 };
      }
      if (s.success) projectStats[s.projectPath][s.model].successes++;
      else projectStats[s.projectPath][s.model].failures++;
    }
  }
  const taskTypeBest = {};
  // Models with fewer samples than this per task type are excluded from
  // best/worst ranking — too little signal.
  const MIN_SAMPLES = 5;
  // Keeps value-per-dollar finite for zero-cost (e.g. local) models.
  const COST_EPSILON = 1e-3;
  const modelsOut = {};
  for (const [modelId, stats] of Object.entries(models)) {
    // Enrich each task-type bucket with the Wilson lower bound and the
    // cost-adjusted score before publishing it.
    const enrichedByTaskType = {};
    for (const [taskType, t] of Object.entries(stats.byTaskType)) {
      const total = t.successes + t.failures;
      const wlb = wilsonLowerBound(t.successes, total);
      const vpd = wlb / (t.avgCost + COST_EPSILON);
      enrichedByTaskType[taskType] = {
        successes: t.successes,
        failures: t.failures,
        avgCost: t.avgCost,
        wilsonLowerBound: wlb,
        valuePerDollar: vpd
      };
    }
    modelsOut[modelId] = {
      totalSessions: stats.totalSessions,
      successCount: stats.successCount,
      failureCount: stats.failureCount,
      successRate: stats.totalSessions > 0 ? stats.successCount / stats.totalSessions : 0,
      avgCostPerSession: stats.totalSessions > 0 ? stats.totalCost / stats.totalSessions : 0,
      // NOTE(review): Infinity (no writes) becomes null when this object
      // is JSON.stringify'd to disk — consumers should tolerate null here.
      avgReadEditRatio: stats.totalWrites > 0 ? stats.totalReads / stats.totalWrites : Infinity,
      avgToolSuccessRate: stats.totalToolCalls > 0 ? stats.totalToolSuccesses / stats.totalToolCalls : 0,
      byTaskType: enrichedByTaskType
    };
    // Rank this model into per-taskType best/worst slots using the Wilson
    // bound (not raw success rate) and the cost-adjusted value score.
    for (const [taskType, t] of Object.entries(enrichedByTaskType)) {
      const total = t.successes + t.failures;
      if (total < MIN_SAMPLES) continue;
      // First qualifying model seeds the slot as both best and worst.
      if (!taskTypeBest[taskType]) {
        taskTypeBest[taskType] = {
          bestModel: modelId,
          bestModelSuccessRate: t.wilsonLowerBound,
          bestValueModel: modelId,
          bestValueScore: t.valuePerDollar,
          worstModel: modelId,
          worstModelSuccessRate: t.wilsonLowerBound,
          totalSamples: total
        };
        continue;
      }
      const slot = taskTypeBest[taskType];
      // totalSamples only counts models that met MIN_SAMPLES.
      slot.totalSamples += total;
      if (t.wilsonLowerBound > slot.bestModelSuccessRate) {
        slot.bestModel = modelId;
        slot.bestModelSuccessRate = t.wilsonLowerBound;
      }
      if (t.wilsonLowerBound < slot.worstModelSuccessRate) {
        slot.worstModel = modelId;
        slot.worstModelSuccessRate = t.wilsonLowerBound;
      }
      if (t.valuePerDollar > slot.bestValueScore) {
        slot.bestValueModel = modelId;
        slot.bestValueScore = t.valuePerDollar;
      }
    }
  }
  // Per-project preferred model: highest raw success rate among models
  // with at least 2 samples on that project. preferredModel stays null
  // when no model qualifies.
  const projectPreferences = {};
  for (const [projectPath, projectModels] of Object.entries(projectStats)) {
    let bestModel = null;
    let bestRate = 0;
    let totalSessions = 0;
    for (const [modelId, stats] of Object.entries(projectModels)) {
      const total = stats.successes + stats.failures;
      totalSessions += total;
      if (total < 2) continue;
      const rate = stats.successes / total;
      if (rate > bestRate) {
        bestModel = modelId;
        bestRate = rate;
      }
    }
    projectPreferences[projectPath] = {
      preferredModel: bestModel,
      preferredModelSuccessRate: bestRate,
      sessionsOnProject: totalSessions
    };
  }
  return {
    updatedAt: (/* @__PURE__ */ new Date()).toISOString(),
    sessionsAnalyzed: summaries.length,
    models: modelsOut,
    taskTypes: taskTypeBest,
    projectPreferences
  };
}
256
/**
 * Scan trajectory .jsonl files under the trajectories directory, aggregate
 * them into routing intelligence, and atomically persist the result as JSON.
 *
 * @param {object} [opts]
 * @param {string} [opts.trajectoriesDir] - source dir (default ~/.brainstorm/trajectories)
 * @param {string} [opts.outputPath] - destination JSON file (default ~/.brainstorm/routing-intelligence.json)
 * @param {number} [opts.maxAgeDays=30] - ignore files older than this (by mtime)
 * @returns {object} the computed RoutingIntelligence (empty when the source dir is missing)
 */
function analyzeTrajectories(opts) {
  const trajectoriesDir = opts?.trajectoriesDir ?? join(homedir(), ".brainstorm", "trajectories");
  const outputPath = opts?.outputPath ?? join(homedir(), ".brainstorm", "routing-intelligence.json");
  const maxAgeDays = opts?.maxAgeDays ?? 30;
  if (!existsSync(trajectoriesDir)) {
    log.info(
      { trajectoriesDir },
      "No trajectories directory \u2014 nothing to analyze"
    );
    // Nothing on disk: return an empty intelligence record without writing.
    return {
      updatedAt: (/* @__PURE__ */ new Date()).toISOString(),
      sessionsAnalyzed: 0,
      models: {},
      taskTypes: {},
      projectPreferences: {}
    };
  }
  const files = readdirSync(trajectoriesDir).filter(
    (f) => f.endsWith(".jsonl")
  );
  const cutoffMs = Date.now() - maxAgeDays * 24 * 60 * 60 * 1e3;
  const summaries = [];
  for (const file of files) {
    const filePath = join(trajectoriesDir, file);
    try {
      const stat = statSync(filePath);
      // Skip files older than the retention window.
      if (stat.mtimeMs < cutoffMs) continue;
    } catch {
      // File vanished between readdir and stat — skip it.
      continue;
    }
    const summary = analyzeTrajectoryFile(filePath);
    if (summary) summaries.push(summary);
  }
  log.info(
    { sessionCount: summaries.length, filesScanned: files.length },
    "Analyzed trajectories"
  );
  const intelligence = aggregate(summaries);
  // Fix: create the directory that actually contains outputPath. Previously
  // this always created ~/.brainstorm, so a custom opts.outputPath whose
  // parent directory did not exist made the write below fail.
  const outputDir = dirname(outputPath);
  if (!existsSync(outputDir)) mkdirSync(outputDir, { recursive: true });
  // Atomic write: temp file + rename so parallel sessions cannot interleave
  // partial writes into routing-intelligence.json.
  const tmpPath = `${outputPath}.${process.pid}.tmp`;
  writeFileSync(tmpPath, JSON.stringify(intelligence, null, 2), "utf-8");
  renameSync(tmpPath, outputPath);
  log.info({ outputPath }, "Routing intelligence updated");
  return intelligence;
}
303
/**
 * Load persisted routing intelligence from its default location on disk.
 *
 * @returns {object|null} the parsed intelligence object, or null when the
 *   file is missing or unparseable (parse failures are logged).
 */
function loadRoutingIntelligence() {
  const intelligencePath = join(homedir(), ".brainstorm", "routing-intelligence.json");
  if (!existsSync(intelligencePath)) return null;
  try {
    const raw = readFileSync(intelligencePath, "utf-8");
    return JSON.parse(raw);
  } catch (e) {
    log.warn({ err: e }, "Failed to load routing intelligence");
    return null;
  }
}
313
/**
 * Flatten routing intelligence into the per-(taskType, model) record list
 * the router's historical-stats loader consumes. Buckets with zero samples
 * are dropped; latency is reported as 0 (not yet tracked per task-type).
 *
 * @param {object} intelligence - a RoutingIntelligence object
 * @returns {Array<object>} flat stat records
 */
function toHistoricalStats(intelligence) {
  const records = [];
  for (const [modelId, model] of Object.entries(intelligence.models)) {
    for (const [taskType, bucket] of Object.entries(model.byTaskType)) {
      const samples = bucket.successes + bucket.failures;
      if (samples === 0) continue;
      records.push({
        taskType,
        modelId,
        successes: bucket.successes,
        failures: bucket.failures,
        // Not yet tracked per task-type.
        avgLatencyMs: 0,
        avgCost: bucket.avgCost,
        samples
      });
    }
  }
  return records;
}
333
+
334
+ export {
335
+ wilsonLowerBound,
336
+ analyzeTrajectories,
337
+ loadRoutingIntelligence,
338
+ toHistoricalStats
339
+ };
340
+ //# sourceMappingURL=chunk-M7BBX56R.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/session/trajectory-analyzer.ts"],"sourcesContent":["/**\n * Trajectory Analyzer — the learning loop.\n *\n * Reads trajectory JSONL files from ~/.brainstorm/trajectories/,\n * extracts per-session signals (model performance, task type, cost,\n * Read:Edit ratio, tool success), and aggregates them into a\n * routing-intelligence.json file that the router uses as priors.\n *\n * This is the flywheel: every session makes routing smarter.\n *\n * Schema of routing-intelligence.json:\n * {\n * updatedAt: ISO,\n * sessionsAnalyzed: N,\n * models: {\n * \"anthropic/claude-sonnet-4.6\": {\n * totalSessions: N,\n * successCount: N,\n * failureCount: N,\n * avgCostPerSession: N,\n * avgReadEditRatio: N,\n * avgToolSuccessRate: N,\n * byTaskType: {\n * \"code-generation\": { successes: N, failures: N },\n * \"refactoring\": { successes: N, failures: N }\n * }\n * }\n * },\n * taskTypes: {\n * \"code-generation\": { bestModel: \"...\", worstModel: \"...\" }\n * },\n * projectPreferences: {\n * \"<project-hash>\": { preferredModel: \"...\", successRate: N }\n * }\n * }\n */\n\nimport {\n readdirSync,\n readFileSync,\n writeFileSync,\n renameSync,\n existsSync,\n mkdirSync,\n statSync,\n} from \"node:fs\";\nimport { join } from \"node:path\";\nimport { homedir } from \"node:os\";\nimport { createLogger } from \"@brainst0rm/shared\";\n\nconst log = createLogger(\"trajectory-analyzer\");\n\ninterface ModelStats {\n totalSessions: number;\n successCount: number;\n failureCount: number;\n totalCost: number;\n totalReads: number;\n totalWrites: number;\n totalToolCalls: number;\n totalToolSuccesses: number;\n byTaskType: Record<\n string,\n {\n successes: number;\n failures: number;\n avgCost: number;\n }\n >;\n}\n\nexport interface RoutingIntelligence {\n updatedAt: string;\n sessionsAnalyzed: number;\n models: Record<\n string,\n {\n totalSessions: number;\n successCount: number;\n failureCount: number;\n successRate: number;\n avgCostPerSession: 
number;\n avgReadEditRatio: number;\n avgToolSuccessRate: number;\n byTaskType: Record<\n string,\n {\n successes: number;\n failures: number;\n avgCost: number;\n /** Wilson lower bound on success rate at 95% confidence — penalizes\n * low-sample claims. 100% on 3 trials gets a much lower bound than\n * 99% on 1000 trials. Use this for ranking, not raw successRate.\n * Optional for backward compatibility with pre-WLB intelligence files. */\n wilsonLowerBound?: number;\n /** Cost-adjusted score: wilsonLowerBound / (avgCost + epsilon).\n * Higher is better. Lets a slightly-worse model that's 10x cheaper\n * win over a marginally-better expensive model.\n * Optional for backward compatibility. */\n valuePerDollar?: number;\n }\n >;\n }\n >;\n taskTypes: Record<\n string,\n {\n /** Highest Wilson-bound success rate, ignoring cost. */\n bestModel: string | null;\n bestModelSuccessRate: number;\n /** Highest cost-adjusted score (Wilson bound / avg cost).\n * Optional for backward compatibility. */\n bestValueModel?: string | null;\n bestValueScore?: number;\n /** Lowest Wilson-bound success rate (with min sample size).\n * Optional for backward compatibility. */\n worstModel?: string | null;\n worstModelSuccessRate?: number;\n totalSamples: number;\n }\n >;\n projectPreferences: Record<\n string,\n {\n preferredModel: string | null;\n preferredModelSuccessRate: number;\n sessionsOnProject: number;\n }\n >;\n}\n\n/**\n * Wilson score interval lower bound at 95% confidence.\n *\n * Penalizes low-sample claims — 100% success on 3 trials gets a much lower\n * bound (~0.31) than 99% on 1000 trials (~0.98). Use for ranking when sample\n * sizes vary widely. Reference: Edwin B. 
Wilson, 1927.\n */\nexport function wilsonLowerBound(\n successes: number,\n total: number,\n z: number = 1.96,\n): number {\n if (total === 0) return 0;\n const p = successes / total;\n const denom = 1 + (z * z) / total;\n const center = p + (z * z) / (2 * total);\n const margin = z * Math.sqrt((p * (1 - p) + (z * z) / (4 * total)) / total);\n return Math.max(0, (center - margin) / denom);\n}\n\nconst READ_TOOLS = new Set([\n \"file_read\",\n \"glob\",\n \"grep\",\n \"list_dir\",\n \"git_status\",\n \"git_diff\",\n \"git_log\",\n \"memory\",\n]);\n\nconst WRITE_TOOLS = new Set([\n \"file_write\",\n \"file_edit\",\n \"multi_edit\",\n \"batch_edit\",\n \"shell\",\n]);\n\ninterface SessionSummary {\n sessionId: string;\n projectPath: string;\n model: string;\n provider: string;\n taskType: string;\n complexity: string;\n totalCost: number;\n totalLLMCalls: number;\n totalInputTokens: number;\n totalOutputTokens: number;\n toolCalls: number;\n toolSuccesses: number;\n toolFailures: number;\n reads: number;\n writes: number;\n hadErrors: boolean;\n duration: number;\n success: boolean;\n}\n\n/**\n * Parse a single trajectory JSONL file and extract a session summary.\n */\nfunction analyzeTrajectoryFile(filePath: string): SessionSummary | null {\n try {\n const content = readFileSync(filePath, \"utf-8\");\n const lines = content.split(\"\\n\").filter((l) => l.trim());\n if (lines.length === 0) return null;\n\n const summary: SessionSummary = {\n sessionId: \"\",\n projectPath: \"\",\n model: \"\",\n provider: \"\",\n taskType: \"unknown\",\n complexity: \"unknown\",\n totalCost: 0,\n totalLLMCalls: 0,\n totalInputTokens: 0,\n totalOutputTokens: 0,\n toolCalls: 0,\n toolSuccesses: 0,\n toolFailures: 0,\n reads: 0,\n writes: 0,\n hadErrors: false,\n duration: 0,\n success: false,\n };\n\n let sessionStartTime = 0;\n let sessionEndTime = 0;\n\n for (const line of lines) {\n let event: any;\n try {\n event = JSON.parse(line);\n } catch {\n continue;\n }\n\n if 
(!summary.sessionId && event.sessionId) {\n summary.sessionId = event.sessionId;\n }\n\n switch (event.type) {\n case \"session-start\":\n sessionStartTime = new Date(event.timestamp).getTime();\n summary.projectPath = event.data?.projectPath ?? \"\";\n break;\n\n case \"session-end\":\n sessionEndTime = new Date(event.timestamp).getTime();\n // Success means: no errors, at least one LLM call completed, and the\n // model produced output. Tool use is orthogonal — conversational\n // sessions (storm run without --tools) are valid successes when the\n // LLM actually answers the question. Previously this required\n // toolCalls > 0, which counted every non-tool session as a failure\n // and poisoned the Thompson sampling priors with false negatives.\n summary.success =\n !summary.hadErrors &&\n summary.totalLLMCalls > 0 &&\n summary.totalOutputTokens > 0;\n break;\n\n case \"llm-call\":\n summary.totalLLMCalls++;\n summary.totalCost += event.data?.cost ?? 0;\n summary.totalInputTokens += event.data?.inputTokens ?? 0;\n summary.totalOutputTokens += event.data?.outputTokens ?? 0;\n if (!summary.model && event.data?.model) {\n summary.model = event.data.model;\n summary.provider = event.data.provider ?? 
\"\";\n }\n break;\n\n case \"routing-decision\":\n if (event.data?.taskType) summary.taskType = event.data.taskType;\n if (event.data?.complexity)\n summary.complexity = event.data.complexity;\n break;\n\n case \"tool-call\":\n summary.toolCalls++;\n const toolName = event.data?.name;\n if (toolName) {\n if (READ_TOOLS.has(toolName)) summary.reads++;\n if (WRITE_TOOLS.has(toolName)) summary.writes++;\n }\n break;\n\n case \"tool-result\":\n if (event.data?.ok === true) summary.toolSuccesses++;\n else if (event.data?.ok === false) summary.toolFailures++;\n break;\n\n case \"error\":\n summary.hadErrors = true;\n break;\n }\n }\n\n summary.duration = sessionEndTime - sessionStartTime;\n return summary;\n } catch (e) {\n log.warn({ err: e, filePath }, \"Failed to analyze trajectory file\");\n return null;\n }\n}\n\n/**\n * Aggregate session summaries into routing intelligence.\n */\nfunction aggregate(summaries: SessionSummary[]): RoutingIntelligence {\n const models: Record<string, ModelStats> = {};\n const projectStats: Record<\n string,\n Record<string, { successes: number; failures: number }>\n > = {};\n\n for (const s of summaries) {\n if (!s.model) continue;\n\n // Model stats\n if (!models[s.model]) {\n models[s.model] = {\n totalSessions: 0,\n successCount: 0,\n failureCount: 0,\n totalCost: 0,\n totalReads: 0,\n totalWrites: 0,\n totalToolCalls: 0,\n totalToolSuccesses: 0,\n byTaskType: {},\n };\n }\n const m = models[s.model];\n m.totalSessions++;\n if (s.success) m.successCount++;\n else m.failureCount++;\n m.totalCost += s.totalCost;\n m.totalReads += s.reads;\n m.totalWrites += s.writes;\n m.totalToolCalls += s.toolCalls;\n m.totalToolSuccesses += s.toolSuccesses;\n\n if (!m.byTaskType[s.taskType]) {\n m.byTaskType[s.taskType] = { successes: 0, failures: 0, avgCost: 0 };\n }\n const t = m.byTaskType[s.taskType];\n if (s.success) t.successes++;\n else t.failures++;\n t.avgCost =\n (t.avgCost * (t.successes + t.failures - 1) + s.totalCost) /\n (t.successes 
+ t.failures);\n\n // Project stats\n if (s.projectPath) {\n if (!projectStats[s.projectPath]) projectStats[s.projectPath] = {};\n if (!projectStats[s.projectPath][s.model]) {\n projectStats[s.projectPath][s.model] = { successes: 0, failures: 0 };\n }\n if (s.success) projectStats[s.projectPath][s.model].successes++;\n else projectStats[s.projectPath][s.model].failures++;\n }\n }\n\n // Compute derived metrics and best/worst/best-value models per task type.\n // Track all candidates first, then pick best/worst at the end so a single\n // pass over all models can rank them properly.\n const taskTypeBest: Record<\n string,\n {\n bestModel: string | null;\n bestModelSuccessRate: number;\n bestValueModel: string | null;\n bestValueScore: number;\n worstModel: string | null;\n worstModelSuccessRate: number;\n totalSamples: number;\n }\n > = {};\n\n // Minimum samples before a model qualifies for best/worst ranking. Below\n // this we have no statistical signal — penalizing or rewarding based on a\n // few trials is noise. 
5 is the convention used in many production\n // bandit systems for \"warm-up\" before exploitation.\n const MIN_SAMPLES = 5;\n const COST_EPSILON = 0.001; // Avoid div-by-zero for free models like ollama\n\n const modelsOut: RoutingIntelligence[\"models\"] = {};\n for (const [modelId, stats] of Object.entries(models)) {\n // Enrich each task-type bucket with Wilson lower bound + value-per-dollar\n // BEFORE writing it back so the consumer sees both raw and adjusted.\n const enrichedByTaskType: Record<\n string,\n {\n successes: number;\n failures: number;\n avgCost: number;\n wilsonLowerBound: number;\n valuePerDollar: number;\n }\n > = {};\n for (const [taskType, t] of Object.entries(stats.byTaskType)) {\n const total = t.successes + t.failures;\n const wlb = wilsonLowerBound(t.successes, total);\n const vpd = wlb / (t.avgCost + COST_EPSILON);\n enrichedByTaskType[taskType] = {\n successes: t.successes,\n failures: t.failures,\n avgCost: t.avgCost,\n wilsonLowerBound: wlb,\n valuePerDollar: vpd,\n };\n }\n\n modelsOut[modelId] = {\n totalSessions: stats.totalSessions,\n successCount: stats.successCount,\n failureCount: stats.failureCount,\n successRate:\n stats.totalSessions > 0 ? stats.successCount / stats.totalSessions : 0,\n avgCostPerSession:\n stats.totalSessions > 0 ? stats.totalCost / stats.totalSessions : 0,\n avgReadEditRatio:\n stats.totalWrites > 0 ? stats.totalReads / stats.totalWrites : Infinity,\n avgToolSuccessRate:\n stats.totalToolCalls > 0\n ? stats.totalToolSuccesses / stats.totalToolCalls\n : 0,\n byTaskType: enrichedByTaskType,\n };\n\n // Rank this model against existing best/worst per task type. Use Wilson\n // lower bound (not raw success rate) so 100% on 3 samples doesn't beat\n // 99% on 1000 samples. 
Cost-adjusted ranking uses the same Wilson bound\n // divided by avg cost — a model that's 96% as good for 1/8th the cost\n // wins on value-per-dollar.\n for (const [taskType, t] of Object.entries(enrichedByTaskType)) {\n const total = t.successes + t.failures;\n if (total < MIN_SAMPLES) continue;\n\n if (!taskTypeBest[taskType]) {\n taskTypeBest[taskType] = {\n bestModel: modelId,\n bestModelSuccessRate: t.wilsonLowerBound,\n bestValueModel: modelId,\n bestValueScore: t.valuePerDollar,\n worstModel: modelId,\n worstModelSuccessRate: t.wilsonLowerBound,\n totalSamples: total,\n };\n continue;\n }\n\n const slot = taskTypeBest[taskType];\n slot.totalSamples += total;\n\n if (t.wilsonLowerBound > slot.bestModelSuccessRate) {\n slot.bestModel = modelId;\n slot.bestModelSuccessRate = t.wilsonLowerBound;\n }\n if (t.wilsonLowerBound < slot.worstModelSuccessRate) {\n slot.worstModel = modelId;\n slot.worstModelSuccessRate = t.wilsonLowerBound;\n }\n if (t.valuePerDollar > slot.bestValueScore) {\n slot.bestValueModel = modelId;\n slot.bestValueScore = t.valuePerDollar;\n }\n }\n }\n\n // Project preferences — best model per project\n const projectPreferences: RoutingIntelligence[\"projectPreferences\"] = {};\n for (const [projectPath, projectModels] of Object.entries(projectStats)) {\n let bestModel: string | null = null;\n let bestRate = 0;\n let totalSessions = 0;\n for (const [modelId, stats] of Object.entries(projectModels)) {\n const total = stats.successes + stats.failures;\n totalSessions += total;\n if (total < 2) continue;\n const rate = stats.successes / total;\n if (rate > bestRate) {\n bestModel = modelId;\n bestRate = rate;\n }\n }\n projectPreferences[projectPath] = {\n preferredModel: bestModel,\n preferredModelSuccessRate: bestRate,\n sessionsOnProject: totalSessions,\n };\n }\n\n return {\n updatedAt: new Date().toISOString(),\n sessionsAnalyzed: summaries.length,\n models: modelsOut,\n taskTypes: taskTypeBest,\n projectPreferences,\n };\n}\n\n/**\n * 
Read all trajectory files in ~/.brainstorm/trajectories/ and write\n * ~/.brainstorm/routing-intelligence.json with aggregated stats.\n */\nexport function analyzeTrajectories(opts?: {\n trajectoriesDir?: string;\n outputPath?: string;\n maxAgeDays?: number;\n}): RoutingIntelligence {\n const trajectoriesDir =\n opts?.trajectoriesDir ?? join(homedir(), \".brainstorm\", \"trajectories\");\n const outputPath =\n opts?.outputPath ??\n join(homedir(), \".brainstorm\", \"routing-intelligence.json\");\n const maxAgeDays = opts?.maxAgeDays ?? 30;\n\n if (!existsSync(trajectoriesDir)) {\n log.info(\n { trajectoriesDir },\n \"No trajectories directory — nothing to analyze\",\n );\n const empty: RoutingIntelligence = {\n updatedAt: new Date().toISOString(),\n sessionsAnalyzed: 0,\n models: {},\n taskTypes: {},\n projectPreferences: {},\n };\n return empty;\n }\n\n const files = readdirSync(trajectoriesDir).filter((f) =>\n f.endsWith(\".jsonl\"),\n );\n const cutoffMs = Date.now() - maxAgeDays * 24 * 60 * 60 * 1000;\n\n const summaries: SessionSummary[] = [];\n for (const file of files) {\n const filePath = join(trajectoriesDir, file);\n try {\n const stat = statSync(filePath);\n if (stat.mtimeMs < cutoffMs) continue; // Skip old files\n } catch {\n continue;\n }\n const summary = analyzeTrajectoryFile(filePath);\n if (summary) summaries.push(summary);\n }\n\n log.info(\n { sessionCount: summaries.length, filesScanned: files.length },\n \"Analyzed trajectories\",\n );\n\n const intelligence = aggregate(summaries);\n\n // Ensure output directory exists\n const outputDir = join(homedir(), \".brainstorm\");\n if (!existsSync(outputDir)) mkdirSync(outputDir, { recursive: true });\n\n // Atomic write: write to temp file then rename, so parallel sessions\n // don't corrupt routing-intelligence.json by interleaving writes.\n const tmpPath = `${outputPath}.${process.pid}.tmp`;\n writeFileSync(tmpPath, JSON.stringify(intelligence, null, 2), \"utf-8\");\n renameSync(tmpPath, 
outputPath);\n log.info({ outputPath }, \"Routing intelligence updated\");\n\n return intelligence;\n}\n\n/**\n * Load routing intelligence from disk. Returns empty state if not found.\n */\nexport function loadRoutingIntelligence(): RoutingIntelligence | null {\n const path = join(homedir(), \".brainstorm\", \"routing-intelligence.json\");\n if (!existsSync(path)) return null;\n try {\n return JSON.parse(readFileSync(path, \"utf-8\")) as RoutingIntelligence;\n } catch (e) {\n log.warn({ err: e }, \"Failed to load routing intelligence\");\n return null;\n }\n}\n\n/**\n * Convert RoutingIntelligence to the format BrainstormRouter.loadStats() expects.\n * This is the bridge that closes the learning loop: trajectories → analyzer →\n * intelligence → router priors → next session's decisions.\n */\nexport function toHistoricalStats(intelligence: RoutingIntelligence): Array<{\n taskType: string;\n modelId: string;\n successes: number;\n failures: number;\n avgLatencyMs: number;\n avgCost: number;\n samples: number;\n}> {\n const stats: Array<{\n taskType: string;\n modelId: string;\n successes: number;\n failures: number;\n avgLatencyMs: number;\n avgCost: number;\n samples: number;\n }> = [];\n\n for (const [modelId, model] of Object.entries(intelligence.models)) {\n for (const [taskType, t] of Object.entries(model.byTaskType)) {\n const samples = t.successes + t.failures;\n if (samples === 0) continue;\n stats.push({\n taskType,\n modelId,\n successes: t.successes,\n failures: t.failures,\n avgLatencyMs: 0, // Not yet tracked per task-type\n avgCost: t.avgCost,\n samples,\n });\n }\n }\n\n return 
stats;\n}\n"],"mappings":";AAqCA;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP,SAAS,YAAY;AACrB,SAAS,eAAe;AACxB,SAAS,oBAAoB;AAE7B,IAAM,MAAM,aAAa,qBAAqB;AAwFvC,SAAS,iBACd,WACA,OACA,IAAY,MACJ;AACR,MAAI,UAAU,EAAG,QAAO;AACxB,QAAM,IAAI,YAAY;AACtB,QAAM,QAAQ,IAAK,IAAI,IAAK;AAC5B,QAAM,SAAS,IAAK,IAAI,KAAM,IAAI;AAClC,QAAM,SAAS,IAAI,KAAK,MAAM,KAAK,IAAI,KAAM,IAAI,KAAM,IAAI,UAAU,KAAK;AAC1E,SAAO,KAAK,IAAI,IAAI,SAAS,UAAU,KAAK;AAC9C;AAEA,IAAM,aAAa,oBAAI,IAAI;AAAA,EACzB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,CAAC;AAED,IAAM,cAAc,oBAAI,IAAI;AAAA,EAC1B;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,CAAC;AA0BD,SAAS,sBAAsB,UAAyC;AACtE,MAAI;AACF,UAAM,UAAU,aAAa,UAAU,OAAO;AAC9C,UAAM,QAAQ,QAAQ,MAAM,IAAI,EAAE,OAAO,CAAC,MAAM,EAAE,KAAK,CAAC;AACxD,QAAI,MAAM,WAAW,EAAG,QAAO;AAE/B,UAAM,UAA0B;AAAA,MAC9B,WAAW;AAAA,MACX,aAAa;AAAA,MACb,OAAO;AAAA,MACP,UAAU;AAAA,MACV,UAAU;AAAA,MACV,YAAY;AAAA,MACZ,WAAW;AAAA,MACX,eAAe;AAAA,MACf,kBAAkB;AAAA,MAClB,mBAAmB;AAAA,MACnB,WAAW;AAAA,MACX,eAAe;AAAA,MACf,cAAc;AAAA,MACd,OAAO;AAAA,MACP,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,UAAU;AAAA,MACV,SAAS;AAAA,IACX;AAEA,QAAI,mBAAmB;AACvB,QAAI,iBAAiB;AAErB,eAAW,QAAQ,OAAO;AACxB,UAAI;AACJ,UAAI;AACF,gBAAQ,KAAK,MAAM,IAAI;AAAA,MACzB,QAAQ;AACN;AAAA,MACF;AAEA,UAAI,CAAC,QAAQ,aAAa,MAAM,WAAW;AACzC,gBAAQ,YAAY,MAAM;AAAA,MAC5B;AAEA,cAAQ,MAAM,MAAM;AAAA,QAClB,KAAK;AACH,6BAAmB,IAAI,KAAK,MAAM,SAAS,EAAE,QAAQ;AACrD,kBAAQ,cAAc,MAAM,MAAM,eAAe;AACjD;AAAA,QAEF,KAAK;AACH,2BAAiB,IAAI,KAAK,MAAM,SAAS,EAAE,QAAQ;AAOnD,kBAAQ,UACN,CAAC,QAAQ,aACT,QAAQ,gBAAgB,KACxB,QAAQ,oBAAoB;AAC9B;AAAA,QAEF,KAAK;AACH,kBAAQ;AACR,kBAAQ,aAAa,MAAM,MAAM,QAAQ;AACzC,kBAAQ,oBAAoB,MAAM,MAAM,eAAe;AACvD,kBAAQ,qBAAqB,MAAM,MAAM,gBAAgB;AACzD,cAAI,CAAC,QAAQ,SAAS,MAAM,MAAM,OAAO;AACvC,oBAAQ,QAAQ,MAAM,KAAK;AAC3B,oBAAQ,WAAW,MAAM,KAAK,YAAY;AAAA,UAC5C;AACA;AAAA,QAEF,KAAK;AACH,cAAI,MAAM,MAAM,SAAU,SAAQ,WAAW,MAAM,KAAK;AACxD,cAAI,MAAM,MAAM;AACd,oBAAQ,aAAa,MAAM,KAAK;AAClC;AAAA,QAEF,KAAK;AACH,kBAAQ;AACR,gBAAM,WAAW,MAAM,MAAM;AAC7B,cAAI,UAAU;AACZ
,gBAAI,WAAW,IAAI,QAAQ,EAAG,SAAQ;AACtC,gBAAI,YAAY,IAAI,QAAQ,EAAG,SAAQ;AAAA,UACzC;AACA;AAAA,QAEF,KAAK;AACH,cAAI,MAAM,MAAM,OAAO,KAAM,SAAQ;AAAA,mBAC5B,MAAM,MAAM,OAAO,MAAO,SAAQ;AAC3C;AAAA,QAEF,KAAK;AACH,kBAAQ,YAAY;AACpB;AAAA,MACJ;AAAA,IACF;AAEA,YAAQ,WAAW,iBAAiB;AACpC,WAAO;AAAA,EACT,SAAS,GAAG;AACV,QAAI,KAAK,EAAE,KAAK,GAAG,SAAS,GAAG,mCAAmC;AAClE,WAAO;AAAA,EACT;AACF;AAKA,SAAS,UAAU,WAAkD;AACnE,QAAM,SAAqC,CAAC;AAC5C,QAAM,eAGF,CAAC;AAEL,aAAW,KAAK,WAAW;AACzB,QAAI,CAAC,EAAE,MAAO;AAGd,QAAI,CAAC,OAAO,EAAE,KAAK,GAAG;AACpB,aAAO,EAAE,KAAK,IAAI;AAAA,QAChB,eAAe;AAAA,QACf,cAAc;AAAA,QACd,cAAc;AAAA,QACd,WAAW;AAAA,QACX,YAAY;AAAA,QACZ,aAAa;AAAA,QACb,gBAAgB;AAAA,QAChB,oBAAoB;AAAA,QACpB,YAAY,CAAC;AAAA,MACf;AAAA,IACF;AACA,UAAM,IAAI,OAAO,EAAE,KAAK;AACxB,MAAE;AACF,QAAI,EAAE,QAAS,GAAE;AAAA,QACZ,GAAE;AACP,MAAE,aAAa,EAAE;AACjB,MAAE,cAAc,EAAE;AAClB,MAAE,eAAe,EAAE;AACnB,MAAE,kBAAkB,EAAE;AACtB,MAAE,sBAAsB,EAAE;AAE1B,QAAI,CAAC,EAAE,WAAW,EAAE,QAAQ,GAAG;AAC7B,QAAE,WAAW,EAAE,QAAQ,IAAI,EAAE,WAAW,GAAG,UAAU,GAAG,SAAS,EAAE;AAAA,IACrE;AACA,UAAM,IAAI,EAAE,WAAW,EAAE,QAAQ;AACjC,QAAI,EAAE,QAAS,GAAE;AAAA,QACZ,GAAE;AACP,MAAE,WACC,EAAE,WAAW,EAAE,YAAY,EAAE,WAAW,KAAK,EAAE,cAC/C,EAAE,YAAY,EAAE;AAGnB,QAAI,EAAE,aAAa;AACjB,UAAI,CAAC,aAAa,EAAE,WAAW,EAAG,cAAa,EAAE,WAAW,IAAI,CAAC;AACjE,UAAI,CAAC,aAAa,EAAE,WAAW,EAAE,EAAE,KAAK,GAAG;AACzC,qBAAa,EAAE,WAAW,EAAE,EAAE,KAAK,IAAI,EAAE,WAAW,GAAG,UAAU,EAAE;AAAA,MACrE;AACA,UAAI,EAAE,QAAS,cAAa,EAAE,WAAW,EAAE,EAAE,KAAK,EAAE;AAAA,UAC/C,cAAa,EAAE,WAAW,EAAE,EAAE,KAAK,EAAE;AAAA,IAC5C;AAAA,EACF;AAKA,QAAM,eAWF,CAAC;AAML,QAAM,cAAc;AACpB,QAAM,eAAe;AAErB,QAAM,YAA2C,CAAC;AAClD,aAAW,CAAC,SAAS,KAAK,KAAK,OAAO,QAAQ,MAAM,GAAG;AAGrD,UAAM,qBASF,CAAC;AACL,eAAW,CAAC,UAAU,CAAC,KAAK,OAAO,QAAQ,MAAM,UAAU,GAAG;AAC5D,YAAM,QAAQ,EAAE,YAAY,EAAE;AAC9B,YAAM,MAAM,iBAAiB,EAAE,WAAW,KAAK;AAC/C,YAAM,MAAM,OAAO,EAAE,UAAU;AAC/B,yBAAmB,QAAQ,IAAI;AAAA,QAC7B,WAAW,EAAE;AAAA,QACb,UAAU,EAAE;AAAA,QACZ,SAAS,EAAE;AAAA,QACX,kBAAkB;AAAA,QAClB,gBAAgB;AAAA,MAClB;AAAA,IACF;AAEA,cAAU,OAAO,IAAI;AAAA,MACnB,eAAe,MAAM;AAAA,MACrB,cAAc,MAAM;AAAA,M
ACpB,cAAc,MAAM;AAAA,MACpB,aACE,MAAM,gBAAgB,IAAI,MAAM,eAAe,MAAM,gBAAgB;AAAA,MACvE,mBACE,MAAM,gBAAgB,IAAI,MAAM,YAAY,MAAM,gBAAgB;AAAA,MACpE,kBACE,MAAM,cAAc,IAAI,MAAM,aAAa,MAAM,cAAc;AAAA,MACjE,oBACE,MAAM,iBAAiB,IACnB,MAAM,qBAAqB,MAAM,iBACjC;AAAA,MACN,YAAY;AAAA,IACd;AAOA,eAAW,CAAC,UAAU,CAAC,KAAK,OAAO,QAAQ,kBAAkB,GAAG;AAC9D,YAAM,QAAQ,EAAE,YAAY,EAAE;AAC9B,UAAI,QAAQ,YAAa;AAEzB,UAAI,CAAC,aAAa,QAAQ,GAAG;AAC3B,qBAAa,QAAQ,IAAI;AAAA,UACvB,WAAW;AAAA,UACX,sBAAsB,EAAE;AAAA,UACxB,gBAAgB;AAAA,UAChB,gBAAgB,EAAE;AAAA,UAClB,YAAY;AAAA,UACZ,uBAAuB,EAAE;AAAA,UACzB,cAAc;AAAA,QAChB;AACA;AAAA,MACF;AAEA,YAAM,OAAO,aAAa,QAAQ;AAClC,WAAK,gBAAgB;AAErB,UAAI,EAAE,mBAAmB,KAAK,sBAAsB;AAClD,aAAK,YAAY;AACjB,aAAK,uBAAuB,EAAE;AAAA,MAChC;AACA,UAAI,EAAE,mBAAmB,KAAK,uBAAuB;AACnD,aAAK,aAAa;AAClB,aAAK,wBAAwB,EAAE;AAAA,MACjC;AACA,UAAI,EAAE,iBAAiB,KAAK,gBAAgB;AAC1C,aAAK,iBAAiB;AACtB,aAAK,iBAAiB,EAAE;AAAA,MAC1B;AAAA,IACF;AAAA,EACF;AAGA,QAAM,qBAAgE,CAAC;AACvE,aAAW,CAAC,aAAa,aAAa,KAAK,OAAO,QAAQ,YAAY,GAAG;AACvE,QAAI,YAA2B;AAC/B,QAAI,WAAW;AACf,QAAI,gBAAgB;AACpB,eAAW,CAAC,SAAS,KAAK,KAAK,OAAO,QAAQ,aAAa,GAAG;AAC5D,YAAM,QAAQ,MAAM,YAAY,MAAM;AACtC,uBAAiB;AACjB,UAAI,QAAQ,EAAG;AACf,YAAM,OAAO,MAAM,YAAY;AAC/B,UAAI,OAAO,UAAU;AACnB,oBAAY;AACZ,mBAAW;AAAA,MACb;AAAA,IACF;AACA,uBAAmB,WAAW,IAAI;AAAA,MAChC,gBAAgB;AAAA,MAChB,2BAA2B;AAAA,MAC3B,mBAAmB;AAAA,IACrB;AAAA,EACF;AAEA,SAAO;AAAA,IACL,YAAW,oBAAI,KAAK,GAAE,YAAY;AAAA,IAClC,kBAAkB,UAAU;AAAA,IAC5B,QAAQ;AAAA,IACR,WAAW;AAAA,IACX;AAAA,EACF;AACF;AAMO,SAAS,oBAAoB,MAIZ;AACtB,QAAM,kBACJ,MAAM,mBAAmB,KAAK,QAAQ,GAAG,eAAe,cAAc;AACxE,QAAM,aACJ,MAAM,cACN,KAAK,QAAQ,GAAG,eAAe,2BAA2B;AAC5D,QAAM,aAAa,MAAM,cAAc;AAEvC,MAAI,CAAC,WAAW,eAAe,GAAG;AAChC,QAAI;AAAA,MACF,EAAE,gBAAgB;AAAA,MAClB;AAAA,IACF;AACA,UAAM,QAA6B;AAAA,MACjC,YAAW,oBAAI,KAAK,GAAE,YAAY;AAAA,MAClC,kBAAkB;AAAA,MAClB,QAAQ,CAAC;AAAA,MACT,WAAW,CAAC;AAAA,MACZ,oBAAoB,CAAC;AAAA,IACvB;AACA,WAAO;AAAA,EACT;AAEA,QAAM,QAAQ,YAAY,eAAe,EAAE;AAAA,IAAO,CAAC,MACjD,EAAE,SAAS,QAAQ;AAAA,EACrB;AACA,QAAM,WAAW,KAAK,IAAI,IAAI,aAAa,KAAK,KAAK,KAAK;AAE1D,QAAM,YAA8B,CAAC;
AACrC,aAAW,QAAQ,OAAO;AACxB,UAAM,WAAW,KAAK,iBAAiB,IAAI;AAC3C,QAAI;AACF,YAAM,OAAO,SAAS,QAAQ;AAC9B,UAAI,KAAK,UAAU,SAAU;AAAA,IAC/B,QAAQ;AACN;AAAA,IACF;AACA,UAAM,UAAU,sBAAsB,QAAQ;AAC9C,QAAI,QAAS,WAAU,KAAK,OAAO;AAAA,EACrC;AAEA,MAAI;AAAA,IACF,EAAE,cAAc,UAAU,QAAQ,cAAc,MAAM,OAAO;AAAA,IAC7D;AAAA,EACF;AAEA,QAAM,eAAe,UAAU,SAAS;AAGxC,QAAM,YAAY,KAAK,QAAQ,GAAG,aAAa;AAC/C,MAAI,CAAC,WAAW,SAAS,EAAG,WAAU,WAAW,EAAE,WAAW,KAAK,CAAC;AAIpE,QAAM,UAAU,GAAG,UAAU,IAAI,QAAQ,GAAG;AAC5C,gBAAc,SAAS,KAAK,UAAU,cAAc,MAAM,CAAC,GAAG,OAAO;AACrE,aAAW,SAAS,UAAU;AAC9B,MAAI,KAAK,EAAE,WAAW,GAAG,8BAA8B;AAEvD,SAAO;AACT;AAKO,SAAS,0BAAsD;AACpE,QAAM,OAAO,KAAK,QAAQ,GAAG,eAAe,2BAA2B;AACvE,MAAI,CAAC,WAAW,IAAI,EAAG,QAAO;AAC9B,MAAI;AACF,WAAO,KAAK,MAAM,aAAa,MAAM,OAAO,CAAC;AAAA,EAC/C,SAAS,GAAG;AACV,QAAI,KAAK,EAAE,KAAK,EAAE,GAAG,qCAAqC;AAC1D,WAAO;AAAA,EACT;AACF;AAOO,SAAS,kBAAkB,cAQ/B;AACD,QAAM,QAQD,CAAC;AAEN,aAAW,CAAC,SAAS,KAAK,KAAK,OAAO,QAAQ,aAAa,MAAM,GAAG;AAClE,eAAW,CAAC,UAAU,CAAC,KAAK,OAAO,QAAQ,MAAM,UAAU,GAAG;AAC5D,YAAM,UAAU,EAAE,YAAY,EAAE;AAChC,UAAI,YAAY,EAAG;AACnB,YAAM,KAAK;AAAA,QACT;AAAA,QACA;AAAA,QACA,WAAW,EAAE;AAAA,QACb,UAAU,EAAE;AAAA,QACZ,cAAc;AAAA;AAAA,QACd,SAAS,EAAE;AAAA,QACX;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAEA,SAAO;AACT;","names":[]}
@@ -40,7 +40,7 @@ var CREDENTIAL_PATTERNS = [
40
40
  { name: "Basic Auth URL", pattern: /https?:\/\/[^:]+:[^@]+@/g },
41
41
  {
42
42
  name: "Generic Credential",
43
- pattern: /(?:password|secret|token|api_key|apikey)\s*[:=]\s*['"]?([A-Za-z0-9/+_.~-]{8,})['"]?/gi
43
+ pattern: /(?<=(?:password|secret|token|api_key|apikey)\s*[:=]\s*['"]?)([A-Za-z0-9/+_.~-]{8,})['"]?/gi
44
44
  },
45
45
  {
46
46
  name: "JWT",
@@ -211,7 +211,7 @@ var TrajectoryRecorder = class {
211
211
  const apiKey = process.env.BRAINSTORM_API_KEY ?? process.env.BRAINSTORM_ADMIN_KEY;
212
212
  if (!apiKey) return;
213
213
  const res = await fetch(
214
- "https://api.brainstormrouter.com/v1/agent/trajectories",
214
+ "https://api.brainstormrouter.com/v1/agent/trajectory",
215
215
  {
216
216
  method: "POST",
217
217
  headers: {
@@ -282,4 +282,4 @@ export {
282
282
  trajectoryToSFTExamples,
283
283
  sftExamplesToJSONL
284
284
  };
285
- //# sourceMappingURL=chunk-SWXTFHC7.js.map
285
+ //# sourceMappingURL=chunk-Z5D2QZY6.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/plan/trajectory-capture.ts","../src/security/secret-scanner.ts"],"sourcesContent":["/**\n * Trajectory Capture — records orchestration pipeline executions as training data.\n *\n * Every pipeline run produces a structured trajectory that captures:\n * - The user's request\n * - Each phase's agent, model, tools, cost, duration, and output quality\n * - The pipeline outcome (build pass, test pass, review findings)\n * - Feedback loops (review → re-implementation cycles)\n *\n * Trajectories are emitted as JSONL for:\n * 1. Local storage (~/.brainstorm/trajectories/orchestration/)\n * 2. BrainstormRouter Intelligence API (POST /v1/agent/trajectory)\n * 3. HuggingFace dataset push (justinjilg/brainstorm-orchestration-trajectories)\n *\n * This data trains BrainstormLLM v2 — the orchestration model.\n */\n\nimport { mkdirSync, appendFileSync, existsSync } from \"node:fs\";\nimport { join, basename } from \"node:path\";\nimport { homedir } from \"node:os\";\nimport { randomUUID } from \"node:crypto\";\nimport { redactCredentials } from \"../security/secret-scanner.js\";\nimport type {\n PipelineEvent,\n PipelinePhase,\n PhaseResult,\n} from \"./orchestration-pipeline.js\";\n\n// ── Types ──────────────────────────────────────────────────────────\n\nexport interface OrchestrationTrajectory {\n id: string;\n timestamp: string;\n request: string;\n projectPath: string;\n projectType?: string;\n phases: PhaseTrajectory[];\n outcome: PipelineOutcome;\n totalCost: number;\n totalDuration: number;\n feedbackLoops: FeedbackLoop[];\n}\n\nexport interface PhaseTrajectory {\n phase: PipelinePhase;\n agentId: string;\n modelUsed?: string;\n subagentType: string;\n toolCalls: string[];\n inputTokens?: number;\n outputTokens?: number;\n cost: number;\n duration: number;\n success: boolean;\n skipped: boolean;\n error?: string;\n outputLength: number;\n}\n\nexport interface PipelineOutcome {\n success: boolean;\n phasesCompleted: number;\n phasesTotal: 
number;\n buildPassed?: boolean;\n testsPassed?: boolean;\n reviewFindings: number;\n criticalFindings: number;\n filesChanged?: number;\n}\n\nexport interface FeedbackLoop {\n from: PipelinePhase;\n to: PipelinePhase;\n reason: string;\n timestamp: number;\n}\n\n// ── Trajectory Recorder ────────────────────────────────────────────\n\nconst TRAJECTORY_DIR = join(\n homedir(),\n \".brainstorm\",\n \"trajectories\",\n \"orchestration\",\n);\n\nexport class TrajectoryRecorder {\n private id: string;\n private request: string;\n private projectPath: string;\n private startTime: number;\n private phases: PhaseTrajectory[] = [];\n private feedbackLoops: FeedbackLoop[] = [];\n private currentPhase: Partial<PhaseTrajectory> | null = null;\n private outcome: PipelineOutcome = {\n success: false,\n phasesCompleted: 0,\n phasesTotal: 0,\n reviewFindings: 0,\n criticalFindings: 0,\n };\n\n constructor(request: string, projectPath: string) {\n this.id = randomUUID();\n this.request = request;\n this.projectPath = projectPath;\n this.startTime = Date.now();\n\n if (!existsSync(TRAJECTORY_DIR)) {\n mkdirSync(TRAJECTORY_DIR, { recursive: true });\n }\n }\n\n /** Process a pipeline event and record relevant data. 
*/\n recordEvent(event: PipelineEvent): void {\n switch (event.type) {\n case \"pipeline-started\":\n this.outcome.phasesTotal = event.phases.length;\n break;\n\n case \"phase-started\":\n this.currentPhase = {\n phase: event.phase,\n agentId: event.agentId,\n cost: 0,\n duration: 0,\n success: false,\n skipped: false,\n toolCalls: [],\n outputLength: 0,\n };\n break;\n\n case \"phase-completed\":\n if (this.currentPhase) {\n this.phases.push({\n phase: event.result.phase,\n agentId: event.result.agentId,\n subagentType: \"auto\", // BR picks the model\n toolCalls: event.result.toolCalls,\n cost: event.result.cost,\n duration: event.result.duration,\n success: event.result.success,\n skipped: false,\n error: event.result.error,\n outputLength: event.result.output.length,\n });\n if (event.result.success) this.outcome.phasesCompleted++;\n this.currentPhase = null;\n }\n break;\n\n case \"phase-failed\":\n if (this.currentPhase) {\n this.phases.push({\n phase: event.phase,\n agentId: this.currentPhase.agentId ?? 
\"unknown\",\n subagentType: \"auto\",\n toolCalls: [],\n cost: 0,\n duration: 0,\n success: false,\n skipped: false,\n error: event.error,\n outputLength: 0,\n });\n this.currentPhase = null;\n }\n break;\n\n case \"review-findings\":\n this.outcome.reviewFindings = event.findings.length;\n this.outcome.criticalFindings = event.findings.filter(\n (f) => f.severity === \"critical\",\n ).length;\n break;\n\n case \"feedback-loop\":\n this.feedbackLoops.push({\n from: event.from,\n to: event.to,\n reason: event.reason,\n timestamp: Date.now(),\n });\n break;\n\n case \"pipeline-completed\":\n this.outcome.success =\n event.results.every((r) => r.success) && event.totalCost >= 0;\n this.outcome.phasesCompleted = event.results.filter(\n (r) => r.success,\n ).length;\n\n // Check verify phase for build/test results\n const verifyResult = event.results.find((r) => r.phase === \"verify\");\n if (verifyResult) {\n this.outcome.buildPassed =\n verifyResult.output.includes(\"Build: PASS\");\n this.outcome.testsPassed =\n verifyResult.output.includes(\"Tests: PASS\");\n }\n break;\n }\n }\n\n /** Finalize and persist the trajectory. Returns the trajectory object. 
*/\n finalize(): OrchestrationTrajectory {\n const trajectory: OrchestrationTrajectory = {\n id: this.id,\n timestamp: new Date().toISOString(),\n request: redactCredentials(this.request),\n projectPath: basename(this.projectPath), // strip full path — only project name\n phases: this.phases,\n outcome: this.outcome,\n totalCost: this.phases.reduce((sum, p) => sum + p.cost, 0),\n totalDuration: Date.now() - this.startTime,\n feedbackLoops: this.feedbackLoops,\n };\n\n // Write to local JSONL (source of truth)\n const filename = `${new Date().toISOString().slice(0, 10)}.jsonl`;\n const filepath = join(TRAJECTORY_DIR, filename);\n appendFileSync(filepath, JSON.stringify(trajectory) + \"\\n\", \"utf-8\");\n\n // Push to BrainstormRouter (fire-and-forget, local is source of truth)\n this.pushToBR(trajectory).catch(() => {\n // Silent failure — local JSONL is the primary store\n });\n\n return trajectory;\n }\n\n /** Push trajectory to BrainstormRouter's trajectory endpoint. */\n private async pushToBR(trajectory: OrchestrationTrajectory): Promise<void> {\n const apiKey =\n process.env.BRAINSTORM_API_KEY ?? process.env.BRAINSTORM_ADMIN_KEY;\n if (!apiKey) return; // No key = skip push silently\n\n // BR's public contract uses the singular path /v1/agent/trajectory.\n // The plural form (/v1/agent/trajectories) was never published in\n // /openapi.json — we were firing into a 404. See\n // packages/gateway/src/intelligence-api.ts which already uses the\n // singular path for the simpler TrajectorySubmission shape. 
A future\n // refactor can route this richer OrchestrationTrajectory through that\n // client once BR accepts both payload shapes; for now we just fix\n // the URL so the POST actually reaches BR.\n const res = await fetch(\n \"https://api.brainstormrouter.com/v1/agent/trajectory\",\n {\n method: \"POST\",\n headers: {\n Authorization: `Bearer ${apiKey}`,\n \"Content-Type\": \"application/json\",\n },\n body: JSON.stringify(trajectory),\n signal: AbortSignal.timeout(10_000),\n },\n );\n\n if (!res.ok) {\n // Log but don't throw — local JSONL is the real store\n const body = await res.text().catch(() => \"\");\n console.error(\n `[trajectory] BR push failed: ${res.status} ${body.slice(0, 200)}`,\n );\n }\n }\n\n /** Get the trajectory ID (for linking to BR API). */\n getId(): string {\n return this.id;\n }\n}\n\n// ── SFT Training Data Converter ────────────────────────────────────\n\n/**\n * Convert a trajectory into SFT training examples for BrainstormLLM v2.\n *\n * Each phase in the trajectory becomes one training example:\n * - Input: request + phase + project context\n * - Label: what worked (agent, tools, cost, duration)\n * - Weight: pipeline outcome quality (success = 1.0, partial = 0.5, fail = 0.1)\n */\nexport function trajectoryToSFTExamples(\n trajectory: OrchestrationTrajectory,\n): Array<{ input: string; label: string; weight: number }> {\n const examples: Array<{ input: string; label: string; weight: number }> = [];\n\n // Outcome weight: successful pipelines are worth more as training data\n const outcomeWeight = trajectory.outcome.success\n ? 1.0\n : trajectory.outcome.phasesCompleted /\n Math.max(trajectory.outcome.phasesTotal, 1) >\n 0.5\n ? 0.5\n : 0.1;\n\n for (const phase of trajectory.phases) {\n if (phase.skipped) continue;\n\n const input = [\n `request: ${trajectory.request}`,\n `phase: ${phase.phase}`,\n `project_path: ${trajectory.projectPath}`,\n `budget_remaining: $${(trajectory.totalCost > 0 ? 
trajectory.totalCost : 1.0).toFixed(2)}`,\n `phases_completed: ${trajectory.phases.indexOf(phase)}`,\n `feedback_loops: ${trajectory.feedbackLoops.length}`,\n ].join(\"\\n\");\n\n const label = [\n `agent: ${phase.agentId}`,\n `tools: ${phase.toolCalls.join(\",\") || \"none\"}`,\n `estimated_cost: $${phase.cost.toFixed(4)}`,\n `max_steps: ${Math.ceil(phase.duration / 5000) || 5}`,\n `skip: ${phase.skipped}`,\n `success: ${phase.success}`,\n ].join(\"\\n\");\n\n examples.push({ input, label, weight: outcomeWeight });\n }\n\n return examples;\n}\n\n/**\n * Format SFT examples as JSONL for training.\n */\nexport function sftExamplesToJSONL(\n examples: Array<{ input: string; label: string; weight: number }>,\n): string {\n return examples\n .map((ex) =>\n JSON.stringify({\n messages: [\n {\n role: \"system\",\n content:\n \"You are BrainstormLLM, an orchestration model that predicts how to structure software development pipelines. Given a request and context, predict which agent, tools, and resource allocation to use for the current phase.\",\n },\n { role: \"user\", content: ex.input },\n { role: \"assistant\", content: ex.label },\n ],\n weight: ex.weight,\n }),\n )\n .join(\"\\n\");\n}\n","/**\n * Scans text for credential patterns and redacts them before sending to LLM providers.\n * 19 regex patterns matching common credential formats.\n */\n\nconst CREDENTIAL_PATTERNS: Array<{ name: string; pattern: RegExp }> = [\n // AWS\n { name: \"AWS Access Key\", pattern: /AKIA[0-9A-Z]{16}/g },\n {\n name: \"AWS Credential\",\n pattern:\n /(?:aws_secret_access_key|secret_key)\\s*[:=]\\s*['\"]?([A-Za-z0-9/+=]{40})['\"]?/gi,\n },\n {\n name: \"AWS Session Token\",\n pattern:\n /(?:aws_session_token)\\s*[:=]\\s*['\"]?([A-Za-z0-9/+=]{100,})['\"]?/gi,\n },\n // GitHub\n { name: \"GitHub Token\", pattern: /gh[ps]_[A-Za-z0-9_]{36,}/g },\n { name: \"GitHub OAuth\", pattern: /gho_[A-Za-z0-9_]{36,}/g },\n { name: \"GitHub Fine-grained\", pattern: /github_pat_[A-Za-z0-9_]{22,}/g },\n 
// AI Providers\n { name: \"OpenAI Key\", pattern: /sk-[A-Za-z0-9]{20,}/g },\n { name: \"Anthropic Key\", pattern: /sk-ant-[A-Za-z0-9-]{20,}/g },\n { name: \"Google/Gemini API Key\", pattern: /AIza[A-Za-z0-9_-]{35}/g },\n // Payment / SaaS\n { name: \"Stripe Key\", pattern: /(?:sk|pk)_(?:live|test)_[A-Za-z0-9]{20,}/g },\n { name: \"Slack Token\", pattern: /xox[bpras]-[A-Za-z0-9-]{10,}/g },\n { name: \"Twilio Key\", pattern: /SK[0-9a-fA-F]{32}/g },\n {\n name: \"SendGrid Key\",\n pattern: /SG\\.[A-Za-z0-9_-]{22}\\.[A-Za-z0-9_-]{43}/g,\n },\n // General\n {\n name: \"PEM Private Key\",\n pattern: /-----BEGIN (?:RSA |EC )?PRIVATE KEY-----/g,\n },\n { name: \"Basic Auth URL\", pattern: /https?:\\/\\/[^:]+:[^@]+@/g },\n {\n name: \"Generic Credential\",\n pattern:\n /(?<=(?:password|secret|token|api_key|apikey)\\s*[:=]\\s*['\"]?)([A-Za-z0-9/+_.~-]{8,})['\"]?/gi,\n },\n {\n name: \"JWT\",\n pattern:\n /eyJ[A-Za-z0-9_-]{10,}\\.eyJ[A-Za-z0-9_-]{10,}\\.[A-Za-z0-9_-]{10,}/g,\n },\n { name: \"BR API Key\", pattern: /br_(?:live|test)_[A-Za-z0-9]{20,}/g },\n { name: \"NPM Token\", pattern: /npm_[A-Za-z0-9]{36}/g },\n];\n\nexport interface ScanResult {\n hasFindings: boolean;\n findings: Array<{ name: string; position: number; preview: string }>;\n}\n\n/**\n * Scan text for credential patterns.\n */\nexport function scanForCredentials(text: string): ScanResult {\n const findings: ScanResult[\"findings\"] = [];\n\n for (const { name, pattern } of CREDENTIAL_PATTERNS) {\n pattern.lastIndex = 0;\n let match;\n while ((match = pattern.exec(text)) !== null) {\n findings.push({\n name,\n position: match.index,\n preview: match[0].slice(0, 6) + \"...[REDACTED]\",\n });\n }\n }\n\n return { hasFindings: findings.length > 0, findings };\n}\n\n/**\n * Redact all detected credentials in text.\n */\nexport function redactCredentials(text: string): string {\n let result = text;\n for (const { pattern } of CREDENTIAL_PATTERNS) {\n pattern.lastIndex = 0;\n result = result.replace(pattern, 
\"[REDACTED]\");\n }\n return result;\n}\n"],"mappings":";AAiBA,SAAS,WAAW,gBAAgB,kBAAkB;AACtD,SAAS,MAAM,gBAAgB;AAC/B,SAAS,eAAe;AACxB,SAAS,kBAAkB;;;ACf3B,IAAM,sBAAgE;AAAA;AAAA,EAEpE,EAAE,MAAM,kBAAkB,SAAS,oBAAoB;AAAA,EACvD;AAAA,IACE,MAAM;AAAA,IACN,SACE;AAAA,EACJ;AAAA,EACA;AAAA,IACE,MAAM;AAAA,IACN,SACE;AAAA,EACJ;AAAA;AAAA,EAEA,EAAE,MAAM,gBAAgB,SAAS,4BAA4B;AAAA,EAC7D,EAAE,MAAM,gBAAgB,SAAS,yBAAyB;AAAA,EAC1D,EAAE,MAAM,uBAAuB,SAAS,gCAAgC;AAAA;AAAA,EAExE,EAAE,MAAM,cAAc,SAAS,uBAAuB;AAAA,EACtD,EAAE,MAAM,iBAAiB,SAAS,4BAA4B;AAAA,EAC9D,EAAE,MAAM,yBAAyB,SAAS,yBAAyB;AAAA;AAAA,EAEnE,EAAE,MAAM,cAAc,SAAS,4CAA4C;AAAA,EAC3E,EAAE,MAAM,eAAe,SAAS,gCAAgC;AAAA,EAChE,EAAE,MAAM,cAAc,SAAS,qBAAqB;AAAA,EACpD;AAAA,IACE,MAAM;AAAA,IACN,SAAS;AAAA,EACX;AAAA;AAAA,EAEA;AAAA,IACE,MAAM;AAAA,IACN,SAAS;AAAA,EACX;AAAA,EACA,EAAE,MAAM,kBAAkB,SAAS,2BAA2B;AAAA,EAC9D;AAAA,IACE,MAAM;AAAA,IACN,SACE;AAAA,EACJ;AAAA,EACA;AAAA,IACE,MAAM;AAAA,IACN,SACE;AAAA,EACJ;AAAA,EACA,EAAE,MAAM,cAAc,SAAS,qCAAqC;AAAA,EACpE,EAAE,MAAM,aAAa,SAAS,uBAAuB;AACvD;AAUO,SAAS,mBAAmB,MAA0B;AAC3D,QAAM,WAAmC,CAAC;AAE1C,aAAW,EAAE,MAAM,QAAQ,KAAK,qBAAqB;AACnD,YAAQ,YAAY;AACpB,QAAI;AACJ,YAAQ,QAAQ,QAAQ,KAAK,IAAI,OAAO,MAAM;AAC5C,eAAS,KAAK;AAAA,QACZ;AAAA,QACA,UAAU,MAAM;AAAA,QAChB,SAAS,MAAM,CAAC,EAAE,MAAM,GAAG,CAAC,IAAI;AAAA,MAClC,CAAC;AAAA,IACH;AAAA,EACF;AAEA,SAAO,EAAE,aAAa,SAAS,SAAS,GAAG,SAAS;AACtD;AAKO,SAAS,kBAAkB,MAAsB;AACtD,MAAI,SAAS;AACb,aAAW,EAAE,QAAQ,KAAK,qBAAqB;AAC7C,YAAQ,YAAY;AACpB,aAAS,OAAO,QAAQ,SAAS,YAAY;AAAA,EAC/C;AACA,SAAO;AACT;;;ADXA,IAAM,iBAAiB;AAAA,EACrB,QAAQ;AAAA,EACR;AAAA,EACA;AAAA,EACA;AACF;AAEO,IAAM,qBAAN,MAAyB;AAAA,EACtB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA,SAA4B,CAAC;AAAA,EAC7B,gBAAgC,CAAC;AAAA,EACjC,eAAgD;AAAA,EAChD,UAA2B;AAAA,IACjC,SAAS;AAAA,IACT,iBAAiB;AAAA,IACjB,aAAa;AAAA,IACb,gBAAgB;AAAA,IAChB,kBAAkB;AAAA,EACpB;AAAA,EAEA,YAAY,SAAiB,aAAqB;AAChD,SAAK,KAAK,WAAW;AACrB,SAAK,UAAU;AACf,SAAK,cAAc;AACnB,SAAK,YAAY,KAAK,IAAI;AAE1B,QAAI,CAAC,WAAW,cAAc,GAAG;AAC/B,gBAAU,gBAAgB,EAAE,WAAW,KAAK,CAAC;AAAA,IAC/C;AAAA,EACF;AAAA;AAAA,EAGA,YAAY,O
AA4B;AACtC,YAAQ,MAAM,MAAM;AAAA,MAClB,KAAK;AACH,aAAK,QAAQ,cAAc,MAAM,OAAO;AACxC;AAAA,MAEF,KAAK;AACH,aAAK,eAAe;AAAA,UAClB,OAAO,MAAM;AAAA,UACb,SAAS,MAAM;AAAA,UACf,MAAM;AAAA,UACN,UAAU;AAAA,UACV,SAAS;AAAA,UACT,SAAS;AAAA,UACT,WAAW,CAAC;AAAA,UACZ,cAAc;AAAA,QAChB;AACA;AAAA,MAEF,KAAK;AACH,YAAI,KAAK,cAAc;AACrB,eAAK,OAAO,KAAK;AAAA,YACf,OAAO,MAAM,OAAO;AAAA,YACpB,SAAS,MAAM,OAAO;AAAA,YACtB,cAAc;AAAA;AAAA,YACd,WAAW,MAAM,OAAO;AAAA,YACxB,MAAM,MAAM,OAAO;AAAA,YACnB,UAAU,MAAM,OAAO;AAAA,YACvB,SAAS,MAAM,OAAO;AAAA,YACtB,SAAS;AAAA,YACT,OAAO,MAAM,OAAO;AAAA,YACpB,cAAc,MAAM,OAAO,OAAO;AAAA,UACpC,CAAC;AACD,cAAI,MAAM,OAAO,QAAS,MAAK,QAAQ;AACvC,eAAK,eAAe;AAAA,QACtB;AACA;AAAA,MAEF,KAAK;AACH,YAAI,KAAK,cAAc;AACrB,eAAK,OAAO,KAAK;AAAA,YACf,OAAO,MAAM;AAAA,YACb,SAAS,KAAK,aAAa,WAAW;AAAA,YACtC,cAAc;AAAA,YACd,WAAW,CAAC;AAAA,YACZ,MAAM;AAAA,YACN,UAAU;AAAA,YACV,SAAS;AAAA,YACT,SAAS;AAAA,YACT,OAAO,MAAM;AAAA,YACb,cAAc;AAAA,UAChB,CAAC;AACD,eAAK,eAAe;AAAA,QACtB;AACA;AAAA,MAEF,KAAK;AACH,aAAK,QAAQ,iBAAiB,MAAM,SAAS;AAC7C,aAAK,QAAQ,mBAAmB,MAAM,SAAS;AAAA,UAC7C,CAAC,MAAM,EAAE,aAAa;AAAA,QACxB,EAAE;AACF;AAAA,MAEF,KAAK;AACH,aAAK,cAAc,KAAK;AAAA,UACtB,MAAM,MAAM;AAAA,UACZ,IAAI,MAAM;AAAA,UACV,QAAQ,MAAM;AAAA,UACd,WAAW,KAAK,IAAI;AAAA,QACtB,CAAC;AACD;AAAA,MAEF,KAAK;AACH,aAAK,QAAQ,UACX,MAAM,QAAQ,MAAM,CAAC,MAAM,EAAE,OAAO,KAAK,MAAM,aAAa;AAC9D,aAAK,QAAQ,kBAAkB,MAAM,QAAQ;AAAA,UAC3C,CAAC,MAAM,EAAE;AAAA,QACX,EAAE;AAGF,cAAM,eAAe,MAAM,QAAQ,KAAK,CAAC,MAAM,EAAE,UAAU,QAAQ;AACnE,YAAI,cAAc;AAChB,eAAK,QAAQ,cACX,aAAa,OAAO,SAAS,aAAa;AAC5C,eAAK,QAAQ,cACX,aAAa,OAAO,SAAS,aAAa;AAAA,QAC9C;AACA;AAAA,IACJ;AAAA,EACF;AAAA;AAAA,EAGA,WAAoC;AAClC,UAAM,aAAsC;AAAA,MAC1C,IAAI,KAAK;AAAA,MACT,YAAW,oBAAI,KAAK,GAAE,YAAY;AAAA,MAClC,SAAS,kBAAkB,KAAK,OAAO;AAAA,MACvC,aAAa,SAAS,KAAK,WAAW;AAAA;AAAA,MACtC,QAAQ,KAAK;AAAA,MACb,SAAS,KAAK;AAAA,MACd,WAAW,KAAK,OAAO,OAAO,CAAC,KAAK,MAAM,MAAM,EAAE,MAAM,CAAC;AAAA,MACzD,eAAe,KAAK,IAAI,IAAI,KAAK;AAAA,MACjC,eAAe,KAAK;AAAA,IACtB;AAGA,UAAM,WAAW,IAAG,oBAAI,KAAK,GAAE,YAAY,EAAE,MAAM,GAAG,EAAE,CAAC;AACzD,UAAM,WAAW,KAAK,gBAAgB,QAAQ;AAC9C,mBAAe
,UAAU,KAAK,UAAU,UAAU,IAAI,MAAM,OAAO;AAGnE,SAAK,SAAS,UAAU,EAAE,MAAM,MAAM;AAAA,IAEtC,CAAC;AAED,WAAO;AAAA,EACT;AAAA;AAAA,EAGA,MAAc,SAAS,YAAoD;AACzE,UAAM,SACJ,QAAQ,IAAI,sBAAsB,QAAQ,IAAI;AAChD,QAAI,CAAC,OAAQ;AAUb,UAAM,MAAM,MAAM;AAAA,MAChB;AAAA,MACA;AAAA,QACE,QAAQ;AAAA,QACR,SAAS;AAAA,UACP,eAAe,UAAU,MAAM;AAAA,UAC/B,gBAAgB;AAAA,QAClB;AAAA,QACA,MAAM,KAAK,UAAU,UAAU;AAAA,QAC/B,QAAQ,YAAY,QAAQ,GAAM;AAAA,MACpC;AAAA,IACF;AAEA,QAAI,CAAC,IAAI,IAAI;AAEX,YAAM,OAAO,MAAM,IAAI,KAAK,EAAE,MAAM,MAAM,EAAE;AAC5C,cAAQ;AAAA,QACN,gCAAgC,IAAI,MAAM,IAAI,KAAK,MAAM,GAAG,GAAG,CAAC;AAAA,MAClE;AAAA,IACF;AAAA,EACF;AAAA;AAAA,EAGA,QAAgB;AACd,WAAO,KAAK;AAAA,EACd;AACF;AAYO,SAAS,wBACd,YACyD;AACzD,QAAM,WAAoE,CAAC;AAG3E,QAAM,gBAAgB,WAAW,QAAQ,UACrC,IACA,WAAW,QAAQ,kBACf,KAAK,IAAI,WAAW,QAAQ,aAAa,CAAC,IAC5C,MACA,MACA;AAEN,aAAW,SAAS,WAAW,QAAQ;AACrC,QAAI,MAAM,QAAS;AAEnB,UAAM,QAAQ;AAAA,MACZ,YAAY,WAAW,OAAO;AAAA,MAC9B,UAAU,MAAM,KAAK;AAAA,MACrB,iBAAiB,WAAW,WAAW;AAAA,MACvC,uBAAuB,WAAW,YAAY,IAAI,WAAW,YAAY,GAAK,QAAQ,CAAC,CAAC;AAAA,MACxF,qBAAqB,WAAW,OAAO,QAAQ,KAAK,CAAC;AAAA,MACrD,mBAAmB,WAAW,cAAc,MAAM;AAAA,IACpD,EAAE,KAAK,IAAI;AAEX,UAAM,QAAQ;AAAA,MACZ,UAAU,MAAM,OAAO;AAAA,MACvB,UAAU,MAAM,UAAU,KAAK,GAAG,KAAK,MAAM;AAAA,MAC7C,oBAAoB,MAAM,KAAK,QAAQ,CAAC,CAAC;AAAA,MACzC,cAAc,KAAK,KAAK,MAAM,WAAW,GAAI,KAAK,CAAC;AAAA,MACnD,SAAS,MAAM,OAAO;AAAA,MACtB,YAAY,MAAM,OAAO;AAAA,IAC3B,EAAE,KAAK,IAAI;AAEX,aAAS,KAAK,EAAE,OAAO,OAAO,QAAQ,cAAc,CAAC;AAAA,EACvD;AAEA,SAAO;AACT;AAKO,SAAS,mBACd,UACQ;AACR,SAAO,SACJ;AAAA,IAAI,CAAC,OACJ,KAAK,UAAU;AAAA,MACb,UAAU;AAAA,QACR;AAAA,UACE,MAAM;AAAA,UACN,SACE;AAAA,QACJ;AAAA,QACA,EAAE,MAAM,QAAQ,SAAS,GAAG,MAAM;AAAA,QAClC,EAAE,MAAM,aAAa,SAAS,GAAG,MAAM;AAAA,MACzC;AAAA,MACA,QAAQ,GAAG;AAAA,IACb,CAAC;AAAA,EACH,EACC,KAAK,IAAI;AACd;","names":[]}
@@ -0,0 +1,34 @@
1
+ var __defProp = Object.defineProperty;
2
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
3
+ var __getOwnPropNames = Object.getOwnPropertyNames;
4
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
5
+ var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require : typeof Proxy !== "undefined" ? new Proxy(x, {
6
+ get: (a, b) => (typeof require !== "undefined" ? require : a)[b]
7
+ }) : x)(function(x) {
8
+ if (typeof require !== "undefined") return require.apply(this, arguments);
9
+ throw Error('Dynamic require of "' + x + '" is not supported');
10
+ });
11
+ var __esm = (fn, res) => function __init() {
12
+ return fn && (res = (0, fn[__getOwnPropNames(fn)[0]])(fn = 0)), res;
13
+ };
14
+ var __export = (target, all) => {
15
+ for (var name in all)
16
+ __defProp(target, name, { get: all[name], enumerable: true });
17
+ };
18
+ var __copyProps = (to, from, except, desc) => {
19
+ if (from && typeof from === "object" || typeof from === "function") {
20
+ for (let key of __getOwnPropNames(from))
21
+ if (!__hasOwnProp.call(to, key) && key !== except)
22
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
23
+ }
24
+ return to;
25
+ };
26
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
27
+
28
+ export {
29
+ __require,
30
+ __esm,
31
+ __export,
32
+ __toCommonJS
33
+ };
34
+ //# sourceMappingURL=chunk-Z6ZWNWWR.js.map