@klitchevo/code-council 0.0.14 → 0.0.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/chunk-W4MFXWTT.js ADDED
@@ -0,0 +1,310 @@
+ // src/prompts/tps-audit.ts
+ var SYSTEM_PROMPT = `You are an expert Toyota Production System (TPS) consultant analyzing software codebases. Your role is to "walk the production line" - examining how code flows from input to output, identifying waste (muda), spotting bottlenecks, and suggesting continuous improvement (kaizen).
+ 
+ ## TPS Principles for Software
+ 
+ ### 1. FLOW (Nagare)
+ Analyze how data and control flow through the system:
+ - Identify entry points and exit points
+ - Map the critical paths
+ - Look for smooth, uninterrupted flow
+ - Identify where flow is blocked or redirected
+ - Check for single-piece flow vs batch processing
+ 
+ ### 2. WASTE (Muda) - The 7 Wastes in Software
+ Identify instances of each waste type:
+ 
+ **Defects**: Bugs, error-prone code, missing validation
+ **Overproduction**: Features nobody uses, over-engineered solutions
+ **Waiting**: Blocking I/O, synchronous when async would work, slow tests
+ **Non-utilized Talent**: Manual tasks that could be automated, repetitive code
+ **Transportation**: Unnecessary data transformation, excessive API calls
+ **Inventory**: Dead code, unused imports/exports, stale dependencies
+ **Motion**: Complex navigation, scattered related code, poor organization
+ **Extra-processing**: Premature optimization, unnecessary abstraction layers
+ 
+ ### 3. BOTTLENECKS
+ Identify constraints that limit throughput:
+ - Synchronous operations that block
+ - Single points of failure
+ - Resource contention
+ - N+1 queries or API calls
+ - Sequential operations that could be parallel
+ 
+ ### 4. PULL vs PUSH
+ Evaluate if work is demand-driven:
+ - Lazy evaluation vs eager computation
+ - On-demand loading vs preloading everything
+ - Event-driven vs polling
+ - Streaming vs buffering all data
+ 
+ ### 5. JIDOKA (Built-in Quality)
+ Assess quality mechanisms:
+ - Error handling and recovery
+ - Validation at boundaries
+ - Fail-fast patterns
+ - Type safety usage
+ - Test coverage signals
+ 
+ ### 6. STANDARDIZATION
+ Look for consistency:
+ - Code style consistency
+ - Pattern usage consistency
+ - Error handling patterns
+ - Naming conventions
+ - File organization
+ 
+ ## Scoring Guidelines
+ 
+ **Overall Score (0-100)**:
+ - 90-100: Exceptional flow, minimal waste, excellent quality
+ - 70-89: Good practices, some waste, room for improvement
+ - 50-69: Average, significant waste or flow issues
+ - 30-49: Poor flow, excessive waste, quality concerns
+ - 0-29: Critical issues, major redesign needed
+ 
+ **Flow Score**: How smoothly does data/control move through the system?
+ **Waste Score**: Higher = less waste (100 = no waste identified)
+ **Quality Score**: Built-in quality mechanisms, error handling, type safety
+ 
+ ## Output Requirements
+ 
+ You MUST respond with valid JSON matching the TpsAnalysis interface. Do not include any text before or after the JSON.
+ 
+ Focus on:
+ 1. Actionable findings with specific file/line references
+ 2. Prioritized recommendations (quick wins first)
+ 3. Concrete suggestions, not vague advice
+ 4. Balanced assessment - acknowledge strengths too
+ 5. Effort estimates for recommendations`;
+ function buildUserMessage(aggregatedContent, options) {
+   const parts = [];
+   if (options?.repoName) {
+     parts.push(`## Repository: ${options.repoName}`);
+   }
+   if (options?.focusAreas && options.focusAreas.length > 0) {
+     parts.push(
+       `## Focus Areas
+ Pay special attention to: ${options.focusAreas.join(", ")}`
+     );
+   }
+   if (options?.additionalContext) {
+     parts.push(`## Additional Context
+ ${options.additionalContext}`);
+   }
+   parts.push(`## Codebase to Audit
+ 
+ Analyze this codebase using Toyota Production System principles. Walk the production line from entry points through to outputs. Identify waste, bottlenecks, and improvement opportunities.
+ 
+ ${aggregatedContent}
+ 
+ ## Response Format
+ 
+ Respond with ONLY valid JSON matching the TpsAnalysis interface. Include:
+ - Scores for overall, flow, waste, and quality (0-100)
+ - Flow analysis with entry points and pathways
+ - Specific bottlenecks with locations and suggestions
+ - Waste items categorized by the 7 types
+ - Jidoka (built-in quality) assessment
+ - Prioritized recommendations
+ - Summary with strengths, concerns, and quick wins
+ 
+ Your JSON response:`);
+   return parts.join("\n\n");
+ }
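+ // Example with hypothetical inputs: buildUserMessage(content, { repoName: "my-app", focusAreas: ["performance"] })
+ // yields a "## Repository: my-app" header, a focus-areas note, the aggregated codebase, and the response-format section.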
+ var BATCH_SYSTEM_PROMPT = `${SYSTEM_PROMPT}
+ 
+ IMPORTANT: You are analyzing a BATCH of files from a larger codebase. This is batch {{BATCH_INDEX}} of {{TOTAL_BATCHES}}.
+ - Focus on what you can observe in this batch
+ - Note any cross-cutting concerns you see
+ - Your analysis will be combined with other batches for a final report
+ - Score this batch independently based on what you observe`;
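+ // {{BATCH_INDEX}} and {{TOTAL_BATCHES}} are plain-text placeholders; ReviewClient.tpsAuditBatch
+ // substitutes 1-based values via String.replace before the prompt is sent.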
+ function buildBatchUserMessage(aggregatedContent, batchIndex, totalBatches, options) {
+   const baseMessage = buildUserMessage(aggregatedContent, options);
+   return `[BATCH ${batchIndex + 1} of ${totalBatches}]
+ 
+ ${baseMessage}`;
+ }
+ var SYNTHESIS_SYSTEM_PROMPT = `You are a TPS (Toyota Production System) consultant synthesizing multiple batch analyses of a codebase into a unified report.
+ 
+ You will receive multiple TPS analysis JSON objects, each analyzing a different portion of the codebase. Your job is to:
+ 
+ 1. **Aggregate Scores**: Calculate weighted averages based on batch sizes
+ 2. **Merge Findings**: Combine bottlenecks, waste items, and recommendations, removing duplicates
+ 3. **Identify Patterns**: Note patterns that appear across multiple batches
+ 4. **Prioritize**: Re-prioritize recommendations based on the full picture
+ 5. **Synthesize Summary**: Create a cohesive summary of the entire codebase
+ 
+ Output a single unified TpsAnalysis JSON that represents the whole codebase.
+ 
+ Guidelines:
+ - If multiple batches mention the same issue, increase its priority
+ - Cross-batch patterns are often more important than single-batch issues
+ - Flow analysis should try to connect entry points across batches
+ - Be concise - don't repeat the same finding multiple times
+ - Final scores should reflect the overall health, not just average`;
+ function buildSynthesisUserMessage(batchResults, repoName) {
+   const parts = [];
+   if (repoName) {
+     parts.push(`## Repository: ${repoName}`);
+   }
+   parts.push(`## Batch Analyses to Synthesize
+ 
+ You have ${batchResults.length} batch analyses to combine into a final report.`);
+   for (const batch of batchResults) {
+     parts.push(
+       `### Batch ${batch.batchIndex + 1} (~${batch.tokenCount} tokens)`
+     );
+     if (batch.analysis) {
+       parts.push("```json");
+       parts.push(JSON.stringify(batch.analysis, null, 2));
+       parts.push("```");
+     } else {
+       parts.push("*Analysis failed to parse. Raw response:*");
+       parts.push(
+         batch.rawResponse.substring(0, 1e3) + (batch.rawResponse.length > 1e3 ? "..." : "")
+       );
+     }
+   }
+   parts.push(`## Response Format
+ 
+ Synthesize all batches into a single unified TpsAnalysis JSON.
+ - Combine and deduplicate findings
+ - Recalculate overall scores based on all batches
+ - Prioritize issues that appear in multiple batches
+ - Create a cohesive summary
+ 
+ Your unified JSON response:`);
+   return parts.join("\n\n");
+ }
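+ // Note: batches whose JSON failed to parse are forwarded as raw text, truncated to
+ // 1,000 characters (1e3 above), so the synthesis model still gets some signal from them.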
+ function parseTpsAnalysis(response) {
+   try {
+     let jsonStr = response.trim();
+     if (jsonStr.startsWith("```json")) {
+       jsonStr = jsonStr.slice(7);
+     } else if (jsonStr.startsWith("```")) {
+       jsonStr = jsonStr.slice(3);
+     }
+     if (jsonStr.endsWith("```")) {
+       jsonStr = jsonStr.slice(0, -3);
+     }
+     jsonStr = jsonStr.trim();
+     const raw = JSON.parse(jsonStr);
+     const scores = normalizeScores(raw);
+     if (!scores) {
+       return null;
+     }
+     const analysis = {
+       scores,
+       flowAnalysis: normalizeFlowAnalysis(raw),
+       bottlenecks: normalizeBottlenecks(raw),
+       waste: normalizeWaste(raw),
+       jidoka: normalizeJidoka(raw),
+       recommendations: normalizeRecommendations(raw),
+       summary: normalizeSummary(raw)
+     };
+     return analysis;
+   } catch {
+     return null;
+   }
+ }
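+ // Hypothetical round-trip: parseTpsAnalysis('```json\n{"scores":{"overall":80,"flow":75,"waste":70,"quality":85}}\n```')
+ // strips the fences, parses the JSON, and back-fills the missing sections with empty defaults.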
+ function normalizeScores(raw) {
+   if (raw.scores && typeof raw.scores === "object") {
+     const s = raw.scores;
+     return {
+       overall: toNumber(s.overall) ?? toNumber(raw.overallScore) ?? 0,
+       flow: toNumber(s.flow) ?? 0,
+       waste: toNumber(s.waste) ?? 0,
+       quality: toNumber(s.quality) ?? 0
+     };
+   }
+   if (typeof raw.overallScore === "number" || typeof raw.overall === "number") {
+     return {
+       overall: toNumber(raw.overallScore) ?? toNumber(raw.overall) ?? 0,
+       flow: toNumber(raw.flowScore) ?? toNumber(raw.flow) ?? 0,
+       waste: toNumber(raw.wasteScore) ?? toNumber(raw.waste) ?? 0,
+       quality: toNumber(raw.qualityScore) ?? toNumber(raw.quality) ?? 0
+     };
+   }
+   return null;
+ }
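+ // Accepts either a nested `scores` object or flat top-level keys (overallScore, flowScore, ...),
+ // so differently shaped model responses normalize to the same structure.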
+ function toNumber(val) {
+   if (typeof val === "number") return val;
+   if (typeof val === "string") {
+     const n = parseFloat(val);
+     return isNaN(n) ? null : n;
+   }
+   return null;
+ }
+ function normalizeFlowAnalysis(raw) {
+   const flow = raw.flowAnalysis;
+   if (!flow) {
+     return { entryPoints: [], diagram: "", pathways: [], observations: [] };
+   }
+   return {
+     entryPoints: normalizeStringArray(flow.entryPoints),
+     diagram: typeof flow.diagram === "string" ? flow.diagram : "",
+     pathways: normalizeStringArray(flow.pathways),
+     observations: normalizeStringArray(flow.observations)
+   };
+ }
+ function normalizeWaste(raw) {
+   const waste = raw.waste;
+   if (!waste || typeof waste !== "object") {
+     return {};
+   }
+   return waste;
+ }
+ function normalizeJidoka(raw) {
+   const jidoka = raw.jidoka;
+   if (!jidoka) {
+     return { score: 0, strengths: [], weaknesses: [] };
+   }
+   return {
+     score: toNumber(jidoka.score) ?? 0,
+     strengths: normalizeStringArray(jidoka.strengths),
+     weaknesses: normalizeStringArray(jidoka.weaknesses)
+   };
+ }
+ function normalizeBottlenecks(raw) {
+   const bottlenecks = raw.bottlenecks;
+   if (Array.isArray(bottlenecks)) {
+     return bottlenecks;
+   }
+   return [];
+ }
+ function normalizeRecommendations(raw) {
+   const recs = raw.recommendations;
+   if (Array.isArray(recs)) {
+     return recs;
+   }
+   return [];
+ }
+ function normalizeSummary(raw) {
+   const summary = raw.summary;
+   if (!summary) {
+     return { strengths: [], concerns: [], quickWins: [] };
+   }
+   return {
+     strengths: normalizeStringArray(summary.strengths),
+     concerns: normalizeStringArray(summary.concerns),
+     quickWins: normalizeStringArray(summary.quickWins)
+   };
+ }
+ function normalizeStringArray(val) {
+   if (Array.isArray(val)) {
+     return val;
+   }
+   return [];
+ }
+ 
+ export {
+   SYSTEM_PROMPT,
+   buildUserMessage,
+   BATCH_SYSTEM_PROMPT,
+   buildBatchUserMessage,
+   SYNTHESIS_SYSTEM_PROMPT,
+   buildSynthesisUserMessage,
+   parseTpsAnalysis
+ };
+ //# sourceMappingURL=chunk-W4MFXWTT.js.map
package/dist/index.js CHANGED
@@ -1,8 +1,12 @@
  #!/usr/bin/env node
  import {
+   BATCH_SYSTEM_PROMPT,
+   SYNTHESIS_SYSTEM_PROMPT,
    SYSTEM_PROMPT,
+   buildBatchUserMessage,
+   buildSynthesisUserMessage,
    buildUserMessage
- } from "./chunk-Y77R7523.js";
+ } from "./chunk-W4MFXWTT.js";
  
  // src/index.ts
  import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
@@ -540,6 +544,51 @@ var ReviewClient = class {
        (model) => this.chat(model, SYSTEM_PROMPT, userMessage)
      );
    }
+   /**
+    * Perform TPS audit on a single batch of content
+    * @internal Used by batch processing
+    */
+   async tpsAuditBatch(aggregatedContent, model, batchIndex, totalBatches, options) {
+     const systemPrompt = BATCH_SYSTEM_PROMPT.replace(
+       "{{BATCH_INDEX}}",
+       String(batchIndex + 1)
+     ).replace("{{TOTAL_BATCHES}}", String(totalBatches));
+     const userMessage = buildBatchUserMessage(
+       aggregatedContent,
+       batchIndex,
+       totalBatches,
+       {
+         focusAreas: options?.focusAreas,
+         repoName: options?.repoName
+       }
+     );
+     logger.debug("Processing TPS batch", {
+       batchIndex,
+       totalBatches,
+       model,
+       contentLength: aggregatedContent.length
+     });
+     return this.chat(model, systemPrompt, userMessage);
+   }
+   /**
+    * Synthesize multiple batch analyses into a unified report
+    * @internal Used by batch processing
+    */
+   async tpsAuditSynthesize(batchResults, model, repoName) {
+     const userMessage = buildSynthesisUserMessage(
+       batchResults,
+       repoName
+     );
+     logger.debug("Synthesizing TPS batch results", {
+       batchCount: batchResults.length,
+       model
+     });
+     return this.chat(
+       model,
+       SYNTHESIS_SYSTEM_PROMPT,
+       userMessage
+     );
+   }
  };
  
  // src/session/in-memory-store.ts
@@ -1308,6 +1357,7 @@ async function handlePlanReview(client2, input) {
  }
  
  // src/tools/tps-audit.ts
+ import { existsSync as existsSync2, mkdirSync, writeFileSync } from "fs";
  import { join as join4 } from "path";
  import { z as z7 } from "zod";
  
@@ -1445,12 +1495,14 @@ var SECRET_CONTENT_PATTERNS = [
    // Square tokens
  ];
  var DEFAULT_FILE_EXTENSIONS = [
+   // TypeScript/JavaScript
    ".ts",
    ".tsx",
    ".js",
    ".jsx",
    ".mjs",
    ".cjs",
+   // Other languages
    ".py",
    ".go",
    ".rs",
@@ -1465,9 +1517,31 @@ var DEFAULT_FILE_EXTENSIONS = [
    ".h",
    ".hpp",
    ".swift",
+   // Frontend frameworks
    ".vue",
    ".svelte",
-   ".astro"
+   ".astro",
+   // Config & docs (important for understanding project)
+   ".json",
+   ".yaml",
+   ".yml",
+   ".toml",
+   ".md"
+ ];
+ var PRIORITY_DIRS = [
+   ".claude",
+   // Claude Code project configuration
+   ".beads"
+   // Issue tracking
+ ];
+ var PRIORITY_ROOT_FILES = [
+   "CLAUDE.md",
+   "README.md",
+   "package.json",
+   "tsconfig.json",
+   "pyproject.toml",
+   "Cargo.toml",
+   "go.mod"
  ];
  var EXCLUDED_DIRS = [
    "node_modules",
@@ -1492,11 +1566,11 @@ var EXCLUDED_DIRS = [
    ".nyc_output"
  ];
  var HARD_LIMITS = {
-   MAX_FILES: 100,
-   MAX_FILE_SIZE: 100 * 1024,
-   // 100KB per file
-   MAX_TOTAL_SIZE: 1024 * 1024
-   // 1MB total
+   MAX_FILES: 1e4,
+   MAX_FILE_SIZE: 1024 * 1024,
+   // 1MB per file
+   MAX_TOTAL_SIZE: 50 * 1024 * 1024
+   // 50MB total
  };
  var DEFAULT_OPTIONS = {
    maxFiles: 50,
@@ -1637,6 +1711,9 @@ async function scanRepository(startPath, options = {}) {
          continue;
        }
        if (stats2.isDirectory()) {
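+         // Skip top-level priority dirs in the main walk; they are scanned explicitly
+         // beforehand (see the PRIORITY_DIRS loop below), which avoids double inclusion.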
+         if (depth === 0 && PRIORITY_DIRS.includes(entry)) {
+           continue;
+         }
          scanDir(fullPath, depth + 1);
        } else if (stats2.isFile()) {
          totalFilesFound++;
@@ -1696,6 +1773,56 @@ async function scanRepository(startPath, options = {}) {
        }
      }
    }
+   function scanSingleFile(fullPath, bypassTypeFilter = false) {
+     const relativePath = relative2(repoRoot, fullPath);
+     let stats2;
+     try {
+       stats2 = statSync(fullPath);
+     } catch {
+       return false;
+     }
+     if (!stats2.isFile()) return false;
+     totalFilesFound++;
+     const ext = extname(fullPath).toLowerCase();
+     if (!bypassTypeFilter && !opts.fileTypes.includes(ext)) {
+       return false;
+     }
+     if (stats2.size > opts.maxFileSize) {
+       skipped.push({
+         path: relativePath,
+         reason: `File too large (${stats2.size} bytes)`
+       });
+       return false;
+     }
+     let content;
+     try {
+       const buffer = readFileSync2(fullPath);
+       if (isBinaryContent(buffer)) {
+         return false;
+       }
+       content = buffer.toString("utf-8");
+     } catch {
+       return false;
+     }
+     files.push({ path: relativePath, content });
+     totalSize += stats2.size;
+     return true;
+   }
+   for (const filename of PRIORITY_ROOT_FILES) {
+     const fullPath = join3(repoRoot, filename);
+     scanSingleFile(fullPath, true);
+   }
+   for (const priorityDir of PRIORITY_DIRS) {
+     const dirPath = join3(repoRoot, priorityDir);
+     try {
+       const dirStats = statSync(dirPath);
+       if (dirStats.isDirectory()) {
+         logger.debug("Scanning priority directory", { dir: priorityDir });
+         scanDir(dirPath, 0);
+       }
+     } catch {
+     }
+   }
    scanDir(repoRoot);
    const stats = {
@@ -1725,6 +1852,84 @@ ${f.content}
  === END FILE: ${f.path} ===`
    ).join("\n\n");
  }
+ var BATCH_TOKEN_BUDGET = 6e4;
+ function createFileBatches(files, tokenBudget = BATCH_TOKEN_BUDGET) {
+   if (files.length === 0) {
+     return [];
+   }
+   const filesByDir = /* @__PURE__ */ new Map();
+   for (const file of files) {
+     const dir = file.path.includes("/") ? file.path.substring(0, file.path.lastIndexOf("/")) : ".";
+     const dirFiles = filesByDir.get(dir) || [];
+     dirFiles.push(file);
+     filesByDir.set(dir, dirFiles);
+   }
+   const dirsWithTokens = [...filesByDir.entries()].map(([dir, dirFiles]) => ({
+     dir,
+     files: dirFiles,
+     tokens: dirFiles.reduce((sum, f) => sum + estimateTokens(f.content), 0)
+   }));
+   dirsWithTokens.sort((a, b) => a.tokens - b.tokens);
+   const batches = [];
+   let currentBatch = [];
+   let currentTokens = 0;
+   for (const { files: dirFiles, tokens: dirTokens } of dirsWithTokens) {
+     if (currentTokens + dirTokens <= tokenBudget) {
+       currentBatch.push(...dirFiles);
+       currentTokens += dirTokens;
+     } else if (dirTokens > tokenBudget) {
+       if (currentBatch.length > 0) {
+         batches.push({
+           files: currentBatch,
+           tokenEstimate: currentTokens,
+           batchIndex: batches.length
+         });
+         currentBatch = [];
+         currentTokens = 0;
+       }
+       for (const file of dirFiles) {
+         const fileTokens = estimateTokens(file.content);
+         if (currentTokens + fileTokens > tokenBudget && currentBatch.length > 0) {
+           batches.push({
+             files: currentBatch,
+             tokenEstimate: currentTokens,
+             batchIndex: batches.length
+           });
+           currentBatch = [];
+           currentTokens = 0;
+         }
+         currentBatch.push(file);
+         currentTokens += fileTokens;
+       }
+     } else {
+       if (currentBatch.length > 0) {
+         batches.push({
+           files: currentBatch,
+           tokenEstimate: currentTokens,
+           batchIndex: batches.length
+         });
+       }
+       currentBatch = [...dirFiles];
+       currentTokens = dirTokens;
+     }
+   }
+   if (currentBatch.length > 0) {
+     batches.push({
+       files: currentBatch,
+       tokenEstimate: currentTokens,
+       batchIndex: batches.length
+     });
+   }
+   logger.debug("Created file batches", {
+     totalFiles: files.length,
+     batchCount: batches.length,
+     batchSizes: batches.map((b) => ({
+       files: b.files.length,
+       tokens: b.tokenEstimate
+     }))
+   });
+   return batches;
+ }
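+ // Packing strategy: files are grouped by directory so related code stays together,
+ // directories are sorted by ascending token estimate, then packed greedily up to
+ // tokenBudget; a directory that alone exceeds the budget is split file-by-file.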
  
  // src/tools/tps-audit.ts
  var tpsAuditSchemaObj = z7.object({
@@ -1732,7 +1937,7 @@ var tpsAuditSchemaObj = z7.object({
      "Path to repo root (auto-detects current directory if not provided)"
    ),
    focus_areas: z7.array(z7.string()).optional().describe("Specific areas to focus on (e.g., 'performance', 'security')"),
-   max_files: z7.number().max(100).optional().describe("Maximum files to analyze (default: 50, max: 100)"),
+   max_files: z7.number().optional().describe("Maximum files to analyze (default: 50)"),
    file_types: z7.array(z7.string()).optional().describe("File extensions to include (e.g., ['.ts', '.js'])"),
    include_sensitive: z7.boolean().optional().describe(
      "Include potentially sensitive files (default: false, use with caution)"
@@ -1740,6 +1945,7 @@ var tpsAuditSchemaObj = z7.object({
    output_format: z7.enum(["html", "markdown", "json"]).optional().describe("Output format (default: html)")
  });
  var tpsAuditSchema = tpsAuditSchemaObj.shape;
+ var BATCH_TOKEN_THRESHOLD = 6e4;
  async function handleTpsAudit(client2, models, input) {
    const startPath = input.path || process.cwd();
    const outputFormat = input.output_format || "html";
@@ -1779,27 +1985,40 @@ async function handleTpsAudit(client2, models, input) {
        warnings: scanResult.warnings
      });
    }
-   const aggregatedContent = aggregateFiles(scanResult.files);
    logger.info("Repository scanned", {
      filesIncluded: scanResult.files.length,
      totalSize: scanResult.stats.totalSize,
      tokenEstimate: scanResult.stats.tokenEstimate
    });
-   if (scanResult.stats.tokenEstimate > 1e5) {
-     logger.warn("Large token count", {
-       estimate: scanResult.stats.tokenEstimate
-     });
-   }
-   const results = await client2.tpsAudit(aggregatedContent, models, {
-     focusAreas: input.focus_areas,
-     repoName: scanResult.repoRoot.split("/").pop()
-   });
+   const repoName = scanResult.repoRoot.split("/").pop();
+   const needsBatching = scanResult.stats.tokenEstimate > BATCH_TOKEN_THRESHOLD;
+   let results;
    let analysis = null;
-   for (const result of results) {
-     if (!result.error && result.review) {
-       const { parseTpsAnalysis } = await import("./tps-audit-GNK4VIKA.js");
-       analysis = parseTpsAnalysis(result.review);
-       if (analysis) break;
+   if (needsBatching) {
+     logger.info("Using batch processing", {
+       tokenEstimate: scanResult.stats.tokenEstimate,
+       threshold: BATCH_TOKEN_THRESHOLD
+     });
+     const batchResults = await handleBatchedTpsAudit(
+       client2,
+       models,
+       scanResult,
+       input.focus_areas
+     );
+     results = batchResults.results;
+     analysis = batchResults.analysis;
+   } else {
+     const aggregatedContent = aggregateFiles(scanResult.files);
+     results = await client2.tpsAudit(aggregatedContent, models, {
+       focusAreas: input.focus_areas,
+       repoName
+     });
+     for (const result of results) {
+       if (!result.error && result.review) {
+         const { parseTpsAnalysis } = await import("./tps-audit-TXNM5HYS.js");
+         analysis = parseTpsAnalysis(result.review);
+         if (analysis) break;
+       }
      }
    }
    return {
@@ -1810,18 +2029,136 @@ async function handleTpsAudit(client2, models, input) {
      outputFormat
    };
  }
+ async function handleBatchedTpsAudit(client2, models, scanResult, focusAreas) {
+   const { parseTpsAnalysis } = await import("./tps-audit-TXNM5HYS.js");
+   const repoName = scanResult.repoRoot.split("/").pop() ?? "unknown";
+   const batches = createFileBatches(scanResult.files);
+   const batchContents = batches.map((batch) => ({
+     content: aggregateFiles(batch.files),
+     tokenEstimate: batch.tokenEstimate,
+     batchIndex: batch.batchIndex
+   }));
+   logger.info("Processing batches with all models", {
+     batchCount: batches.length,
+     modelCount: models.length,
+     totalApiCalls: batches.length * models.length + models.length
+     // batches + synthesis per model
+   });
+   const modelResults = await Promise.all(
+     models.map(async (model) => {
+       const batchAnalyses = await Promise.all(
+         batchContents.map(async (batch) => {
+           try {
+             const response = await client2.tpsAuditBatch(
+               batch.content,
+               model,
+               batch.batchIndex,
+               batches.length,
+               { focusAreas, repoName }
+             );
+             return {
+               batchIndex: batch.batchIndex,
+               tokenCount: batch.tokenEstimate,
+               rawResponse: response,
+               analysis: parseTpsAnalysis(response)
+             };
+           } catch (error) {
+             logger.error("Batch analysis failed", error, {
+               model,
+               batchIndex: batch.batchIndex
+             });
+             return {
+               batchIndex: batch.batchIndex,
+               tokenCount: batch.tokenEstimate,
+               rawResponse: error instanceof Error ? error.message : "Unknown error",
+               analysis: null
+             };
+           }
+         })
+       );
+       const successfulBatches = batchAnalyses.filter(
+         (b) => b.analysis !== null
+       );
+       let finalAnalysis = null;
+       let finalResponse = "";
+       if (batchAnalyses.length > 1 && successfulBatches.length > 0) {
+         try {
+           finalResponse = await client2.tpsAuditSynthesize(
+             batchAnalyses,
+             model,
+             repoName
+           );
+           finalAnalysis = parseTpsAnalysis(finalResponse);
+         } catch (error) {
+           logger.error("Synthesis failed for model", error, { model });
+           const first = successfulBatches[0];
+           finalAnalysis = first?.analysis ?? null;
+           finalResponse = first?.rawResponse ?? "";
+         }
+       } else if (successfulBatches.length === 1) {
+         const first = successfulBatches[0];
+         if (first) {
+           finalAnalysis = first.analysis;
+           finalResponse = first.rawResponse;
+         }
+       }
+       return {
+         model,
+         review: finalResponse,
+         analysis: finalAnalysis,
+         error: finalAnalysis === null ? "Failed to produce analysis" : void 0
+       };
+     })
+   );
+   const results = modelResults.map((r) => ({
+     model: r.model,
+     review: r.review,
+     error: r.error
+   }));
+   const firstSuccess = modelResults.find((r) => r.analysis !== null);
+   logger.info("Batched TPS audit complete", {
+     modelsUsed: models.length,
+     batchesPerModel: batches.length,
+     successfulModels: modelResults.filter((r) => r.analysis !== null).length
+   });
+   return {
+     results,
+     analysis: firstSuccess?.analysis ?? null
+   };
+ }
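+ // Every model's review text is surfaced in the report; the structured `analysis`
+ // comes from the first model whose output parsed successfully.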
+ function writeReportToFolder(repoRoot, content, format) {
+   try {
+     const outputDir = join4(repoRoot, ".code-council");
+     if (!existsSync2(outputDir)) {
+       mkdirSync(outputDir, { recursive: true });
+     }
+     const ext = format === "json" ? "json" : format === "html" ? "html" : "md";
+     const filename = `tps-audit.${ext}`;
+     const filepath = join4(outputDir, filename);
+     writeFileSync(filepath, content);
+     logger.info("TPS audit report written", { filepath });
+     return filepath;
+   } catch (err) {
+     logger.warn("Failed to write report to .code-council folder", {
+       error: err instanceof Error ? err.message : String(err)
+     });
+     return null;
+   }
+ }
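+ // The filename is fixed per format (tps-audit.html/.md/.json), so repeated audits
+ // overwrite the previous report in .code-council/.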
  function formatTpsAuditResults(auditResult) {
    const { results, scanResult, analysis, outputFormat } = auditResult;
+   let content;
    switch (outputFormat) {
      case "html": {
        const templatePath = join4(getTemplatesDir(), "tps-report.html");
-       return formatResultsAsHtml(results, templatePath, {
+       content = formatResultsAsHtml(results, templatePath, {
          analysis,
          repoName: scanResult.repoRoot.split("/").pop()
        });
+       break;
      }
      case "json": {
-       return JSON.stringify(
+       content = JSON.stringify(
          {
            analysis,
            scanStats: scanResult.stats,
@@ -1836,6 +2173,7 @@ function formatTpsAuditResults(auditResult) {
          null,
          2
        );
+       break;
      }
      case "markdown":
      default: {
@@ -1882,7 +2220,7 @@ function formatTpsAuditResults(auditResult) {
          }
        }
        parts.push("\n## Model Perspectives\n");
-       results.forEach((r) => {
+       for (const r of results) {
          if (r.error) {
            parts.push(`
  ### ${r.model}
@@ -1897,10 +2235,25 @@ ${r.review}
  `);
          }
          parts.push("\n---\n");
-       });
-       return parts.join("");
+       }
+       content = parts.join("");
+       break;
      }
    }
+   const filepath = writeReportToFolder(
+     scanResult.repoRoot,
+     content,
+     outputFormat
+   );
+   if (filepath) {
+     const fileNote = `
+ 
+ ---
+ **Report saved to:** \`${filepath}\`
+ `;
+     return content + fileNote;
+   }
+   return content;
  }
  
  // src/index.ts
package/dist/tps-audit-TXNM5HYS.js ADDED
@@ -0,0 +1,19 @@
+ import {
+   BATCH_SYSTEM_PROMPT,
+   SYNTHESIS_SYSTEM_PROMPT,
+   SYSTEM_PROMPT,
+   buildBatchUserMessage,
+   buildSynthesisUserMessage,
+   buildUserMessage,
+   parseTpsAnalysis
+ } from "./chunk-W4MFXWTT.js";
+ export {
+   BATCH_SYSTEM_PROMPT,
+   SYNTHESIS_SYSTEM_PROMPT,
+   SYSTEM_PROMPT,
+   buildBatchUserMessage,
+   buildSynthesisUserMessage,
+   buildUserMessage,
+   parseTpsAnalysis
+ };
+ //# sourceMappingURL=tps-audit-TXNM5HYS.js.map
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@klitchevo/code-council",
-   "version": "0.0.14",
+   "version": "0.0.16",
    "description": "Multi-model AI code review server using OpenRouter - get diverse perspectives from multiple LLMs in parallel",
    "main": "dist/index.js",
    "type": "module",
package/dist/chunk-Y77R7523.js DELETED
@@ -1,142 +0,0 @@
- // src/prompts/tps-audit.ts
- var SYSTEM_PROMPT = `You are an expert Toyota Production System (TPS) consultant analyzing software codebases. Your role is to "walk the production line" - examining how code flows from input to output, identifying waste (muda), spotting bottlenecks, and suggesting continuous improvement (kaizen).
- 
- ## TPS Principles for Software
- 
- ### 1. FLOW (Nagare)
- Analyze how data and control flow through the system:
- - Identify entry points and exit points
- - Map the critical paths
- - Look for smooth, uninterrupted flow
- - Identify where flow is blocked or redirected
- - Check for single-piece flow vs batch processing
- 
- ### 2. WASTE (Muda) - The 7 Wastes in Software
- Identify instances of each waste type:
- 
- **Defects**: Bugs, error-prone code, missing validation
- **Overproduction**: Features nobody uses, over-engineered solutions
- **Waiting**: Blocking I/O, synchronous when async would work, slow tests
- **Non-utilized Talent**: Manual tasks that could be automated, repetitive code
- **Transportation**: Unnecessary data transformation, excessive API calls
- **Inventory**: Dead code, unused imports/exports, stale dependencies
- **Motion**: Complex navigation, scattered related code, poor organization
- **Extra-processing**: Premature optimization, unnecessary abstraction layers
- 
- ### 3. BOTTLENECKS
- Identify constraints that limit throughput:
- - Synchronous operations that block
- - Single points of failure
- - Resource contention
- - N+1 queries or API calls
- - Sequential operations that could be parallel
- 
- ### 4. PULL vs PUSH
- Evaluate if work is demand-driven:
- - Lazy evaluation vs eager computation
- - On-demand loading vs preloading everything
- - Event-driven vs polling
- - Streaming vs buffering all data
- 
- ### 5. JIDOKA (Built-in Quality)
- Assess quality mechanisms:
- - Error handling and recovery
- - Validation at boundaries
- - Fail-fast patterns
- - Type safety usage
- - Test coverage signals
- 
- ### 6. STANDARDIZATION
- Look for consistency:
- - Code style consistency
- - Pattern usage consistency
- - Error handling patterns
- - Naming conventions
- - File organization
- 
- ## Scoring Guidelines
- 
- **Overall Score (0-100)**:
- - 90-100: Exceptional flow, minimal waste, excellent quality
- - 70-89: Good practices, some waste, room for improvement
- - 50-69: Average, significant waste or flow issues
- - 30-49: Poor flow, excessive waste, quality concerns
- - 0-29: Critical issues, major redesign needed
- 
- **Flow Score**: How smoothly does data/control move through the system?
- **Waste Score**: Higher = less waste (100 = no waste identified)
- **Quality Score**: Built-in quality mechanisms, error handling, type safety
- 
- ## Output Requirements
- 
- You MUST respond with valid JSON matching the TpsAnalysis interface. Do not include any text before or after the JSON.
- 
- Focus on:
- 1. Actionable findings with specific file/line references
- 2. Prioritized recommendations (quick wins first)
- 3. Concrete suggestions, not vague advice
- 4. Balanced assessment - acknowledge strengths too
- 5. Effort estimates for recommendations`;
- function buildUserMessage(aggregatedContent, options) {
-   const parts = [];
-   if (options?.repoName) {
-     parts.push(`## Repository: ${options.repoName}`);
-   }
-   if (options?.focusAreas && options.focusAreas.length > 0) {
-     parts.push(
-       `## Focus Areas
- Pay special attention to: ${options.focusAreas.join(", ")}`
-     );
-   }
-   if (options?.additionalContext) {
-     parts.push(`## Additional Context
- ${options.additionalContext}`);
-   }
-   parts.push(`## Codebase to Audit
- 
- Analyze this codebase using Toyota Production System principles. Walk the production line from entry points through to outputs. Identify waste, bottlenecks, and improvement opportunities.
- 
- ${aggregatedContent}
- 
- ## Response Format
- 
- Respond with ONLY valid JSON matching the TpsAnalysis interface. Include:
- - Scores for overall, flow, waste, and quality (0-100)
- - Flow analysis with entry points and pathways
- - Specific bottlenecks with locations and suggestions
- - Waste items categorized by the 7 types
- - Jidoka (built-in quality) assessment
- - Prioritized recommendations
- - Summary with strengths, concerns, and quick wins
- 
- Your JSON response:`);
-   return parts.join("\n\n");
- }
- function parseTpsAnalysis(response) {
-   try {
-     let jsonStr = response.trim();
-     if (jsonStr.startsWith("```json")) {
-       jsonStr = jsonStr.slice(7);
-     } else if (jsonStr.startsWith("```")) {
-       jsonStr = jsonStr.slice(3);
-     }
-     if (jsonStr.endsWith("```")) {
-       jsonStr = jsonStr.slice(0, -3);
-     }
-     jsonStr = jsonStr.trim();
-     const parsed = JSON.parse(jsonStr);
-     if (typeof parsed.scores?.overall !== "number" || !Array.isArray(parsed.bottlenecks) || !Array.isArray(parsed.recommendations)) {
-       return null;
-     }
-     return parsed;
-   } catch {
-     return null;
-   }
- }
- 
- export {
-   SYSTEM_PROMPT,
-   buildUserMessage,
-   parseTpsAnalysis
- };
- //# sourceMappingURL=chunk-Y77R7523.js.map
package/dist/tps-audit-GNK4VIKA.js DELETED
@@ -1,11 +0,0 @@
- import {
-   SYSTEM_PROMPT,
-   buildUserMessage,
-   parseTpsAnalysis
- } from "./chunk-Y77R7523.js";
- export {
-   SYSTEM_PROMPT,
-   buildUserMessage,
-   parseTpsAnalysis
- };
- //# sourceMappingURL=tps-audit-GNK4VIKA.js.map