crucible-mcp 0.2.0__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
crucible/server.py CHANGED
@@ -13,6 +13,15 @@ from crucible.knowledge.loader import (
13
13
  load_principles,
14
14
  )
15
15
  from crucible.models import Domain, FullReviewResult, Severity, ToolFinding
16
+ from crucible.review.core import (
17
+ compute_severity_counts,
18
+ deduplicate_findings,
19
+ detect_domain,
20
+ filter_findings_to_changes,
21
+ load_skills_and_knowledge,
22
+ run_enforcement,
23
+ run_static_analysis,
24
+ )
16
25
  from crucible.skills import get_knowledge_for_skills, load_skill, match_skills_for_domain
17
26
  from crucible.tools.delegation import (
18
27
  check_all_tools,
@@ -61,45 +70,59 @@ def _format_findings(findings: list[ToolFinding]) -> str:
61
70
  return "\n".join(parts) if parts else "No findings."
62
71
 
63
72
 
64
- def _deduplicate_findings(findings: list[ToolFinding]) -> list[ToolFinding]:
65
- """Deduplicate findings by location and message.
66
-
67
- When multiple tools report the same issue at the same location,
68
- keep only the highest severity finding.
69
- """
70
- # Group by (location, normalized_message)
71
- seen: dict[tuple[str, str], ToolFinding] = {}
72
-
73
- for f in findings:
74
- # Normalize the message for comparison (lowercase, strip whitespace)
75
- norm_msg = f.message.lower().strip()
76
- key = (f.location, norm_msg)
77
-
78
- if key not in seen:
79
- seen[key] = f
80
- else:
81
- # Keep the higher severity finding
82
- existing = seen[key]
83
- severity_order = [
84
- Severity.CRITICAL,
85
- Severity.HIGH,
86
- Severity.MEDIUM,
87
- Severity.LOW,
88
- Severity.INFO,
89
- ]
90
- if severity_order.index(f.severity) < severity_order.index(existing.severity):
91
- seen[key] = f
92
-
93
- return list(seen.values())
94
-
95
-
96
73
  @server.list_tools() # type: ignore[misc]
97
74
  async def list_tools() -> list[Tool]:
98
75
  """List available tools."""
99
76
  return [
77
+ Tool(
78
+ name="review",
79
+ description="Unified code review tool. Supports path-based review OR git-aware review. Runs static analysis, matches skills, loads knowledge.",
80
+ inputSchema={
81
+ "type": "object",
82
+ "properties": {
83
+ "path": {
84
+ "type": "string",
85
+ "description": "File or directory path to review. If not provided, uses git mode.",
86
+ },
87
+ "mode": {
88
+ "type": "string",
89
+ "enum": ["staged", "unstaged", "branch", "commits"],
90
+ "description": "Git mode: staged (about to commit), unstaged (working dir), branch (PR diff), commits (recent N)",
91
+ },
92
+ "base": {
93
+ "type": "string",
94
+ "description": "Base branch for 'branch' mode (default: main) or commit count for 'commits' mode (default: 1)",
95
+ },
96
+ "include_context": {
97
+ "type": "boolean",
98
+ "description": "For git modes: include findings near (within 5 lines of) changes (default: false)",
99
+ },
100
+ "skills": {
101
+ "type": "array",
102
+ "items": {"type": "string"},
103
+ "description": "Override skill selection (default: auto-detect based on domain)",
104
+ },
105
+ "include_skills": {
106
+ "type": "boolean",
107
+ "description": "Load skills and checklists (default: true). Set false for quick analysis only.",
108
+ "default": True,
109
+ },
110
+ "include_knowledge": {
111
+ "type": "boolean",
112
+ "description": "Load knowledge files (default: true). Set false for quick analysis only.",
113
+ "default": True,
114
+ },
115
+ "enforce": {
116
+ "type": "boolean",
117
+ "description": "Run pattern assertions from .crucible/assertions/ (default: true).",
118
+ "default": True,
119
+ },
120
+ },
121
+ },
122
+ ),
100
123
  Tool(
101
124
  name="quick_review",
102
- description="Run static analysis tools on code. Returns findings with domain metadata for skill selection.",
125
+ description="[DEPRECATED: use review(path, include_skills=false)] Run static analysis only.",
103
126
  inputSchema={
104
127
  "type": "object",
105
128
  "properties": {
@@ -116,6 +139,57 @@ async def list_tools() -> list[Tool]:
116
139
  "required": ["path"],
117
140
  },
118
141
  ),
142
+ Tool(
143
+ name="full_review",
144
+ description="[DEPRECATED: use review(path)] Comprehensive code review with skills and knowledge.",
145
+ inputSchema={
146
+ "type": "object",
147
+ "properties": {
148
+ "path": {
149
+ "type": "string",
150
+ "description": "File or directory path to review",
151
+ },
152
+ "skills": {
153
+ "type": "array",
154
+ "items": {"type": "string"},
155
+ "description": "Override skill selection (default: auto-detect based on domain)",
156
+ },
157
+ "include_sage": {
158
+ "type": "boolean",
159
+ "description": "Include Sage knowledge recall (not yet implemented)",
160
+ "default": True,
161
+ },
162
+ },
163
+ "required": ["path"],
164
+ },
165
+ ),
166
+ Tool(
167
+ name="review_changes",
168
+ description="[DEPRECATED: use review(mode='staged')] Review git changes.",
169
+ inputSchema={
170
+ "type": "object",
171
+ "properties": {
172
+ "mode": {
173
+ "type": "string",
174
+ "enum": ["staged", "unstaged", "branch", "commits"],
175
+ "description": "What changes to review",
176
+ },
177
+ "base": {
178
+ "type": "string",
179
+ "description": "Base branch for 'branch' mode or commit count for 'commits' mode",
180
+ },
181
+ "path": {
182
+ "type": "string",
183
+ "description": "Repository path (default: current directory)",
184
+ },
185
+ "include_context": {
186
+ "type": "boolean",
187
+ "description": "Include findings near changes (default: false)",
188
+ },
189
+ },
190
+ "required": ["mode"],
191
+ },
192
+ ),
119
193
  Tool(
120
194
  name="get_principles",
121
195
  description="Load engineering principles by topic",
@@ -203,57 +277,6 @@ async def list_tools() -> list[Tool]:
203
277
  "properties": {},
204
278
  },
205
279
  ),
206
- Tool(
207
- name="review_changes",
208
- description="Review git changes (staged, unstaged, branch diff, commits). Runs analysis on changed files and filters findings to changed lines only.",
209
- inputSchema={
210
- "type": "object",
211
- "properties": {
212
- "mode": {
213
- "type": "string",
214
- "enum": ["staged", "unstaged", "branch", "commits"],
215
- "description": "What changes to review: staged (about to commit), unstaged (working dir), branch (PR diff vs base), commits (recent N commits)",
216
- },
217
- "base": {
218
- "type": "string",
219
- "description": "Base branch for 'branch' mode (default: main) or commit count for 'commits' mode (default: 1)",
220
- },
221
- "path": {
222
- "type": "string",
223
- "description": "Repository path (default: current directory)",
224
- },
225
- "include_context": {
226
- "type": "boolean",
227
- "description": "Include findings near (within 5 lines of) changes, not just in changed lines (default: false)",
228
- },
229
- },
230
- "required": ["mode"],
231
- },
232
- ),
233
- Tool(
234
- name="full_review",
235
- description="Comprehensive code review: runs static analysis, matches applicable skills based on domain, loads linked knowledge. Returns unified report for synthesis.",
236
- inputSchema={
237
- "type": "object",
238
- "properties": {
239
- "path": {
240
- "type": "string",
241
- "description": "File or directory path to review",
242
- },
243
- "skills": {
244
- "type": "array",
245
- "items": {"type": "string"},
246
- "description": "Override skill selection (default: auto-detect based on domain)",
247
- },
248
- "include_sage": {
249
- "type": "boolean",
250
- "description": "Include Sage knowledge recall (not yet implemented)",
251
- "default": True,
252
- },
253
- },
254
- "required": ["path"],
255
- },
256
- ),
257
280
  Tool(
258
281
  name="load_knowledge",
259
282
  description="Load knowledge/principles files without running static analysis. Useful for getting guidance on patterns, best practices, or domain-specific knowledge. Automatically includes project and user knowledge files.",
@@ -280,6 +303,306 @@ async def list_tools() -> list[Tool]:
280
303
  ]
281
304
 
282
305
 
306
+ def _format_review_output(
307
+ path: str | None,
308
+ git_context: GitContext | None,
309
+ domains: list[str],
310
+ severity_counts: dict[str, int],
311
+ findings: list[ToolFinding],
312
+ tool_errors: list[str],
313
+ matched_skills: list[tuple[str, list[str]]] | None,
314
+ skill_content: dict[str, str] | None,
315
+ knowledge_files: set[str] | None,
316
+ knowledge_content: dict[str, str] | None,
317
+ enforcement_findings: list | None = None,
318
+ enforcement_errors: list[str] | None = None,
319
+ assertions_checked: int = 0,
320
+ assertions_skipped: int = 0,
321
+ ) -> str:
322
+ """Format unified review output."""
323
+ parts: list[str] = ["# Code Review\n"]
324
+
325
+ # Header based on mode
326
+ if git_context:
327
+ parts.append(f"**Mode:** {git_context.mode}")
328
+ if git_context.base_ref:
329
+ parts.append(f"**Base:** {git_context.base_ref}")
330
+ elif path:
331
+ parts.append(f"**Path:** `{path}`")
332
+
333
+ parts.append(f"**Domains:** {', '.join(domains)}")
334
+ parts.append(f"**Severity summary:** {severity_counts or 'No findings'}\n")
335
+
336
+ # Files changed (git mode)
337
+ if git_context and git_context.changes:
338
+ added = [c for c in git_context.changes if c.status == "A"]
339
+ modified = [c for c in git_context.changes if c.status == "M"]
340
+ deleted = [c for c in git_context.changes if c.status == "D"]
341
+ renamed = [c for c in git_context.changes if c.status == "R"]
342
+
343
+ total = len(git_context.changes)
344
+ parts.append(f"## Files Changed ({total})")
345
+ for c in added:
346
+ parts.append(f"- `+` {c.path}")
347
+ for c in modified:
348
+ parts.append(f"- `~` {c.path}")
349
+ for c in renamed:
350
+ parts.append(f"- `R` {c.old_path} -> {c.path}")
351
+ for c in deleted:
352
+ parts.append(f"- `-` {c.path}")
353
+ parts.append("")
354
+
355
+ # Commit messages
356
+ if git_context.commit_messages:
357
+ parts.append("## Commits")
358
+ for msg in git_context.commit_messages:
359
+ parts.append(f"- {msg}")
360
+ parts.append("")
361
+
362
+ # Tool errors
363
+ if tool_errors:
364
+ parts.append("## Tool Errors\n")
365
+ for error in tool_errors:
366
+ parts.append(f"- {error}")
367
+ parts.append("")
368
+
369
+ # Applicable skills
370
+ if matched_skills:
371
+ parts.append("## Applicable Skills\n")
372
+ for skill_name, triggers in matched_skills:
373
+ parts.append(f"- **{skill_name}**: matched on {', '.join(triggers)}")
374
+ parts.append("")
375
+
376
+ # Knowledge loaded
377
+ if knowledge_files:
378
+ parts.append("## Knowledge Loaded\n")
379
+ parts.append(f"Files: {', '.join(sorted(knowledge_files))}")
380
+ parts.append("")
381
+
382
+ # Findings
383
+ parts.append("## Static Analysis Findings\n")
384
+ if findings:
385
+ parts.append(_format_findings(findings))
386
+ else:
387
+ parts.append("No issues found.")
388
+ parts.append("")
389
+
390
+ # Enforcement assertions
391
+ if enforcement_findings is not None:
392
+ active = [f for f in enforcement_findings if not f.suppressed]
393
+ suppressed = [f for f in enforcement_findings if f.suppressed]
394
+
395
+ parts.append("## Pattern Assertions\n")
396
+ if assertions_checked > 0 or assertions_skipped > 0:
397
+ parts.append(f"*Checked: {assertions_checked}, Skipped (LLM): {assertions_skipped}*\n")
398
+
399
+ if enforcement_errors:
400
+ parts.append("**Errors:**")
401
+ for err in enforcement_errors:
402
+ parts.append(f"- {err}")
403
+ parts.append("")
404
+
405
+ if active:
406
+ # Group by severity
407
+ by_sev: dict[str, list] = {}
408
+ for f in active:
409
+ by_sev.setdefault(f.severity.upper(), []).append(f)
410
+
411
+ for sev in ["ERROR", "WARNING", "INFO"]:
412
+ if sev in by_sev:
413
+ parts.append(f"### {sev} ({len(by_sev[sev])})\n")
414
+ for f in by_sev[sev]:
415
+ parts.append(f"- **[{f.assertion_id}]** {f.message}")
416
+ parts.append(f" - Location: `{f.location}`")
417
+ if f.match_text:
418
+ parts.append(f" - Match: `{f.match_text}`")
419
+ else:
420
+ parts.append("No pattern violations found.")
421
+
422
+ if suppressed:
423
+ parts.append(f"\n*Suppressed: {len(suppressed)}*")
424
+ for f in suppressed:
425
+ reason = f" ({f.suppression_reason})" if f.suppression_reason else ""
426
+ parts.append(f"- {f.assertion_id}: {f.location}{reason}")
427
+
428
+ parts.append("")
429
+
430
+ # Review checklists from skills
431
+ if skill_content:
432
+ parts.append("---\n")
433
+ parts.append("## Review Checklists\n")
434
+ for skill_name, content in skill_content.items():
435
+ parts.append(f"### {skill_name}\n")
436
+ parts.append(content)
437
+ parts.append("")
438
+
439
+ # Knowledge reference
440
+ if knowledge_content:
441
+ parts.append("---\n")
442
+ parts.append("## Principles Reference\n")
443
+ for filename, content in sorted(knowledge_content.items()):
444
+ parts.append(f"### {filename}\n")
445
+ parts.append(content)
446
+ parts.append("")
447
+
448
+ return "\n".join(parts)
449
+
450
+
451
+ def _handle_review(arguments: dict[str, Any]) -> list[TextContent]:
452
+ """Handle unified review tool."""
453
+ import os
454
+
455
+ path = arguments.get("path")
456
+ mode = arguments.get("mode")
457
+ base = arguments.get("base")
458
+ include_context = arguments.get("include_context", False)
459
+ skills_override = arguments.get("skills")
460
+ include_skills = arguments.get("include_skills", True)
461
+ include_knowledge = arguments.get("include_knowledge", True)
462
+ enforce = arguments.get("enforce", True)
463
+
464
+ # Determine if this is path-based or git-based review
465
+ git_context: GitContext | None = None
466
+ changed_files: list[str] = []
467
+
468
+ if mode:
469
+ # Git-based review
470
+ repo_path = path if path else os.getcwd()
471
+ root_result = get_repo_root(repo_path)
472
+ if root_result.is_err:
473
+ return [TextContent(type="text", text=f"Error: {root_result.error}")]
474
+ repo_path = root_result.value
475
+
476
+ # Get git context based on mode
477
+ if mode == "staged":
478
+ context_result = get_staged_changes(repo_path)
479
+ elif mode == "unstaged":
480
+ context_result = get_unstaged_changes(repo_path)
481
+ elif mode == "branch":
482
+ base_branch = base if base else "main"
483
+ context_result = get_branch_diff(repo_path, base_branch)
484
+ elif mode == "commits":
485
+ try:
486
+ count = int(base) if base else 1
487
+ except ValueError:
488
+ return [TextContent(type="text", text=f"Error: Invalid commit count '{base}'")]
489
+ context_result = get_recent_commits(repo_path, count)
490
+ else:
491
+ return [TextContent(type="text", text=f"Error: Unknown mode '{mode}'")]
492
+
493
+ if context_result.is_err:
494
+ return [TextContent(type="text", text=f"Error: {context_result.error}")]
495
+
496
+ git_context = context_result.value
497
+
498
+ if not git_context.changes:
499
+ if mode == "staged":
500
+ return [TextContent(type="text", text="No changes to review. Stage files with `git add` first.")]
501
+ elif mode == "unstaged":
502
+ return [TextContent(type="text", text="No unstaged changes to review.")]
503
+ else:
504
+ return [TextContent(type="text", text="No changes found.")]
505
+
506
+ changed_files = get_changed_files(git_context)
507
+ if not changed_files:
508
+ return [TextContent(type="text", text="No files to analyze (only deletions).")]
509
+
510
+ elif not path:
511
+ return [TextContent(type="text", text="Error: Either 'path' or 'mode' is required.")]
512
+
513
+ # Detect domains and run analysis
514
+ all_findings: list[ToolFinding] = []
515
+ tool_errors: list[str] = []
516
+ domains_detected: set[Domain] = set()
517
+ all_domain_tags: set[str] = set()
518
+
519
+ if git_context:
520
+ # Git mode: analyze each changed file
521
+ repo_path = get_repo_root(path if path else os.getcwd()).value
522
+ for file_path in changed_files:
523
+ full_path = f"{repo_path}/{file_path}"
524
+ domain, domain_tags = detect_domain(file_path)
525
+ domains_detected.add(domain)
526
+ all_domain_tags.update(domain_tags)
527
+
528
+ findings, errors = run_static_analysis(full_path, domain, domain_tags)
529
+ all_findings.extend(findings)
530
+ tool_errors.extend([f"{e} ({file_path})" for e in errors])
531
+
532
+ # Filter findings to changed lines
533
+ all_findings = filter_findings_to_changes(all_findings, git_context, include_context)
534
+ else:
535
+ # Path mode: analyze the path directly
536
+ domain, domain_tags = detect_domain(path)
537
+ domains_detected.add(domain)
538
+ all_domain_tags.update(domain_tags)
539
+
540
+ findings, errors = run_static_analysis(path, domain, domain_tags)
541
+ all_findings.extend(findings)
542
+ tool_errors.extend(errors)
543
+
544
+ # Deduplicate findings
545
+ all_findings = deduplicate_findings(all_findings)
546
+
547
+ # Run pattern assertions
548
+ enforcement_findings = []
549
+ enforcement_errors: list[str] = []
550
+ assertions_checked = 0
551
+ assertions_skipped = 0
552
+
553
+ if enforce:
554
+ if git_context:
555
+ repo_path = get_repo_root(path if path else os.getcwd()).value
556
+ enforcement_findings, enforcement_errors, assertions_checked, assertions_skipped = (
557
+ run_enforcement(path or "", changed_files=changed_files, repo_root=repo_path)
558
+ )
559
+ elif path:
560
+ enforcement_findings, enforcement_errors, assertions_checked, assertions_skipped = (
561
+ run_enforcement(path)
562
+ )
563
+
564
+ # Compute severity summary
565
+ severity_counts = compute_severity_counts(all_findings)
566
+
567
+ # Load skills and knowledge
568
+ matched_skills: list[tuple[str, list[str]]] | None = None
569
+ skill_content: dict[str, str] | None = None
570
+ knowledge_files: set[str] | None = None
571
+ knowledge_content: dict[str, str] | None = None
572
+
573
+ if include_skills or include_knowledge:
574
+ primary_domain = next(iter(domains_detected)) if domains_detected else Domain.UNKNOWN
575
+ matched, s_content, k_files, k_content = load_skills_and_knowledge(
576
+ primary_domain, list(all_domain_tags), skills_override
577
+ )
578
+ if include_skills:
579
+ matched_skills = matched
580
+ skill_content = s_content
581
+ if include_knowledge:
582
+ knowledge_files = k_files
583
+ knowledge_content = k_content
584
+
585
+ # Format output
586
+ output = _format_review_output(
587
+ path,
588
+ git_context,
589
+ list(all_domain_tags) if all_domain_tags else ["unknown"],
590
+ severity_counts,
591
+ all_findings,
592
+ tool_errors,
593
+ matched_skills,
594
+ skill_content,
595
+ knowledge_files,
596
+ knowledge_content,
597
+ enforcement_findings if enforce else None,
598
+ enforcement_errors if enforce else None,
599
+ assertions_checked,
600
+ assertions_skipped,
601
+ )
602
+
603
+ return [TextContent(type="text", text=output)]
604
+
605
+
283
606
  def _handle_get_principles(arguments: dict[str, Any]) -> list[TextContent]:
284
607
  """Handle get_principles tool."""
285
608
  topic = arguments.get("topic")
@@ -396,89 +719,13 @@ def _handle_check_tools(arguments: dict[str, Any]) -> list[TextContent]:
396
719
  return [TextContent(type="text", text="\n".join(parts))]
397
720
 
398
721
 
399
- def _detect_domain_for_file(path: str) -> tuple[Domain, list[str]]:
400
- """Detect domain from a single file path.
401
-
402
- Returns (domain, list of domain tags for skill matching).
403
- """
404
- if path.endswith(".sol"):
405
- return Domain.SMART_CONTRACT, ["solidity", "smart_contract", "web3"]
406
- elif path.endswith(".vy"):
407
- return Domain.SMART_CONTRACT, ["vyper", "smart_contract", "web3"]
408
- elif path.endswith(".py"):
409
- return Domain.BACKEND, ["python", "backend"]
410
- elif path.endswith((".ts", ".tsx")):
411
- return Domain.FRONTEND, ["typescript", "frontend"]
412
- elif path.endswith((".js", ".jsx")):
413
- return Domain.FRONTEND, ["javascript", "frontend"]
414
- elif path.endswith(".go"):
415
- return Domain.BACKEND, ["go", "backend"]
416
- elif path.endswith(".rs"):
417
- return Domain.BACKEND, ["rust", "backend"]
418
- elif path.endswith((".tf", ".yaml", ".yml")):
419
- return Domain.INFRASTRUCTURE, ["infrastructure", "devops"]
420
- else:
421
- return Domain.UNKNOWN, []
422
-
423
-
424
- def _detect_domain(path: str) -> tuple[Domain, list[str]]:
425
- """Detect domain from file or directory path.
426
-
427
- For directories, scans contained files and aggregates domains.
428
- Returns (primary_domain, list of all domain tags).
429
- """
430
- from collections import Counter
431
- from pathlib import Path
432
-
433
- p = Path(path)
434
-
435
- # Single file - use direct detection
436
- if p.is_file():
437
- return _detect_domain_for_file(path)
438
-
439
- # Directory - scan and aggregate
440
- if not p.is_dir():
441
- return Domain.UNKNOWN, ["unknown"]
442
-
443
- domain_counts: Counter[Domain] = Counter()
444
- all_tags: set[str] = set()
445
-
446
- # Scan files in directory (up to 1000 to avoid huge repos)
447
- file_count = 0
448
- max_files = 1000
449
-
450
- for file_path in p.rglob("*"):
451
- if file_count >= max_files:
452
- break
453
- if not file_path.is_file():
454
- continue
455
- # Skip hidden files and common non-code directories
456
- if any(part.startswith(".") for part in file_path.parts):
457
- continue
458
- if any(part in ("node_modules", "__pycache__", "venv", ".venv", "dist", "build") for part in file_path.parts):
459
- continue
460
-
461
- domain, tags = _detect_domain_for_file(str(file_path))
462
- if domain != Domain.UNKNOWN:
463
- domain_counts[domain] += 1
464
- all_tags.update(tags)
465
- file_count += 1
466
-
467
- # Return most common domain, or UNKNOWN if none found
468
- if not domain_counts:
469
- return Domain.UNKNOWN, ["unknown"]
470
-
471
- primary_domain = domain_counts.most_common(1)[0][0]
472
- return primary_domain, sorted(all_tags) if all_tags else ["unknown"]
473
-
474
-
475
722
  def _handle_quick_review(arguments: dict[str, Any]) -> list[TextContent]:
476
723
  """Handle quick_review tool - returns findings with domain metadata."""
477
724
  path = arguments.get("path", "")
478
725
  tools = arguments.get("tools")
479
726
 
480
727
  # Internal domain detection
481
- domain, domain_tags = _detect_domain(path)
728
+ domain, domain_tags = detect_domain(path)
482
729
 
483
730
  # Select tools based on domain
484
731
  if domain == Domain.SMART_CONTRACT:
@@ -531,7 +778,7 @@ def _handle_quick_review(arguments: dict[str, Any]) -> list[TextContent]:
531
778
  tool_results.append(f"## Bandit\nError: {result.error}")
532
779
 
533
780
  # Deduplicate findings
534
- all_findings = _deduplicate_findings(all_findings)
781
+ all_findings = deduplicate_findings(all_findings)
535
782
 
536
783
  # Compute severity summary
537
784
  severity_counts: dict[str, int] = {}
@@ -550,65 +797,15 @@ def _handle_quick_review(arguments: dict[str, Any]) -> list[TextContent]:
550
797
  return [TextContent(type="text", text="\n".join(output_parts))]
551
798
 
552
799
 
553
- def _filter_findings_to_changes(
554
- findings: list[ToolFinding],
555
- context: GitContext,
556
- include_context: bool = False,
557
- ) -> list[ToolFinding]:
558
- """Filter findings to only those in changed lines."""
559
- # Build a lookup of file -> changed line ranges
560
- changed_ranges: dict[str, list[tuple[int, int]]] = {}
561
- for change in context.changes:
562
- if change.status == "D":
563
- continue # Skip deleted files
564
- ranges = [(r.start, r.end) for r in change.added_lines]
565
- changed_ranges[change.path] = ranges
566
-
567
- context_lines = 5 if include_context else 0
568
- filtered: list[ToolFinding] = []
569
-
570
- for finding in findings:
571
- # Parse location: "path:line" or "path:line:col"
572
- parts = finding.location.split(":")
573
- if len(parts) < 2:
574
- continue
575
-
576
- file_path = parts[0]
577
- try:
578
- line_num = int(parts[1])
579
- except ValueError:
580
- continue
581
-
582
- # Check if file is in changes
583
- # Handle both absolute and relative paths
584
- matching_file = None
585
- for changed_file in changed_ranges:
586
- if file_path.endswith(changed_file) or changed_file.endswith(file_path):
587
- matching_file = changed_file
588
- break
589
-
590
- if not matching_file:
591
- continue
592
-
593
- # Check if line is in changed ranges
594
- ranges = changed_ranges[matching_file]
595
- in_range = False
596
- for start, end in ranges:
597
- if start - context_lines <= line_num <= end + context_lines:
598
- in_range = True
599
- break
600
-
601
- if in_range:
602
- filtered.append(finding)
603
-
604
- return filtered
605
-
606
-
607
800
  def _format_change_review(
608
801
  context: GitContext,
609
802
  findings: list[ToolFinding],
610
803
  severity_counts: dict[str, int],
611
804
  tool_errors: list[str] | None = None,
805
+ matched_skills: list[tuple[str, list[str]]] | None = None,
806
+ skill_content: dict[str, str] | None = None,
807
+ knowledge_files: set[str] | None = None,
808
+ knowledge_content: dict[str, str] | None = None,
612
809
  ) -> str:
613
810
  """Format change review output."""
614
811
  parts: list[str] = ["# Change Review\n"]
@@ -642,6 +839,19 @@ def _format_change_review(
642
839
  parts.append(f"- {msg}")
643
840
  parts.append("")
644
841
 
842
+ # Applicable skills
843
+ if matched_skills:
844
+ parts.append("## Applicable Skills\n")
845
+ for skill_name, triggers in matched_skills:
846
+ parts.append(f"- **{skill_name}**: matched on {', '.join(triggers)}")
847
+ parts.append("")
848
+
849
+ # Knowledge loaded
850
+ if knowledge_files:
851
+ parts.append("## Knowledge Loaded\n")
852
+ parts.append(f"Files: {', '.join(sorted(knowledge_files))}")
853
+ parts.append("")
854
+
645
855
  # Tool errors (if any)
646
856
  if tool_errors:
647
857
  parts.append("## Tool Errors\n")
@@ -657,6 +867,25 @@ def _format_change_review(
657
867
  else:
658
868
  parts.append("## Findings in Changed Code\n")
659
869
  parts.append("No issues found in changed code.")
870
+ parts.append("")
871
+
872
+ # Review checklists from skills
873
+ if skill_content:
874
+ parts.append("---\n")
875
+ parts.append("## Review Checklists\n")
876
+ for skill_name, content in skill_content.items():
877
+ parts.append(f"### {skill_name}\n")
878
+ parts.append(content)
879
+ parts.append("")
880
+
881
+ # Knowledge reference
882
+ if knowledge_content:
883
+ parts.append("---\n")
884
+ parts.append("## Principles Reference\n")
885
+ for filename, content in sorted(knowledge_content.items()):
886
+ parts.append(f"### {filename}\n")
887
+ parts.append(content)
888
+ parts.append("")
660
889
 
661
890
  return "\n".join(parts)
662
891
 
@@ -716,12 +945,16 @@ def _handle_review_changes(arguments: dict[str, Any]) -> list[TextContent]:
716
945
  # Run analysis on changed files
717
946
  all_findings: list[ToolFinding] = []
718
947
  tool_errors: list[str] = []
948
+ domains_detected: set[Domain] = set()
949
+ all_domain_tags: set[str] = set()
719
950
 
720
951
  for file_path in changed_files:
721
952
  full_path = f"{repo_path}/{file_path}"
722
953
 
723
954
  # Detect domain for this file
724
- domain, domain_tags = _detect_domain(file_path)
955
+ domain, domain_tags = detect_domain(file_path)
956
+ domains_detected.add(domain)
957
+ all_domain_tags.update(domain_tags)
725
958
 
726
959
  # Select tools based on domain
727
960
  if domain == Domain.SMART_CONTRACT:
@@ -764,10 +997,10 @@ def _handle_review_changes(arguments: dict[str, Any]) -> list[TextContent]:
764
997
  tool_errors.append(f"bandit ({file_path}): {result.error}")
765
998
 
766
999
  # Filter findings to changed lines
767
- filtered_findings = _filter_findings_to_changes(all_findings, context, include_context)
1000
+ filtered_findings = filter_findings_to_changes(all_findings, context, include_context)
768
1001
 
769
1002
  # Deduplicate findings
770
- filtered_findings = _deduplicate_findings(filtered_findings)
1003
+ filtered_findings = deduplicate_findings(filtered_findings)
771
1004
 
772
1005
  # Compute severity summary
773
1006
  severity_counts: dict[str, int] = {}
@@ -775,62 +1008,63 @@ def _handle_review_changes(arguments: dict[str, Any]) -> list[TextContent]:
775
1008
  sev = f.severity.value
776
1009
  severity_counts[sev] = severity_counts.get(sev, 0) + 1
777
1010
 
1011
+ # Match skills and load knowledge based on detected domains
1012
+ from crucible.knowledge.loader import load_knowledge_file
1013
+ from crucible.skills.loader import (
1014
+ get_knowledge_for_skills,
1015
+ load_skill,
1016
+ match_skills_for_domain,
1017
+ )
1018
+
1019
+ primary_domain = next(iter(domains_detected)) if domains_detected else Domain.UNKNOWN
1020
+ matched_skills = match_skills_for_domain(
1021
+ primary_domain, list(all_domain_tags), override=None
1022
+ )
1023
+
1024
+ skill_names = [name for name, _ in matched_skills]
1025
+ skill_content: dict[str, str] = {}
1026
+ for skill_name, _triggers in matched_skills:
1027
+ result = load_skill(skill_name)
1028
+ if result.is_ok:
1029
+ _, content = result.value
1030
+ skill_content[skill_name] = content
1031
+
1032
+ knowledge_files = get_knowledge_for_skills(skill_names)
1033
+ knowledge_content: dict[str, str] = {}
1034
+ for filename in knowledge_files:
1035
+ result = load_knowledge_file(filename)
1036
+ if result.is_ok:
1037
+ knowledge_content[filename] = result.value
1038
+
778
1039
  # Format output
779
- output = _format_change_review(context, filtered_findings, severity_counts, tool_errors)
1040
+ output = _format_change_review(
1041
+ context,
1042
+ filtered_findings,
1043
+ severity_counts,
1044
+ tool_errors,
1045
+ matched_skills,
1046
+ skill_content,
1047
+ knowledge_files,
1048
+ knowledge_content,
1049
+ )
780
1050
  return [TextContent(type="text", text=output)]
781
1051
 
782
1052
 
783
1053
  def _handle_full_review(arguments: dict[str, Any]) -> list[TextContent]:
784
- """Handle full_review tool - comprehensive code review."""
1054
+ """Handle full_review tool - comprehensive code review.
1055
+
1056
+ DEPRECATED: Use _handle_review with path parameter instead.
1057
+ """
1058
+ from crucible.review.core import run_static_analysis
1059
+
785
1060
  path = arguments.get("path", "")
786
1061
  skills_override = arguments.get("skills")
787
- # include_sage is accepted but not yet implemented
788
- # _ = arguments.get("include_sage", True)
789
1062
 
790
1063
  # 1. Detect domain
791
- domain, domain_tags = _detect_domain(path)
1064
+ domain, domain_tags = detect_domain(path)
792
1065
 
793
- # 2. Run static analysis (reuse quick_review logic)
794
- if domain == Domain.SMART_CONTRACT:
795
- default_tools = ["slither", "semgrep"]
796
- elif domain == Domain.BACKEND and "python" in domain_tags:
797
- default_tools = ["ruff", "bandit", "semgrep"]
798
- elif domain == Domain.FRONTEND:
799
- default_tools = ["semgrep"]
800
- else:
801
- default_tools = ["semgrep"]
802
-
803
- all_findings: list[ToolFinding] = []
804
- tool_errors: list[str] = []
805
-
806
- if "semgrep" in default_tools:
807
- config = get_semgrep_config(domain)
808
- result = delegate_semgrep(path, config)
809
- if result.is_ok:
810
- all_findings.extend(result.value)
811
- elif result.is_err:
812
- tool_errors.append(f"semgrep: {result.error}")
813
-
814
- if "ruff" in default_tools:
815
- result = delegate_ruff(path)
816
- if result.is_ok:
817
- all_findings.extend(result.value)
818
- elif result.is_err:
819
- tool_errors.append(f"ruff: {result.error}")
820
-
821
- if "slither" in default_tools:
822
- result = delegate_slither(path)
823
- if result.is_ok:
824
- all_findings.extend(result.value)
825
- elif result.is_err:
826
- tool_errors.append(f"slither: {result.error}")
827
-
828
- if "bandit" in default_tools:
829
- result = delegate_bandit(path)
830
- if result.is_ok:
831
- all_findings.extend(result.value)
832
- elif result.is_err:
833
- tool_errors.append(f"bandit: {result.error}")
1066
+ # 2. Run static analysis using shared core function
1067
+ all_findings, tool_errors = run_static_analysis(path, domain, domain_tags)
834
1068
 
835
1069
  # 3. Match applicable skills
836
1070
  matched_skills = match_skills_for_domain(domain, domain_tags, skills_override)
@@ -864,13 +1098,10 @@ def _handle_full_review(arguments: dict[str, Any]) -> list[TextContent]:
864
1098
  )
865
1099
 
866
1100
  # 7. Deduplicate findings
867
- all_findings = _deduplicate_findings(all_findings)
1101
+ all_findings = deduplicate_findings(all_findings)
868
1102
 
869
1103
  # 8. Compute severity summary
870
- severity_counts: dict[str, int] = {}
871
- for f in all_findings:
872
- sev = f.severity.value
873
- severity_counts[sev] = severity_counts.get(sev, 0) + 1
1104
+ severity_counts = compute_severity_counts(all_findings)
874
1105
 
875
1106
  # 8. Build result
876
1107
  review_result = FullReviewResult(
@@ -937,16 +1168,20 @@ def _handle_full_review(arguments: dict[str, Any]) -> list[TextContent]:
937
1168
  async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
938
1169
  """Handle tool calls."""
939
1170
  handlers = {
1171
+ # Unified review tool
1172
+ "review": _handle_review,
1173
+ # Deprecated tools (kept for backwards compatibility)
1174
+ "quick_review": _handle_quick_review,
1175
+ "full_review": _handle_full_review,
1176
+ "review_changes": _handle_review_changes,
1177
+ # Other tools
940
1178
  "get_principles": _handle_get_principles,
941
1179
  "load_knowledge": _handle_load_knowledge,
942
1180
  "delegate_semgrep": _handle_delegate_semgrep,
943
1181
  "delegate_ruff": _handle_delegate_ruff,
944
1182
  "delegate_slither": _handle_delegate_slither,
945
1183
  "delegate_bandit": _handle_delegate_bandit,
946
- "quick_review": _handle_quick_review,
947
1184
  "check_tools": _handle_check_tools,
948
- "review_changes": _handle_review_changes,
949
- "full_review": _handle_full_review,
950
1185
  }
951
1186
 
952
1187
  handler = handlers.get(name)