crucible-mcp 1.1.0__py3-none-any.whl → 1.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
crucible/server.py CHANGED
@@ -9,28 +9,26 @@ from mcp.types import TextContent, Tool
9
9
 
10
10
  from crucible.enforcement.assertions import load_assertions
11
11
  from crucible.knowledge.loader import (
12
- get_custom_knowledge_files,
13
12
  load_all_knowledge,
14
13
  load_principles,
15
14
  )
16
- from crucible.models import Domain, FullReviewResult, Severity, ToolFinding
15
+ from crucible.models import Domain, Severity, ToolFinding
17
16
  from crucible.review.core import (
18
17
  compute_severity_counts,
19
18
  deduplicate_findings,
20
19
  detect_domain,
21
20
  filter_findings_to_changes,
21
+ filter_ignored_findings,
22
22
  load_skills_and_knowledge,
23
23
  run_enforcement,
24
24
  run_static_analysis,
25
25
  )
26
- from crucible.skills import get_knowledge_for_skills, load_skill, match_skills_for_domain
27
26
  from crucible.tools.delegation import (
28
27
  check_all_tools,
29
28
  delegate_bandit,
30
29
  delegate_ruff,
31
30
  delegate_semgrep,
32
31
  delegate_slither,
33
- get_semgrep_config,
34
32
  )
35
33
  from crucible.tools.git import (
36
34
  GitContext,
@@ -135,76 +133,6 @@ async def list_tools() -> list[Tool]:
135
133
  },
136
134
  },
137
135
  ),
138
- Tool(
139
- name="quick_review",
140
- description="[DEPRECATED: use review(path, include_skills=false)] Run static analysis only.",
141
- inputSchema={
142
- "type": "object",
143
- "properties": {
144
- "path": {
145
- "type": "string",
146
- "description": "File or directory path to scan",
147
- },
148
- "tools": {
149
- "type": "array",
150
- "items": {"type": "string"},
151
- "description": "Tools to run (semgrep, ruff, slither, bandit). Default: auto-detect based on file type",
152
- },
153
- },
154
- "required": ["path"],
155
- },
156
- ),
157
- Tool(
158
- name="full_review",
159
- description="[DEPRECATED: use review(path)] Comprehensive code review with skills and knowledge.",
160
- inputSchema={
161
- "type": "object",
162
- "properties": {
163
- "path": {
164
- "type": "string",
165
- "description": "File or directory path to review",
166
- },
167
- "skills": {
168
- "type": "array",
169
- "items": {"type": "string"},
170
- "description": "Override skill selection (default: auto-detect based on domain)",
171
- },
172
- "include_sage": {
173
- "type": "boolean",
174
- "description": "Include Sage knowledge recall (not yet implemented)",
175
- "default": True,
176
- },
177
- },
178
- "required": ["path"],
179
- },
180
- ),
181
- Tool(
182
- name="review_changes",
183
- description="[DEPRECATED: use review(mode='staged')] Review git changes.",
184
- inputSchema={
185
- "type": "object",
186
- "properties": {
187
- "mode": {
188
- "type": "string",
189
- "enum": ["staged", "unstaged", "branch", "commits"],
190
- "description": "What changes to review",
191
- },
192
- "base": {
193
- "type": "string",
194
- "description": "Base branch for 'branch' mode or commit count for 'commits' mode",
195
- },
196
- "path": {
197
- "type": "string",
198
- "description": "Repository path (default: current directory)",
199
- },
200
- "include_context": {
201
- "type": "boolean",
202
- "description": "Include findings near changes (default: false)",
203
- },
204
- },
205
- "required": ["mode"],
206
- },
207
- ),
208
136
  Tool(
209
137
  name="get_principles",
210
138
  description="Load engineering principles by topic",
@@ -294,19 +222,19 @@ async def list_tools() -> list[Tool]:
294
222
  ),
295
223
  Tool(
296
224
  name="load_knowledge",
297
- description="Load knowledge/principles files without running static analysis. Useful for getting guidance on patterns, best practices, or domain-specific knowledge. Automatically includes project and user knowledge files.",
225
+ description="Load knowledge/principles files without running static analysis. Useful for getting guidance on patterns, best practices, or domain-specific knowledge. Loads all 14 bundled knowledge files by default, with project/user files overriding bundled ones.",
298
226
  inputSchema={
299
227
  "type": "object",
300
228
  "properties": {
301
229
  "files": {
302
230
  "type": "array",
303
231
  "items": {"type": "string"},
304
- "description": "Specific knowledge files to load (e.g., ['SECURITY.md', 'SMART_CONTRACT.md']). If not specified, loads all project/user knowledge files.",
232
+ "description": "Specific knowledge files to load (e.g., ['SECURITY.md', 'SMART_CONTRACT.md']). If not specified, loads all available knowledge files.",
305
233
  },
306
234
  "include_bundled": {
307
235
  "type": "boolean",
308
- "description": "Include bundled knowledge files in addition to project/user files (default: false)",
309
- "default": False,
236
+ "description": "Include bundled knowledge files (default: true). Project/user files override bundled ones with same name.",
237
+ "default": True,
310
238
  },
311
239
  "topic": {
312
240
  "type": "string",
@@ -582,6 +510,14 @@ def _handle_review(arguments: dict[str, Any]) -> list[TextContent]:
582
510
  if not changed_files:
583
511
  return [TextContent(type="text", text="No files to analyze (only deletions).")]
584
512
 
513
+ # Filter out ignored files (.crucibleignore)
514
+ from crucible.ignore import load_ignore_spec
515
+ repo_path_for_ignore = get_repo_root(path if path else os.getcwd()).value
516
+ ignore_spec = load_ignore_spec(repo_path_for_ignore)
517
+ changed_files = [f for f in changed_files if not ignore_spec.is_ignored(f, is_dir=False)]
518
+ if not changed_files:
519
+ return [TextContent(type="text", text="No files to analyze (all changes are in ignored paths).")]
520
+
585
521
  elif not path:
586
522
  return [TextContent(type="text", text="Error: Either 'path' or 'mode' is required.")]
587
523
 
@@ -619,6 +555,10 @@ def _handle_review(arguments: dict[str, Any]) -> list[TextContent]:
619
555
  # Deduplicate findings
620
556
  all_findings = deduplicate_findings(all_findings)
621
557
 
558
+ # Filter out findings from ignored paths (.crucibleignore)
559
+ base_path = path if path else os.getcwd()
560
+ all_findings = filter_ignored_findings(all_findings, base_path)
561
+
622
562
  # Run pattern and LLM assertions
623
563
  enforcement_findings = []
624
564
  enforcement_errors: list[str] = []
@@ -698,7 +638,7 @@ def _handle_get_principles(arguments: dict[str, Any]) -> list[TextContent]:
698
638
  def _handle_load_knowledge(arguments: dict[str, Any]) -> list[TextContent]:
699
639
  """Handle load_knowledge tool."""
700
640
  files = arguments.get("files")
701
- include_bundled = arguments.get("include_bundled", False)
641
+ include_bundled = arguments.get("include_bundled", True)
702
642
  topic = arguments.get("topic")
703
643
 
704
644
  # If topic specified, use load_principles
@@ -856,465 +796,17 @@ def _handle_check_tools(arguments: dict[str, Any]) -> list[TextContent]:
856
796
  return [TextContent(type="text", text="\n".join(parts))]
857
797
 
858
798
 
859
- def _handle_quick_review(arguments: dict[str, Any]) -> list[TextContent]:
860
- """Handle quick_review tool - returns findings with domain metadata."""
861
- path = arguments.get("path", "")
862
- tools = arguments.get("tools")
863
-
864
- # Internal domain detection
865
- domain, domain_tags = detect_domain(path)
866
-
867
- # Select tools based on domain
868
- if domain == Domain.SMART_CONTRACT:
869
- default_tools = ["slither", "semgrep"]
870
- elif domain == Domain.BACKEND and "python" in domain_tags:
871
- default_tools = ["ruff", "bandit", "semgrep"]
872
- elif domain == Domain.FRONTEND:
873
- default_tools = ["semgrep"]
874
- else:
875
- default_tools = ["semgrep"]
876
-
877
- if not tools:
878
- tools = default_tools
879
-
880
- # Collect all findings
881
- all_findings: list[ToolFinding] = []
882
- tool_results: list[str] = []
883
-
884
- if "semgrep" in tools:
885
- config = get_semgrep_config(domain)
886
- result = delegate_semgrep(path, config)
887
- if result.is_ok:
888
- all_findings.extend(result.value)
889
- tool_results.append(f"## Semgrep\n{_format_findings(result.value)}")
890
- else:
891
- tool_results.append(f"## Semgrep\nError: {result.error}")
892
-
893
- if "ruff" in tools:
894
- result = delegate_ruff(path)
895
- if result.is_ok:
896
- all_findings.extend(result.value)
897
- tool_results.append(f"## Ruff\n{_format_findings(result.value)}")
898
- else:
899
- tool_results.append(f"## Ruff\nError: {result.error}")
900
-
901
- if "slither" in tools:
902
- result = delegate_slither(path)
903
- if result.is_ok:
904
- all_findings.extend(result.value)
905
- tool_results.append(f"## Slither\n{_format_findings(result.value)}")
906
- else:
907
- tool_results.append(f"## Slither\nError: {result.error}")
908
-
909
- if "bandit" in tools:
910
- result = delegate_bandit(path)
911
- if result.is_ok:
912
- all_findings.extend(result.value)
913
- tool_results.append(f"## Bandit\n{_format_findings(result.value)}")
914
- else:
915
- tool_results.append(f"## Bandit\nError: {result.error}")
916
-
917
- # Deduplicate findings
918
- all_findings = deduplicate_findings(all_findings)
919
-
920
- # Compute severity summary
921
- severity_counts: dict[str, int] = {}
922
- for f in all_findings:
923
- sev = f.severity.value
924
- severity_counts[sev] = severity_counts.get(sev, 0) + 1
925
-
926
- # Build structured output
927
- output_parts = [
928
- "# Review Results\n",
929
- f"**Domains detected:** {', '.join(domain_tags)}",
930
- f"**Severity summary:** {severity_counts or 'No findings'}\n",
931
- "\n".join(tool_results),
932
- ]
933
-
934
- return [TextContent(type="text", text="\n".join(output_parts))]
935
-
936
-
937
- def _format_change_review(
938
- context: GitContext,
939
- findings: list[ToolFinding],
940
- severity_counts: dict[str, int],
941
- tool_errors: list[str] | None = None,
942
- matched_skills: list[tuple[str, list[str]]] | None = None,
943
- skill_content: dict[str, str] | None = None,
944
- knowledge_files: set[str] | None = None,
945
- knowledge_content: dict[str, str] | None = None,
946
- ) -> str:
947
- """Format change review output."""
948
- parts: list[str] = ["# Change Review\n"]
949
- parts.append(f"**Mode:** {context.mode}")
950
- if context.base_ref:
951
- parts.append(f"**Base:** {context.base_ref}")
952
- parts.append("")
953
-
954
- # Files changed
955
- added = [c for c in context.changes if c.status == "A"]
956
- modified = [c for c in context.changes if c.status == "M"]
957
- deleted = [c for c in context.changes if c.status == "D"]
958
- renamed = [c for c in context.changes if c.status == "R"]
959
-
960
- total = len(context.changes)
961
- parts.append(f"## Files Changed ({total})")
962
- for c in added:
963
- parts.append(f"- `+` {c.path}")
964
- for c in modified:
965
- parts.append(f"- `~` {c.path}")
966
- for c in renamed:
967
- parts.append(f"- `R` {c.old_path} -> {c.path}")
968
- for c in deleted:
969
- parts.append(f"- `-` {c.path}")
970
- parts.append("")
971
-
972
- # Commit messages (if available)
973
- if context.commit_messages:
974
- parts.append("## Commits")
975
- for msg in context.commit_messages:
976
- parts.append(f"- {msg}")
977
- parts.append("")
978
-
979
- # Applicable skills
980
- if matched_skills:
981
- parts.append("## Applicable Skills\n")
982
- for skill_name, triggers in matched_skills:
983
- parts.append(f"- **{skill_name}**: matched on {', '.join(triggers)}")
984
- parts.append("")
985
-
986
- # Knowledge loaded
987
- if knowledge_files:
988
- parts.append("## Knowledge Loaded\n")
989
- parts.append(f"Files: {', '.join(sorted(knowledge_files))}")
990
- parts.append("")
991
-
992
- # Tool errors (if any)
993
- if tool_errors:
994
- parts.append("## Tool Errors\n")
995
- for error in tool_errors:
996
- parts.append(f"- {error}")
997
- parts.append("")
998
-
999
- # Findings
1000
- if findings:
1001
- parts.append("## Findings in Changed Code\n")
1002
- parts.append(f"**Summary:** {severity_counts}\n")
1003
- parts.append(_format_findings(findings))
1004
- else:
1005
- parts.append("## Findings in Changed Code\n")
1006
- parts.append("No issues found in changed code.")
1007
- parts.append("")
1008
-
1009
- # Review checklists from skills
1010
- if skill_content:
1011
- parts.append("---\n")
1012
- parts.append("## Review Checklists\n")
1013
- for skill_name, content in skill_content.items():
1014
- parts.append(f"### {skill_name}\n")
1015
- parts.append(content)
1016
- parts.append("")
1017
-
1018
- # Knowledge reference
1019
- if knowledge_content:
1020
- parts.append("---\n")
1021
- parts.append("## Principles Reference\n")
1022
- for filename, content in sorted(knowledge_content.items()):
1023
- parts.append(f"### {filename}\n")
1024
- parts.append(content)
1025
- parts.append("")
1026
-
1027
- return "\n".join(parts)
1028
-
1029
-
1030
- def _handle_review_changes(arguments: dict[str, Any]) -> list[TextContent]:
1031
- """Handle review_changes tool - review git changes."""
1032
- import os
1033
-
1034
- mode = arguments.get("mode", "staged")
1035
- base = arguments.get("base")
1036
- path = arguments.get("path", os.getcwd())
1037
- include_context = arguments.get("include_context", False)
1038
-
1039
- # Get repo root
1040
- root_result = get_repo_root(path)
1041
- if root_result.is_err:
1042
- return [TextContent(type="text", text=f"Error: {root_result.error}")]
1043
-
1044
- repo_path = root_result.value
1045
-
1046
- # Get git context based on mode
1047
- if mode == "staged":
1048
- context_result = get_staged_changes(repo_path)
1049
- elif mode == "unstaged":
1050
- context_result = get_unstaged_changes(repo_path)
1051
- elif mode == "branch":
1052
- base_branch = base if base else "main"
1053
- context_result = get_branch_diff(repo_path, base_branch)
1054
- elif mode == "commits":
1055
- try:
1056
- count = int(base) if base else 1
1057
- except ValueError:
1058
- return [TextContent(type="text", text=f"Error: Invalid commit count '{base}'")]
1059
- context_result = get_recent_commits(repo_path, count)
1060
- else:
1061
- return [TextContent(type="text", text=f"Error: Unknown mode '{mode}'")]
1062
-
1063
- if context_result.is_err:
1064
- return [TextContent(type="text", text=f"Error: {context_result.error}")]
1065
-
1066
- context = context_result.value
1067
-
1068
- # Check if there are any changes
1069
- if not context.changes:
1070
- if mode == "staged":
1071
- return [TextContent(type="text", text="No changes to review. Stage files with `git add` first.")]
1072
- elif mode == "unstaged":
1073
- return [TextContent(type="text", text="No unstaged changes to review.")]
1074
- else:
1075
- return [TextContent(type="text", text="No changes found.")]
1076
-
1077
- # Get changed files (excluding deleted)
1078
- changed_files = get_changed_files(context)
1079
- if not changed_files:
1080
- return [TextContent(type="text", text="No files to analyze (only deletions).")]
1081
-
1082
- # Run analysis on changed files
1083
- all_findings: list[ToolFinding] = []
1084
- tool_errors: list[str] = []
1085
- domains_detected: set[Domain] = set()
1086
- all_domain_tags: set[str] = set()
1087
-
1088
- for file_path in changed_files:
1089
- full_path = f"{repo_path}/{file_path}"
1090
-
1091
- # Detect domain for this file
1092
- domain, domain_tags = detect_domain(file_path)
1093
- domains_detected.add(domain)
1094
- all_domain_tags.update(domain_tags)
1095
-
1096
- # Select tools based on domain
1097
- if domain == Domain.SMART_CONTRACT:
1098
- tools = ["slither", "semgrep"]
1099
- elif domain == Domain.BACKEND and "python" in domain_tags:
1100
- tools = ["ruff", "bandit", "semgrep"]
1101
- elif domain == Domain.FRONTEND:
1102
- tools = ["semgrep"]
1103
- else:
1104
- tools = ["semgrep"]
1105
-
1106
- # Run tools
1107
- if "semgrep" in tools:
1108
- config = get_semgrep_config(domain)
1109
- result = delegate_semgrep(full_path, config)
1110
- if result.is_ok:
1111
- all_findings.extend(result.value)
1112
- elif result.is_err:
1113
- tool_errors.append(f"semgrep ({file_path}): {result.error}")
1114
-
1115
- if "ruff" in tools:
1116
- result = delegate_ruff(full_path)
1117
- if result.is_ok:
1118
- all_findings.extend(result.value)
1119
- elif result.is_err:
1120
- tool_errors.append(f"ruff ({file_path}): {result.error}")
1121
-
1122
- if "slither" in tools:
1123
- result = delegate_slither(full_path)
1124
- if result.is_ok:
1125
- all_findings.extend(result.value)
1126
- elif result.is_err:
1127
- tool_errors.append(f"slither ({file_path}): {result.error}")
1128
-
1129
- if "bandit" in tools:
1130
- result = delegate_bandit(full_path)
1131
- if result.is_ok:
1132
- all_findings.extend(result.value)
1133
- elif result.is_err:
1134
- tool_errors.append(f"bandit ({file_path}): {result.error}")
1135
-
1136
- # Filter findings to changed lines
1137
- filtered_findings = filter_findings_to_changes(all_findings, context, include_context)
1138
-
1139
- # Deduplicate findings
1140
- filtered_findings = deduplicate_findings(filtered_findings)
1141
-
1142
- # Compute severity summary
1143
- severity_counts: dict[str, int] = {}
1144
- for f in filtered_findings:
1145
- sev = f.severity.value
1146
- severity_counts[sev] = severity_counts.get(sev, 0) + 1
1147
-
1148
- # Match skills and load knowledge based on detected domains
1149
- from crucible.knowledge.loader import load_knowledge_file
1150
- from crucible.skills.loader import (
1151
- get_knowledge_for_skills,
1152
- load_skill,
1153
- match_skills_for_domain,
1154
- )
1155
-
1156
- primary_domain = next(iter(domains_detected)) if domains_detected else Domain.UNKNOWN
1157
- matched_skills = match_skills_for_domain(
1158
- primary_domain, list(all_domain_tags), override=None
1159
- )
1160
-
1161
- skill_names = [name for name, _ in matched_skills]
1162
- skill_content: dict[str, str] = {}
1163
- for skill_name, _triggers in matched_skills:
1164
- result = load_skill(skill_name)
1165
- if result.is_ok:
1166
- _, content = result.value
1167
- skill_content[skill_name] = content
1168
-
1169
- knowledge_files = get_knowledge_for_skills(skill_names)
1170
- knowledge_content: dict[str, str] = {}
1171
- for filename in knowledge_files:
1172
- result = load_knowledge_file(filename)
1173
- if result.is_ok:
1174
- knowledge_content[filename] = result.value
1175
-
1176
- # Format output
1177
- output = _format_change_review(
1178
- context,
1179
- filtered_findings,
1180
- severity_counts,
1181
- tool_errors,
1182
- matched_skills,
1183
- skill_content,
1184
- knowledge_files,
1185
- knowledge_content,
1186
- )
1187
- return [TextContent(type="text", text=output)]
1188
-
1189
-
1190
- def _handle_full_review(arguments: dict[str, Any]) -> list[TextContent]:
1191
- """Handle full_review tool - comprehensive code review.
1192
-
1193
- DEPRECATED: Use _handle_review with path parameter instead.
1194
- """
1195
- from crucible.review.core import run_static_analysis
1196
-
1197
- path = arguments.get("path", "")
1198
- skills_override = arguments.get("skills")
1199
-
1200
- # 1. Detect domain
1201
- domain, domain_tags = detect_domain(path)
1202
-
1203
- # 2. Run static analysis using shared core function
1204
- all_findings, tool_errors = run_static_analysis(path, domain, domain_tags)
1205
-
1206
- # 3. Match applicable skills
1207
- matched_skills = match_skills_for_domain(domain, domain_tags, skills_override)
1208
- skill_names = [name for name, _ in matched_skills]
1209
- skill_triggers: dict[str, tuple[str, ...]] = {
1210
- name: tuple(triggers) for name, triggers in matched_skills
1211
- }
1212
-
1213
- # 4. Load skill content (checklists/prompts)
1214
- skill_contents: dict[str, str] = {}
1215
- for skill_name in skill_names:
1216
- result = load_skill(skill_name)
1217
- if result.is_ok:
1218
- _, content = result.value
1219
- # Extract content after frontmatter
1220
- if "\n---\n" in content:
1221
- skill_contents[skill_name] = content.split("\n---\n", 1)[1].strip()
1222
- else:
1223
- skill_contents[skill_name] = content
1224
-
1225
- # 5. Collect knowledge files from matched skills + custom project/user knowledge
1226
- skill_knowledge = get_knowledge_for_skills(skill_names)
1227
- custom_knowledge = get_custom_knowledge_files()
1228
- # Merge: custom knowledge always included, plus skill-referenced files
1229
- knowledge_files = skill_knowledge | custom_knowledge
1230
-
1231
- # 6. Load knowledge content
1232
- loaded_files, principles_content = load_all_knowledge(
1233
- include_bundled=False,
1234
- filenames=knowledge_files,
1235
- )
1236
-
1237
- # 7. Deduplicate findings
1238
- all_findings = deduplicate_findings(all_findings)
1239
-
1240
- # 8. Compute severity summary
1241
- severity_counts = compute_severity_counts(all_findings)
1242
-
1243
- # 8. Build result
1244
- review_result = FullReviewResult(
1245
- domains_detected=tuple(domain_tags),
1246
- severity_summary=severity_counts,
1247
- findings=tuple(all_findings),
1248
- applicable_skills=tuple(skill_names),
1249
- skill_triggers_matched=skill_triggers,
1250
- principles_loaded=tuple(loaded_files),
1251
- principles_content=principles_content,
1252
- sage_knowledge=None, # Not implemented yet
1253
- sage_query_used=None, # Not implemented yet
1254
- )
1255
-
1256
- # 8. Format output
1257
- output_parts = [
1258
- "# Full Review Results\n",
1259
- f"**Path:** `{path}`",
1260
- f"**Domains detected:** {', '.join(review_result.domains_detected)}",
1261
- f"**Severity summary:** {review_result.severity_summary or 'No findings'}\n",
1262
- ]
1263
-
1264
- if tool_errors:
1265
- output_parts.append("## Tool Errors\n")
1266
- for error in tool_errors:
1267
- output_parts.append(f"- {error}")
1268
- output_parts.append("")
1269
-
1270
- output_parts.append("## Applicable Skills\n")
1271
- if review_result.applicable_skills:
1272
- for skill in review_result.applicable_skills:
1273
- triggers = review_result.skill_triggers_matched.get(skill, ())
1274
- output_parts.append(f"- **{skill}**: matched on {', '.join(triggers)}")
1275
- else:
1276
- output_parts.append("- No skills matched")
1277
- output_parts.append("")
1278
-
1279
- # Include skill checklists
1280
- if skill_contents:
1281
- output_parts.append("## Review Checklists\n")
1282
- for skill_name, content in skill_contents.items():
1283
- output_parts.append(f"### {skill_name}\n")
1284
- output_parts.append(content)
1285
- output_parts.append("")
1286
-
1287
- output_parts.append("## Knowledge Loaded\n")
1288
- if review_result.principles_loaded:
1289
- output_parts.append(f"Files: {', '.join(review_result.principles_loaded)}\n")
1290
- else:
1291
- output_parts.append("No knowledge files loaded.\n")
1292
-
1293
- output_parts.append("## Static Analysis Findings\n")
1294
- output_parts.append(_format_findings(list(review_result.findings)))
1295
-
1296
- if review_result.principles_content:
1297
- output_parts.append("\n---\n")
1298
- output_parts.append("## Principles Reference\n")
1299
- output_parts.append(review_result.principles_content)
1300
-
1301
- return [TextContent(type="text", text="\n".join(output_parts))]
1302
-
1303
-
1304
799
  @server.call_tool() # type: ignore[misc]
1305
800
  async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
1306
801
  """Handle tool calls."""
1307
802
  handlers = {
1308
803
  # Unified review tool
1309
804
  "review": _handle_review,
1310
- # Deprecated tools (kept for backwards compatibility)
1311
- "quick_review": _handle_quick_review,
1312
- "full_review": _handle_full_review,
1313
- "review_changes": _handle_review_changes,
1314
805
  # Context injection tools (call at session start)
1315
806
  "get_assertions": _handle_get_assertions,
1316
807
  "get_principles": _handle_get_principles,
1317
808
  "load_knowledge": _handle_load_knowledge,
809
+ # Direct tool access
1318
810
  "delegate_semgrep": _handle_delegate_semgrep,
1319
811
  "delegate_ruff": _handle_delegate_ruff,
1320
812
  "delegate_slither": _handle_delegate_slither,