entrix 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
entrix/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ """Entrix fitness and review-trigger package."""
2
+
3
+ __version__ = "0.1.5"
entrix/__main__.py ADDED
@@ -0,0 +1,6 @@
1
+ """Allow running as `python -m entrix`."""
2
+
3
+ from entrix.cli import main
4
+
5
+ if __name__ == "__main__":
6
+ main()
entrix/cli.py ADDED
@@ -0,0 +1,634 @@
1
+ """CLI entry point — wires all modules together, feature parity with fitness.py."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import argparse
6
+ import json
7
+ import sys
8
+ from pathlib import Path
9
+
10
+ from entrix.engine import collect_changed_files, matches_changed_files, run_fitness_report
11
+ from entrix.governance import GovernancePolicy, enforce
12
+ from entrix.loaders import load_dimensions, validate_weights
13
+ from entrix.model import ExecutionScope, Metric, ResultState, Tier
14
+ from entrix.presets import get_project_preset
15
+ from entrix.reporting import report_to_dict, write_report_output
16
+ from entrix.review_trigger import (
17
+ collect_changed_files as collect_review_changed_files,
18
+ collect_diff_stats,
19
+ evaluate_review_triggers,
20
+ load_review_triggers,
21
+ )
22
+ from entrix.reporters.terminal import TerminalReporter
23
+ from entrix.runners.graph import GraphRunner
24
+
25
+
26
+ def _find_project_root() -> Path:
27
+ """Walk up from CWD to find the project root (contains package.json or Cargo.toml)."""
28
+ cwd = Path.cwd().resolve()
29
+ for parent in [cwd, *cwd.parents]:
30
+ if (parent / "package.json").exists() or (parent / "Cargo.toml").exists():
31
+ return parent
32
+ return cwd
33
+
34
+
35
def _find_fitness_dir(project_root: Path) -> Path:
    """Resolve the fitness directory for *project_root*, exiting when absent.

    The actual layout is delegated to the active project preset; a missing
    directory aborts the process with exit status 1.
    """
    candidate = get_project_preset().fitness_dir(project_root)
    if candidate.is_dir():
        return candidate
    print(f"Error: fitness directory not found at {candidate}")
    sys.exit(1)
42
+
43
+
44
def _find_review_trigger_config(project_root: Path) -> Path:
    """Resolve the default review-trigger config, exiting when absent.

    Mirrors `_find_fitness_dir`: the preset decides the path, and a missing
    file aborts the process with exit status 1.
    """
    candidate = get_project_preset().review_trigger_config(project_root)
    if candidate.is_file():
        return candidate
    print(f"Error: review-trigger config not found at {candidate}")
    sys.exit(1)
51
+
52
+
53
+ def _print_json(data: dict) -> None:
54
+ print(json.dumps(data, indent=2, ensure_ascii=False))
55
+
56
+
57
+ def _print_graph_impact(result: dict) -> None:
58
+ print(result.get("summary", "No summary available."))
59
+ print(f"Changed files: {len(result.get('changed_files', []))}")
60
+ print(f"Impacted files: {len(result.get('impacted_files', []))}")
61
+ print(f"Impacted test files: {len(result.get('impacted_test_files', []))}")
62
+ print(f"Wide blast radius: {'yes' if result.get('wide_blast_radius') else 'no'}")
63
+ if result.get("skipped_files"):
64
+ print(f"Skipped files: {', '.join(result['skipped_files'][:10])}")
65
+
66
+
67
+ def _print_graph_test_radius(result: dict) -> None:
68
+ print(result.get("summary", "No summary available."))
69
+ print(f"Changed files: {len(result.get('changed_files', []))}")
70
+ print(f"Queryable targets: {len(result.get('target_nodes', []))}")
71
+ print(f"Unique test files: {len(result.get('test_files', []))}")
72
+ print(f"Untested targets: {len(result.get('untested_targets', []))}")
73
+ if result.get("test_files"):
74
+ print("Test files:")
75
+ for file_path in result["test_files"][:20]:
76
+ print(f" - {file_path}")
77
+ if result.get("untested_targets"):
78
+ print("Untested targets:")
79
+ for target in result["untested_targets"][:20]:
80
+ print(f" - {target['qualified_name']}")
81
+
82
+
83
+ def _print_graph_query(result: dict) -> None:
84
+ print(result.get("summary", "No summary available."))
85
+ for item in result.get("results", [])[:20]:
86
+ label = item.get("qualified_name") or item.get("name") or item.get("file_path") or str(item)
87
+ print(f" - {label}")
88
+
89
+
90
+ def _print_graph_history(result: dict) -> None:
91
+ print(result.get("summary", "No summary available."))
92
+ for commit in result.get("commits", []):
93
+ print(
94
+ f"{commit['short_commit']} {commit['subject']} | "
95
+ f"files={commit['changed_file_count']} "
96
+ f"targets={commit['target_count']} "
97
+ f"tests={commit['test_file_count']} "
98
+ f"untested={commit['untested_target_count']}"
99
+ )
100
+
101
+
102
+ def _print_graph_review_context(result: dict) -> None:
103
+ print(result.get("summary", "No summary available."))
104
+ context = result.get("context", {})
105
+ tests = context.get("tests", {})
106
+ print(f"Changed files: {len(context.get('changed_files', []))}")
107
+ print(f"Impacted files: {len(context.get('impacted_files', []))}")
108
+ print(f"Queryable targets: {len(context.get('targets', []))}")
109
+ print(f"Test files: {len(tests.get('test_files', []))}")
110
+ print("Review guidance:")
111
+ for line in str(context.get("review_guidance", "")).splitlines():
112
+ print(f" {line}")
113
+ snippets = context.get("source_snippets", [])
114
+ if snippets:
115
+ print("Source snippets:")
116
+ for snippet in snippets[:10]:
117
+ suffix = " (truncated)" if snippet.get("truncated") else ""
118
+ print(f" - {snippet['file_path']}{suffix}")
119
+
120
+
121
+ def _print_review_trigger_report(report: dict) -> None:
122
+ print("REVIEW TRIGGER REPORT")
123
+ print(f"Base: {report['base']}")
124
+ stats = report.get("diff_stats", {})
125
+ print(
126
+ "Diff stats: "
127
+ f"files={stats.get('file_count', 0)} "
128
+ f"added={stats.get('added_lines', 0)} "
129
+ f"deleted={stats.get('deleted_lines', 0)}"
130
+ )
131
+ if report.get("human_review_required"):
132
+ print("Human review required: yes")
133
+ for trigger in report.get("triggers", []):
134
+ print(f"- {trigger['name']} [{trigger['severity']}]")
135
+ for reason in trigger.get("reasons", []):
136
+ print(f" reason: {reason}")
137
+ else:
138
+ print("Human review required: no")
139
+
140
+
141
def _domains_from_files(files: list[str]) -> set[str]:
    """Return the preset-defined domain names touched by *files*."""
    preset = get_project_preset()
    return preset.domains_from_files(files)
143
+
144
+
145
def _metric_domains(metric: Metric) -> set[str]:
    """Return the domain names the active preset associates with *metric*."""
    preset = get_project_preset()
    return preset.metric_domains(metric)
147
+
148
+
149
+ def _collect_run_files(args: argparse.Namespace, project_root: Path) -> list[str]:
150
+ explicit_files = args.files or []
151
+ if explicit_files:
152
+ return explicit_files
153
+ if args.changed_only:
154
+ return collect_changed_files(project_root, args.base)
155
+ return []
156
+
157
+
158
def cmd_run(args: argparse.Namespace) -> int:
    """Run fitness checks (main command).

    Resolves the project root and preset, builds a GovernancePolicy from the
    CLI flags, optionally narrows the run to changed files, prints a
    per-dimension result table, writes the JSON report when requested, and
    returns the exit code decided by `enforce`.
    """
    project_root = _find_project_root()
    # Called only for its side effect: exits the process when the fitness
    # directory is missing.
    _find_fitness_dir(project_root)
    preset = get_project_preset()

    tier_filter = Tier(args.tier) if args.tier else None
    execution_scope = ExecutionScope(args.scope) if args.scope else None
    policy = GovernancePolicy(
        tier_filter=tier_filter,
        parallel=args.parallel,
        dry_run=args.dry_run,
        verbose=args.verbose,
        min_score=args.min_score,
        execution_scope=execution_scope,
    )

    reporter = TerminalReporter(verbose=policy.verbose)
    reporter.print_header(
        dry_run=policy.dry_run,
        tier=args.tier,
        parallel=policy.parallel,
    )

    # Incremental mode: either explicit --files or git-derived changed files.
    changed_files = _collect_run_files(args, project_root)
    if args.changed_only or changed_files:
        if args.changed_only and not changed_files:
            # Nothing changed: emit an empty report and succeed.
            print("No changed files detected; skipping fitness run.")
            write_report_output(
                args.output,
                {
                    "final_score": 0.0,
                    "hard_gate_blocked": False,
                    "score_blocked": False,
                    "dimensions": [],
                },
            )
            return 0

        changed_domains = preset.domains_from_files(changed_files)
        print(
            f"\nIncremental mode: base={args.base}, changed_files={len(changed_files)}, domains={','.join(sorted(changed_domains)) or 'none'}"
        )
    # `changed_files or None`: None means "no file filter" (full metric set).
    report, dimensions = run_fitness_report(
        project_root,
        policy,
        preset,
        changed_files=changed_files or None,
        base=args.base,
    )

    if not dimensions:
        # Every metric was excluded by the tier/scope/changed-file filters;
        # still emit an (empty) report so downstream tooling has output.
        print("No metrics matched the current run filters; skipping fitness run.")
        write_report_output(
            args.output,
            {
                "final_score": 0.0,
                "hard_gate_blocked": False,
                "score_blocked": False,
                "dimensions": [],
            },
        )
        return 0

    # `dimensions` and `report.dimensions` are parallel sequences.
    for dim, ds in zip(dimensions, report.dimensions):
        print(f"\n## {dim.name.upper()} (weight: {dim.weight}%)")
        print(f" Source: {dim.source_file}")

        for result in ds.results:
            # Map each ResultState to its emoji status label.
            state_labels = {
                ResultState.PASS: "\u2705 PASS",
                ResultState.FAIL: "\u274c FAIL",
                ResultState.UNKNOWN: "\u2753 UNKNOWN",
                ResultState.SKIPPED: "\u23ed\ufe0f SKIPPED",
                ResultState.WAIVED: "\u26a0\ufe0f WAIVED",
            }
            status = state_labels.get(result.state, "\u2753 UNKNOWN")
            hard = " [HARD GATE]" if result.hard_gate else ""
            # Tier tags are only shown when the user filtered by tier.
            tier_label = f" [{result.tier.value}]" if tier_filter else ""
            print(f" - {result.metric_name}: {status}{hard}{tier_label}")

            # Show captured output for failures, but only when verbose or
            # when the failing metric is a hard gate.
            if result.state == ResultState.FAIL and (policy.verbose or result.hard_gate):
                if result.output:
                    lines = result.output.strip().split("\n")
                    # Cap at ten lines to keep the terminal report compact.
                    for line in lines[:10]:
                        print(f" > {line}")
                    if len(lines) > 10:
                        print(f" > ... ({len(lines) - 10} more lines)")

        if ds.total > 0:
            print(f" Score: {ds.score:.0f}%")

    reporter.print_footer(report)
    write_report_output(args.output, report_to_dict(report))

    return enforce(report, policy)
254
+
255
+
256
def cmd_validate(args: argparse.Namespace) -> int:
    """Check that configured dimension weights add up to exactly 100%.

    Prints each dimension with its weight and source file, then the total;
    returns 0 for a valid configuration, 1 otherwise.
    """
    dimensions = load_dimensions(_find_fitness_dir(_find_project_root()))
    valid, total = validate_weights(dimensions)

    for dimension in dimensions:
        print(f" {dimension.name}: {dimension.weight}% ({dimension.source_file})")

    print(f"\nTotal: {total}%")
    if not valid:
        print(f"\u274c Weights sum to {total}%, expected 100%")
        return 1
    print("\u2705 Weights sum to 100%")
    return 0
274
+
275
+
276
def cmd_review_trigger(args: argparse.Namespace) -> int:
    """Evaluate review-trigger rules against the current git diff.

    Returns 3 when human review is required and ``--fail-on-trigger`` was
    given; returns 0 otherwise.
    """
    project_root = _find_project_root()
    if args.config:
        config_path = Path(args.config).resolve()
    else:
        config_path = _find_review_trigger_config(project_root)

    rules = load_review_triggers(config_path)
    changed = args.files or collect_review_changed_files(project_root, args.base)
    stats = collect_diff_stats(project_root, args.base)
    report = evaluate_review_triggers(rules, changed, stats, base=args.base)

    if args.json:
        _print_json(report.to_dict())
    else:
        _print_review_trigger_report(report.to_dict())

    if args.fail_on_trigger and report.human_review_required:
        return 3
    return 0
294
+
295
+
296
def cmd_graph_build(args: argparse.Namespace) -> int:
    """Build or incrementally update the backing code graph."""
    runner = GraphRunner(_find_project_root())
    result = runner.build_graph(base=args.base, build_mode=args.build_mode)
    if args.json:
        _print_json(result)
    else:
        message = result.get("summary", result.get("reason", "No summary available."))
        print(message)
    return 1 if result.get("status") == "unavailable" else 0
305
+
306
+
307
def cmd_graph_stats(args: argparse.Namespace) -> int:
    """Show graph statistics, emitting JSON when requested."""
    result = GraphRunner(_find_project_root()).stats()
    if args.json:
        _print_json(result)
        return 1 if result.get("status") == "unavailable" else 0
    if result.get("status") == "unavailable":
        print(result.get("reason", "Graph unavailable"))
        return 1
    # Human mode still renders the raw stats as formatted JSON.
    print(json.dumps(result, indent=2, ensure_ascii=False))
    return 0
319
+
320
+
321
def cmd_graph_impact(args: argparse.Namespace) -> int:
    """Report the blast radius of changed files (or an explicit file list)."""
    runner = GraphRunner(_find_project_root())
    result = runner.analyze_impact(
        args.files or None,
        base=args.base,
        max_depth=args.depth,
        build_mode=args.build_mode,
    )
    if args.json:
        _print_json(result)
        return 1 if result.get("status") == "unavailable" else 0
    if result.get("status") == "unavailable":
        print(result.get("reason", "Graph unavailable"))
        return 1
    _print_graph_impact(result)
    return 0
338
+
339
+
340
def cmd_graph_test_radius(args: argparse.Namespace) -> int:
    """Estimate which tests fall inside the radius of the diff or given files."""
    runner = GraphRunner(_find_project_root())
    result = runner.analyze_test_radius(
        args.files or None,
        base=args.base,
        max_depth=args.depth,
        build_mode=args.build_mode,
        max_targets=args.max_targets,
    )
    if args.json:
        _print_json(result)
        return 1 if result.get("status") == "unavailable" else 0
    if result.get("status") == "unavailable":
        print(result.get("reason", "Graph unavailable"))
        return 1
    _print_graph_test_radius(result)
    return 0
358
+
359
+
360
def cmd_graph_query(args: argparse.Namespace) -> int:
    """Run a graph query pattern (e.g. callers_of, tests_for) on a target."""
    runner = GraphRunner(_find_project_root())
    result = runner.query(
        args.pattern,
        args.target,
        base=args.base,
        build_mode=args.build_mode,
    )
    if args.json:
        _print_json(result)
        return 1 if result.get("status") == "unavailable" else 0
    if result.get("status") == "unavailable":
        print(result.get("reason", "Graph unavailable"))
        return 1
    _print_graph_query(result)
    return 0
377
+
378
+
379
def cmd_graph_history(args: argparse.Namespace) -> int:
    """Estimate test radius for recent commits using the current graph."""
    runner = GraphRunner(_find_project_root())
    result = runner.analyze_history(
        count=args.count,
        ref=args.ref,
        max_depth=args.depth,
        build_mode=args.build_mode,
        max_targets=args.max_targets,
    )
    if args.json:
        _print_json(result)
        return 1 if result.get("status") == "unavailable" else 0
    if result.get("status") == "unavailable":
        print(result.get("reason", "Graph unavailable"))
        return 1
    _print_graph_history(result)
    return 0
397
+
398
+
399
def cmd_graph_review_context(args: argparse.Namespace) -> int:
    """Assemble an AI-friendly review context for the current diff or files."""
    # Positional file arguments and --files are merged; order is preserved.
    selected = (args.files_positional or []) + (args.files or [])
    runner = GraphRunner(_find_project_root())
    result = runner.review_context(
        selected or None,
        base=args.base,
        max_depth=args.depth,
        build_mode=args.build_mode,
        max_targets=args.max_targets,
        include_source=not args.no_source,
        max_files=args.max_files,
        max_lines_per_file=args.max_lines_per_file,
    )
    unavailable = result.get("status") == "unavailable"
    if args.json:
        if args.output and args.output != "-":
            payload = json.dumps(result, indent=2, ensure_ascii=False) + "\n"
            Path(args.output).write_text(payload, encoding="utf-8")
        else:
            _print_json(result)
        return 1 if unavailable else 0
    if unavailable:
        print(result.get("reason", "Graph unavailable"))
        return 1
    _print_graph_review_context(result)
    return 0
424
+
425
+
426
def _add_build_mode_argument(parser: argparse.ArgumentParser) -> None:
    """Attach the --build-mode option shared by every graph subcommand."""
    parser.add_argument(
        "--build-mode",
        choices=["auto", "full", "skip"],
        default="auto",
        help="Graph build mode",
    )


def _add_json_argument(parser: argparse.ArgumentParser) -> None:
    """Attach the --json output flag shared by several subcommands."""
    parser.add_argument("--json", action="store_true", help="Emit JSON output")


def build_parser() -> argparse.ArgumentParser:
    """Construct the `entrix` command-line argument parser.

    Subcommands: ``run`` (fitness checks), ``validate`` (weight sanity
    check), ``review-trigger`` (risky-change detection), and the ``graph``
    family of impact/test-radius analyses. Each subparser stores its
    handler in ``args.func`` via ``set_defaults``; the previously
    duplicated ``--build-mode``/``--json`` definitions are factored into
    the two module-level helpers above.

    Returns:
        The fully configured top-level ArgumentParser.
    """
    parser = argparse.ArgumentParser(
        prog="entrix",
        description="Evolutionary architecture fitness engine for change-aware verification",
    )
    subparsers = parser.add_subparsers(dest="command")

    # --- run ------------------------------------------------------------
    run_parser = subparsers.add_parser("run", help="Run fitness checks")
    run_parser.add_argument(
        "--tier", choices=["fast", "normal", "deep"], help="Run only metrics up to this tier"
    )
    run_parser.add_argument("--parallel", action="store_true", help="Run metrics in parallel")
    run_parser.add_argument("--dry-run", action="store_true", help="Show what would run")
    run_parser.add_argument("--verbose", action="store_true", help="Show output on failure")
    run_parser.add_argument(
        "--min-score",
        type=float,
        default=80.0,
        help="Minimum weighted score before the run exits non-zero",
    )
    run_parser.add_argument(
        "--scope",
        choices=["local", "ci", "staging", "prod_observation"],
        help="Run only metrics for the given execution scope",
    )
    run_parser.add_argument(
        "--output",
        help="Write JSON report to a file path, or '-' for stdout",
    )
    run_parser.add_argument(
        "--changed-only",
        action="store_true",
        help="Run only metrics relevant to changed files",
    )
    run_parser.add_argument(
        "--files",
        nargs="*",
        default=[],
        help="Explicit changed files used for incremental metric selection",
    )
    run_parser.add_argument(
        "--base",
        default="HEAD",
        help="Git base reference used by --changed-only",
    )
    run_parser.set_defaults(func=cmd_run)

    # --- validate -------------------------------------------------------
    validate_parser = subparsers.add_parser("validate", help="Check dimension weights sum to 100%%")
    validate_parser.set_defaults(func=cmd_validate)

    # --- review-trigger -------------------------------------------------
    review_trigger_parser = subparsers.add_parser(
        "review-trigger",
        help="Detect risky changes that should trigger human review",
    )
    review_trigger_parser.add_argument("files", nargs="*", help="Optional explicit changed files")
    review_trigger_parser.add_argument("--base", default="HEAD~1", help="Git diff base")
    review_trigger_parser.add_argument("--config", help="Optional review-trigger YAML config path")
    review_trigger_parser.add_argument(
        "--fail-on-trigger",
        action="store_true",
        help="Return non-zero when human review is required",
    )
    _add_json_argument(review_trigger_parser)
    review_trigger_parser.set_defaults(func=cmd_review_trigger)

    # --- graph ----------------------------------------------------------
    graph_parser = subparsers.add_parser("graph", help="Graph-backed impact and test-radius analysis")
    graph_subparsers = graph_parser.add_subparsers(dest="graph_command")

    graph_build = graph_subparsers.add_parser("build", help="Build or update the code graph")
    graph_build.add_argument("--base", default="HEAD", help="Git diff base for incremental update")
    _add_build_mode_argument(graph_build)
    _add_json_argument(graph_build)
    graph_build.set_defaults(func=cmd_graph_build)

    graph_stats = graph_subparsers.add_parser("stats", help="Show graph statistics")
    _add_json_argument(graph_stats)
    graph_stats.set_defaults(func=cmd_graph_stats)

    graph_impact = graph_subparsers.add_parser("impact", help="Analyze blast radius")
    graph_impact.add_argument("files", nargs="*", help="Optional explicit changed files")
    graph_impact.add_argument("--base", default="HEAD", help="Git diff base")
    graph_impact.add_argument("--depth", type=int, default=2, help="Traversal depth")
    _add_build_mode_argument(graph_impact)
    _add_json_argument(graph_impact)
    graph_impact.set_defaults(func=cmd_graph_impact)

    graph_test_radius = graph_subparsers.add_parser(
        "test-radius",
        help="Estimate tests affected by changed files or commits",
    )
    graph_test_radius.add_argument("files", nargs="*", help="Optional explicit changed files")
    graph_test_radius.add_argument("--base", default="HEAD", help="Git diff base")
    graph_test_radius.add_argument("--depth", type=int, default=2, help="Traversal depth")
    graph_test_radius.add_argument("--max-targets", type=int, default=25, help="Max nodes to query")
    _add_build_mode_argument(graph_test_radius)
    _add_json_argument(graph_test_radius)
    graph_test_radius.set_defaults(func=cmd_graph_test_radius)

    graph_query = graph_subparsers.add_parser("query", help="Run a graph query")
    graph_query.add_argument(
        "pattern",
        choices=[
            "callers_of",
            "callees_of",
            "imports_of",
            "importers_of",
            "children_of",
            "tests_for",
            "inheritors_of",
            "file_summary",
        ],
        help="Query pattern",
    )
    graph_query.add_argument("target", help="Qualified name or file path")
    graph_query.add_argument("--base", default="HEAD", help="Git diff base")
    _add_build_mode_argument(graph_query)
    _add_json_argument(graph_query)
    graph_query.set_defaults(func=cmd_graph_query)

    graph_history = graph_subparsers.add_parser(
        "history",
        help="Estimate test radius for recent commits using the current graph",
    )
    graph_history.add_argument("--count", type=int, default=10, help="Number of commits to inspect")
    graph_history.add_argument("--ref", default="HEAD", help="Revision to walk from")
    graph_history.add_argument("--depth", type=int, default=2, help="Traversal depth")
    graph_history.add_argument("--max-targets", type=int, default=25, help="Max nodes to query")
    _add_build_mode_argument(graph_history)
    _add_json_argument(graph_history)
    graph_history.set_defaults(func=cmd_graph_history)

    graph_review_context = graph_subparsers.add_parser(
        "review-context",
        help="Build an AI-friendly review context from the current graph",
    )
    graph_review_context.add_argument("files_positional", nargs="*", help="Optional explicit changed files")
    graph_review_context.add_argument("--files", nargs="*", default=[], help="Explicit changed files")
    graph_review_context.add_argument("--base", default="HEAD", help="Git diff base")
    graph_review_context.add_argument("--head", default="HEAD", help="Compatibility flag; currently unused")
    graph_review_context.add_argument("--depth", type=int, default=2, help="Traversal depth")
    graph_review_context.add_argument("--max-targets", type=int, default=25, help="Max nodes to query")
    graph_review_context.add_argument("--max-files", type=int, default=12, help="Max source files to include")
    graph_review_context.add_argument(
        "--max-lines-per-file",
        type=int,
        default=120,
        help="Max source lines to include per file",
    )
    graph_review_context.add_argument(
        "--no-source",
        action="store_true",
        help="Do not include source snippets in the output",
    )
    _add_build_mode_argument(graph_review_context)
    _add_json_argument(graph_review_context)
    graph_review_context.add_argument("--output", help="Write JSON output to a file path or '-' for stdout")
    graph_review_context.set_defaults(func=cmd_graph_review_context)

    return parser
615
+
616
+
617
def main() -> None:
    """CLI entry point: parse arguments, dispatch, exit with the handler's code."""
    parser = build_parser()
    args = parser.parse_args()

    # No subcommand at all: show help and exit successfully.
    if not args.command:
        parser.print_help()
        sys.exit(0)

    # Bare `entrix graph` with no graph subcommand: show graph help.
    # argparse exits from --help, so the `return` is never actually reached.
    if args.command == "graph" and not getattr(args, "graph_command", None):
        parser.parse_args(["graph", "--help"])
        return

    sys.exit(args.func(args))
631
+
632
+
633
+ if __name__ == "__main__":
634
+ main()