gabion 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gabion/__init__.py +5 -0
- gabion/__main__.py +11 -0
- gabion/analysis/__init__.py +37 -0
- gabion/analysis/dataflow_audit.py +3173 -0
- gabion/analysis/engine.py +8 -0
- gabion/analysis/model.py +45 -0
- gabion/analysis/visitors.py +402 -0
- gabion/cli.py +503 -0
- gabion/config.py +45 -0
- gabion/lsp_client.py +111 -0
- gabion/refactor/__init__.py +4 -0
- gabion/refactor/engine.py +726 -0
- gabion/refactor/model.py +37 -0
- gabion/schema.py +84 -0
- gabion/server.py +447 -0
- gabion/synthesis/__init__.py +26 -0
- gabion/synthesis/merge.py +41 -0
- gabion/synthesis/model.py +41 -0
- gabion/synthesis/naming.py +45 -0
- gabion/synthesis/protocols.py +74 -0
- gabion/synthesis/schedule.py +87 -0
- gabion-0.1.0.dist-info/METADATA +250 -0
- gabion-0.1.0.dist-info/RECORD +26 -0
- gabion-0.1.0.dist-info/WHEEL +4 -0
- gabion-0.1.0.dist-info/entry_points.txt +3 -0
- gabion-0.1.0.dist-info/licenses/LICENSE +190 -0
gabion/cli.py
ADDED
|
@@ -0,0 +1,503 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass
|
|
4
|
+
from datetime import datetime, timezone
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import List, Optional, Any
|
|
7
|
+
import argparse
|
|
8
|
+
import json
|
|
9
|
+
import subprocess
|
|
10
|
+
import sys
|
|
11
|
+
|
|
12
|
+
import typer
|
|
13
|
+
|
|
14
|
+
DATAFLOW_COMMAND = "gabion.dataflowAudit"
|
|
15
|
+
SYNTHESIS_COMMAND = "gabion.synthesisPlan"
|
|
16
|
+
REFACTOR_COMMAND = "gabion.refactorProtocol"
|
|
17
|
+
from gabion.lsp_client import CommandRequest, run_command
|
|
18
|
+
app = typer.Typer(add_completion=False)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
@dataclass(frozen=True)
class DataflowAuditRequest:
    """Bundle of inputs for the ``dataflow-audit`` command."""

    # Typer invocation context; ctx.args carries pass-through CLI tokens
    # collected via allow_extra_args/ignore_unknown_options.
    ctx: typer.Context
    # Explicit argument list; combined with ctx.args by _dataflow_audit.
    args: List[str] | None = None
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def _find_repo_root() -> Path:
    """Return the assumed repository root (two levels above this file).

    NOTE(review): this assumes a source-checkout layout
    (``repo/.../gabion/cli.py``); for an installed wheel the result is
    not a repository. Callers (docflow_audit) verify the expected
    script exists before using it.
    """
    return Path(__file__).resolve().parents[2]
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
@app.command()
def check(
    paths: List[Path] = typer.Argument(None),
    report: Optional[Path] = typer.Option(None, "--report"),
    fail_on_violations: bool = typer.Option(True, "--fail-on-violations/--no-fail-on-violations"),
    root: Path = typer.Option(Path("."), "--root"),
    config: Optional[Path] = typer.Option(None, "--config"),
    baseline: Optional[Path] = typer.Option(
        None, "--baseline", help="Baseline file of allowed violations."
    ),
    baseline_write: bool = typer.Option(
        False, "--baseline-write", help="Write current violations to baseline."
    ),
    exclude: Optional[List[str]] = typer.Option(None, "--exclude"),
    ignore_params: Optional[str] = typer.Option(None, "--ignore-params"),
    transparent_decorators: Optional[str] = typer.Option(
        None, "--transparent-decorators"
    ),
    allow_external: Optional[bool] = typer.Option(
        None, "--allow-external/--no-allow-external"
    ),
    strictness: Optional[str] = typer.Option(None, "--strictness"),
    fail_on_type_ambiguities: bool = typer.Option(
        True, "--fail-on-type-ambiguities/--no-fail-on-type-ambiguities"
    ),
) -> None:
    """Run the dataflow grammar audit with strict defaults.

    Builds a JSON payload from the CLI options, executes the
    ``gabion.dataflowAudit`` command on the LSP server, and exits with
    the server-reported exit code. ``None`` payload entries mark
    options the user did not supply.
    """
    # Default to auditing the current directory.
    if not paths:
        paths = [Path(".")]
    # --exclude may be repeated and each value may be comma-separated.
    exclude_dirs: list[str] | None = None
    if exclude is not None:
        exclude_dirs = []
        for entry in exclude:
            exclude_dirs.extend([part.strip() for part in entry.split(",") if part.strip()])
    # Comma-separated list of parameter names to ignore.
    ignore_list: list[str] | None = None
    if ignore_params is not None:
        ignore_list = [p.strip() for p in ignore_params.split(",") if p.strip()]
    # Comma-separated list of decorators treated as transparent.
    transparent_list: list[str] | None = None
    if transparent_decorators is not None:
        transparent_list = [
            p.strip() for p in transparent_decorators.split(",") if p.strip()
        ]
    if strictness is not None and strictness not in {"high", "low"}:
        raise typer.BadParameter("strictness must be 'high' or 'low'")
    payload = {
        "paths": [str(p) for p in paths],
        "report": str(report) if report is not None else None,
        "fail_on_violations": fail_on_violations,
        "fail_on_type_ambiguities": fail_on_type_ambiguities,
        "root": str(root),
        "config": str(config) if config is not None else None,
        "baseline": str(baseline) if baseline is not None else None,
        # --baseline-write is only meaningful when --baseline was given.
        "baseline_write": baseline_write if baseline is not None else None,
        "exclude": exclude_dirs,
        "ignore_params": ignore_list,
        "transparent_decorators": transparent_list,
        "allow_external": allow_external,
        "strictness": strictness,
        # Failing on type ambiguities implies running the type audit.
        "type_audit": True if fail_on_type_ambiguities else None,
    }
    result = run_command(CommandRequest(DATAFLOW_COMMAND, [payload]))
    raise typer.Exit(code=int(result.get("exit_code", 0)))
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def _dataflow_audit(
    request: "DataflowAuditRequest",
) -> None:
    """Run the dataflow grammar audit with explicit options.

    Combines ``request.args`` with the pass-through tokens Typer
    collected in ``request.ctx.args``, parses them with the argparse
    front-end, forwards the resulting payload to the LSP server,
    echoes any artifacts requested on stdout (``-`` targets), and
    exits with the server-reported exit code.
    """
    # Explicit args first, then whatever Typer collected as extras.
    # (A dead `if not argv: argv = []` branch was removed: argv is
    # already a list, and replacing an empty list with [] is a no-op.)
    argv = list(request.args or []) + list(request.ctx.args)
    parser = dataflow_cli_parser()
    opts = parser.parse_args(argv)
    # --exclude may be repeated and each value may be comma-separated.
    exclude_dirs: list[str] | None = None
    if opts.exclude is not None:
        exclude_dirs = []
        for entry in opts.exclude:
            exclude_dirs.extend([part.strip() for part in entry.split(",") if part.strip()])
    # Comma-separated list of parameter names to ignore.
    ignore_list: list[str] | None = None
    if opts.ignore_params is not None:
        ignore_list = [p.strip() for p in opts.ignore_params.split(",") if p.strip()]
    # Comma-separated list of decorators treated as transparent.
    transparent_list: list[str] | None = None
    if opts.transparent_decorators is not None:
        transparent_list = [
            p.strip() for p in opts.transparent_decorators.split(",") if p.strip()
        ]
    payload: dict[str, Any] = {
        "paths": [str(p) for p in opts.paths],
        "root": str(opts.root),
        "config": str(opts.config) if opts.config is not None else None,
        "report": str(opts.report) if opts.report else None,
        "dot": opts.dot,
        "fail_on_violations": opts.fail_on_violations,
        "baseline": str(opts.baseline) if opts.baseline else None,
        # --baseline-write is only meaningful when --baseline was given.
        "baseline_write": opts.baseline_write if opts.baseline else None,
        "no_recursive": opts.no_recursive,
        "max_components": opts.max_components,
        "type_audit": opts.type_audit,
        "type_audit_report": opts.type_audit_report,
        "type_audit_max": opts.type_audit_max,
        "exclude": exclude_dirs,
        "ignore_params": ignore_list,
        "transparent_decorators": transparent_list,
        "allow_external": opts.allow_external,
        "strictness": opts.strictness,
        "synthesis_plan": str(opts.synthesis_plan) if opts.synthesis_plan else None,
        "synthesis_report": opts.synthesis_report,
        "synthesis_max_tier": opts.synthesis_max_tier,
        "synthesis_min_bundle_size": opts.synthesis_min_bundle_size,
        "synthesis_allow_singletons": opts.synthesis_allow_singletons,
        "synthesis_protocols": str(opts.synthesis_protocols)
        if opts.synthesis_protocols
        else None,
        "synthesis_protocols_kind": opts.synthesis_protocols_kind,
        "refactor_plan": opts.refactor_plan,
        "refactor_plan_json": str(opts.refactor_plan_json)
        if opts.refactor_plan_json
        else None,
    }
    result = run_command(CommandRequest(DATAFLOW_COMMAND, [payload]))
    if opts.type_audit:
        # Echo type-audit findings, capped at --type-audit-max entries.
        suggestions = result.get("type_suggestions", [])
        ambiguities = result.get("type_ambiguities", [])
        if suggestions:
            typer.echo("Type tightening candidates:")
            for line in suggestions[: opts.type_audit_max]:
                typer.echo(f"- {line}")
        if ambiguities:
            typer.echo("Type ambiguities (conflicting downstream expectations):")
            for line in ambiguities[: opts.type_audit_max]:
                typer.echo(f"- {line}")
    # "-" targets mean "write to stdout" — the server returns the
    # rendered artifact in the result instead of writing a file.
    if opts.dot == "-" and "dot" in result:
        typer.echo(result["dot"])
    if opts.synthesis_plan == "-" and "synthesis_plan" in result:
        typer.echo(json.dumps(result["synthesis_plan"], indent=2, sort_keys=True))
    if opts.synthesis_protocols == "-" and "synthesis_protocols" in result:
        typer.echo(result["synthesis_protocols"])
    if opts.refactor_plan_json == "-" and "refactor_plan" in result:
        typer.echo(json.dumps(result["refactor_plan"], indent=2, sort_keys=True))
    raise typer.Exit(code=int(result.get("exit_code", 0)))
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
@app.command(
    "dataflow-audit",
    context_settings={"allow_extra_args": True, "ignore_unknown_options": True},
)
def dataflow_audit(
    ctx: typer.Context,
    args: List[str] = typer.Argument(None),
) -> None:
    """Forward raw CLI tokens to the argparse-based dataflow audit.

    Unknown options are accepted and passed through (see
    context_settings) so the full argparse surface defined in
    dataflow_cli_parser is reachable from this Typer command.
    """
    request = DataflowAuditRequest(ctx=ctx, args=args)
    _dataflow_audit(request)
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
def dataflow_cli_parser() -> argparse.ArgumentParser:
    """Build the argparse front-end for the dataflow audit CLI."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument

    # Inputs and audit configuration.
    add("paths", nargs="+")
    add("--root", default=".")
    add("--config", default=None)
    add("--baseline", default=None, help="Baseline file for violations.")
    add("--baseline-write", action="store_true",
        help="Write current violations to baseline file.")
    add("--exclude", action="append", default=None)
    add("--ignore-params", default=None)
    add("--transparent-decorators", default=None,
        help="Comma-separated decorator names treated as transparent.")
    add("--allow-external", action=argparse.BooleanOptionalAction, default=None)
    add("--strictness", choices=["high", "low"], default=None)
    add("--no-recursive", action="store_true")

    # Report outputs.
    add("--dot", default=None, help="Write DOT graph to file or '-' for stdout.")
    add("--report", default=None, help="Write Markdown report (mermaid) to file.")
    add("--max-components", type=int, default=10, help="Max components in report.")

    # Type-flow audit.
    add("--type-audit", action="store_true",
        help="Emit type-tightening suggestions based on downstream annotations.")
    add("--type-audit-max", type=int, default=50,
        help="Max type-tightening entries to print.")
    add("--type-audit-report", action="store_true",
        help="Include type-flow audit summary in the markdown report.")

    # Failure policy.
    add("--fail-on-type-ambiguities", action="store_true",
        help="Exit non-zero if type ambiguities are detected.")
    add("--fail-on-violations", action="store_true",
        help="Exit non-zero if undocumented/undeclared bundle violations are detected.")

    # Synthesis outputs.
    add("--synthesis-plan", default=None,
        help="Write synthesis plan JSON to file or '-' for stdout.")
    add("--synthesis-report", action="store_true",
        help="Include synthesis plan summary in the markdown report.")
    add("--synthesis-protocols", default=None,
        help="Write protocol/dataclass stubs to file or '-' for stdout.")
    add("--synthesis-protocols-kind", choices=["dataclass", "protocol"],
        default="dataclass",
        help="Emit dataclass or typing.Protocol stubs (default: dataclass).")
    add("--synthesis-max-tier", type=int, default=2,
        help="Max tier to include in synthesis plan.")
    add("--synthesis-min-bundle-size", type=int, default=2,
        help="Min bundle size to include in synthesis plan.")
    add("--synthesis-allow-singletons", action="store_true",
        help="Allow single-field bundles in synthesis plan.")

    # Refactor plan outputs.
    add("--refactor-plan", action="store_true",
        help="Include refactoring plan summary in the markdown report.")
    add("--refactor-plan-json", default=None,
        help="Write refactoring plan JSON to file or '-' for stdout.")
    return parser
|
|
288
|
+
|
|
289
|
+
|
|
290
|
+
@app.command("docflow-audit")
def docflow_audit(
    root: Path = typer.Option(Path("."), "--root"),
    fail_on_violations: bool = typer.Option(
        False, "--fail-on-violations/--no-fail-on-violations"
    ),
) -> None:
    """Run the docflow audit (governance docs only)."""
    # The audit lives as a script in the repository, not in the package;
    # bail out with a clear message when running outside a checkout.
    script = _find_repo_root() / "scripts" / "docflow_audit.py"
    if not script.exists():
        typer.secho(
            "docflow audit script not found; repository layout required",
            err=True,
            fg=typer.colors.RED,
        )
        raise typer.Exit(code=2)
    command = [sys.executable, str(script), "--root", str(root)]
    if fail_on_violations:
        command.append("--fail-on-violations")
    # Propagate the script's exit code verbatim.
    completed = subprocess.run(command, check=False)
    raise typer.Exit(code=completed.returncode)
|
|
312
|
+
|
|
313
|
+
|
|
314
|
+
@app.command("synth")
def synth(
    paths: List[Path] = typer.Argument(None),
    root: Path = typer.Option(Path("."), "--root"),
    out_dir: Path = typer.Option(Path("artifacts/synthesis"), "--out-dir"),
    no_timestamp: bool = typer.Option(False, "--no-timestamp"),
    config: Optional[Path] = typer.Option(None, "--config"),
    exclude: Optional[List[str]] = typer.Option(None, "--exclude"),
    ignore_params: Optional[str] = typer.Option(None, "--ignore-params"),
    transparent_decorators: Optional[str] = typer.Option(
        None, "--transparent-decorators"
    ),
    allow_external: Optional[bool] = typer.Option(
        None, "--allow-external/--no-allow-external"
    ),
    strictness: Optional[str] = typer.Option(None, "--strictness"),
    no_recursive: bool = typer.Option(False, "--no-recursive"),
    max_components: int = typer.Option(10, "--max-components"),
    type_audit_report: bool = typer.Option(
        True, "--type-audit-report/--no-type-audit-report"
    ),
    type_audit_max: int = typer.Option(50, "--type-audit-max"),
    synthesis_max_tier: int = typer.Option(2, "--synthesis-max-tier"),
    synthesis_min_bundle_size: int = typer.Option(2, "--synthesis-min-bundle-size"),
    synthesis_allow_singletons: bool = typer.Option(
        False, "--synthesis-allow-singletons"
    ),
    synthesis_protocols_kind: str = typer.Option(
        "dataclass", "--synthesis-protocols-kind"
    ),
    refactor_plan: bool = typer.Option(True, "--refactor-plan/--no-refactor-plan"),
    fail_on_violations: bool = typer.Option(
        False, "--fail-on-violations/--no-fail-on-violations"
    ),
) -> None:
    """Run the dataflow audit and emit synthesis outputs (prototype).

    Writes a snapshot of all artifacts (report, DOT graph, synthesis
    plan, protocol stubs, optional refactor plan) under ``out_dir``,
    in a UTC-timestamped subdirectory unless ``--no-timestamp`` is
    given, and exits with the server-reported exit code.
    """
    # Default to auditing the current directory.
    if not paths:
        paths = [Path(".")]
    # --exclude may be repeated and each value may be comma-separated.
    exclude_dirs: list[str] | None = None
    if exclude is not None:
        exclude_dirs = []
        for entry in exclude:
            exclude_dirs.extend([part.strip() for part in entry.split(",") if part.strip()])
    # Comma-separated list of parameter names to ignore.
    ignore_list: list[str] | None = None
    if ignore_params is not None:
        ignore_list = [p.strip() for p in ignore_params.split(",") if p.strip()]
    # Comma-separated list of decorators treated as transparent.
    transparent_list: list[str] | None = None
    if transparent_decorators is not None:
        transparent_list = [
            p.strip() for p in transparent_decorators.split(",") if p.strip()
        ]
    if strictness is not None and strictness not in {"high", "low"}:
        raise typer.BadParameter("strictness must be 'high' or 'low'")
    if synthesis_protocols_kind not in {"dataclass", "protocol"}:
        raise typer.BadParameter(
            "synthesis-protocols-kind must be 'dataclass' or 'protocol'"
        )

    # Unless --no-timestamp, artifacts go into out_dir/<UTC timestamp>/
    # and out_dir/LATEST.txt records the newest snapshot name.
    output_root = out_dir
    timestamp = None
    if not no_timestamp:
        timestamp = datetime.now(tz=timezone.utc).strftime("%Y%m%d_%H%M%S")
        output_root = out_dir / timestamp
        out_dir.mkdir(parents=True, exist_ok=True)
        (out_dir / "LATEST.txt").write_text(timestamp)
    output_root.mkdir(parents=True, exist_ok=True)

    # Fixed artifact file names inside the snapshot directory.
    report_path = output_root / "dataflow_report.md"
    dot_path = output_root / "dataflow_graph.dot"
    plan_path = output_root / "synthesis_plan.json"
    protocol_path = output_root / "protocol_stubs.py"
    refactor_plan_path = output_root / "refactor_plan.json"

    payload: dict[str, Any] = {
        "paths": [str(p) for p in paths],
        "root": str(root),
        "config": str(config) if config is not None else None,
        "report": str(report_path),
        "dot": str(dot_path),
        "fail_on_violations": fail_on_violations,
        "no_recursive": no_recursive,
        "max_components": max_components,
        "type_audit_report": type_audit_report,
        "type_audit_max": type_audit_max,
        "exclude": exclude_dirs,
        "ignore_params": ignore_list,
        "transparent_decorators": transparent_list,
        "allow_external": allow_external,
        "strictness": strictness,
        "synthesis_plan": str(plan_path),
        "synthesis_report": True,
        "synthesis_protocols": str(protocol_path),
        "synthesis_protocols_kind": synthesis_protocols_kind,
        "synthesis_max_tier": synthesis_max_tier,
        "synthesis_min_bundle_size": synthesis_min_bundle_size,
        "synthesis_allow_singletons": synthesis_allow_singletons,
        "refactor_plan": refactor_plan,
        "refactor_plan_json": str(refactor_plan_path) if refactor_plan else None,
    }
    result = run_command(CommandRequest(DATAFLOW_COMMAND, [payload]))
    # Echo where the snapshot landed (only for timestamped runs).
    if timestamp:
        typer.echo(f"Snapshot: {output_root}")
    typer.echo(f"- {report_path}")
    typer.echo(f"- {dot_path}")
    typer.echo(f"- {plan_path}")
    typer.echo(f"- {protocol_path}")
    if refactor_plan:
        typer.echo(f"- {refactor_plan_path}")
    raise typer.Exit(code=int(result.get("exit_code", 0)))
|
|
423
|
+
|
|
424
|
+
|
|
425
|
+
@app.command("synthesis-plan")
def synthesis_plan(
    input_path: Optional[Path] = typer.Option(
        None, "--input", help="JSON payload describing bundles and synthesis settings."
    ),
    output_path: Optional[Path] = typer.Option(
        None, "--output", help="Write synthesis plan JSON to this path."
    ),
) -> None:
    """Generate a synthesis plan from a JSON payload (prototype)."""
    # With no --input, an empty payload is sent (server defaults apply).
    payload: dict[str, Any] = {}
    if input_path is not None:
        try:
            payload = json.loads(input_path.read_text())
        except json.JSONDecodeError as exc:
            raise typer.BadParameter(f"Invalid JSON payload: {exc}") from exc
    result = run_command(CommandRequest(SYNTHESIS_COMMAND, [payload]))
    rendered = json.dumps(result, indent=2, sort_keys=True)
    if output_path is not None:
        output_path.write_text(rendered)
    else:
        typer.echo(rendered)
|
|
447
|
+
|
|
448
|
+
|
|
449
|
+
@app.command("refactor-protocol")
def refactor_protocol(
    input_path: Optional[Path] = typer.Option(
        None, "--input", help="JSON payload describing the refactor request."
    ),
    output_path: Optional[Path] = typer.Option(
        None, "--output", help="Write refactor response JSON to this path."
    ),
    protocol_name: Optional[str] = typer.Option(None, "--protocol-name"),
    bundle: Optional[List[str]] = typer.Option(None, "--bundle"),
    field: Optional[List[str]] = typer.Option(
        None,
        "--field",
        help="Field spec in 'name:type' form (repeatable).",
    ),
    target_path: Optional[Path] = typer.Option(None, "--target-path"),
    target_functions: Optional[List[str]] = typer.Option(None, "--target-function"),
    rationale: Optional[str] = typer.Option(None, "--rationale"),
) -> None:
    """Generate protocol refactor edits from a JSON payload (prototype).

    Two input modes: ``--input`` supplies the whole request payload as
    JSON; otherwise the payload is assembled from the individual flags
    (``--protocol-name`` and ``--target-path`` are then required).
    The server response is printed, or written to ``--output``.
    """
    payload: dict[str, Any] = {}
    if input_path is not None:
        try:
            payload = json.loads(input_path.read_text())
        except json.JSONDecodeError as exc:
            raise typer.BadParameter(f"Invalid JSON payload: {exc}") from exc
    else:
        if protocol_name is None or target_path is None:
            raise typer.BadParameter(
                "Provide --protocol-name and --target-path or use --input."
            )
        # Parse repeatable --field specs of the form "name[:type]";
        # an empty/omitted type becomes None, blank names are skipped.
        field_specs: list[dict[str, str | None]] = []
        for spec in field or []:
            name, _, hint = spec.partition(":")
            name = name.strip()
            if not name:
                continue
            type_hint = hint.strip() or None
            field_specs.append({"name": name, "type_hint": type_hint})
        # Without an explicit --bundle, default it to the field names.
        if not bundle and field_specs:
            bundle = [spec["name"] for spec in field_specs]
        payload = {
            "protocol_name": protocol_name,
            "bundle": bundle or [],
            "fields": field_specs,
            "target_path": str(target_path),
            "target_functions": target_functions or [],
            "rationale": rationale,
        }
    result = run_command(CommandRequest(REFACTOR_COMMAND, [payload]))
    output = json.dumps(result, indent=2, sort_keys=True)
    if output_path is None:
        typer.echo(output)
    else:
        output_path.write_text(output)
|
gabion/config.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from typing import Any
|
|
5
|
+
import tomllib
|
|
6
|
+
|
|
7
|
+
DEFAULT_CONFIG_NAME = "gabion.toml"
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def _load_toml(path: Path) -> dict[str, Any]:
|
|
11
|
+
try:
|
|
12
|
+
raw = path.read_text(encoding="utf-8")
|
|
13
|
+
except FileNotFoundError:
|
|
14
|
+
return {}
|
|
15
|
+
except OSError:
|
|
16
|
+
return {}
|
|
17
|
+
try:
|
|
18
|
+
data = tomllib.loads(raw)
|
|
19
|
+
except Exception:
|
|
20
|
+
return {}
|
|
21
|
+
return data if isinstance(data, dict) else {}
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def load_config(root: Path | None = None, config_path: Path | None = None) -> dict[str, Any]:
    """Load gabion.toml from *config_path*, or from *root* (default: cwd)."""
    if config_path is None:
        base = Path.cwd() if root is None else root
        config_path = base / DEFAULT_CONFIG_NAME
    return _load_toml(config_path)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def dataflow_defaults(
    root: Path | None = None, config_path: Path | None = None
) -> dict[str, Any]:
    """Return the ``[dataflow]`` table of the config, or {} when absent."""
    section = load_config(root=root, config_path=config_path).get("dataflow", {})
    return section if isinstance(section, dict) else {}
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def merge_payload(payload: dict[str, Any], defaults: dict[str, Any]) -> dict[str, Any]:
    """Overlay *payload* onto *defaults*, skipping None-valued entries.

    None marks "option not supplied", so the default wins for it;
    falsy-but-set values (False, 0, "") do override defaults.
    """
    merged = dict(defaults)
    merged.update({key: value for key, value in payload.items() if value is not None})
    return merged
|
gabion/lsp_client.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import subprocess
|
|
5
|
+
import sys
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from dataclasses import dataclass
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class LspClientError(RuntimeError):
    """Raised when the LSP subprocess misbehaves.

    Covers framing errors, a closed stream, JSON-RPC error responses,
    non-zero server exit, and unexpected stderr output.
    """
    pass
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@dataclass(frozen=True)
class CommandRequest:
    """A ``workspace/executeCommand`` request for the gabion LSP server."""

    # LSP command identifier, e.g. "gabion.dataflowAudit".
    command: str
    # JSON-serializable positional arguments; sent as [] when None.
    arguments: list[dict] | None = None
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def _read_rpc(stream) -> dict:
|
|
22
|
+
header = b""
|
|
23
|
+
while b"\r\n\r\n" not in header:
|
|
24
|
+
chunk = stream.read(1)
|
|
25
|
+
if not chunk:
|
|
26
|
+
raise LspClientError("LSP stream closed")
|
|
27
|
+
header += chunk
|
|
28
|
+
head, _, rest = header.partition(b"\r\n\r\n")
|
|
29
|
+
length = 0
|
|
30
|
+
for line in head.split(b"\r\n"):
|
|
31
|
+
if line.lower().startswith(b"content-length:"):
|
|
32
|
+
length = int(line.split(b":", 1)[1].strip())
|
|
33
|
+
break
|
|
34
|
+
if length <= 0:
|
|
35
|
+
raise LspClientError("Invalid LSP Content-Length")
|
|
36
|
+
body = rest
|
|
37
|
+
if len(body) < length:
|
|
38
|
+
body += stream.read(length - len(body))
|
|
39
|
+
return json.loads(body.decode("utf-8"))
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _write_rpc(stream, message: dict) -> None:
|
|
43
|
+
payload = json.dumps(message).encode("utf-8")
|
|
44
|
+
header = f"Content-Length: {len(payload)}\r\n\r\n".encode("utf-8")
|
|
45
|
+
stream.write(header + payload)
|
|
46
|
+
stream.flush()
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def _read_response(stream, request_id: int) -> dict:
    """Read messages until the response matching *request_id* arrives.

    Notifications and unrelated messages are silently discarded.
    """
    message = _read_rpc(stream)
    while message.get("id") != request_id:
        message = _read_rpc(stream)
    return message
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def run_command(
    request: CommandRequest,
    *,
    root: Path | None = None,
    timeout: float = 5.0,
) -> dict[str, Any]:
    """Spawn a gabion LSP server, execute one command, and shut it down.

    Performs the full LSP lifecycle over stdio: initialize ->
    initialized -> workspace/executeCommand -> shutdown -> exit.
    Returns the command's ``result`` dict (or {}). Raises
    LspClientError on a JSON-RPC error response, a non-zero server
    exit, or any stderr output.

    NOTE(review): *timeout* only bounds the final communicate() after
    "exit" is sent; the framed reads above it block indefinitely.
    """
    proc = subprocess.Popen(
        [sys.executable, "-m", "gabion.server"],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    assert proc.stdin is not None
    assert proc.stdout is not None

    # LSP handshake: initialize (request) then initialized (notification).
    root_uri = (root or Path.cwd()).resolve().as_uri()
    initialize_id = 1
    _write_rpc(
        proc.stdin,
        {
            "jsonrpc": "2.0",
            "id": initialize_id,
            "method": "initialize",
            "params": {"rootUri": root_uri, "capabilities": {}},
        },
    )
    _read_response(proc.stdout, initialize_id)
    _write_rpc(proc.stdin, {"jsonrpc": "2.0", "method": "initialized", "params": {}})

    # The single command this client exists to run.
    cmd_id = 2
    _write_rpc(
        proc.stdin,
        {
            "jsonrpc": "2.0",
            "id": cmd_id,
            "method": "workspace/executeCommand",
            "params": {"command": request.command, "arguments": request.arguments or []},
        },
    )
    response = _read_response(proc.stdout, cmd_id)

    # Orderly teardown: shutdown (request) then exit (notification).
    shutdown_id = 3
    _write_rpc(proc.stdin, {"jsonrpc": "2.0", "id": shutdown_id, "method": "shutdown"})
    _read_response(proc.stdout, shutdown_id)
    _write_rpc(proc.stdin, {"jsonrpc": "2.0", "method": "exit"})
    out, err = proc.communicate(timeout=timeout)
    # Surface failures in priority order: protocol error, bad exit
    # code, then any stray stderr output (treated as an error).
    if response.get("error"):
        raise LspClientError(f"LSP error: {response['error']}")
    if proc.returncode not in (0, None):
        detail = err.decode("utf-8", errors="replace").strip()
        raise LspClientError(f"LSP server failed (exit {proc.returncode}): {detail}")
    if err:
        detail = err.decode("utf-8", errors="replace").strip()
        if detail:
            raise LspClientError(f"LSP server error output: {detail}")
    return response.get("result", {})
|